[v2,2/2] crypto: hash - Use nth_page instead of doing it by hand

Message ID a68366725ab6130fea3a5e3257e92c8109b7f86a.1741753576.git.herbert@gondor.apana.org.au
State New
Series crypto: Use nth_page instead of doing it by hand

Commit Message

Herbert Xu March 12, 2025, 4:30 a.m. UTC
Use nth_page instead of adding n to the page pointer.

This also fixes a real bug in shash_ahash_digest where the check
for contiguous hash data may be incorrect in the presence of
highmem.  This could result in an incorrect hash or worse.

Fixes: 5f7082ed4f48 ("crypto: hash - Export shash through hash")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
 crypto/ahash.c | 42 +++++++++++++++++++++++++++---------------
 1 file changed, 27 insertions(+), 15 deletions(-)
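
For context: nth_page() differs from plain pointer arithmetic only on
SPARSEMEM kernels without VMEMMAP, where the struct page arrays of
different memory sections are not virtually contiguous.  A simplified
sketch of the definition as found in include/linux/mm.h around this
time:

        /*
         * With SPARSEMEM and no VMEMMAP, struct pages of different
         * memory sections are not virtually contiguous, so "page + n"
         * can land in the wrong section.  nth_page() goes through the
         * pfn instead.
         */
        #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
        #define nth_page(page, n)  pfn_to_page(page_to_pfn((page)) + (n))
        #else
        #define nth_page(page, n)  ((page) + (n))
        #endif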

Comments

Eric Biggers March 13, 2025, 3:07 a.m. UTC | #1
On Thu, Mar 13, 2025 at 10:36:44AM +0800, Herbert Xu wrote:
> On Wed, Mar 12, 2025 at 01:09:08PM -0700, Eric Biggers wrote:
> >
> > It seems that the "real bug" mentioned above is the case of
> > scatterlist::offset > PAGE_SIZE.  That's unrelated to the nth_page() fix, which
> > seems to be for scatterlist elements that span physical memory sections.  Also,
> 
> Alright I'll try to split it up.
> 
> > Note that there is also page arithmetic being done in scatterwalk_done_dst() and
> > scomp_acomp_comp_decomp().  Those presumably need the nth_page() fix too.
> 
> Thanks, I had missed the flushing code in scatterwalk.
> 
> As for scomp, yes, that's already fixed in my acomp series, which
> I will repost soon.
> 
> > scomp_acomp_comp_decomp() also assumes that if the first page in a given
> > scatterlist element is lowmem, then any additional pages are lowmem too.  That
> 
> Yes, I've fixed that by changing it to test the last page rather
> than the first, assuming that highmem indeed comes after lowmem.
> 
> > sounds like another potentially wrong assumption.  Can scatterlist elements span
> > memory zones?  Or just physical memory sections?
> 
> Theoretically it can cross anything.  Check out the block merging
> code in __blk_rq_map_sg; it tries to merge any physically contiguous
> pages.

Actually the block layer avoids this edge case.  See the comment above struct
bio_vec, and the corresponding code in bvec_try_merge_page():

        if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
                return false;

- Eric
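
The invariant behind that check is that every page merged into a
bio_vec must have a struct page that continues directly from the pages
the vector already covers.  A minimal sketch of the rule, modelled on
the bvec_try_merge_page() condition quoted above (the helper name and
exact form are illustrative, not the kernel's code):

        /* Illustrative helper: a page may only be merged into an
         * existing bio_vec if its struct page follows on directly from
         * the last page the vector already covers. */
        static bool bvec_page_is_contiguous(const struct bio_vec *bv,
                                            struct page *page,
                                            unsigned int off)
        {
                unsigned int bv_end = bv->bv_offset + bv->bv_len;

                return bv->bv_page + bv_end / PAGE_SIZE ==
                       page + off / PAGE_SIZE;
        }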
Herbert Xu March 13, 2025, 4:04 a.m. UTC | #2
On Thu, Mar 13, 2025 at 03:07:41AM +0000, Eric Biggers wrote:
>
> Actually the block layer avoids this edge case.  See the comment above struct
> bio_vec, and the corresponding code in bvec_try_merge_page():
> 
>         if (bv->bv_page + bv_end / PAGE_SIZE != page + off / PAGE_SIZE)
>                 return false;

Well spotted.  It looks like block scatterlists don't need nth_page:

commit 52d52d1c98a90cfe860b83498e4b6074aad95c15
Author: Christoph Hellwig <hch@lst.de>
Date:   Thu Apr 11 08:23:31 2019 +0200

    block: only allow contiguous page structs in a bio_vec

That still leaves the question of why folio_page uses nth_page.
It seems that the only reason is that the same function is also
used by folio_next, which can cross section boundaries.

So I will get rid of the nth_page changes, and steer clear of
folio_page too.

Thanks,
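
For reference, the folio helpers in question look roughly like this in
include/linux/mm.h of this era (simplified):

        /* folio_page() goes through nth_page() because folio_next()
         * reuses it to step one page past the current folio, which can
         * cross a memory section boundary on SPARSEMEM without
         * VMEMMAP. */
        #define folio_page(folio, n)    nth_page(&(folio)->page, n)

        static inline struct folio *folio_next(struct folio *folio)
        {
                return (struct folio *)folio_page(folio,
                                                  folio_nr_pages(folio));
        }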

Patch

diff --git a/crypto/ahash.c b/crypto/ahash.c
index 9c26175c21a8..aff0d3387f3a 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -16,6 +16,7 @@ 
 #include <linux/cryptouser.h>
 #include <linux/err.h>
 #include <linux/kernel.h>
+#include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
@@ -79,7 +80,7 @@  static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 
 	sg = walk->sg;
 	walk->offset = sg->offset;
-	walk->pg = sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
+	walk->pg = nth_page(sg_page(walk->sg), walk->offset >> PAGE_SHIFT);
 	walk->offset = offset_in_page(walk->offset);
 	walk->entrylen = sg->length;
 
@@ -201,25 +202,36 @@  int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	unsigned int nbytes = req->nbytes;
 	struct scatterlist *sg;
 	unsigned int offset;
+	struct page *page;
+	const u8 *data;
 	int err;
 
-	if (ahash_request_isvirt(req))
-		return crypto_shash_digest(desc, req->svirt, nbytes,
-					   req->result);
+	data = req->svirt;
+	if (!nbytes || ahash_request_isvirt(req))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
 
-	if (nbytes &&
-	    (sg = req->src, offset = sg->offset,
-	     nbytes <= min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset))) {
-		void *data;
+	sg = req->src;
+	if (nbytes > sg->length)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
 
-		data = kmap_local_page(sg_page(sg));
-		err = crypto_shash_digest(desc, data + offset, nbytes,
-					  req->result);
-		kunmap_local(data);
-	} else
-		err = crypto_shash_init(desc) ?:
-		      shash_ahash_finup(req, desc);
+	page = sg_page(sg);
+	offset = sg->offset;
+	data = lowmem_page_address(page) + offset;
+	if (!IS_ENABLED(CONFIG_HIGHMEM))
+		return crypto_shash_digest(desc, data, nbytes, req->result);
 
+	page = nth_page(page, offset >> PAGE_SHIFT);
+	offset = offset_in_page(offset);
+
+	if (nbytes > (unsigned int)PAGE_SIZE - offset)
+		return crypto_shash_init(desc) ?:
+		       shash_ahash_finup(req, desc);
+
+	data = kmap_local_page(page);
+	err = crypto_shash_digest(desc, data + offset, nbytes,
+				  req->result);
+	kunmap_local(data);
 	return err;
 }
 EXPORT_SYMBOL_GPL(shash_ahash_digest);