[v4] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption

Message ID Y9t1a2HkqlPT+Zfn@gondor.apana.org.au
State Accepted
Commit 4e4a08868f15897ca236528771c3733fded42c62
Series [v4] crypto: arm64/sm4-gcm - Fix possible crash in GCM cryption

Commit Message

Herbert Xu Feb. 2, 2023, 8:33 a.m. UTC
On Wed, Feb 01, 2023 at 08:31:33PM +0800, Tianjia Zhang wrote:
>
> +	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
> +			       walk->src.virt.addr, iv, walk->nbytes, ghash,
> +			       ctx->ghash_table, (const u8 *)&lengths);

I still think this is error-prone.  When walk->nbytes == 0,
walk->src and walk->dst are undefined.  Sure, you could argue
that the underlying assembly code won't touch the values, but
accessing uninitialised memory, even if only to throw the values
away, is still a bit icky.
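
For illustration, here is a minimal sketch of the shape I have in
mind (with a hypothetical process() helper standing in for the real
SM4 assembly entry point): walk->src and walk->dst are only ever
read while walk->nbytes != 0, i.e. while the walker actually has a
chunk mapped:

	while (walk->nbytes) {
		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;

		/* a chunk is mapped here, so the pointers are valid */
		process(walk->dst.virt.addr, walk->src.virt.addr,
			walk->nbytes - tail);

		err = skcipher_walk_done(walk, tail);
	}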

Anyway, here's my attempt at rewriting the gcm loop:

---8<---
An often overlooked aspect of the skcipher walker API is that an
error is not just indicated by a non-zero return value, but by the
fact that walk->nbytes is zero.

Thus it is an error to call skcipher_walk_done after getting back
walk->nbytes == 0 from the previous interaction with the walker.

This is because when walk->nbytes is zero the walker is left in
an undefined state and any further calls to it may try to free
uninitialised stack memory.

The sm4 arm64 gcm code gets this wrong and ends up calling
skcipher_walk_done even when walk->nbytes is zero.

This patch rewrites the loop in a form that resembles other callers.

Reported-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
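
For reference, a condensed sketch of the rewritten loop (a
hypothetical do_crypt() helper stands in for the real
sm4_ce_pmull_gcm_crypt() calls, and the kernel_neon_begin()/
kernel_neon_end() bracketing is elided).  The key property is that
a failed walk leaves walk.nbytes == 0, so the loop body is skipped
and err is propagated as-is:

	err = skcipher_walk_aead_encrypt(&walk, req, false);

	while (walk.nbytes) {
		unsigned int tail = walk.nbytes % SM4_BLOCK_SIZE;

		if (walk.nbytes == walk.total) {
			/* last chunk: process the tail too, finalise */
			do_crypt(walk.dst.virt.addr, walk.src.virt.addr,
				 walk.nbytes, true);
			return skcipher_walk_done(&walk, 0);
		}

		/* intermediate chunk: full blocks only */
		do_crypt(walk.dst.virt.addr, walk.src.virt.addr,
			 walk.nbytes - tail, false);

		err = skcipher_walk_done(&walk, tail);
	}

	/* empty input, or the walk failed: finalise with no data */
	do_crypt(NULL, NULL, 0, true);
	return err;

In the real code the NEON context is dropped around each
skcipher_walk_done() call, since the walker may sleep and NEON
cannot be held across a preemption point.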

Comments

tianjia.zhang Feb. 3, 2023, 2:44 a.m. UTC | #1
Hi Herbert,

On 2/2/23 4:33 PM, Herbert Xu wrote:
> On Wed, Feb 01, 2023 at 08:31:33PM +0800, Tianjia Zhang wrote:
>>
>> +	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, walk->dst.virt.addr,
>> +			       walk->src.virt.addr, iv, walk->nbytes, ghash,
>> +			       ctx->ghash_table, (const u8 *)&lengths);
> 
> I still think this is error-prone.  When walk->nbytes == 0,
> walk->src and walk->dst are undefined.  Sure, you could argue
> that the underlying assembly code won't touch the values, but
> accessing uninitialised memory, even if only to throw the values
> away, is still a bit icky.

You're right, whether it is used or not, accessing an undefined
pointer is always ugly. I learned a lot from this.

> 
> Anyway, here's my attempt at rewriting the gcm loop:
> 
> ---8<---
> An often overlooked aspect of the skcipher walker API is that an
> error is not just indicated by a non-zero return value, but by the
> fact that walk->nbytes is zero.
> 
> Thus it is an error to call skcipher_walk_done after getting back
> walk->nbytes == 0 from the previous interaction with the walker.
> 
> This is because when walk->nbytes is zero the walker is left in
> an undefined state and any further calls to it may try to free
> uninitialised stack memory.
> 
> The sm4 arm64 gcm code gets this wrong and ends up calling
> skcipher_walk_done even when walk->nbytes is zero.
> 
> This patch rewrites the loop in a form that resembles other callers.
> 
> Reported-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>
> Fixes: ae1b83c7d572 ("crypto: arm64/sm4 - add CE implementation for GCM mode")
> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>

Thanks for the fix, this patch works fine for me, so

Tested-by: Tianjia Zhang <tianjia.zhang@linux.alibaba.com>

> [quoted patch snipped; identical to the diff in the Patch section below]

Patch

diff --git a/arch/arm64/crypto/sm4-ce-gcm-glue.c b/arch/arm64/crypto/sm4-ce-gcm-glue.c
index c450a2025ca9..73bfb6972d3a 100644
--- a/arch/arm64/crypto/sm4-ce-gcm-glue.c
+++ b/arch/arm64/crypto/sm4-ce-gcm-glue.c
@@ -135,22 +135,23 @@ static void gcm_calculate_auth_mac(struct aead_request *req, u8 ghash[])
 }
 
 static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
-		     struct sm4_gcm_ctx *ctx, u8 ghash[],
+		     u8 ghash[], int err,
 		     void (*sm4_ce_pmull_gcm_crypt)(const u32 *rkey_enc,
 				u8 *dst, const u8 *src, u8 *iv,
 				unsigned int nbytes, u8 *ghash,
 				const u8 *ghash_table, const u8 *lengths))
 {
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) iv[SM4_BLOCK_SIZE];
 	be128 __aligned(8) lengths;
-	int err;
 
 	memset(ghash, 0, SM4_BLOCK_SIZE);
 
 	lengths.a = cpu_to_be64(req->assoclen * 8);
 	lengths.b = cpu_to_be64(walk->total * 8);
 
-	memcpy(iv, walk->iv, GCM_IV_SIZE);
+	memcpy(iv, req->iv, GCM_IV_SIZE);
 	put_unaligned_be32(2, iv + GCM_IV_SIZE);
 
 	kernel_neon_begin();
@@ -158,49 +159,51 @@ static int gcm_crypt(struct aead_request *req, struct skcipher_walk *walk,
 	if (req->assoclen)
 		gcm_calculate_auth_mac(req, ghash);
 
-	do {
+	while (walk->nbytes) {
 		unsigned int tail = walk->nbytes % SM4_BLOCK_SIZE;
 		const u8 *src = walk->src.virt.addr;
 		u8 *dst = walk->dst.virt.addr;
 
 		if (walk->nbytes == walk->total) {
-			tail = 0;
-
 			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
 					       walk->nbytes, ghash,
 					       ctx->ghash_table,
 					       (const u8 *)&lengths);
-		} else if (walk->nbytes - tail) {
-			sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
-					       walk->nbytes - tail, ghash,
-					       ctx->ghash_table, NULL);
+
+			kernel_neon_end();
+
+			return skcipher_walk_done(walk, 0);
 		}
 
+		sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, dst, src, iv,
+				       walk->nbytes - tail, ghash,
+				       ctx->ghash_table, NULL);
+
 		kernel_neon_end();
 
 		err = skcipher_walk_done(walk, tail);
-		if (err)
-			return err;
-		if (walk->nbytes)
-			kernel_neon_begin();
-	} while (walk->nbytes > 0);
 
-	return 0;
+		kernel_neon_begin();
+	}
+
+	sm4_ce_pmull_gcm_crypt(ctx->key.rkey_enc, NULL, NULL, iv,
+			       walk->nbytes, ghash, ctx->ghash_table,
+			       (const u8 *)&lengths);
+
+	kernel_neon_end();
+
+	return err;
 }
 
 static int gcm_encrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_aead_encrypt(&walk, req, false);
-	if (err)
-		return err;
-
-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_enc);
+	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_enc);
 	if (err)
 		return err;
 
@@ -215,17 +218,13 @@ static int gcm_decrypt(struct aead_request *req)
 {
 	struct crypto_aead *aead = crypto_aead_reqtfm(req);
 	unsigned int authsize = crypto_aead_authsize(aead);
-	struct sm4_gcm_ctx *ctx = crypto_aead_ctx(aead);
 	u8 __aligned(8) ghash[SM4_BLOCK_SIZE];
 	u8 authtag[SM4_BLOCK_SIZE];
 	struct skcipher_walk walk;
 	int err;
 
 	err = skcipher_walk_aead_decrypt(&walk, req, false);
-	if (err)
-		return err;
-
-	err = gcm_crypt(req, &walk, ctx, ghash, sm4_ce_pmull_gcm_dec);
+	err = gcm_crypt(req, &walk, ghash, err, sm4_ce_pmull_gcm_dec);
 	if (err)
 		return err;