| Message ID | 20201025143119.1054168-3-nivedita@alum.mit.edu |
|---|---|
| State | Superseded |
| Series | [v4,1/6] crypto: lib/sha256 - Use memzero_explicit() for clearing state |
On Sun, 25 Oct 2020 at 15:31, Arvind Sankar <nivedita@alum.mit.edu> wrote:
>
> Without the barrier_data() inside memzero_explicit(), the compiler may
> optimize away the state-clearing if it can tell that the state is not
> used afterwards.
>
> Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>

Acked-by: Ard Biesheuvel <ardb@kernel.org>

I agree with Eric that, even though there are cases where it is unlikely
that the compiler could elide an ordinary memset() or struct assignment
(even under LTO), using memzero_explicit() is better in these cases, as it
also clarifies the intent of the operation, and doesn't result in worse
code now that memzero_explicit() is a static inline around memset() and a
barrier.
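For reference, the memzero_explicit() Ard mentions is, at this point in the tree, essentially an ordinary memset() followed by a data barrier. The sketch below is a userspace approximation of the relevant kernel helpers (the real definitions live in include/linux/string.h and the compiler headers), intended only to show why the clearing stores cannot be optimized away; it is not a verbatim copy of the kernel sources:

```c
#include <stddef.h>
#include <string.h>

/*
 * Approximation of the kernel's barrier_data(): an empty asm statement
 * that takes the pointer as an input and clobbers memory, so the
 * compiler must assume the bytes behind it may be read afterwards.
 */
#define barrier_data(ptr) __asm__ __volatile__("" : : "r"(ptr) : "memory")

/*
 * Approximation of the kernel's memzero_explicit(): a plain memset()
 * whose stores are kept alive by the barrier, even when the cleared
 * object is provably never read again.
 */
static inline void memzero_explicit(void *s, size_t count)
{
	memset(s, 0, count);
	barrier_data(s);
}
```

So the conversion emits the same stores as the struct assignments it replaces; the barrier only prevents the compiler from proving those stores dead.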
diff --git a/arch/arm64/crypto/ghash-ce-glue.c b/arch/arm64/crypto/ghash-ce-glue.c
index 8536008e3e35..2427e2f3a9a1 100644
--- a/arch/arm64/crypto/ghash-ce-glue.c
+++ b/arch/arm64/crypto/ghash-ce-glue.c
@@ -168,7 +168,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 	put_unaligned_be64(ctx->digest[1], dst);
 	put_unaligned_be64(ctx->digest[0], dst + 8);
 
-	*ctx = (struct ghash_desc_ctx){};
+	memzero_explicit(ctx, sizeof(*ctx));
 	return 0;
 }
 
diff --git a/arch/arm64/crypto/poly1305-glue.c b/arch/arm64/crypto/poly1305-glue.c
index f33ada70c4ed..683de671741a 100644
--- a/arch/arm64/crypto/poly1305-glue.c
+++ b/arch/arm64/crypto/poly1305-glue.c
@@ -177,7 +177,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 	}
 
 	poly1305_emit(&dctx->h, dst, dctx->s);
-	*dctx = (struct poly1305_desc_ctx){};
+	memzero_explicit(dctx, sizeof(*dctx));
 }
 EXPORT_SYMBOL(poly1305_final_arch);
 
diff --git a/arch/arm64/crypto/sha3-ce-glue.c b/arch/arm64/crypto/sha3-ce-glue.c
index 9a4bbfc45f40..e5a2936f0886 100644
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -94,7 +94,7 @@ static int sha3_final(struct shash_desc *desc, u8 *out)
 	if (digest_size & 4)
 		put_unaligned_le32(sctx->st[i], (__le32 *)digest);
 
-	*sctx = (struct sha3_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
 	return 0;
 }
 
diff --git a/arch/x86/crypto/poly1305_glue.c b/arch/x86/crypto/poly1305_glue.c
index e508dbd91813..64d09520d279 100644
--- a/arch/x86/crypto/poly1305_glue.c
+++ b/arch/x86/crypto/poly1305_glue.c
@@ -209,7 +209,7 @@ void poly1305_final_arch(struct poly1305_desc_ctx *dctx, u8 *dst)
 	}
 
 	poly1305_simd_emit(&dctx->h, dst, dctx->s);
-	*dctx = (struct poly1305_desc_ctx){};
+	memzero_explicit(dctx, sizeof(*dctx));
 }
 EXPORT_SYMBOL(poly1305_final_arch);
 
diff --git a/include/crypto/sha1_base.h b/include/crypto/sha1_base.h
index 20fd1f7468af..a5d6033efef7 100644
--- a/include/crypto/sha1_base.h
+++ b/include/crypto/sha1_base.h
@@ -12,6 +12,7 @@
 #include <crypto/sha.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <asm/unaligned.h>
 
@@ -101,7 +102,7 @@ static inline int sha1_base_finish(struct shash_desc *desc, u8 *out)
 	for (i = 0; i < SHA1_DIGEST_SIZE / sizeof(__be32); i++)
 		put_unaligned_be32(sctx->state[i], digest++);
 
-	*sctx = (struct sha1_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
 	return 0;
 }
 
diff --git a/include/crypto/sha256_base.h b/include/crypto/sha256_base.h
index 6ded110783ae..93f9fd21cc06 100644
--- a/include/crypto/sha256_base.h
+++ b/include/crypto/sha256_base.h
@@ -12,6 +12,7 @@
 #include <crypto/sha.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <asm/unaligned.h>
 
@@ -105,7 +106,7 @@ static inline int sha256_base_finish(struct shash_desc *desc, u8 *out)
 	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be32))
 		put_unaligned_be32(sctx->state[i], digest++);
 
-	*sctx = (struct sha256_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
 	return 0;
 }
 
diff --git a/include/crypto/sha512_base.h b/include/crypto/sha512_base.h
index fb19c77494dc..93ab73baa38e 100644
--- a/include/crypto/sha512_base.h
+++ b/include/crypto/sha512_base.h
@@ -12,6 +12,7 @@
 #include <crypto/sha.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
+#include <linux/string.h>
 
 #include <asm/unaligned.h>
 
@@ -126,7 +127,7 @@ static inline int sha512_base_finish(struct shash_desc *desc, u8 *out)
 	for (i = 0; digest_size > 0; i++, digest_size -= sizeof(__be64))
 		put_unaligned_be64(sctx->state[i], digest++);
 
-	*sctx = (struct sha512_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
 	return 0;
 }
 
diff --git a/include/crypto/sm3_base.h b/include/crypto/sm3_base.h
index 1cbf9aa1fe52..2f3a32ab97bb 100644
--- a/include/crypto/sm3_base.h
+++ b/include/crypto/sm3_base.h
@@ -13,6 +13,7 @@
 #include <crypto/sm3.h>
 #include <linux/crypto.h>
 #include <linux/module.h>
+#include <linux/string.h>
 #include <asm/unaligned.h>
 
 typedef void (sm3_block_fn)(struct sm3_state *sst, u8 const *src, int blocks);
@@ -104,7 +105,7 @@ static inline int sm3_base_finish(struct shash_desc *desc, u8 *out)
 	for (i = 0; i < SM3_DIGEST_SIZE / sizeof(__be32); i++)
 		put_unaligned_be32(sctx->state[i], digest++);
 
-	*sctx = (struct sm3_state){};
+	memzero_explicit(sctx, sizeof(*sctx));
 	return 0;
 }
Without the barrier_data() inside memzero_explicit(), the compiler may
optimize away the state-clearing if it can tell that the state is not
used afterwards.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
---
 arch/arm64/crypto/ghash-ce-glue.c | 2 +-
 arch/arm64/crypto/poly1305-glue.c | 2 +-
 arch/arm64/crypto/sha3-ce-glue.c  | 2 +-
 arch/x86/crypto/poly1305_glue.c   | 2 +-
 include/crypto/sha1_base.h        | 3 ++-
 include/crypto/sha256_base.h      | 3 ++-
 include/crypto/sha512_base.h      | 3 ++-
 include/crypto/sm3_base.h         | 3 ++-
 8 files changed, 12 insertions(+), 8 deletions(-)
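To make the failure mode described in the commit message concrete, here is a small self-contained illustration built as plain userspace C rather than kernel code; the struct and function names are hypothetical, standing in for sha256_state and the *_base_finish() helpers. Once a finalization helper is inlined into a caller whose context is a dying stack variable, a plain memset() is a removable dead store; whether a given compiler actually removes it depends on inlining and alias analysis, which is exactly why the explicit barrier is preferable:

```c
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Hypothetical digest context, standing in for sha256_state and friends. */
struct demo_ctx {
	uint32_t state[8];
	uint8_t buf[64];
};

/* Finalization in the style of the helpers touched by this series: copy
 * out the digest, then try to wipe the context with a plain memset(). */
static inline void demo_final(struct demo_ctx *ctx, uint8_t out[32])
{
	memcpy(out, ctx->state, sizeof(ctx->state));
	memset(ctx, 0, sizeof(*ctx));	/* a dead store once inlined below */
}

void demo_digest(const uint8_t *data, size_t len, uint8_t out[32])
{
	struct demo_ctx ctx = { .state = { 0x6a09e667 } };

	(void)data;	/* absorbing the input is omitted in this sketch */
	(void)len;

	demo_final(&ctx, out);
	/*
	 * ctx is never read again and goes out of scope here, so after
	 * inlining demo_final() the compiler may delete the memset() and
	 * leave the sensitive state on the stack.  Calling
	 * memzero_explicit(ctx, sizeof(*ctx)) in demo_final() instead
	 * keeps the clearing, because the barrier makes the zeroed bytes
	 * appear to be used.
	 */
}
```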