@@ -12,8 +12,8 @@
#include <crypto/internal/skcipher.h>
#include <crypto/scatterwalk.h>
#include <linux/module.h>
-#include <asm/fpu/api.h>
#include <asm/cpu_device_id.h>
+#include <asm/simd.h>
#define AEGIS128_BLOCK_ALIGN 16
#define AEGIS128_BLOCK_SIZE 16
@@ -85,15 +85,19 @@ static void crypto_aegis128_aesni_process_ad(
if (pos > 0) {
unsigned int fill = AEGIS128_BLOCK_SIZE - pos;
memcpy(buf.bytes + pos, src, fill);
- crypto_aegis128_aesni_ad(state,
+ kernel_fpu_begin();
+ crypto_aegis128_aesni_ad(state->blocks,
AEGIS128_BLOCK_SIZE,
buf.bytes);
+ kernel_fpu_end();
pos = 0;
left -= fill;
src += fill;
}
- crypto_aegis128_aesni_ad(state, left, src);
+ kernel_fpu_begin();
+ crypto_aegis128_aesni_ad(state->blocks, left, src);
+ kernel_fpu_end();
src += left & ~(AEGIS128_BLOCK_SIZE - 1);
left &= AEGIS128_BLOCK_SIZE - 1;
@@ -110,7 +114,9 @@ static void crypto_aegis128_aesni_process_ad(
if (pos > 0) {
memset(buf.bytes + pos, 0, AEGIS128_BLOCK_SIZE - pos);
- crypto_aegis128_aesni_ad(state, AEGIS128_BLOCK_SIZE, buf.bytes);
+ kernel_fpu_begin();
+ crypto_aegis128_aesni_ad(state->blocks, AEGIS128_BLOCK_SIZE, buf.bytes);
+ kernel_fpu_end();
}
}
@@ -118,16 +124,31 @@ static void crypto_aegis128_aesni_process_crypt(
struct aegis_state *state, struct skcipher_walk *walk,
const struct aegis_crypt_ops *ops)
{
- while (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
- ops->crypt_blocks(state,
- round_down(walk->nbytes, AEGIS128_BLOCK_SIZE),
- walk->src.virt.addr, walk->dst.virt.addr);
- skcipher_walk_done(walk, walk->nbytes % AEGIS128_BLOCK_SIZE);
+ if (walk->nbytes >= AEGIS128_BLOCK_SIZE) {
+ kernel_fpu_begin();
+ for (;;) {
+ unsigned int chunk = min(walk->nbytes, 4096U);
+
+ chunk = round_down(chunk, AEGIS128_BLOCK_SIZE);
+
+ ops->crypt_blocks(state->blocks, chunk,
+ walk->src.virt.addr, walk->dst.virt.addr);
+
+ skcipher_walk_done(walk, walk->nbytes - chunk);
+
+ if (walk->nbytes < AEGIS128_BLOCK_SIZE)
+ break;
+
+ kernel_fpu_yield();
+ }
+ kernel_fpu_end();
}
if (walk->nbytes) {
- ops->crypt_tail(state, walk->nbytes, walk->src.virt.addr,
+ kernel_fpu_begin();
+ ops->crypt_tail(state->blocks, walk->nbytes, walk->src.virt.addr,
walk->dst.virt.addr);
+ kernel_fpu_end();
skcipher_walk_done(walk, 0);
}
}
@@ -172,15 +193,17 @@ static void crypto_aegis128_aesni_crypt(struct aead_request *req,
struct skcipher_walk walk;
struct aegis_state state;
- ops->skcipher_walk_init(&walk, req, true);
+ ops->skcipher_walk_init(&walk, req, false);
kernel_fpu_begin();
+ crypto_aegis128_aesni_init(&state.blocks, ctx->key.bytes, req->iv);
+ kernel_fpu_end();
- crypto_aegis128_aesni_init(&state, ctx->key.bytes, req->iv);
crypto_aegis128_aesni_process_ad(&state, req->src, req->assoclen);
crypto_aegis128_aesni_process_crypt(&state, &walk, ops);
- crypto_aegis128_aesni_final(&state, tag_xor, req->assoclen, cryptlen);
+ kernel_fpu_begin();
+ crypto_aegis128_aesni_final(&state.blocks, tag_xor, req->assoclen, cryptlen);
kernel_fpu_end();
}
Make kernel_fpu_begin() and kernel_fpu_end() calls around each assembly
language function that uses FPU context, rather than around the entire
set (init, ad, crypt, final).

During encryption, periodically check if the kernel scheduler wants to
run something else on the CPU. If so, yield the kernel FPU context and
let the scheduler intervene. Associated data processing is not limited
in this way.

Allow the skcipher_walk functions to sleep again, since they are no
longer called inside FPU context.

Fixes: 1d373d4e8e15 ("crypto: x86 - Add optimized AEGIS implementations")
Fixes: ba6771c0a0bc ("crypto: x86/aegis - fix handling chunked inputs and MAY_SLEEP")
Signed-off-by: Robert Elliott <elliott@hpe.com>
---
 arch/x86/crypto/aegis128-aesni-glue.c | 49 ++++++++++++++++++++-------
 1 file changed, 36 insertions(+), 13 deletions(-)
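
Note (illustrative, not part of the patch): kernel_fpu_yield() used in
the crypt loop above is not defined in this file, so it is presumably
introduced by an earlier patch in this series. A minimal sketch of what
such a helper might look like, assuming it simply drops and re-takes the
FPU context whenever the scheduler has pending work:

	/* Sketch only; the real helper in the series may differ. */
	#include <linux/sched.h>	/* need_resched() */
	#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */

	static inline void kernel_fpu_yield(void)
	{
		if (need_resched()) {
			/*
			 * kernel_fpu_end() re-enables preemption, so any
			 * pending reschedule happens here before the FPU
			 * context is re-acquired for the caller's loop.
			 */
			kernel_fpu_end();
			kernel_fpu_begin();
		}
	}

With a helper along these lines, the encrypt/decrypt loop can stay
structured around a single kernel_fpu_begin()/kernel_fpu_end() pair
while still limiting how long the FPU is held to roughly one 4 KiB
chunk at a time.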