@@ -5,6 +5,7 @@
  * Copyright (c) 2022 Taehee Yoo <ap420073@gmail.com>
  */
 
+#include <asm/simd.h>
 #include <crypto/algapi.h>
 #include <crypto/internal/simd.h>
 #include <crypto/aria.h>
@@ -85,17 +86,19 @@ static int aria_avx_ctr_encrypt(struct skcipher_request *req)
 		const u8 *src = walk.src.virt.addr;
 		u8 *dst = walk.dst.virt.addr;
 
+		kernel_fpu_begin();
 		while (nbytes >= ARIA_AESNI_PARALLEL_BLOCK_SIZE) {
 			u8 keystream[ARIA_AESNI_PARALLEL_BLOCK_SIZE];
 
-			kernel_fpu_begin();
 			aria_ops.aria_ctr_crypt_16way(ctx, dst, src, keystream,
 						      walk.iv);
-			kernel_fpu_end();
 			dst += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
 			src += ARIA_AESNI_PARALLEL_BLOCK_SIZE;
 			nbytes -= ARIA_AESNI_PARALLEL_BLOCK_SIZE;
+
+			kernel_fpu_yield();
 		}
+		kernel_fpu_end();
 
 		while (nbytes >= ARIA_BLOCK_SIZE) {
 			u8 keystream[ARIA_BLOCK_SIZE];
The x86 assembly language implementations using SIMD process data
between kernel_fpu_begin() and kernel_fpu_end() calls. That disables
scheduler preemption, which prevents the CPU core from being used by
other threads.

In ctr mode, rather than break the processing into 256-byte passes,
each of which unilaterally calls kernel_fpu_begin() and kernel_fpu_end(),
periodically check if the kernel scheduler wants to run something else
on the CPU. If so, yield the kernel FPU context and let the scheduler
intervene.

Signed-off-by: Robert Elliott <elliott@hpe.com>
---
 arch/x86/crypto/aria_aesni_avx_glue.c | 7 +++++--
 1 file changed, 5 insertions(+), 2 deletions(-)
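
For reference: the patch relies on a kernel_fpu_yield() helper, which
is presumably why the new #include <asm/simd.h> appears in the first
hunk. Its exact definition is not shown here; what follows is a
minimal illustrative sketch, assuming the helper is gated on
need_resched(), not the series' actual code.

#include <asm/fpu/api.h>	/* kernel_fpu_begin()/kernel_fpu_end() */
#include <linux/sched.h>	/* need_resched(), cond_resched() */

/*
 * Illustrative sketch only -- not necessarily the series' definition.
 * Briefly close the in-kernel FPU section when the scheduler has
 * pending work, let other threads run, then reopen the section
 * before the caller's next pass over the data.
 */
static inline void kernel_fpu_yield(void)
{
	if (need_resched()) {
		kernel_fpu_end();	/* re-enables preemption */
		cond_resched();		/* reschedule if needed, even on
					 * non-preemptible kernels */
		kernel_fpu_begin();	/* reclaim FPU/SIMD state */
	}
}

With the begin/end pair hoisted out of the loop, the common case (no
reschedule pending) avoids repeated FPU state save/restore on every
256-byte pass (ARIA_AESNI_PARALLEL_BLOCK_SIZE = 16 blocks x 16 bytes),
while the need_resched() check bounds how long preemption stays
disabled.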