@@ -125,16 +125,11 @@ static void ccm_calculate_auth_mac(struct aead_request *req, u8 mac[])
scatterwalk_start(&walk, sg_next(walk.sg));
n = scatterwalk_clamp(&walk, len);
}
- n = min_t(u32, n, SZ_4K); /* yield NEON at least every 4k */
p = scatterwalk_map(&walk);
macp = ce_aes_ccm_auth_data(mac, p, n, macp, ctx->key_enc,
num_rounds(ctx));
- if (len / SZ_4K > (len - n) / SZ_4K) {
- kernel_neon_end();
- kernel_neon_begin();
- }
len -= n;
scatterwalk_unmap(p);
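The two deletions above are the old per-4 KiB NEON yield in the CCM auth path: the min_t() cap keeps each pass at or below 4 KiB, and the division test detects when the bytes just hashed crossed a 4 KiB boundary of the remaining length, at which point the NEON section was closed and immediately reopened so the scheduler could run. The yield existed because a kernel-mode NEON section historically ran with preemption disabled; the hunks themselves do not state why it can go now, so that rationale is left to the changelog. Restating the dropped check with made-up numbers (the comment is illustration only, not part of the original source):

	if (len / SZ_4K > (len - n) / SZ_4K) {
		/*
		 * e.g. len == 5000 and n == 1000: 5000 / SZ_4K == 1 but
		 * (5000 - 1000) / SZ_4K == 0, so this pass crossed a 4 KiB
		 * boundary and the NEON section was cycled here.
		 */
		kernel_neon_end();
		kernel_neon_begin();
	}

With the cap and the re-begin gone, each scatterwalk step is MACed in full inside the NEON section that was already active around this code, which is exactly why the removed lines had to end and re-begin it rather than open one of their own.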
@@ -87,17 +87,9 @@ void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
!crypto_simd_usable())
return chacha_crypt_generic(state, dst, src, bytes, nrounds);
- do {
- unsigned int todo = min_t(unsigned int, bytes, SZ_4K);
-
- kernel_neon_begin();
- chacha_doneon(state, dst, src, todo, nrounds);
- kernel_neon_end();
-
- bytes -= todo;
- src += todo;
- dst += todo;
- } while (bytes);
+ kernel_neon_begin();
+ chacha_doneon(state, dst, src, bytes, nrounds);
+ kernel_neon_end();
}
EXPORT_SYMBOL(chacha_crypt_arch);
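Net effect on the ChaCha glue, pieced together from the context and added lines; the first line of the fallback check sits just above the hunk, so it is filled in here from the surrounding file rather than taken from the diff itself:

	void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src, unsigned int bytes,
			       int nrounds)
	{
		/* short requests and non-SIMD contexts keep the generic code */
		if (!static_branch_likely(&have_neon) || bytes <= CHACHA_BLOCK_SIZE ||
		    !crypto_simd_usable())
			return chacha_crypt_generic(state, dst, src, bytes, nrounds);

		/* a single NEON section now covers the whole request */
		kernel_neon_begin();
		chacha_doneon(state, dst, src, bytes, nrounds);
		kernel_neon_end();
	}
	EXPORT_SYMBOL(chacha_crypt_arch);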
@@ -37,18 +37,9 @@ static int crct10dif_update_pmull_p8(struct shash_desc *desc, const u8 *data,
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- do {
- unsigned int chunk = length;
-
- if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
- chunk = SZ_4K;
-
- kernel_neon_begin();
- *crc = crc_t10dif_pmull_p8(*crc, data, chunk);
- kernel_neon_end();
- data += chunk;
- length -= chunk;
- } while (length);
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p8(*crc, data, length);
+ kernel_neon_end();
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
@@ -62,18 +53,9 @@ static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
u16 *crc = shash_desc_ctx(desc);
if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
- do {
- unsigned int chunk = length;
-
- if (chunk > SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE)
- chunk = SZ_4K;
-
- kernel_neon_begin();
- *crc = crc_t10dif_pmull_p64(*crc, data, chunk);
- kernel_neon_end();
- data += chunk;
- length -= chunk;
- } while (length);
+ kernel_neon_begin();
+ *crc = crc_t10dif_pmull_p64(*crc, data, length);
+ kernel_neon_end();
} else {
*crc = crc_t10dif_generic(*crc, data, length);
}
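Both the p8 and the p64 update paths lose the same inner loop. The threshold it used is worth noting: a 4 KiB pass was split off only while more than SZ_4K + CRC_T10DIF_PMULL_CHUNK_SIZE bytes remained, so the final pass handed to the PMULL routine could never fall below the same minimum the outer length check enforces. Assuming the usual 16-byte CRC_T10DIF_PMULL_CHUNK_SIZE, a hypothetical 10000-byte update used to run as passes of 4096, 4096 and 1808 bytes with a NEON begin/end around each; it now runs as one 10000-byte pass. The simplified p64 variant then reads as follows (reconstructed from the hunk; the trailing return 0 is assumed from the unchanged remainder of the function):

	static int crct10dif_update_pmull_p64(struct shash_desc *desc, const u8 *data,
					      unsigned int length)
	{
		u16 *crc = shash_desc_ctx(desc);

		if (length >= CRC_T10DIF_PMULL_CHUNK_SIZE && crypto_simd_usable()) {
			/* whole update in one PMULL pass under one NEON section */
			kernel_neon_begin();
			*crc = crc_t10dif_pmull_p64(*crc, data, length);
			kernel_neon_end();
		} else {
			*crc = crc_t10dif_generic(*crc, data, length);
		}

		return 0;
	}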
@@ -22,15 +22,9 @@ static int nhpoly1305_neon_update(struct shash_desc *desc,
if (srclen < 64 || !crypto_simd_usable())
return crypto_nhpoly1305_update(desc, src, srclen);
- do {
- unsigned int n = min_t(unsigned int, srclen, SZ_4K);
-
- kernel_neon_begin();
- crypto_nhpoly1305_update_helper(desc, src, n, nh_neon);
- kernel_neon_end();
- src += n;
- srclen -= n;
- } while (srclen);
+ kernel_neon_begin();
+ crypto_nhpoly1305_update_helper(desc, src, srclen, nh_neon);
+ kernel_neon_end();
return 0;
}
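Same conversion for NHPoly1305: anything at or above the 64-byte threshold is now handed to the NEON helper in one call. The post-patch body, reconstructed from the hunk (the second line of the prototype is abbreviated in the hunk header and filled in here from the parameters the body uses):

	static int nhpoly1305_neon_update(struct shash_desc *desc,
					  const u8 *src, unsigned int srclen)
	{
		if (srclen < 64 || !crypto_simd_usable())
			return crypto_nhpoly1305_update(desc, src, srclen);

		/* one NEON section for the full request, no 4 KiB slicing */
		kernel_neon_begin();
		crypto_nhpoly1305_update_helper(desc, src, srclen, nh_neon);
		kernel_neon_end();
		return 0;
	}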
@@ -143,20 +143,13 @@ void poly1305_update_arch(struct poly1305_desc_ctx *dctx, const u8 *src,
unsigned int len = round_down(nbytes, POLY1305_BLOCK_SIZE);
if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
- do {
- unsigned int todo = min_t(unsigned int, len, SZ_4K);
-
- kernel_neon_begin();
- poly1305_blocks_neon(&dctx->h, src, todo, 1);
- kernel_neon_end();
-
- len -= todo;
- src += todo;
- } while (len);
+ kernel_neon_begin();
+ poly1305_blocks_neon(&dctx->h, src, len, 1);
+ kernel_neon_end();
} else {
poly1305_blocks(&dctx->h, src, len, 1);
- src += len;
}
+ src += len;
nbytes %= POLY1305_BLOCK_SIZE;
}
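The Poly1305 path needs one extra move beyond dropping the loop: the removed NEON loop advanced src itself while the scalar branch did its own src += len, so the advance is hoisted below the if/else where both branches now share it. The resulting flow, read straight off the hunk:

	if (static_branch_likely(&have_neon) && crypto_simd_usable()) {
		kernel_neon_begin();
		poly1305_blocks_neon(&dctx->h, src, len, 1);
		kernel_neon_end();
	} else {
		poly1305_blocks(&dctx->h, src, len, 1);
	}
	src += len;			/* one advance, shared by both paths */
	nbytes %= POLY1305_BLOCK_SIZE;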
@@ -122,9 +122,8 @@ static int polyval_arm64_update(struct shash_desc *desc,
tctx->key_powers[NUM_KEY_POWERS-1]);
}
- while (srclen >= POLYVAL_BLOCK_SIZE) {
- /* allow rescheduling every 4K bytes */
- nblocks = min(srclen, 4096U) / POLYVAL_BLOCK_SIZE;
+ if (srclen >= POLYVAL_BLOCK_SIZE) {
+ nblocks = srclen / POLYVAL_BLOCK_SIZE;
internal_polyval_update(tctx, src, nblocks, dctx->buffer);
srclen -= nblocks * POLYVAL_BLOCK_SIZE;
src += nblocks * POLYVAL_BLOCK_SIZE;
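Polyval is the one place where the construct was a plain while loop rather than a do/while, and it degenerates into an if: once the min(srclen, 4096U) cap is gone, a single internal_polyval_update() call consumes every full block and leaves srclen below POLYVAL_BLOCK_SIZE, so a second iteration could never do any work. The block after the patch (closing brace added for readability; it sits just past the end of the hunk):

	if (srclen >= POLYVAL_BLOCK_SIZE) {
		/* all full blocks in one call, any partial tail stays in srclen */
		nblocks = srclen / POLYVAL_BLOCK_SIZE;
		internal_polyval_update(tctx, src, nblocks, dctx->buffer);
		srclen -= nblocks * POLYVAL_BLOCK_SIZE;
		src += nblocks * POLYVAL_BLOCK_SIZE;
	}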