--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -109,9 +109,9 @@ asmlinkage void aes_essiv_cbc_decrypt(u8 out[], u8 const in[], u32 const rk1[],
int rounds, int blocks, u8 iv[],
u32 const rk2[]);
-asmlinkage int aes_mac_update(u8 const in[], u32 const rk[], int rounds,
- int blocks, u8 dg[], int enc_before,
- int enc_after);
+asmlinkage void aes_mac_update(u8 const in[], u32 const rk[], int rounds,
+ int blocks, u8 dg[], int enc_before,
+ int enc_after);
struct crypto_aes_xts_ctx {
struct crypto_aes_ctx key1;
@@ -880,17 +880,10 @@ static void mac_do_update(struct crypto_aes_ctx *ctx, u8 const in[], int blocks,
int rounds = 6 + ctx->key_length / 4;
if (crypto_simd_usable()) {
- int rem;
-
- do {
- kernel_neon_begin();
- rem = aes_mac_update(in, ctx->key_enc, rounds, blocks,
- dg, enc_before, enc_after);
- kernel_neon_end();
- in += (blocks - rem) * AES_BLOCK_SIZE;
- blocks = rem;
- enc_before = 0;
- } while (blocks);
+ kernel_neon_begin();
+ aes_mac_update(in, ctx->key_enc, rounds, blocks, dg,
+ enc_before, enc_after);
+ kernel_neon_end();
} else {
if (enc_before)
aes_encrypt(ctx, dg, dg);
--- a/arch/arm64/crypto/aes-modes.S
+++ b/arch/arm64/crypto/aes-modes.S
@@ -842,7 +842,6 @@ AES_FUNC_START(aes_mac_update)
cbz w5, .Lmacout
encrypt_block v0, w2, x1, x7, w8
st1 {v0.16b}, [x4] /* return dg */
- cond_yield .Lmacout, x7, x8
b .Lmacloop4x
.Lmac1x:
add w3, w3, #4
@@ -861,6 +860,5 @@ AES_FUNC_START(aes_mac_update)
.Lmacout:
st1 {v0.16b}, [x4] /* return dg */
- mov w0, w3
ret
AES_FUNC_END(aes_mac_update)
--- a/arch/arm64/crypto/sha1-ce-core.S
+++ b/arch/arm64/crypto/sha1-ce-core.S
@@ -62,8 +62,8 @@
.endm
/*
- * int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- * int blocks)
+ * void __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ * int blocks)
*/
SYM_FUNC_START(__sha1_ce_transform)
/* load round constants */
@@ -121,7 +121,6 @@ CPU_LE( rev32 v11.16b, v11.16b )
add dgav.4s, dgav.4s, dg0v.4s
cbz w2, 2f
- cond_yield 3f, x5, x6
b 0b
/*
@@ -145,6 +144,5 @@ CPU_LE( rev32 v11.16b, v11.16b )
/* store new state */
3: st1 {dgav.4s}, [x0]
str dgb, [x0, #16]
- mov w0, w2
ret
SYM_FUNC_END(__sha1_ce_transform)
--- a/arch/arm64/crypto/sha1-ce-glue.c
+++ b/arch/arm64/crypto/sha1-ce-glue.c
@@ -29,23 +29,16 @@ struct sha1_ce_state {
extern const u32 sha1_ce_offsetof_count;
extern const u32 sha1_ce_offsetof_finalize;
-asmlinkage int __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage void __sha1_ce_transform(struct sha1_ce_state *sst, u8 const *src,
+ int blocks);
static void sha1_ce_transform(struct sha1_state *sst, u8 const *src,
int blocks)
{
- while (blocks) {
- int rem;
-
- kernel_neon_begin();
- rem = __sha1_ce_transform(container_of(sst,
- struct sha1_ce_state,
- sst), src, blocks);
- kernel_neon_end();
- src += (blocks - rem) * SHA1_BLOCK_SIZE;
- blocks = rem;
- }
+ kernel_neon_begin();
+ __sha1_ce_transform(container_of(sst, struct sha1_ce_state, sst), src,
+ blocks);
+ kernel_neon_end();
}
const u32 sha1_ce_offsetof_count = offsetof(struct sha1_ce_state, sst.count);
--- a/arch/arm64/crypto/sha2-ce-core.S
+++ b/arch/arm64/crypto/sha2-ce-core.S
@@ -71,8 +71,8 @@
.word 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2
/*
- * int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- * int blocks)
+ * void __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ * int blocks)
*/
.text
SYM_FUNC_START(__sha256_ce_transform)
@@ -129,7 +129,6 @@ CPU_LE( rev32 v19.16b, v19.16b )
/* handled all input blocks? */
cbz w2, 2f
- cond_yield 3f, x5, x6
b 0b
/*
@@ -152,6 +151,5 @@ CPU_LE( rev32 v19.16b, v19.16b )
/* store new state */
3: st1 {dgav.4s, dgbv.4s}, [x0]
- mov w0, w2
ret
SYM_FUNC_END(__sha256_ce_transform)
--- a/arch/arm64/crypto/sha2-ce-glue.c
+++ b/arch/arm64/crypto/sha2-ce-glue.c
@@ -30,23 +30,16 @@ struct sha256_ce_state {
extern const u32 sha256_ce_offsetof_count;
extern const u32 sha256_ce_offsetof_finalize;
-asmlinkage int __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
- int blocks);
+asmlinkage void __sha256_ce_transform(struct sha256_ce_state *sst, u8 const *src,
+ int blocks);
static void sha256_ce_transform(struct sha256_state *sst, u8 const *src,
int blocks)
{
- while (blocks) {
- int rem;
-
- kernel_neon_begin();
- rem = __sha256_ce_transform(container_of(sst,
- struct sha256_ce_state,
- sst), src, blocks);
- kernel_neon_end();
- src += (blocks - rem) * SHA256_BLOCK_SIZE;
- blocks = rem;
- }
+ kernel_neon_begin();
+ __sha256_ce_transform(container_of(sst, struct sha256_ce_state, sst),
+ src, blocks);
+ kernel_neon_end();
}
const u32 sha256_ce_offsetof_count = offsetof(struct sha256_ce_state,
--- a/arch/arm64/crypto/sha3-ce-core.S
+++ b/arch/arm64/crypto/sha3-ce-core.S
@@ -37,7 +37,7 @@
.endm
/*
- * int sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
+ * void sha3_ce_transform(u64 *st, const u8 *data, int blocks, int dg_size)
*/
.text
SYM_FUNC_START(sha3_ce_transform)
@@ -184,18 +184,16 @@ SYM_FUNC_START(sha3_ce_transform)
eor v0.16b, v0.16b, v31.16b
cbnz w8, 3b
- cond_yield 4f, x8, x9
cbnz w2, 0b
/* save state */
-4: st1 { v0.1d- v3.1d}, [x0], #32
+ st1 { v0.1d- v3.1d}, [x0], #32
st1 { v4.1d- v7.1d}, [x0], #32
st1 { v8.1d-v11.1d}, [x0], #32
st1 {v12.1d-v15.1d}, [x0], #32
st1 {v16.1d-v19.1d}, [x0], #32
st1 {v20.1d-v23.1d}, [x0], #32
st1 {v24.1d}, [x0]
- mov w0, w2
ret
SYM_FUNC_END(sha3_ce_transform)
--- a/arch/arm64/crypto/sha3-ce-glue.c
+++ b/arch/arm64/crypto/sha3-ce-glue.c
@@ -28,8 +28,8 @@ MODULE_ALIAS_CRYPTO("sha3-256");
MODULE_ALIAS_CRYPTO("sha3-384");
MODULE_ALIAS_CRYPTO("sha3-512");
-asmlinkage int sha3_ce_transform(u64 *st, const u8 *data, int blocks,
- int md_len);
+asmlinkage void sha3_ce_transform(u64 *st, const u8 *data, int blocks,
+ int md_len);
static int sha3_update(struct shash_desc *desc, const u8 *data,
unsigned int len)
@@ -59,15 +59,11 @@ static int sha3_update(struct shash_desc *desc, const u8 *data,
blocks = len / sctx->rsiz;
len %= sctx->rsiz;
- while (blocks) {
- int rem;
-
+ if (blocks) {
kernel_neon_begin();
- rem = sha3_ce_transform(sctx->st, data, blocks,
- digest_size);
+ sha3_ce_transform(sctx->st, data, blocks, digest_size);
kernel_neon_end();
- data += (blocks - rem) * sctx->rsiz;
- blocks = rem;
+ data += blocks * sctx->rsiz;
}
}
--- a/arch/arm64/crypto/sha512-ce-core.S
+++ b/arch/arm64/crypto/sha512-ce-core.S
@@ -102,8 +102,8 @@
.endm
/*
- * int __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- * int blocks)
+ * void __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ * int blocks)
*/
.text
SYM_FUNC_START(__sha512_ce_transform)
@@ -195,12 +195,10 @@ CPU_LE( rev64 v19.16b, v19.16b )
add v10.2d, v10.2d, v2.2d
add v11.2d, v11.2d, v3.2d
- cond_yield 3f, x4, x5
/* handled all input blocks? */
cbnz w2, 0b
/* store new state */
-3: st1 {v8.2d-v11.2d}, [x0]
- mov w0, w2
+ st1 {v8.2d-v11.2d}, [x0]
ret
SYM_FUNC_END(__sha512_ce_transform)
--- a/arch/arm64/crypto/sha512-ce-glue.c
+++ b/arch/arm64/crypto/sha512-ce-glue.c
@@ -26,23 +26,17 @@ MODULE_LICENSE("GPL v2");
MODULE_ALIAS_CRYPTO("sha384");
MODULE_ALIAS_CRYPTO("sha512");
-asmlinkage int __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
- int blocks);
+asmlinkage void __sha512_ce_transform(struct sha512_state *sst, u8 const *src,
+ int blocks);
asmlinkage void sha512_block_data_order(u64 *digest, u8 const *src, int blocks);
static void sha512_ce_transform(struct sha512_state *sst, u8 const *src,
int blocks)
{
- while (blocks) {
- int rem;
-
- kernel_neon_begin();
- rem = __sha512_ce_transform(sst, src, blocks);
- kernel_neon_end();
- src += (blocks - rem) * SHA512_BLOCK_SIZE;
- blocks = rem;
- }
+ kernel_neon_begin();
+ __sha512_ce_transform(sst, src, blocks);
+ kernel_neon_end();
}
static void sha512_arm64_transform(struct sha512_state *sst, u8 const *src,
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -759,35 +759,6 @@ alternative_endif
set_sctlr sctlr_el2, \reg
.endm
- /*
- * Check whether preempt/bh-disabled asm code should yield as soon as
- * it is able. This is the case if we are currently running in task
- * context, and either a softirq is pending, or the TIF_NEED_RESCHED
- * flag is set and re-enabling preemption a single time would result in
- * a preempt count of zero. (Note that the TIF_NEED_RESCHED flag is
- * stored negated in the top word of the thread_info::preempt_count
- * field)
- */
- .macro cond_yield, lbl:req, tmp:req, tmp2:req
- get_current_task \tmp
- ldr \tmp, [\tmp, #TSK_TI_PREEMPT]
- /*
- * If we are serving a softirq, there is no point in yielding: the
- * softirq will not be preempted no matter what we do, so we should
- * run to completion as quickly as we can.
- */
- tbnz \tmp, #SOFTIRQ_SHIFT, .Lnoyield_\@
-#ifdef CONFIG_PREEMPTION
- sub \tmp, \tmp, #PREEMPT_DISABLE_OFFSET
- cbz \tmp, \lbl
-#endif
- adr_l \tmp, irq_stat + IRQ_CPUSTAT_SOFTIRQ_PENDING
- get_this_cpu_offset \tmp2
- ldr w\tmp, [\tmp, \tmp2]
- cbnz w\tmp, \lbl // yield on pending softirq in task context
-.Lnoyield_\@:
- .endm
-
/*
* Branch Target Identifier (BTI)
*/
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -116,10 +116,6 @@ int main(void)
DEFINE(DMA_TO_DEVICE, DMA_TO_DEVICE);
DEFINE(DMA_FROM_DEVICE, DMA_FROM_DEVICE);
BLANK();
- DEFINE(PREEMPT_DISABLE_OFFSET, PREEMPT_DISABLE_OFFSET);
- DEFINE(SOFTIRQ_SHIFT, SOFTIRQ_SHIFT);
- DEFINE(IRQ_CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
- BLANK();
DEFINE(CPU_BOOT_TASK, offsetof(struct secondary_data, task));
BLANK();
DEFINE(FTR_OVR_VAL_OFFSET, offsetof(struct arm64_ftr_override, val));