--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -80,8 +80,8 @@ config CRYPTO_AES_ARM64_CE_CCM
depends on ARM64 && KERNEL_MODE_NEON
select CRYPTO_ALGAPI
select CRYPTO_AES_ARM64_CE
- select CRYPTO_AES_ARM64
select CRYPTO_AEAD
+	select CRYPTO_LIB_AES

config CRYPTO_AES_ARM64_CE_BLK
tristate "AES in ECB/CBC/CTR/XTS modes using ARMv8 Crypto Extensions"
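For context, CRYPTO_LIB_AES pulls in the generic AES library in
lib/crypto/aes.c. A rough sketch of the interface, based on
include/crypto/aes.h (exact prototypes may differ between kernel
versions):

	/* Expand the user-supplied key; returns 0 or -EINVAL. */
	int aes_expandkey(struct crypto_aes_ctx *ctx, const u8 *in_key,
			  unsigned int key_len);

	/* Encrypt/decrypt a single 16-byte block with the expanded key. */
	void aes_encrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);
	void aes_decrypt(const struct crypto_aes_ctx *ctx, u8 *out, const u8 *in);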
--- a/arch/arm64/crypto/aes-ce-ccm-glue.c
+++ b/arch/arm64/crypto/aes-ce-ccm-glue.c
@@ -46,8 +46,6 @@ asmlinkage void ce_aes_ccm_decrypt(u8 out[], u8 const in[], u32 cbytes,
asmlinkage void ce_aes_ccm_final(u8 mac[], u8 const ctr[], u32 const rk[],
				 u32 rounds);

-asmlinkage void __aes_arm64_encrypt(u32 *rk, u8 *out, const u8 *in, int rounds);
-
static int ccm_setkey(struct crypto_aead *tfm, const u8 *in_key,
unsigned int key_len)
{
@@ -127,8 +125,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
	}

	while (abytes >= AES_BLOCK_SIZE) {
- __aes_arm64_encrypt(key->key_enc, mac, mac,
- num_rounds(key));
+ aes_encrypt(key, mac, mac);
		crypto_xor(mac, in, AES_BLOCK_SIZE);

		in += AES_BLOCK_SIZE;
@@ -136,8 +133,7 @@ static void ccm_update_mac(struct crypto_aes_ctx *key, u8 mac[], u8 const in[],
	}

	if (abytes > 0) {
- __aes_arm64_encrypt(key->key_enc, mac, mac,
- num_rounds(key));
+ aes_encrypt(key, mac, mac);
crypto_xor(mac, in, abytes);
*macp = abytes;
}
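The two hunks above are the CBC-MAC update over the associated data:
run the accumulating MAC through the block cipher, then XOR in the
next chunk of input (the MAC is finalized separately). As a
self-contained sketch of the full-block loop - illustrative only, with
a hypothetical helper name, not the actual kernel code:

	#include <crypto/aes.h>		/* aes_encrypt(), struct crypto_aes_ctx */
	#include <crypto/algapi.h>	/* crypto_xor() */

	/* Absorb full blocks of associated data into mac[]. */
	static void ccm_mac_blocks(const struct crypto_aes_ctx *key,
				   u8 mac[AES_BLOCK_SIZE],
				   const u8 *in, u32 abytes)
	{
		while (abytes >= AES_BLOCK_SIZE) {
			aes_encrypt(key, mac, mac);	/* mac = E_K(mac) */
			crypto_xor(mac, in, AES_BLOCK_SIZE);
			in += AES_BLOCK_SIZE;
			abytes -= AES_BLOCK_SIZE;
		}
	}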
@@ -209,10 +205,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
			bsize = nbytes;

		crypto_inc(walk->iv, AES_BLOCK_SIZE);
- __aes_arm64_encrypt(ctx->key_enc, buf, walk->iv,
- num_rounds(ctx));
- __aes_arm64_encrypt(ctx->key_enc, mac, mac,
- num_rounds(ctx));
+ aes_encrypt(ctx, buf, walk->iv);
+ aes_encrypt(ctx, mac, mac);
if (enc)
crypto_xor(mac, src, bsize);
crypto_xor_cpy(dst, src, buf, bsize);
@@ -227,8 +221,8 @@ static int ccm_crypt_fallback(struct skcipher_walk *walk, u8 mac[], u8 iv0[],
	}

	if (!err) {
- __aes_arm64_encrypt(ctx->key_enc, buf, iv0, num_rounds(ctx));
- __aes_arm64_encrypt(ctx->key_enc, mac, mac, num_rounds(ctx));
+ aes_encrypt(ctx, buf, iv0);
+ aes_encrypt(ctx, mac, mac);
crypto_xor(mac, buf, AES_BLOCK_SIZE);
}
return err;
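Note that aes_encrypt() takes the whole struct crypto_aes_ctx rather
than the expanded key schedule plus an explicit round count, which is
why the num_rounds() arguments disappear in the hunks above: the
library derives the round count from the key length stored in the
context, roughly like this (a sketch of what lib/crypto/aes.c does
internally, not code from this patch):

	/* 10, 12 or 14 rounds for AES-128/192/256 respectively */
	int rounds = 6 + ctx->key_length / 4;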
The CCM code calls directly into the scalar, table-based AES cipher
for arm64 from the fallback path, and since that implementation is
known not to be time-invariant, calling it from a time-invariant SIMD
cipher is a bit nasty.

So let's switch to the AES library - this makes the code more robust,
and drops the dependency on the generic AES cipher, allowing us to
omit it entirely in the future.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
---
 arch/arm64/crypto/Kconfig           |  2 +-
 arch/arm64/crypto/aes-ce-ccm-glue.c | 18 ++++++------------
 2 files changed, 7 insertions(+), 13 deletions(-)

-- 
2.20.1