@@ -13,4 +13,8 @@ void aesenc_SB_SR_AK_accel(AESState *, const AESState *,
const AESState *, bool)
    QEMU_ERROR("unsupported accel");

+void aesdec_ISB_ISR_AK_accel(AESState *, const AESState *,
+ const AESState *, bool)
+ QEMU_ERROR("unsupported accel");
+
#endif
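
The _accel stubs rely on QEMU_ERROR to turn any call that survives dead
code elimination into a compile-time failure: the generic header leaves
HAVE_AES_ACCEL defined as false, so the accel branch of the inline
dispatchers below folds away and the stub is never actually referenced.
A minimal stand-alone sketch of that pattern, assuming QEMU_ERROR wraps
GCC's error function attribute (all names here are illustrative, not
QEMU source):

    /*
     * Hypothetical dead-code guard, built with optimization enabled:
     * a call that remains reachable is a compile-time error, while a
     * call folded away by a constant condition is accepted.
     */
    #define HAVE_AES_ACCEL 0

    void aes_accel_stub(void)
        __attribute__((error("unsupported accel")));

    static inline void aes_dispatch(void)
    {
        if (HAVE_AES_ACCEL) {
            aes_accel_stub();   /* eliminated: HAVE_AES_ACCEL is 0 */
        }
        /* the generic fallback would run here */
    }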
@@ -41,4 +41,25 @@ static inline void aesenc_SB_SR_AK(AESState *r, const AESState *st,
}
}

+/*
+ * Perform InvSubBytes + InvShiftRows + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_AK_genrev(r, st, rk);
+ }
+}
+
#endif /* CRYPTO_AES_ROUND_H */
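
For callers, the inline wrapper hides both the acceleration choice and
the host byte order: a user only decides how its AESState is laid out.
A minimal usage sketch (the helper name and the little-endian choice
are hypothetical, not part of this patch):

    #include "crypto/aes-round.h"

    /*
     * One InvSubBytes + InvShiftRows + AddRoundKey step of an AES
     * decryption round, with the state viewed in little-endian byte
     * order (be = false).  The wrapper picks the accelerated routine
     * when available, else the matching generic byte-order variant.
     */
    static void aes_decrypt_round_step(AESState *out, const AESState *in,
                                       const AESState *round_key)
    {
        aesdec_ISB_ISR_AK(out, in, round_key, false);
    }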
@@ -1293,6 +1293,49 @@ void aesenc_SB_SR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
aesenc_SB_SR_AK_swap(r, s, k, true);
}

+/* Perform InvSubBytes + InvShiftRows + AddRoundKey. */
+static inline void
+aesdec_ISB_ISR_AK_swap(AESState *ret, const AESState *st,
+ const AESState *rk, bool swap)
+{
+ const int swap_b = swap ? 15 : 0;
+ AESState t;
+
+ t.b[swap_b ^ 0x0] = AES_isbox[st->b[swap_b ^ AES_ISH_0]];
+ t.b[swap_b ^ 0x1] = AES_isbox[st->b[swap_b ^ AES_ISH_1]];
+ t.b[swap_b ^ 0x2] = AES_isbox[st->b[swap_b ^ AES_ISH_2]];
+ t.b[swap_b ^ 0x3] = AES_isbox[st->b[swap_b ^ AES_ISH_3]];
+ t.b[swap_b ^ 0x4] = AES_isbox[st->b[swap_b ^ AES_ISH_4]];
+ t.b[swap_b ^ 0x5] = AES_isbox[st->b[swap_b ^ AES_ISH_5]];
+ t.b[swap_b ^ 0x6] = AES_isbox[st->b[swap_b ^ AES_ISH_6]];
+ t.b[swap_b ^ 0x7] = AES_isbox[st->b[swap_b ^ AES_ISH_7]];
+ t.b[swap_b ^ 0x8] = AES_isbox[st->b[swap_b ^ AES_ISH_8]];
+ t.b[swap_b ^ 0x9] = AES_isbox[st->b[swap_b ^ AES_ISH_9]];
+ t.b[swap_b ^ 0xa] = AES_isbox[st->b[swap_b ^ AES_ISH_A]];
+ t.b[swap_b ^ 0xb] = AES_isbox[st->b[swap_b ^ AES_ISH_B]];
+ t.b[swap_b ^ 0xc] = AES_isbox[st->b[swap_b ^ AES_ISH_C]];
+ t.b[swap_b ^ 0xd] = AES_isbox[st->b[swap_b ^ AES_ISH_D]];
+ t.b[swap_b ^ 0xe] = AES_isbox[st->b[swap_b ^ AES_ISH_E]];
+ t.b[swap_b ^ 0xf] = AES_isbox[st->b[swap_b ^ AES_ISH_F]];
+
+ /*
+ * Perform the AddRoundKey with generic vectors.
+ * This may be expanded to either host integer or host vector code.
+ * The key and output endianness match, so no bswap required.
+ */
+ ret->v = t.v ^ rk->v;
+}
+
+void aesdec_ISB_ISR_AK_gen(AESState *r, const AESState *s, const AESState *k)
+{
+ aesdec_ISB_ISR_AK_swap(r, s, k, false);
+}
+
+void aesdec_ISB_ISR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
+{
+ aesdec_ISB_ISR_AK_swap(r, s, k, true);
+}
+
/**
* Expand the cipher key into the encryption key schedule.
*/
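
The AES_ISH_* constants consumed by aesdec_ISB_ISR_AK_swap give, for
each output byte, its source position under InvShiftRows.  Assuming
the usual column-major AES state layout (byte i holds row i % 4 of
column i / 4), row r rotates right by r columns, so the table can be
recomputed with a few lines of C (a verification sketch, not QEMU
source):

    #include <stdio.h>

    int main(void)
    {
        for (int i = 0; i < 16; i++) {
            int row = i & 3;
            int col = i >> 2;
            /*
             * InvShiftRows moves row r right by r columns, so output
             * byte (row, col) reads from column (col - row) mod 4.
             */
            int src = (((col - row) & 3) << 2) | row;
            printf("AES_ISH_%X = 0x%x\n", i, src);
        }
        return 0;
    }

The swap_b ^ index trick then layers byte reversal on top: XOR-ing a
4-bit index with 15 maps 0 to 15, 1 to 14, and so on, which is why a
single table serves both the _gen and _genrev entry points.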
Add a primitive for InvSubBytes + InvShiftRows + AddRoundKey.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 host/include/generic/host/aes-round.h |  4 +++
 include/crypto/aes-round.h            | 21 +++++++++++++
 crypto/aes.c                          | 43 +++++++++++++++++++++++++++
 3 files changed, 68 insertions(+)
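
A note on the final AddRoundKey: the xor on the .v member is written
once and left to the compiler, which may lower it to a single 128-bit
vector xor or to a pair of 64-bit integer xors depending on the host.
Assuming AESState's v member is a GCC-style 16-byte vector, the
construct looks like this (a self-contained sketch; AESStateSketch is
a hypothetical stand-in for AESState):

    #include <stdint.h>

    typedef uint8_t aes_vec16 __attribute__((vector_size(16)));

    typedef union {
        uint8_t   b[16];
        uint64_t  d[2];
        aes_vec16 v;
    } AESStateSketch;

    static inline void add_round_key(AESStateSketch *r,
                                     const AESStateSketch *t,
                                     const AESStateSketch *rk)
    {
        /* One 128-bit xor on vector-capable hosts. */
        r->v = t->v ^ rk->v;
    }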