@@ -23,5 +23,8 @@ void aesdec_IMC_accel(AESState *, const AESState *, bool)
void aesdec_ISB_ISR_AK_accel(AESState *, const AESState *,
const AESState *, bool)
QEMU_ERROR("unsupported accel");
+void aesdec_ISB_ISR_IMC_AK_accel(AESState *, const AESState *,
+ const AESState *, bool)
+ QEMU_ERROR("unsupported accel");

#endif /* GENERIC_HOST_CRYPTO_AES_ROUND_H */
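
The stub added above mirrors the existing ones in this generic header: hosts
with no AES acceleration must still declare the _accel entry points, and
QEMU_ERROR turns any call that survives dead-code elimination into a build
failure.  A minimal sketch of the mechanism, assuming QEMU_ERROR expands to
the GCC/clang "error" function attribute as in qemu/compiler.h:

    /* Sketch, not part of the patch: how the QEMU_ERROR guard works. */
    #define QEMU_ERROR(msg) __attribute__((error(msg)))

    void aes_accel_stub(void) QEMU_ERROR("unsupported accel");

    void caller(void)
    {
        if (0) {                /* stands in for HAVE_AES_ACCEL == false */
            aes_accel_stub();   /* folded away, so no error is raised */
        }
    }

Because HAVE_AES_ACCEL is compile-time false in this header, the branch that
would call the stub is always eliminated and nothing links against it.
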
@@ -119,4 +119,25 @@ static inline void aesdec_ISB_ISR_AK(AESState *r, const AESState *st,
}
}

+/*
+ * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
+ */
+
+void aesdec_ISB_ISR_IMC_AK_gen(AESState *ret, const AESState *st,
+ const AESState *rk);
+void aesdec_ISB_ISR_IMC_AK_genrev(AESState *ret, const AESState *st,
+ const AESState *rk);
+
+static inline void aesdec_ISB_ISR_IMC_AK(AESState *r, const AESState *st,
+ const AESState *rk, bool be)
+{
+ if (HAVE_AES_ACCEL) {
+ aesdec_ISB_ISR_IMC_AK_accel(r, st, rk, be);
+ } else if (HOST_BIG_ENDIAN == be) {
+ aesdec_ISB_ISR_IMC_AK_gen(r, st, rk);
+ } else {
+ aesdec_ISB_ISR_IMC_AK_genrev(r, st, rk);
+ }
+}
+
#endif /* CRYPTO_AES_ROUND_H */
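
The new inline follows the same three-way dispatch as the existing helpers:
hardware acceleration when available, otherwise the generic table
implementation, with the byte-swapped variant chosen when the state's byte
order disagrees with the host's.  As a hedged usage sketch (not part of this
patch), a caller could drive a whole equivalent-inverse-cipher decryption
with it; the function name and the "rounds" parameter are illustrative
assumptions, and rk[] is assumed to be a decryption key schedule with
InvMixColumns already folded into the middle-round keys:

    /* Sketch: decrypt one block in place with round keys rk[0..rounds].
     * "be" gives the byte order of the AESState words.  Passing the same
     * pointer for input and output is fine for the generic paths in this
     * patch, which read the state in full before writing the result. */
    static void aes_decrypt_block_sketch(AESState *st, const AESState *rk,
                                         int rounds, bool be)
    {
        int i;

        /* Initial AddRoundKey with the last round key. */
        for (i = 0; i < 4; i++) {
            st->w[i] ^= rk[rounds].w[i];
        }
        /* Middle rounds: InvSubBytes, InvShiftRows, InvMixColumns, AK. */
        for (i = rounds - 1; i > 0; i--) {
            aesdec_ISB_ISR_IMC_AK(st, st, &rk[i], be);
        }
        /* The final round omits InvMixColumns. */
        aesdec_ISB_ISR_AK(st, st, &rk[0], be);
    }
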
@@ -1484,6 +1484,64 @@ void aesdec_ISB_ISR_AK_genrev(AESState *r, const AESState *s, const AESState *k)
aesdec_ISB_ISR_AK_swap(r, s, k, true);
}

+/*
+ * Perform InvSubBytes + InvShiftRows + InvMixColumns + AddRoundKey.
+ */
+static inline void
+aesdec_ISB_ISR_IMC_AK_swap(AESState *r, const AESState *st,
+ const AESState *rk, bool swap)
+{
+ int swap_b = swap * 0xf;
+ int swap_w = swap * 0x3;
+ bool be = HOST_BIG_ENDIAN ^ swap;
+ uint32_t w0, w1, w2, w3;
+
+ w0 = (AES_Td0[st->b[swap_b ^ AES_ISH(0x0)]] ^
+ AES_Td1[st->b[swap_b ^ AES_ISH(0x1)]] ^
+ AES_Td2[st->b[swap_b ^ AES_ISH(0x2)]] ^
+ AES_Td3[st->b[swap_b ^ AES_ISH(0x3)]]);
+
+ w1 = (AES_Td0[st->b[swap_b ^ AES_ISH(0x4)]] ^
+ AES_Td1[st->b[swap_b ^ AES_ISH(0x5)]] ^
+ AES_Td2[st->b[swap_b ^ AES_ISH(0x6)]] ^
+ AES_Td3[st->b[swap_b ^ AES_ISH(0x7)]]);
+
+ w2 = (AES_Td0[st->b[swap_b ^ AES_ISH(0x8)]] ^
+ AES_Td1[st->b[swap_b ^ AES_ISH(0x9)]] ^
+ AES_Td2[st->b[swap_b ^ AES_ISH(0xA)]] ^
+ AES_Td3[st->b[swap_b ^ AES_ISH(0xB)]]);
+
+ w3 = (AES_Td0[st->b[swap_b ^ AES_ISH(0xC)]] ^
+ AES_Td1[st->b[swap_b ^ AES_ISH(0xD)]] ^
+ AES_Td2[st->b[swap_b ^ AES_ISH(0xE)]] ^
+ AES_Td3[st->b[swap_b ^ AES_ISH(0xF)]]);
+
+ /* Note that AES_TdX is encoded for big-endian. */
+ if (!be) {
+ w0 = bswap32(w0);
+ w1 = bswap32(w1);
+ w2 = bswap32(w2);
+ w3 = bswap32(w3);
+ }
+
+ r->w[swap_w ^ 0] = rk->w[swap_w ^ 0] ^ w0;
+ r->w[swap_w ^ 1] = rk->w[swap_w ^ 1] ^ w1;
+ r->w[swap_w ^ 2] = rk->w[swap_w ^ 2] ^ w2;
+ r->w[swap_w ^ 3] = rk->w[swap_w ^ 3] ^ w3;
+}
+
+void aesdec_ISB_ISR_IMC_AK_gen(AESState *r, const AESState *st,
+ const AESState *rk)
+{
+ aesdec_ISB_ISR_IMC_AK_swap(r, st, rk, false);
+}
+
+void aesdec_ISB_ISR_IMC_AK_genrev(AESState *r, const AESState *st,
+ const AESState *rk)
+{
+ aesdec_ISB_ISR_IMC_AK_swap(r, st, rk, true);
+}
+
/**
* Expand the cipher key into the encryption key schedule.
*/
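
A note on the generic implementation above: the two swap_* masks let one
routine serve both byte orders.  XOR-ing a byte index with 0xf (and a word
index with 0x3) maps it to its end-reversed position, so with swap == true
the same loop body reads and writes a byte-reversed AESState.  The table
lookups themselves fold three round steps into one: AES_ISH(i) applies
InvShiftRows to the byte index, while each AES_TdX entry combines
InvSubBytes with one column of InvMixColumns, Td1..Td3 being successive
byte rotations of Td0.  A sketch of the identity the tables encode (not
part of this patch; AES_isbox[] is assumed to be the inverse S-box from
crypto/aes.h, and gf_mul is a local illustration):

    /* Multiply in GF(2^8) modulo x^8 + x^4 + x^3 + x + 1 (0x11b). */
    static uint8_t gf_mul(uint8_t a, uint8_t b)
    {
        uint8_t r = 0;
        while (b) {
            if (b & 1) {
                r ^= a;
            }
            a = (a << 1) ^ ((a & 0x80) ? 0x1b : 0);
            b >>= 1;
        }
        return r;
    }

    /* Sketch of one table entry, big-endian packed as the patch notes:
     * AES_Td0[x] is the InvMixColumns column [0e, 09, 0d, 0b] applied
     * to InvSubBytes(x). */
    static uint32_t td0_entry(uint8_t x)
    {
        uint8_t s = AES_isbox[x];
        return (uint32_t)gf_mul(s, 0x0e) << 24 |
               (uint32_t)gf_mul(s, 0x09) << 16 |
               (uint32_t)gf_mul(s, 0x0d) << 8 |
               gf_mul(s, 0x0b);
    }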