@@ -56,12 +56,15 @@
GFP_KERNEL : GFP_ATOMIC)
/* Custom on-stack requests (for invalidation) */
-#define EIP197_SKCIPHER_REQ_SIZE sizeof(struct skcipher_request) + \
- sizeof(struct safexcel_cipher_req)
-#define EIP197_AHASH_REQ_SIZE sizeof(struct ahash_request) + \
- sizeof(struct safexcel_ahash_req)
-#define EIP197_AEAD_REQ_SIZE sizeof(struct aead_request) + \
- sizeof(struct safexcel_cipher_req)
+#define EIP197_SKCIPHER_REQ_SIZE (ALIGN(sizeof(struct skcipher_request), \
+ CRYPTO_MINALIGN) + \
+ sizeof(struct safexcel_cipher_req))
+#define EIP197_AHASH_REQ_SIZE (ALIGN(sizeof(struct ahash_request), \
+ CRYPTO_MINALIGN) + \
+ sizeof(struct safexcel_ahash_req))
+#define EIP197_AEAD_REQ_SIZE (ALIGN(sizeof(struct aead_request), \
+ CRYPTO_MINALIGN) + \
+ sizeof(struct safexcel_cipher_req))
#define EIP197_REQUEST_ON_STACK(name, type, size) \
char __##name##_desc[size] CRYPTO_MINALIGN_ATTR; \
struct type##_request *name = (void *)__##name##_desc
@@ -1108,7 +1108,6 @@ static int safexcel_cipher_exit_inv(struct crypto_tfm *tfm,
static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, skcipher, EIP197_SKCIPHER_REQ_SIZE);
- struct safexcel_cipher_req *sreq = skcipher_request_ctx(req);
struct safexcel_inv_result result = {};
memset(req, 0, sizeof(struct skcipher_request));
@@ -1117,13 +1116,13 @@ static int safexcel_skcipher_exit_inv(struct crypto_tfm *tfm)
safexcel_inv_complete, &result);
skcipher_request_set_tfm(req, __crypto_skcipher_cast(tfm));
- return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+ return safexcel_cipher_exit_inv(tfm, &req->base,
+ skcipher_request_ctx(req), &result);
}
static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
{
EIP197_REQUEST_ON_STACK(req, aead, EIP197_AEAD_REQ_SIZE);
- struct safexcel_cipher_req *sreq = aead_request_ctx(req);
struct safexcel_inv_result result = {};
memset(req, 0, sizeof(struct aead_request));
@@ -1132,7 +1131,8 @@ static int safexcel_aead_exit_inv(struct crypto_tfm *tfm)
safexcel_inv_complete, &result);
aead_request_set_tfm(req, __crypto_aead_cast(tfm));
- return safexcel_cipher_exit_inv(tfm, &req->base, sreq, &result);
+ return safexcel_cipher_exit_inv(tfm, &req->base, aead_request_ctx(req),
+ &result);
}
static int safexcel_queue_req(struct crypto_async_request *base,
The skcipher, aead and ahash request structure types will no longer be
aligned for DMA, and the padding and re-alignment of the context buffer
region will be taken care of at runtime. This means that we need to
update the stack representation accordingly, to ensure that the context
pointer doesn't point past the allocation after rounding.

Also, as getting at the context pointer of a skcipher_request will
involve a check of the underlying algo's cra_flags field, as it may need
to be aligned for DMA, defer grabbing the context pointer until after
setting the TFM.

Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
---
 drivers/crypto/inside-secure/safexcel.h        | 15 +++++++++------
 drivers/crypto/inside-secure/safexcel_cipher.c |  8 ++++----
 2 files changed, 13 insertions(+), 10 deletions(-)
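
To make the sizing change concrete, here is a small stand-alone sketch (not part of
the patch; MINALIGN, struct fake_request and struct fake_ctx are made-up stand-ins
for CRYPTO_MINALIGN and the real request/context types) showing why the request head
must be rounded up in the on-stack size macros: once the context is placed at a
runtime-aligned offset, the old unpadded size can leave it hanging past the end of
the stack buffer.

```c
/*
 * Illustrative only -- not from the patch. Demonstrates, with made-up
 * sizes, why EIP197_*_REQ_SIZE must reserve ALIGN(sizeof(request),
 * CRYPTO_MINALIGN) bytes for the request head when the context is
 * placed at a runtime-aligned offset.
 */
#include <stdio.h>
#include <stddef.h>

#define MINALIGN	64	/* stand-in for CRYPTO_MINALIGN */
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

struct fake_request { char base[72]; };	/* head size not a multiple of 64 */
struct fake_ctx     { char priv[48]; };	/* driver-private context */

int main(void)
{
	/* unpadded sizing, as in the old EIP197_*_REQ_SIZE macros */
	size_t old_size = sizeof(struct fake_request) + sizeof(struct fake_ctx);
	/* padded sizing, as in the new macros */
	size_t new_size = ALIGN(sizeof(struct fake_request), MINALIGN) +
			  sizeof(struct fake_ctx);
	/* offset at which the context is placed once aligned at runtime */
	size_t ctx_off  = ALIGN(sizeof(struct fake_request), MINALIGN);

	printf("ctx ends at %zu, old buffer %zu, new buffer %zu\n",
	       ctx_off + sizeof(struct fake_ctx), old_size, new_size);
	return 0;
}
```

With these example numbers the context ends at byte 176, i.e. 56 bytes past the old
120-byte buffer but exactly at the end of the new 176-byte one, which is the overrun
the ALIGN() added to the EIP197_*_REQ_SIZE macros is meant to prevent.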