@@ -905,6 +905,7 @@ struct aead_edesc {
* @iv_dma: dma address of iv for checking continuity and link table
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @bklog: stored to determine if the request needs backlog
+ * @free: stored to determine if skcipher_edesc needs to be freed
* @sec4_sg_dma: bus physical mapped address of h/w link table
* @sec4_sg: pointer to h/w link table
* @hw_desc: the h/w job descriptor followed by any referenced link tables
@@ -918,6 +919,7 @@ struct skcipher_edesc {
dma_addr_t iv_dma;
int sec4_sg_bytes;
bool bklog;
+ bool free;
dma_addr_t sec4_sg_dma;
struct sec4_sg_entry *sec4_sg;
u32 hw_desc[];
@@ -1037,7 +1039,8 @@ static void skcipher_crypt_done(struct device *jrdev, u32 *desc, u32 err,
DUMP_PREFIX_ADDRESS, 16, 4, req->dst,
edesc->dst_nents > 1 ? 100 : req->cryptlen, 1);
- kfree(edesc);
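+ /*
+ * only a dynamically allocated edesc is freed here; an in-request
+ * edesc goes away together with the request itself
+ */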
+ if (edesc->free)
+ kfree(edesc);
/*
* If no backlog flag, the completion of the request is done
@@ -1604,7 +1607,7 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
dma_addr_t iv_dma = 0;
u8 *iv;
int ivsize = crypto_skcipher_ivsize(skcipher);
- int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes;
+ int dst_sg_idx, sec4_sg_ents, sec4_sg_bytes, edesc_size = 0;
src_nents = sg_nents_for_len(req->src, req->cryptlen);
if (unlikely(src_nents < 0)) {
@@ -1675,16 +1678,30 @@ static struct skcipher_edesc *skcipher_edesc_alloc(struct skcipher_request *req,
sec4_sg_bytes = sec4_sg_ents * sizeof(struct sec4_sg_entry);
- /*
- * allocate space for base edesc and hw desc commands, link tables, IV
- */
- edesc = kzalloc(sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize,
- GFP_DMA | flags);
- if (!edesc) {
- dev_err(jrdev, "could not allocate extended descriptor\n");
- caam_unmap(jrdev, req->src, req->dst, src_nents, dst_nents, 0,
- 0, 0, 0);
- return ERR_PTR(-ENOMEM);
+ /* Check whether the edesc fits in the space reserved in the req ctx */
+ edesc_size = sizeof(*edesc) + desc_bytes + sec4_sg_bytes + ivsize;
+ if (edesc_size > (crypto_skcipher_reqsize(skcipher) -
+ sizeof(struct caam_skcipher_req_ctx))) {
+ /*
+ * allocate space for base edesc and hw desc commands,
+ * link tables, IV
+ */
+ edesc = kzalloc(edesc_size, GFP_DMA | flags);
+ if (!edesc) {
+ caam_unmap(jrdev, req->src, req->dst, src_nents,
+ dst_nents, 0, 0, 0, 0);
+ return ERR_PTR(-ENOMEM);
+ }
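+ /* mark the edesc for kfree on the completion path */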
+ edesc->free = true;
+ } else {
+ /*
+ * get address for base edesc and hw desc commands,
+ * link tables, IV
+ */
+ edesc = (struct skcipher_edesc *)((u8 *)rctx +
+ sizeof(struct caam_skcipher_req_ctx));
+ /* zero out the fixed part of the edesc */
+ memset(edesc, 0, sizeof(*edesc));
}
edesc->src_nents = src_nents;
@@ -1764,11 +1781,11 @@ static int skcipher_do_one_req(struct crypto_engine *engine, void *areq)
if (ret != -EINPROGRESS) {
skcipher_unmap(ctx->jrdev, rctx->edesc, req);
- kfree(rctx->edesc);
+ if (rctx->edesc->free)
+ kfree(rctx->edesc);
} else {
ret = 0;
}
-
return ret;
}
@@ -3393,10 +3410,25 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
container_of(alg, typeof(*caam_alg), skcipher);
struct caam_ctx *ctx = crypto_skcipher_ctx(tfm);
u32 alg_aai = caam_alg->caam.class1_alg_type & OP_ALG_AAI_MASK;
- int ret = 0;
+ int ret = 0, extra_reqsize = 0;
ctx->enginectx.op.do_one_request = skcipher_do_one_req;
+ /*
+ * Compute the worst-case extra space needed for the base edesc,
+ * hw desc commands, link tables and IV
+ */
+ extra_reqsize = sizeof(struct skcipher_edesc) +
+ DESC_JOB_IO_LEN * CAAM_CMD_SZ + /* hw desc commands */
+ /*
+ * link tables for src and dst: up to 4 entries each,
+ * plus 1 for the IV; each table padded to 8 entries,
+ * i.e. 16 entries total
+ */
+ (16 * sizeof(struct sec4_sg_entry)) +
+ AES_BLOCK_SIZE; /* ivsize */
+
+ /* the request now embeds the edesc, so it must come from DMA-able memory */
+ crypto_skcipher_set_flags(tfm, CRYPTO_TFM_REQ_DMA);
+
if (alg_aai == OP_ALG_AAI_XTS) {
const char *tfm_name = crypto_tfm_alg_name(&tfm->base);
struct crypto_skcipher *fallback;
@@ -3411,9 +3443,11 @@ static int caam_cra_init(struct crypto_skcipher *tfm)
ctx->fallback = fallback;
crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
- crypto_skcipher_reqsize(fallback));
+ crypto_skcipher_reqsize(fallback) +
+ extra_reqsize);
} else {
- crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx));
+ crypto_skcipher_set_reqsize(tfm, sizeof(struct caam_skcipher_req_ctx) +
+ extra_reqsize);
}
ret = caam_init_common(ctx, &caam_alg->caam, false);
@@ -3486,8 +3520,7 @@ static void caam_skcipher_alg_init(struct caam_skcipher_alg *t_alg)
alg->base.cra_module = THIS_MODULE;
alg->base.cra_priority = CAAM_CRA_PRIORITY;
alg->base.cra_ctxsize = sizeof(struct caam_ctx);
- alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY |
- CRYPTO_ALG_KERN_DRIVER_ONLY);
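+ /*
+ * the edesc now fits inside the request for src/dst of up to 4 S/G
+ * entries each, so CRYPTO_ALG_ALLOCATES_MEMORY can be dropped
+ */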
+ alg->base.cra_flags |= (CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY);
alg->init = caam_cra_init;
alg->exit = caam_cra_exit;
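
For reference, the allocation strategy above reduced to a minimal standalone
sketch in plain C. The names here (edesc_get, edesc_put, the 256-byte
reserve) are illustrative assumptions, not the driver's actual API: the idea
is that worst-case descriptor space is reserved inside each request up front,
the heap is used only when a particular request needs more, and a flag
records which path was taken so the completion side knows whether to free.

#include <stdbool.h>
#include <stdlib.h>
#include <string.h>

/* hypothetical stand-ins for skcipher_edesc and caam_skcipher_req_ctx */
struct edesc {
	bool free;              /* true if heap-allocated */
	unsigned char data[];   /* hw descriptor, link tables, IV */
};

struct req_ctx {
	struct edesc *edesc;
	unsigned char reserved[256]; /* worst case, like extra_reqsize */
};

static struct edesc *edesc_get(struct req_ctx *rctx, size_t need)
{
	struct edesc *e;

	if (need > sizeof(rctx->reserved)) {
		/* does not fit: fall back to the heap */
		e = calloc(1, need);
		if (!e)
			return NULL;
		e->free = true;
	} else {
		/* fits: carve it out of the preallocated request area */
		e = (struct edesc *)rctx->reserved;
		memset(e, 0, sizeof(*e));
	}
	rctx->edesc = e;
	return e;
}

static void edesc_put(struct edesc *e)
{
	if (e->free) /* in-request edescs vanish with the request */
		free(e);
}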