@@ -527,6 +527,7 @@ static int acmac_setkey(struct crypto_ahash *ahash, const u8 *key,
* @src_nents: number of segments in input scatterlist
* @sec4_sg_bytes: length of dma mapped sec4_sg space
* @bklog: stored to determine if the request needs backlog
+ * @free: stored to determine if ahash_edesc needs to be freed
* @hw_desc: the h/w job descriptor followed by any referenced link tables
* @sec4_sg: h/w link table
*/
@@ -535,6 +536,7 @@ struct ahash_edesc {
int src_nents;
int sec4_sg_bytes;
bool bklog;
+ bool free;
u32 hw_desc[DESC_JOB_IO_LEN_MAX / sizeof(u32)] ____cacheline_aligned;
struct sec4_sg_entry sec4_sg[];
};
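
The new free flag is consulted on every completion and error path changed below. As a sketch only (not part of this patch), the repeated "if (edesc->free) kfree(edesc);" pairs could be folded into one helper; ahash_edesc_free is a hypothetical name and relies on the includes already present in this file:

static inline void ahash_edesc_free(struct ahash_edesc *edesc)
{
	/* only descriptors obtained from kzalloc() carry free == true */
	if (edesc->free)
		kfree(edesc);
}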
@@ -595,7 +597,8 @@ static inline void ahash_done_cpy(struct device *jrdev, u32 *desc, u32 err,
ahash_unmap_ctx(jrdev, edesc, req, digestsize, dir);
memcpy(req->result, state->caam_ctx, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
print_hex_dump_debug("ctx@"__stringify(__LINE__)": ",
DUMP_PREFIX_ADDRESS, 16, 4, state->caam_ctx,
@@ -644,7 +647,8 @@ static inline void ahash_done_switch(struct device *jrdev, u32 *desc, u32 err,
ecode = caam_jr_strstatus(jrdev, err);
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, dir);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
scatterwalk_map_and_copy(state->buf, req->src,
req->nbytes - state->next_buflen,
@@ -701,11 +705,25 @@ static struct ahash_edesc *ahash_edesc_alloc(struct ahash_request *req,
GFP_KERNEL : GFP_ATOMIC;
struct ahash_edesc *edesc;
unsigned int sg_size = sg_num * sizeof(struct sec4_sg_entry);
-
- edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
- if (!edesc) {
- dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
- return NULL;
+ int edesc_size;
+
+ /* Check if there's enough space reserved in the request to hold the edesc */
+ edesc_size = sizeof(*edesc) + sg_size;
+ if (edesc_size > (crypto_ahash_reqsize(ahash) -
+ sizeof(struct caam_hash_state))) {
+ /* allocate space for base edesc and link tables */
+ edesc = kzalloc(sizeof(*edesc) + sg_size, GFP_DMA | flags);
+ if (!edesc) {
+ dev_err(ctx->jrdev, "could not allocate extended descriptor\n");
+ return NULL;
+ }
+ edesc->free = true;
+ } else {
+ /* get address for base edesc and link tables */
+ edesc = (struct ahash_edesc *)((u8 *)state +
+ sizeof(struct caam_hash_state));
+ /* clear only the base edesc */
+ memset(edesc, 0, sizeof(*edesc));
}
state->edesc = edesc;
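
For reference, the size check above compares the descriptor against the headroom that caam_hash_cra_init() reserves behind struct caam_hash_state. A minimal sketch of that comparison as a predicate (not part of the patch; the helper name is invented):

static inline bool ahash_edesc_fits_in_req(struct ahash_request *req,
					   int sg_num)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	int edesc_size = sizeof(struct ahash_edesc) +
			 sg_num * sizeof(struct sec4_sg_entry);

	/* true when the edesc can live in the preallocated request area */
	return edesc_size <= crypto_ahash_reqsize(ahash) -
			     sizeof(struct caam_hash_state);
}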
@@ -767,7 +785,8 @@ static int ahash_do_one_req(struct crypto_engine *engine, void *areq)
if (ret != -EINPROGRESS) {
ahash_unmap(jrdev, state->edesc, req, 0);
- kfree(state->edesc);
+ if (state->edesc->free)
+ kfree(state->edesc);
} else {
ret = 0;
}
@@ -802,7 +821,8 @@ static int ahash_enqueue_req(struct device *jrdev,
if ((ret != -EINPROGRESS) && (ret != -EBUSY)) {
ahash_unmap_ctx(jrdev, edesc, req, dst_len, dir);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
}
return ret;
@@ -930,7 +950,8 @@ static int ahash_update_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -991,7 +1012,8 @@ static int ahash_final_ctx(struct ahash_request *req)
digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1065,7 +1087,8 @@ static int ahash_finup_ctx(struct ahash_request *req)
digestsize, DMA_BIDIRECTIONAL);
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, digestsize, DMA_BIDIRECTIONAL);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1114,7 +1137,8 @@ static int ahash_digest(struct ahash_request *req)
req->nbytes);
if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1123,7 +1147,8 @@ static int ahash_digest(struct ahash_request *req)
ret = map_seq_out_ptr_ctx(desc, jrdev, state, digestsize);
if (ret) {
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1180,7 +1205,8 @@ static int ahash_final_no_ctx(struct ahash_request *req)
digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1301,7 +1327,8 @@ static int ahash_update_no_ctx(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1376,7 +1403,8 @@ static int ahash_finup_no_ctx(struct ahash_request *req)
digestsize, DMA_FROM_DEVICE);
unmap:
ahash_unmap(jrdev, edesc, req, digestsize);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return -ENOMEM;
}
@@ -1484,7 +1512,8 @@ static int ahash_update_first(struct ahash_request *req)
return ret;
unmap_ctx:
ahash_unmap_ctx(jrdev, edesc, req, ctx->ctx_len, DMA_TO_DEVICE);
- kfree(edesc);
+ if (edesc->free)
+ kfree(edesc);
return ret;
}
@@ -1771,6 +1800,7 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
sh_desc_update);
dma_addr_t dma_addr;
struct caam_drv_private *priv;
+ int extra_reqsize = 0;
/*
* Get a Job ring from Job Ring driver to ensure in-order
@@ -1851,8 +1881,15 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
ctx->enginectx.op.do_one_request = ahash_do_one_req;
+ /* Compute extra space needed for base edesc and link tables */
+ extra_reqsize = sizeof(struct ahash_edesc) +
+ /* link tables for src:
+ * max 4 entries, plus max 2 for the remaining buffer, rounded up to 8
+ */
+ (8 * sizeof(struct sec4_sg_entry));
+
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
- sizeof(struct caam_hash_state));
+ sizeof(struct caam_hash_state) + extra_reqsize);
/*
* For keyed hash algorithms shared descriptors
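
The extra request size reserved here is exactly the worst case that ahash_edesc_alloc() tests for: one base ahash_edesc plus an eight-entry link table. A short sketch of how the two areas share the per-request context (assuming the usual ahash_request_ctx() accessor; not part of the patch):

	struct caam_hash_state *state = ahash_request_ctx(req);
	/* same address the patch computes with
	 * (u8 *)state + sizeof(struct caam_hash_state)
	 */
	struct ahash_edesc *edesc = (struct ahash_edesc *)(state + 1);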
@@ -1927,7 +1964,7 @@ caam_hash_alloc(struct caam_hash_template *template,
alg->cra_priority = CAAM_CRA_PRIORITY;
alg->cra_blocksize = template->blocksize;
alg->cra_alignmask = 0;
- alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_ALLOCATES_MEMORY;
+ alg->cra_flags = CRYPTO_ALG_ASYNC;
t_alg->alg_type = template->alg_type;
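
Removing CRYPTO_ALG_ALLOCATES_MEMORY makes these algorithms visible to users that request allocation-free implementations by passing the flag in the mask argument. An illustrative caller (hypothetical function name, "sha256" chosen arbitrarily):

static struct crypto_ahash *alloc_nonalloc_sha256(void)
{
	/* passing the flag in the mask selects only algorithms that
	 * do not advertise per-request memory allocation
	 */
	return crypto_alloc_ahash("sha256", 0, CRYPTO_ALG_ALLOCATES_MEMORY);
}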