@@ -568,11 +568,7 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
{
struct aead_req_ctx *areq_ctx = aead_request_ctx(req);
unsigned int hw_iv_size = areq_ctx->hw_iv_size;
- struct crypto_aead *tfm = crypto_aead_reqtfm(req);
struct cc_drvdata *drvdata = dev_get_drvdata(dev);
- u32 dummy;
- bool chained;
- u32 size_to_unmap = 0;

 if (areq_ctx->mac_buf_dma_addr) {
dma_unmap_single(dev, areq_ctx->mac_buf_dma_addr,
@@ -629,22 +625,12 @@ void cc_unmap_aead_request(struct device *dev, struct aead_request *req)
dev_dbg(dev, "Unmapping src sgl: req->src=%pK areq_ctx->src.nents=%u areq_ctx->assoc.nents=%u assoclen:%u cryptlen=%u\n",
sg_virt(req->src), areq_ctx->src.nents, areq_ctx->assoc.nents,
req->assoclen, req->cryptlen);
- size_to_unmap = req->assoclen + req->cryptlen;
- if (areq_ctx->gen_ctx.op_type == DRV_CRYPTO_DIRECTION_ENCRYPT)
- size_to_unmap += areq_ctx->req_authsize;
- if (areq_ctx->is_gcm4543)
- size_to_unmap += crypto_aead_ivsize(tfm);
- dma_unmap_sg(dev, req->src,
- cc_get_sgl_nents(dev, req->src, size_to_unmap,
- &dummy, &chained),
- DMA_BIDIRECTIONAL);
+ dma_unmap_sg(dev, req->src, sg_nents(req->src), DMA_BIDIRECTIONAL);
if (req->src != req->dst) {
dev_dbg(dev, "Unmapping dst sgl: req->dst=%pK\n",
sg_virt(req->dst));
- dma_unmap_sg(dev, req->dst,
- cc_get_sgl_nents(dev, req->dst, size_to_unmap,
- &dummy, &chained),
+ dma_unmap_sg(dev, req->dst, sg_nents(req->dst),
DMA_BIDIRECTIONAL);
}
if (drvdata->coherent &&
We were trying to be clever, zapping out of the cache only the
required length of the scatter list on AEAD request completion, and
getting it wrong. As Knuth said: "when in doubt, use brute force".
Zap the whole length of the scatter list.

Signed-off-by: Gilad Ben-Yossef <gilad@benyossef.com>
---
 drivers/crypto/ccree/cc_buffer_mgr.c | 18 ++----------------
 1 file changed, 2 insertions(+), 16 deletions(-)

-- 
2.21.0
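
For readers less familiar with the scatterlist API, the pattern the patch
switches to can be illustrated with the minimal sketch below. This is not
code from the driver; the helper name example_unmap_whole_sgl is made up
for illustration. The point is that sg_nents() simply walks the list to
its last entry, so the unmap no longer depends on a separately computed
size_to_unmap value.

/*
 * Minimal sketch (not the driver's actual code): unmap an entire
 * scatterlist regardless of how many of its bytes were really used.
 */
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical helper, named for illustration only. */
static void example_unmap_whole_sgl(struct device *dev,
				    struct scatterlist *sgl)
{
	/*
	 * sg_nents() walks the list up to its terminating entry and
	 * returns the total entry count, so no byte count has to be
	 * carried around just for the unmap. This mirrors the
	 * dma_unmap_sg(..., sg_nents(...), ...) calls in the patch.
	 */
	dma_unmap_sg(dev, sgl, sg_nents(sgl), DMA_BIDIRECTIONAL);
}

The trade-off is touching more of the list than strictly necessary on
completion, in exchange for not having to reproduce the request-size
arithmetic correctly in the unmap path.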