From patchwork Mon Dec 7 07:58:42 2020
X-Patchwork-Submitter: Vic Wu
X-Patchwork-Id: 339223
From: Vic Wu
To: Herbert Xu
Cc: David S. Miller, Ryder Lee, linux-crypto@vger.kernel.org, Vic Wu
Subject: [PATCH] crypto: mediatek - remove obsolete driver
Date: Mon, 7 Dec 2020 15:58:42 +0800
Message-ID: <20201207075842.30090-1-vic.wu@mediatek.com>
X-Mailer: git-send-email 2.18.0
X-Mailing-List: linux-crypto@vger.kernel.org

The MediaTek crypto driver has been replaced by the inside-secure driver. Remove it to avoid having duplicate drivers.
Signed-off-by: Vic Wu Acked-by: Ryder Lee --- drivers/crypto/Kconfig | 15 - drivers/crypto/Makefile | 1 - drivers/crypto/mediatek/Makefile | 3 - drivers/crypto/mediatek/mtk-aes.c | 1271 ---------------------- drivers/crypto/mediatek/mtk-platform.c | 586 ---------- drivers/crypto/mediatek/mtk-platform.h | 231 ---- drivers/crypto/mediatek/mtk-regs.h | 190 ---- drivers/crypto/mediatek/mtk-sha.c | 1352 ------------------------ 8 files changed, 3649 deletions(-) delete mode 100644 drivers/crypto/mediatek/Makefile delete mode 100644 drivers/crypto/mediatek/mtk-aes.c delete mode 100644 drivers/crypto/mediatek/mtk-platform.c delete mode 100644 drivers/crypto/mediatek/mtk-platform.h delete mode 100644 drivers/crypto/mediatek/mtk-regs.h delete mode 100644 drivers/crypto/mediatek/mtk-sha.c diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig index 37da0c070a88..ef3ce3ab6b47 100644 --- a/drivers/crypto/Kconfig +++ b/drivers/crypto/Kconfig @@ -771,21 +771,6 @@ config CRYPTO_DEV_ZYNQMP_AES accelerator. Select this if you want to use the ZynqMP module for AES algorithms. -config CRYPTO_DEV_MEDIATEK - tristate "MediaTek's EIP97 Cryptographic Engine driver" - depends on (ARM && ARCH_MEDIATEK) || COMPILE_TEST - select CRYPTO_LIB_AES - select CRYPTO_AEAD - select CRYPTO_SKCIPHER - select CRYPTO_SHA1 - select CRYPTO_SHA256 - select CRYPTO_SHA512 - select CRYPTO_HMAC - help - This driver allows you to utilize the hardware crypto accelerator - EIP97 which can be found on the MT7623 MT2701, MT8521p, etc .... - Select this if you want to use it for AES/SHA1/SHA2 algorithms. - source "drivers/crypto/chelsio/Kconfig" source "drivers/crypto/virtio/Kconfig" diff --git a/drivers/crypto/Makefile b/drivers/crypto/Makefile index 53fc115cf459..a3806d8ec9be 100644 --- a/drivers/crypto/Makefile +++ b/drivers/crypto/Makefile @@ -19,7 +19,6 @@ obj-$(CONFIG_CRYPTO_DEV_HIFN_795X) += hifn_795x.o obj-$(CONFIG_CRYPTO_DEV_IMGTEC_HASH) += img-hash.o obj-$(CONFIG_CRYPTO_DEV_IXP4XX) += ixp4xx_crypto.o obj-$(CONFIG_CRYPTO_DEV_MARVELL) += marvell/ -obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mediatek/ obj-$(CONFIG_CRYPTO_DEV_MXS_DCP) += mxs-dcp.o obj-$(CONFIG_CRYPTO_DEV_NIAGARA2) += n2_crypto.o n2_crypto-y := n2_core.o n2_asm.o diff --git a/drivers/crypto/mediatek/Makefile b/drivers/crypto/mediatek/Makefile deleted file mode 100644 index 196a4653974e..000000000000 --- a/drivers/crypto/mediatek/Makefile +++ /dev/null @@ -1,3 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only -obj-$(CONFIG_CRYPTO_DEV_MEDIATEK) += mtk-crypto.o -mtk-crypto-objs:= mtk-platform.o mtk-aes.o mtk-sha.o diff --git a/drivers/crypto/mediatek/mtk-aes.c b/drivers/crypto/mediatek/mtk-aes.c deleted file mode 100644 index 7323066724c3..000000000000 --- a/drivers/crypto/mediatek/mtk-aes.c +++ /dev/null @@ -1,1271 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Cryptographic API. - * - * Driver for EIP97 AES acceleration. - * - * Copyright (c) 2016 Ryder Lee - * - * Some ideas are from atmel-aes.c drivers. 
- */ - -#include -#include -#include -#include "mtk-platform.h" - -#define AES_QUEUE_SIZE 512 -#define AES_BUF_ORDER 2 -#define AES_BUF_SIZE ((PAGE_SIZE << AES_BUF_ORDER) \ - & ~(AES_BLOCK_SIZE - 1)) -#define AES_MAX_STATE_BUF_SIZE SIZE_IN_WORDS(AES_KEYSIZE_256 + \ - AES_BLOCK_SIZE * 2) -#define AES_MAX_CT_SIZE 6 - -#define AES_CT_CTRL_HDR cpu_to_le32(0x00220000) - -/* AES-CBC/ECB/CTR/OFB/CFB command token */ -#define AES_CMD0 cpu_to_le32(0x05000000) -#define AES_CMD1 cpu_to_le32(0x2d060000) -#define AES_CMD2 cpu_to_le32(0xe4a63806) -/* AES-GCM command token */ -#define AES_GCM_CMD0 cpu_to_le32(0x0b000000) -#define AES_GCM_CMD1 cpu_to_le32(0xa0800000) -#define AES_GCM_CMD2 cpu_to_le32(0x25000010) -#define AES_GCM_CMD3 cpu_to_le32(0x0f020000) -#define AES_GCM_CMD4 cpu_to_le32(0x21e60000) -#define AES_GCM_CMD5 cpu_to_le32(0x40e60000) -#define AES_GCM_CMD6 cpu_to_le32(0xd0070000) - -/* AES transform information word 0 fields */ -#define AES_TFM_BASIC_OUT cpu_to_le32(0x4 << 0) -#define AES_TFM_BASIC_IN cpu_to_le32(0x5 << 0) -#define AES_TFM_GCM_OUT cpu_to_le32(0x6 << 0) -#define AES_TFM_GCM_IN cpu_to_le32(0xf << 0) -#define AES_TFM_SIZE(x) cpu_to_le32((x) << 8) -#define AES_TFM_128BITS cpu_to_le32(0xb << 16) -#define AES_TFM_192BITS cpu_to_le32(0xd << 16) -#define AES_TFM_256BITS cpu_to_le32(0xf << 16) -#define AES_TFM_GHASH_DIGEST cpu_to_le32(0x2 << 21) -#define AES_TFM_GHASH cpu_to_le32(0x4 << 23) -/* AES transform information word 1 fields */ -#define AES_TFM_ECB cpu_to_le32(0x0 << 0) -#define AES_TFM_CBC cpu_to_le32(0x1 << 0) -#define AES_TFM_OFB cpu_to_le32(0x4 << 0) -#define AES_TFM_CFB128 cpu_to_le32(0x5 << 0) -#define AES_TFM_CTR_INIT cpu_to_le32(0x2 << 0) /* init counter to 1 */ -#define AES_TFM_CTR_LOAD cpu_to_le32(0x6 << 0) /* load/reuse counter */ -#define AES_TFM_3IV cpu_to_le32(0x7 << 5) /* using IV 0-2 */ -#define AES_TFM_FULL_IV cpu_to_le32(0xf << 5) /* using IV 0-3 */ -#define AES_TFM_IV_CTR_MODE cpu_to_le32(0x1 << 10) -#define AES_TFM_ENC_HASH cpu_to_le32(0x1 << 17) - -/* AES flags */ -#define AES_FLAGS_CIPHER_MSK GENMASK(4, 0) -#define AES_FLAGS_ECB BIT(0) -#define AES_FLAGS_CBC BIT(1) -#define AES_FLAGS_CTR BIT(2) -#define AES_FLAGS_OFB BIT(3) -#define AES_FLAGS_CFB128 BIT(4) -#define AES_FLAGS_GCM BIT(5) -#define AES_FLAGS_ENCRYPT BIT(6) -#define AES_FLAGS_BUSY BIT(7) - -#define AES_AUTH_TAG_ERR cpu_to_le32(BIT(26)) - -/** - * mtk_aes_info - hardware information of AES - * @cmd: command token, hardware instruction - * @tfm: transform state of cipher algorithm. - * @state: contains keys and initial vectors. - * - * Memory layout of GCM buffer: - * /-----------\ - * | AES KEY | 128/196/256 bits - * |-----------| - * | HASH KEY | a string 128 zero bits encrypted using the block cipher - * |-----------| - * | IVs | 4 * 4 bytes - * \-----------/ - * - * The engine requires all these info to do: - * - Commands decoding and control of the engine's data path. - * - Coordinating hardware data fetch and store operations. - * - Result token construction and output. 
- */ -struct mtk_aes_info { - __le32 cmd[AES_MAX_CT_SIZE]; - __le32 tfm[2]; - __le32 state[AES_MAX_STATE_BUF_SIZE]; -}; - -struct mtk_aes_reqctx { - u64 mode; -}; - -struct mtk_aes_base_ctx { - struct mtk_cryp *cryp; - u32 keylen; - __le32 key[12]; - __le32 keymode; - - mtk_aes_fn start; - - struct mtk_aes_info info; - dma_addr_t ct_dma; - dma_addr_t tfm_dma; - - __le32 ct_hdr; - u32 ct_size; -}; - -struct mtk_aes_ctx { - struct mtk_aes_base_ctx base; -}; - -struct mtk_aes_ctr_ctx { - struct mtk_aes_base_ctx base; - - __be32 iv[AES_BLOCK_SIZE / sizeof(u32)]; - size_t offset; - struct scatterlist src[2]; - struct scatterlist dst[2]; -}; - -struct mtk_aes_gcm_ctx { - struct mtk_aes_base_ctx base; - - u32 authsize; - size_t textlen; -}; - -struct mtk_aes_drv { - struct list_head dev_list; - /* Device list lock */ - spinlock_t lock; -}; - -static struct mtk_aes_drv mtk_aes = { - .dev_list = LIST_HEAD_INIT(mtk_aes.dev_list), - .lock = __SPIN_LOCK_UNLOCKED(mtk_aes.lock), -}; - -static inline u32 mtk_aes_read(struct mtk_cryp *cryp, u32 offset) -{ - return readl_relaxed(cryp->base + offset); -} - -static inline void mtk_aes_write(struct mtk_cryp *cryp, - u32 offset, u32 value) -{ - writel_relaxed(value, cryp->base + offset); -} - -static struct mtk_cryp *mtk_aes_find_dev(struct mtk_aes_base_ctx *ctx) -{ - struct mtk_cryp *cryp = NULL; - struct mtk_cryp *tmp; - - spin_lock_bh(&mtk_aes.lock); - if (!ctx->cryp) { - list_for_each_entry(tmp, &mtk_aes.dev_list, aes_list) { - cryp = tmp; - break; - } - ctx->cryp = cryp; - } else { - cryp = ctx->cryp; - } - spin_unlock_bh(&mtk_aes.lock); - - return cryp; -} - -static inline size_t mtk_aes_padlen(size_t len) -{ - len &= AES_BLOCK_SIZE - 1; - return len ? AES_BLOCK_SIZE - len : 0; -} - -static bool mtk_aes_check_aligned(struct scatterlist *sg, size_t len, - struct mtk_aes_dma *dma) -{ - int nents; - - if (!IS_ALIGNED(len, AES_BLOCK_SIZE)) - return false; - - for (nents = 0; sg; sg = sg_next(sg), ++nents) { - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return false; - - if (len <= sg->length) { - if (!IS_ALIGNED(len, AES_BLOCK_SIZE)) - return false; - - dma->nents = nents + 1; - dma->remainder = sg->length - len; - sg->length = len; - return true; - } - - if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE)) - return false; - - len -= sg->length; - } - - return false; -} - -static inline void mtk_aes_set_mode(struct mtk_aes_rec *aes, - const struct mtk_aes_reqctx *rctx) -{ - /* Clear all but persistent flags and set request flags. */ - aes->flags = (aes->flags & AES_FLAGS_BUSY) | rctx->mode; -} - -static inline void mtk_aes_restore_sg(const struct mtk_aes_dma *dma) -{ - struct scatterlist *sg = dma->sg; - int nents = dma->nents; - - if (!dma->remainder) - return; - - while (--nents > 0 && sg) - sg = sg_next(sg); - - if (!sg) - return; - - sg->length += dma->remainder; -} - -static inline int mtk_aes_complete(struct mtk_cryp *cryp, - struct mtk_aes_rec *aes, - int err) -{ - aes->flags &= ~AES_FLAGS_BUSY; - aes->areq->complete(aes->areq, err); - /* Handle new request */ - tasklet_schedule(&aes->queue_task); - return err; -} - -/* - * Write descriptors for processing. This will configure the engine, load - * the transform information and then start the packet processing. 
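An aside on one helper above: mtk_aes_padlen() computes how many bytes are needed to round a request up to the 16-byte AES block size. A minimal standalone sketch of the same arithmetic (the main() harness and names are mine, not the driver's):

#include <stddef.h>
#include <stdio.h>

#define AES_BLOCK_SIZE	16

/* Same logic as mtk_aes_padlen(): bytes to the next block boundary. */
static size_t padlen(size_t len)
{
	len &= AES_BLOCK_SIZE - 1;
	return len ? AES_BLOCK_SIZE - len : 0;
}

int main(void)
{
	/* Prints: 0 15 0 15 1 */
	printf("%zu %zu %zu %zu %zu\n", padlen(0), padlen(1),
	       padlen(16), padlen(17), padlen(31));
	return 0;
}

This padding is what lets mtk_aes_dma() bounce an unaligned request through a single block-aligned buffer of len + padlen(len) bytes.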
- */ -static int mtk_aes_xmit(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_ring *ring = cryp->ring[aes->id]; - struct mtk_desc *cmd = NULL, *res = NULL; - struct scatterlist *ssg = aes->src.sg, *dsg = aes->dst.sg; - u32 slen = aes->src.sg_len, dlen = aes->dst.sg_len; - int nents; - - /* Write command descriptors */ - for (nents = 0; nents < slen; ++nents, ssg = sg_next(ssg)) { - cmd = ring->cmd_next; - cmd->hdr = MTK_DESC_BUF_LEN(ssg->length); - cmd->buf = cpu_to_le32(sg_dma_address(ssg)); - - if (nents == 0) { - cmd->hdr |= MTK_DESC_FIRST | - MTK_DESC_CT_LEN(aes->ctx->ct_size); - cmd->ct = cpu_to_le32(aes->ctx->ct_dma); - cmd->ct_hdr = aes->ctx->ct_hdr; - cmd->tfm = cpu_to_le32(aes->ctx->tfm_dma); - } - - /* Shift ring buffer and check boundary */ - if (++ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) - ring->cmd_next = ring->cmd_base; - } - cmd->hdr |= MTK_DESC_LAST; - - /* Prepare result descriptors */ - for (nents = 0; nents < dlen; ++nents, dsg = sg_next(dsg)) { - res = ring->res_next; - res->hdr = MTK_DESC_BUF_LEN(dsg->length); - res->buf = cpu_to_le32(sg_dma_address(dsg)); - - if (nents == 0) - res->hdr |= MTK_DESC_FIRST; - - /* Shift ring buffer and check boundary */ - if (++ring->res_next == ring->res_base + MTK_DESC_NUM) - ring->res_next = ring->res_base; - } - res->hdr |= MTK_DESC_LAST; - - /* Pointer to current result descriptor */ - ring->res_prev = res; - - /* Prepare enough space for authenticated tag */ - if (aes->flags & AES_FLAGS_GCM) - le32_add_cpu(&res->hdr, AES_BLOCK_SIZE); - - /* - * Make sure that all changes to the DMA ring are done before we - * start engine. - */ - wmb(); - /* Start DMA transfer */ - mtk_aes_write(cryp, RDR_PREP_COUNT(aes->id), MTK_DESC_CNT(dlen)); - mtk_aes_write(cryp, CDR_PREP_COUNT(aes->id), MTK_DESC_CNT(slen)); - - return -EINPROGRESS; -} - -static void mtk_aes_unmap(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_aes_base_ctx *ctx = aes->ctx; - - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), - DMA_TO_DEVICE); - - if (aes->src.sg == aes->dst.sg) { - dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, - DMA_BIDIRECTIONAL); - - if (aes->src.sg != &aes->aligned_sg) - mtk_aes_restore_sg(&aes->src); - } else { - dma_unmap_sg(cryp->dev, aes->dst.sg, aes->dst.nents, - DMA_FROM_DEVICE); - - if (aes->dst.sg != &aes->aligned_sg) - mtk_aes_restore_sg(&aes->dst); - - dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, - DMA_TO_DEVICE); - - if (aes->src.sg != &aes->aligned_sg) - mtk_aes_restore_sg(&aes->src); - } - - if (aes->dst.sg == &aes->aligned_sg) - sg_copy_from_buffer(aes->real_dst, sg_nents(aes->real_dst), - aes->buf, aes->total); -} - -static int mtk_aes_map(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_aes_base_ctx *ctx = aes->ctx; - struct mtk_aes_info *info = &ctx->info; - - ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), - DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) - goto exit; - - ctx->tfm_dma = ctx->ct_dma + sizeof(info->cmd); - - if (aes->src.sg == aes->dst.sg) { - aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, - aes->src.nents, - DMA_BIDIRECTIONAL); - aes->dst.sg_len = aes->src.sg_len; - if (unlikely(!aes->src.sg_len)) - goto sg_map_err; - } else { - aes->src.sg_len = dma_map_sg(cryp->dev, aes->src.sg, - aes->src.nents, DMA_TO_DEVICE); - if (unlikely(!aes->src.sg_len)) - goto sg_map_err; - - aes->dst.sg_len = dma_map_sg(cryp->dev, aes->dst.sg, - aes->dst.nents, DMA_FROM_DEVICE); - if (unlikely(!aes->dst.sg_len)) { - 
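Stepping back to mtk_aes_xmit() above: the command and result rings are plain circular arrays, where a cursor hands out one descriptor at a time and wraps at the ring boundary. A standalone sketch of just that cursor logic (illustrative types and names, not the driver's):

#define DESC_NUM 512			/* ring capacity, as MTK_DESC_NUM */

struct desc { unsigned int hdr, buf; };	/* stand-in for struct mtk_desc */

struct ring {
	struct desc slots[DESC_NUM];
	struct desc *next;		/* cursor, as ring->cmd_next */
};

/* Hand out the current slot, then advance and wrap the cursor. */
static struct desc *ring_next(struct ring *r)
{
	struct desc *d = r->next;

	if (++r->next == &r->slots[DESC_NUM])
		r->next = &r->slots[0];
	return d;
}

The hardware is then told how many descriptors were prepared via the CDR/RDR_PREP_COUNT registers, which is what actually starts the DMA transfer.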
dma_unmap_sg(cryp->dev, aes->src.sg, aes->src.nents, - DMA_TO_DEVICE); - goto sg_map_err; - } - } - - return mtk_aes_xmit(cryp, aes); - -sg_map_err: - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(*info), DMA_TO_DEVICE); -exit: - return mtk_aes_complete(cryp, aes, -EINVAL); -} - -/* Initialize transform information of CBC/ECB/CTR/OFB/CFB mode */ -static void mtk_aes_info_init(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, - size_t len) -{ - struct skcipher_request *req = skcipher_request_cast(aes->areq); - struct mtk_aes_base_ctx *ctx = aes->ctx; - struct mtk_aes_info *info = &ctx->info; - u32 cnt = 0; - - ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); - info->cmd[cnt++] = AES_CMD0 | cpu_to_le32(len); - info->cmd[cnt++] = AES_CMD1; - - info->tfm[0] = AES_TFM_SIZE(ctx->keylen) | ctx->keymode; - if (aes->flags & AES_FLAGS_ENCRYPT) - info->tfm[0] |= AES_TFM_BASIC_OUT; - else - info->tfm[0] |= AES_TFM_BASIC_IN; - - switch (aes->flags & AES_FLAGS_CIPHER_MSK) { - case AES_FLAGS_CBC: - info->tfm[1] = AES_TFM_CBC; - break; - case AES_FLAGS_ECB: - info->tfm[1] = AES_TFM_ECB; - goto ecb; - case AES_FLAGS_CTR: - info->tfm[1] = AES_TFM_CTR_LOAD; - goto ctr; - case AES_FLAGS_OFB: - info->tfm[1] = AES_TFM_OFB; - break; - case AES_FLAGS_CFB128: - info->tfm[1] = AES_TFM_CFB128; - break; - default: - /* Should not happen... */ - return; - } - - memcpy(info->state + ctx->keylen, req->iv, AES_BLOCK_SIZE); -ctr: - le32_add_cpu(&info->tfm[0], - le32_to_cpu(AES_TFM_SIZE(SIZE_IN_WORDS(AES_BLOCK_SIZE)))); - info->tfm[1] |= AES_TFM_FULL_IV; - info->cmd[cnt++] = AES_CMD2; -ecb: - ctx->ct_size = cnt; -} - -static int mtk_aes_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, - struct scatterlist *src, struct scatterlist *dst, - size_t len) -{ - size_t padlen = 0; - bool src_aligned, dst_aligned; - - aes->total = len; - aes->src.sg = src; - aes->dst.sg = dst; - aes->real_dst = dst; - - src_aligned = mtk_aes_check_aligned(src, len, &aes->src); - if (src == dst) - dst_aligned = src_aligned; - else - dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); - - if (!src_aligned || !dst_aligned) { - padlen = mtk_aes_padlen(len); - - if (len + padlen > AES_BUF_SIZE) - return mtk_aes_complete(cryp, aes, -ENOMEM); - - if (!src_aligned) { - sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); - aes->src.sg = &aes->aligned_sg; - aes->src.nents = 1; - aes->src.remainder = 0; - } - - if (!dst_aligned) { - aes->dst.sg = &aes->aligned_sg; - aes->dst.nents = 1; - aes->dst.remainder = 0; - } - - sg_init_table(&aes->aligned_sg, 1); - sg_set_buf(&aes->aligned_sg, aes->buf, len + padlen); - } - - mtk_aes_info_init(cryp, aes, len + padlen); - - return mtk_aes_map(cryp, aes); -} - -static int mtk_aes_handle_queue(struct mtk_cryp *cryp, u8 id, - struct crypto_async_request *new_areq) -{ - struct mtk_aes_rec *aes = cryp->aes[id]; - struct crypto_async_request *areq, *backlog; - struct mtk_aes_base_ctx *ctx; - unsigned long flags; - int ret = 0; - - spin_lock_irqsave(&aes->lock, flags); - if (new_areq) - ret = crypto_enqueue_request(&aes->queue, new_areq); - if (aes->flags & AES_FLAGS_BUSY) { - spin_unlock_irqrestore(&aes->lock, flags); - return ret; - } - backlog = crypto_get_backlog(&aes->queue); - areq = crypto_dequeue_request(&aes->queue); - if (areq) - aes->flags |= AES_FLAGS_BUSY; - spin_unlock_irqrestore(&aes->lock, flags); - - if (!areq) - return ret; - - if (backlog) - backlog->complete(backlog, -EINPROGRESS); - - ctx = crypto_tfm_ctx(areq->tfm); - /* Write key into state buffer */ - memcpy(ctx->info.state, ctx->key, 
sizeof(ctx->key)); - - aes->areq = areq; - aes->ctx = ctx; - - return ctx->start(cryp, aes); -} - -static int mtk_aes_transfer_complete(struct mtk_cryp *cryp, - struct mtk_aes_rec *aes) -{ - return mtk_aes_complete(cryp, aes, 0); -} - -static int mtk_aes_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct skcipher_request *req = skcipher_request_cast(aes->areq); - struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req); - - mtk_aes_set_mode(aes, rctx); - aes->resume = mtk_aes_transfer_complete; - - return mtk_aes_dma(cryp, aes, req->src, req->dst, req->cryptlen); -} - -static inline struct mtk_aes_ctr_ctx * -mtk_aes_ctr_ctx_cast(struct mtk_aes_base_ctx *ctx) -{ - return container_of(ctx, struct mtk_aes_ctr_ctx, base); -} - -static int mtk_aes_ctr_transfer(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_aes_base_ctx *ctx = aes->ctx; - struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(ctx); - struct skcipher_request *req = skcipher_request_cast(aes->areq); - struct scatterlist *src, *dst; - u32 start, end, ctr, blocks; - size_t datalen; - bool fragmented = false; - - /* Check for transfer completion. */ - cctx->offset += aes->total; - if (cctx->offset >= req->cryptlen) - return mtk_aes_transfer_complete(cryp, aes); - - /* Compute data length. */ - datalen = req->cryptlen - cctx->offset; - blocks = DIV_ROUND_UP(datalen, AES_BLOCK_SIZE); - ctr = be32_to_cpu(cctx->iv[3]); - - /* Check 32bit counter overflow. */ - start = ctr; - end = start + blocks - 1; - if (end < start) { - ctr = 0xffffffff; - datalen = AES_BLOCK_SIZE * -start; - fragmented = true; - } - - /* Jump to offset. */ - src = scatterwalk_ffwd(cctx->src, req->src, cctx->offset); - dst = ((req->src == req->dst) ? src : - scatterwalk_ffwd(cctx->dst, req->dst, cctx->offset)); - - /* Write IVs into transform state buffer. */ - memcpy(ctx->info.state + ctx->keylen, cctx->iv, AES_BLOCK_SIZE); - - if (unlikely(fragmented)) { - /* - * Increment the counter manually to cope with the hardware - * counter overflow. 
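That manual increment is the subtle part of mtk_aes_ctr_transfer() above: the engine's IV counter is 32 bits wide, so when a request would cross the wrap point, (u32)-start is exactly the number of blocks that still fit, the request is split there, and the remainder resumes with the software-incremented IV. A standalone sketch with a worked value (the harness and names are mine):

#include <stdint.h>
#include <stdio.h>

/*
 * How many of 'blocks' AES blocks can be processed before a 32-bit
 * counter starting at 'ctr' wraps around.
 */
static uint64_t blocks_before_wrap(uint32_t ctr, uint32_t blocks)
{
	uint32_t end = ctr + blocks - 1;

	if (end < ctr)				/* counter would wrap */
		return 0x100000000ULL - ctr;	/* same as (u32)-ctr */
	return blocks;
}

int main(void)
{
	/* ctr = 0xfffffffe with 4 blocks: only 2 fit. Prints: 2 4 */
	printf("%llu %llu\n",
	       (unsigned long long)blocks_before_wrap(0xfffffffeu, 4),
	       (unsigned long long)blocks_before_wrap(0u, 4));
	return 0;
}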
- */ - cctx->iv[3] = cpu_to_be32(ctr); - crypto_inc((u8 *)cctx->iv, AES_BLOCK_SIZE); - } - - return mtk_aes_dma(cryp, aes, src, dst, datalen); -} - -static int mtk_aes_ctr_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_aes_ctr_ctx *cctx = mtk_aes_ctr_ctx_cast(aes->ctx); - struct skcipher_request *req = skcipher_request_cast(aes->areq); - struct mtk_aes_reqctx *rctx = skcipher_request_ctx(req); - - mtk_aes_set_mode(aes, rctx); - - memcpy(cctx->iv, req->iv, AES_BLOCK_SIZE); - cctx->offset = 0; - aes->total = 0; - aes->resume = mtk_aes_ctr_transfer; - - return mtk_aes_ctr_transfer(cryp, aes); -} - -/* Check and set the AES key to transform state buffer */ -static int mtk_aes_setkey(struct crypto_skcipher *tfm, - const u8 *key, u32 keylen) -{ - struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(tfm); - - switch (keylen) { - case AES_KEYSIZE_128: - ctx->keymode = AES_TFM_128BITS; - break; - case AES_KEYSIZE_192: - ctx->keymode = AES_TFM_192BITS; - break; - case AES_KEYSIZE_256: - ctx->keymode = AES_TFM_256BITS; - break; - - default: - return -EINVAL; - } - - ctx->keylen = SIZE_IN_WORDS(keylen); - memcpy(ctx->key, key, keylen); - - return 0; -} - -static int mtk_aes_crypt(struct skcipher_request *req, u64 mode) -{ - struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req); - struct mtk_aes_base_ctx *ctx = crypto_skcipher_ctx(skcipher); - struct mtk_aes_reqctx *rctx; - struct mtk_cryp *cryp; - - cryp = mtk_aes_find_dev(ctx); - if (!cryp) - return -ENODEV; - - rctx = skcipher_request_ctx(req); - rctx->mode = mode; - - return mtk_aes_handle_queue(cryp, !(mode & AES_FLAGS_ENCRYPT), - &req->base); -} - -static int mtk_aes_ecb_encrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_ECB); -} - -static int mtk_aes_ecb_decrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ECB); -} - -static int mtk_aes_cbc_encrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CBC); -} - -static int mtk_aes_cbc_decrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_CBC); -} - -static int mtk_aes_ctr_encrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CTR); -} - -static int mtk_aes_ctr_decrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_CTR); -} - -static int mtk_aes_ofb_encrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_OFB); -} - -static int mtk_aes_ofb_decrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_OFB); -} - -static int mtk_aes_cfb_encrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_ENCRYPT | AES_FLAGS_CFB128); -} - -static int mtk_aes_cfb_decrypt(struct skcipher_request *req) -{ - return mtk_aes_crypt(req, AES_FLAGS_CFB128); -} - -static int mtk_aes_init_tfm(struct crypto_skcipher *tfm) -{ - struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx)); - ctx->base.start = mtk_aes_start; - return 0; -} - -static int mtk_aes_ctr_init_tfm(struct crypto_skcipher *tfm) -{ - struct mtk_aes_ctx *ctx = crypto_skcipher_ctx(tfm); - - crypto_skcipher_set_reqsize(tfm, sizeof(struct mtk_aes_reqctx)); - ctx->base.start = mtk_aes_ctr_start; - return 0; -} - -static struct skcipher_alg aes_algs[] = { -{ - .base.cra_name = "cbc(aes)", - .base.cra_driver_name = "cbc-aes-mtk", - .base.cra_priority = 400, - .base.cra_flags = 
CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = mtk_aes_setkey, - .encrypt = mtk_aes_cbc_encrypt, - .decrypt = mtk_aes_cbc_decrypt, - .ivsize = AES_BLOCK_SIZE, - .init = mtk_aes_init_tfm, -}, -{ - .base.cra_name = "ecb(aes)", - .base.cra_driver_name = "ecb-aes-mtk", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .setkey = mtk_aes_setkey, - .encrypt = mtk_aes_ecb_encrypt, - .decrypt = mtk_aes_ecb_decrypt, - .init = mtk_aes_init_tfm, -}, -{ - .base.cra_name = "ctr(aes)", - .base.cra_driver_name = "ctr-aes-mtk", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = mtk_aes_setkey, - .encrypt = mtk_aes_ctr_encrypt, - .decrypt = mtk_aes_ctr_decrypt, - .init = mtk_aes_ctr_init_tfm, -}, -{ - .base.cra_name = "ofb(aes)", - .base.cra_driver_name = "ofb-aes-mtk", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = AES_BLOCK_SIZE, - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = mtk_aes_setkey, - .encrypt = mtk_aes_ofb_encrypt, - .decrypt = mtk_aes_ofb_decrypt, -}, -{ - .base.cra_name = "cfb(aes)", - .base.cra_driver_name = "cfb-aes-mtk", - .base.cra_priority = 400, - .base.cra_flags = CRYPTO_ALG_ASYNC, - .base.cra_blocksize = 1, - .base.cra_ctxsize = sizeof(struct mtk_aes_ctx), - .base.cra_alignmask = 0xf, - .base.cra_module = THIS_MODULE, - - .min_keysize = AES_MIN_KEY_SIZE, - .max_keysize = AES_MAX_KEY_SIZE, - .ivsize = AES_BLOCK_SIZE, - .setkey = mtk_aes_setkey, - .encrypt = mtk_aes_cfb_encrypt, - .decrypt = mtk_aes_cfb_decrypt, -}, -}; - -static inline struct mtk_aes_gcm_ctx * -mtk_aes_gcm_ctx_cast(struct mtk_aes_base_ctx *ctx) -{ - return container_of(ctx, struct mtk_aes_gcm_ctx, base); -} - -/* - * Engine will verify and compare tag automatically, so we just need - * to check returned status which stored in the result descriptor. - */ -static int mtk_aes_gcm_tag_verify(struct mtk_cryp *cryp, - struct mtk_aes_rec *aes) -{ - __le32 status = cryp->ring[aes->id]->res_prev->ct; - - return mtk_aes_complete(cryp, aes, (status & AES_AUTH_TAG_ERR) ? 
- -EBADMSG : 0); -} - -/* Initialize transform information of GCM mode */ -static void mtk_aes_gcm_info_init(struct mtk_cryp *cryp, - struct mtk_aes_rec *aes, - size_t len) -{ - struct aead_request *req = aead_request_cast(aes->areq); - struct mtk_aes_base_ctx *ctx = aes->ctx; - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - struct mtk_aes_info *info = &ctx->info; - u32 ivsize = crypto_aead_ivsize(crypto_aead_reqtfm(req)); - u32 cnt = 0; - - ctx->ct_hdr = AES_CT_CTRL_HDR | cpu_to_le32(len); - - info->cmd[cnt++] = AES_GCM_CMD0 | cpu_to_le32(req->assoclen); - info->cmd[cnt++] = AES_GCM_CMD1 | cpu_to_le32(req->assoclen); - info->cmd[cnt++] = AES_GCM_CMD2; - info->cmd[cnt++] = AES_GCM_CMD3 | cpu_to_le32(gctx->textlen); - - if (aes->flags & AES_FLAGS_ENCRYPT) { - info->cmd[cnt++] = AES_GCM_CMD4 | cpu_to_le32(gctx->authsize); - info->tfm[0] = AES_TFM_GCM_OUT; - } else { - info->cmd[cnt++] = AES_GCM_CMD5 | cpu_to_le32(gctx->authsize); - info->cmd[cnt++] = AES_GCM_CMD6 | cpu_to_le32(gctx->authsize); - info->tfm[0] = AES_TFM_GCM_IN; - } - ctx->ct_size = cnt; - - info->tfm[0] |= AES_TFM_GHASH_DIGEST | AES_TFM_GHASH | AES_TFM_SIZE( - ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE + ivsize)) | - ctx->keymode; - info->tfm[1] = AES_TFM_CTR_INIT | AES_TFM_IV_CTR_MODE | AES_TFM_3IV | - AES_TFM_ENC_HASH; - - memcpy(info->state + ctx->keylen + SIZE_IN_WORDS(AES_BLOCK_SIZE), - req->iv, ivsize); -} - -static int mtk_aes_gcm_dma(struct mtk_cryp *cryp, struct mtk_aes_rec *aes, - struct scatterlist *src, struct scatterlist *dst, - size_t len) -{ - bool src_aligned, dst_aligned; - - aes->src.sg = src; - aes->dst.sg = dst; - aes->real_dst = dst; - - src_aligned = mtk_aes_check_aligned(src, len, &aes->src); - if (src == dst) - dst_aligned = src_aligned; - else - dst_aligned = mtk_aes_check_aligned(dst, len, &aes->dst); - - if (!src_aligned || !dst_aligned) { - if (aes->total > AES_BUF_SIZE) - return mtk_aes_complete(cryp, aes, -ENOMEM); - - if (!src_aligned) { - sg_copy_to_buffer(src, sg_nents(src), aes->buf, len); - aes->src.sg = &aes->aligned_sg; - aes->src.nents = 1; - aes->src.remainder = 0; - } - - if (!dst_aligned) { - aes->dst.sg = &aes->aligned_sg; - aes->dst.nents = 1; - aes->dst.remainder = 0; - } - - sg_init_table(&aes->aligned_sg, 1); - sg_set_buf(&aes->aligned_sg, aes->buf, aes->total); - } - - mtk_aes_gcm_info_init(cryp, aes, len); - - return mtk_aes_map(cryp, aes); -} - -/* Todo: GMAC */ -static int mtk_aes_gcm_start(struct mtk_cryp *cryp, struct mtk_aes_rec *aes) -{ - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(aes->ctx); - struct aead_request *req = aead_request_cast(aes->areq); - struct mtk_aes_reqctx *rctx = aead_request_ctx(req); - u32 len = req->assoclen + req->cryptlen; - - mtk_aes_set_mode(aes, rctx); - - if (aes->flags & AES_FLAGS_ENCRYPT) { - u32 tag[4]; - - aes->resume = mtk_aes_transfer_complete; - /* Compute total process length. 
*/ - aes->total = len + gctx->authsize; - /* Hardware will append authenticated tag to output buffer */ - scatterwalk_map_and_copy(tag, req->dst, len, gctx->authsize, 1); - } else { - aes->resume = mtk_aes_gcm_tag_verify; - aes->total = len; - } - - return mtk_aes_gcm_dma(cryp, aes, req->src, req->dst, len); -} - -static int mtk_aes_gcm_crypt(struct aead_request *req, u64 mode) -{ - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(crypto_aead_reqtfm(req)); - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - struct mtk_aes_reqctx *rctx = aead_request_ctx(req); - struct mtk_cryp *cryp; - bool enc = !!(mode & AES_FLAGS_ENCRYPT); - - cryp = mtk_aes_find_dev(ctx); - if (!cryp) - return -ENODEV; - - /* Compute text length. */ - gctx->textlen = req->cryptlen - (enc ? 0 : gctx->authsize); - - /* Empty messages are not supported yet */ - if (!gctx->textlen && !req->assoclen) - return -EINVAL; - - rctx->mode = AES_FLAGS_GCM | mode; - - return mtk_aes_handle_queue(cryp, enc, &req->base); -} - -/* - * Because of the hardware limitation, we need to pre-calculate key(H) - * for the GHASH operation. The result of the encryption operation - * need to be stored in the transform state buffer. - */ -static int mtk_aes_gcm_setkey(struct crypto_aead *aead, const u8 *key, - u32 keylen) -{ - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); - union { - u32 x32[SIZE_IN_WORDS(AES_BLOCK_SIZE)]; - u8 x8[AES_BLOCK_SIZE]; - } hash = {}; - struct crypto_aes_ctx aes_ctx; - int err; - int i; - - switch (keylen) { - case AES_KEYSIZE_128: - ctx->keymode = AES_TFM_128BITS; - break; - case AES_KEYSIZE_192: - ctx->keymode = AES_TFM_192BITS; - break; - case AES_KEYSIZE_256: - ctx->keymode = AES_TFM_256BITS; - break; - - default: - return -EINVAL; - } - - ctx->keylen = SIZE_IN_WORDS(keylen); - - err = aes_expandkey(&aes_ctx, key, keylen); - if (err) - return err; - - aes_encrypt(&aes_ctx, hash.x8, hash.x8); - memzero_explicit(&aes_ctx, sizeof(aes_ctx)); - - memcpy(ctx->key, key, keylen); - - /* Why do we need to do this? 
*/ - for (i = 0; i < SIZE_IN_WORDS(AES_BLOCK_SIZE); i++) - hash.x32[i] = swab32(hash.x32[i]); - - memcpy(ctx->key + ctx->keylen, &hash, AES_BLOCK_SIZE); - - return 0; -} - -static int mtk_aes_gcm_setauthsize(struct crypto_aead *aead, - u32 authsize) -{ - struct mtk_aes_base_ctx *ctx = crypto_aead_ctx(aead); - struct mtk_aes_gcm_ctx *gctx = mtk_aes_gcm_ctx_cast(ctx); - - /* Same as crypto_gcm_authsize() from crypto/gcm.c */ - switch (authsize) { - case 8: - case 12: - case 16: - break; - default: - return -EINVAL; - } - - gctx->authsize = authsize; - return 0; -} - -static int mtk_aes_gcm_encrypt(struct aead_request *req) -{ - return mtk_aes_gcm_crypt(req, AES_FLAGS_ENCRYPT); -} - -static int mtk_aes_gcm_decrypt(struct aead_request *req) -{ - return mtk_aes_gcm_crypt(req, 0); -} - -static int mtk_aes_gcm_init(struct crypto_aead *aead) -{ - struct mtk_aes_gcm_ctx *ctx = crypto_aead_ctx(aead); - - crypto_aead_set_reqsize(aead, sizeof(struct mtk_aes_reqctx)); - ctx->base.start = mtk_aes_gcm_start; - return 0; -} - -static struct aead_alg aes_gcm_alg = { - .setkey = mtk_aes_gcm_setkey, - .setauthsize = mtk_aes_gcm_setauthsize, - .encrypt = mtk_aes_gcm_encrypt, - .decrypt = mtk_aes_gcm_decrypt, - .init = mtk_aes_gcm_init, - .ivsize = GCM_AES_IV_SIZE, - .maxauthsize = AES_BLOCK_SIZE, - - .base = { - .cra_name = "gcm(aes)", - .cra_driver_name = "gcm-aes-mtk", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = 1, - .cra_ctxsize = sizeof(struct mtk_aes_gcm_ctx), - .cra_alignmask = 0xf, - .cra_module = THIS_MODULE, - }, -}; - -static void mtk_aes_queue_task(unsigned long data) -{ - struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; - - mtk_aes_handle_queue(aes->cryp, aes->id, NULL); -} - -static void mtk_aes_done_task(unsigned long data) -{ - struct mtk_aes_rec *aes = (struct mtk_aes_rec *)data; - struct mtk_cryp *cryp = aes->cryp; - - mtk_aes_unmap(cryp, aes); - aes->resume(cryp, aes); -} - -static irqreturn_t mtk_aes_irq(int irq, void *dev_id) -{ - struct mtk_aes_rec *aes = (struct mtk_aes_rec *)dev_id; - struct mtk_cryp *cryp = aes->cryp; - u32 val = mtk_aes_read(cryp, RDR_STAT(aes->id)); - - mtk_aes_write(cryp, RDR_STAT(aes->id), val); - - if (likely(AES_FLAGS_BUSY & aes->flags)) { - mtk_aes_write(cryp, RDR_PROC_COUNT(aes->id), MTK_CNT_RST); - mtk_aes_write(cryp, RDR_THRESH(aes->id), - MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); - - tasklet_schedule(&aes->done_task); - } else { - dev_warn(cryp->dev, "AES interrupt when no active requests.\n"); - } - return IRQ_HANDLED; -} - -/* - * The purpose of creating encryption and decryption records is - * to process outbound/inbound data in parallel, it can improve - * performance in most use cases, such as IPSec VPN, especially - * under heavy network traffic. 
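Back in mtk_aes_gcm_setkey() above: the engine cannot derive the GHASH subkey itself, so the driver computes H = E_K(0^128) in software and stores it after the AES key in the transform state, word-swapped (presumably to match the byte order the engine expects; the source itself only asks "why"). A minimal sketch of the derivation using the kernel's AES library helpers (the function name is mine; error handling trimmed):

#include <crypto/aes.h>
#include <linux/string.h>

static int derive_ghash_key(const u8 *key, unsigned int keylen,
			    u8 hash_key[AES_BLOCK_SIZE])
{
	struct crypto_aes_ctx aes_ctx;
	int err;

	err = aes_expandkey(&aes_ctx, key, keylen);
	if (err)
		return err;

	/* H is the encryption of the all-zero block under the GCM key. */
	memset(hash_key, 0, AES_BLOCK_SIZE);
	aes_encrypt(&aes_ctx, hash_key, hash_key);

	/* Don't leave expanded round keys on the stack. */
	memzero_explicit(&aes_ctx, sizeof(aes_ctx));
	return 0;
}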
- */ -static int mtk_aes_record_init(struct mtk_cryp *cryp) -{ - struct mtk_aes_rec **aes = cryp->aes; - int i, err = -ENOMEM; - - for (i = 0; i < MTK_REC_NUM; i++) { - aes[i] = kzalloc(sizeof(**aes), GFP_KERNEL); - if (!aes[i]) - goto err_cleanup; - - aes[i]->buf = (void *)__get_free_pages(GFP_KERNEL, - AES_BUF_ORDER); - if (!aes[i]->buf) - goto err_cleanup; - - aes[i]->cryp = cryp; - - spin_lock_init(&aes[i]->lock); - crypto_init_queue(&aes[i]->queue, AES_QUEUE_SIZE); - - tasklet_init(&aes[i]->queue_task, mtk_aes_queue_task, - (unsigned long)aes[i]); - tasklet_init(&aes[i]->done_task, mtk_aes_done_task, - (unsigned long)aes[i]); - } - - /* Link to ring0 and ring1 respectively */ - aes[0]->id = MTK_RING0; - aes[1]->id = MTK_RING1; - - return 0; - -err_cleanup: - for (; i--; ) { - free_page((unsigned long)aes[i]->buf); - kfree(aes[i]); - } - - return err; -} - -static void mtk_aes_record_free(struct mtk_cryp *cryp) -{ - int i; - - for (i = 0; i < MTK_REC_NUM; i++) { - tasklet_kill(&cryp->aes[i]->done_task); - tasklet_kill(&cryp->aes[i]->queue_task); - - free_page((unsigned long)cryp->aes[i]->buf); - kfree(cryp->aes[i]); - } -} - -static void mtk_aes_unregister_algs(void) -{ - int i; - - crypto_unregister_aead(&aes_gcm_alg); - - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) - crypto_unregister_skcipher(&aes_algs[i]); -} - -static int mtk_aes_register_algs(void) -{ - int err, i; - - for (i = 0; i < ARRAY_SIZE(aes_algs); i++) { - err = crypto_register_skcipher(&aes_algs[i]); - if (err) - goto err_aes_algs; - } - - err = crypto_register_aead(&aes_gcm_alg); - if (err) - goto err_aes_algs; - - return 0; - -err_aes_algs: - for (; i--; ) - crypto_unregister_skcipher(&aes_algs[i]); - - return err; -} - -int mtk_cipher_alg_register(struct mtk_cryp *cryp) -{ - int ret; - - INIT_LIST_HEAD(&cryp->aes_list); - - /* Initialize two cipher records */ - ret = mtk_aes_record_init(cryp); - if (ret) - goto err_record; - - ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING0], mtk_aes_irq, - 0, "mtk-aes", cryp->aes[0]); - if (ret) { - dev_err(cryp->dev, "unable to request AES irq.\n"); - goto err_res; - } - - ret = devm_request_irq(cryp->dev, cryp->irq[MTK_RING1], mtk_aes_irq, - 0, "mtk-aes", cryp->aes[1]); - if (ret) { - dev_err(cryp->dev, "unable to request AES irq.\n"); - goto err_res; - } - - /* Enable ring0 and ring1 interrupt */ - mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING0), MTK_IRQ_RDR0); - mtk_aes_write(cryp, AIC_ENABLE_SET(MTK_RING1), MTK_IRQ_RDR1); - - spin_lock(&mtk_aes.lock); - list_add_tail(&cryp->aes_list, &mtk_aes.dev_list); - spin_unlock(&mtk_aes.lock); - - ret = mtk_aes_register_algs(); - if (ret) - goto err_algs; - - return 0; - -err_algs: - spin_lock(&mtk_aes.lock); - list_del(&cryp->aes_list); - spin_unlock(&mtk_aes.lock); -err_res: - mtk_aes_record_free(cryp); -err_record: - - dev_err(cryp->dev, "mtk-aes initialization failed.\n"); - return ret; -} - -void mtk_cipher_alg_release(struct mtk_cryp *cryp) -{ - spin_lock(&mtk_aes.lock); - list_del(&cryp->aes_list); - spin_unlock(&mtk_aes.lock); - - mtk_aes_unregister_algs(); - mtk_aes_record_free(cryp); -} diff --git a/drivers/crypto/mediatek/mtk-platform.c b/drivers/crypto/mediatek/mtk-platform.c deleted file mode 100644 index 9d878620e5c9..000000000000 --- a/drivers/crypto/mediatek/mtk-platform.c +++ /dev/null @@ -1,586 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Driver for EIP97 cryptographic accelerator. 
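One idiom that recurs above (mtk_aes_record_init(), mtk_aes_register_algs()) is the reverse unwind: on failure at index i, tear down entries i-1 down to 0 with a bare for (; i--; ) loop. A standalone sketch with hypothetical malloc-backed resources:

#include <stdlib.h>

static int init_all(void *objs[], int n)
{
	int i;

	for (i = 0; i < n; i++) {
		objs[i] = malloc(64);
		if (!objs[i])
			goto err_cleanup;
	}
	return 0;

err_cleanup:
	for (; i--; )		/* visits i - 1 down to 0 */
		free(objs[i]);
	return -1;
}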
- * - * Copyright (c) 2016 Ryder Lee - */ - -#include -#include -#include -#include -#include -#include -#include -#include "mtk-platform.h" - -#define MTK_BURST_SIZE_MSK GENMASK(7, 4) -#define MTK_BURST_SIZE(x) ((x) << 4) -#define MTK_DESC_SIZE(x) ((x) << 0) -#define MTK_DESC_OFFSET(x) ((x) << 16) -#define MTK_DESC_FETCH_SIZE(x) ((x) << 0) -#define MTK_DESC_FETCH_THRESH(x) ((x) << 16) -#define MTK_DESC_OVL_IRQ_EN BIT(25) -#define MTK_DESC_ATP_PRESENT BIT(30) - -#define MTK_DFSE_IDLE GENMASK(3, 0) -#define MTK_DFSE_THR_CTRL_EN BIT(30) -#define MTK_DFSE_THR_CTRL_RESET BIT(31) -#define MTK_DFSE_RING_ID(x) (((x) >> 12) & GENMASK(3, 0)) -#define MTK_DFSE_MIN_DATA(x) ((x) << 0) -#define MTK_DFSE_MAX_DATA(x) ((x) << 8) -#define MTK_DFE_MIN_CTRL(x) ((x) << 16) -#define MTK_DFE_MAX_CTRL(x) ((x) << 24) - -#define MTK_IN_BUF_MIN_THRESH(x) ((x) << 8) -#define MTK_IN_BUF_MAX_THRESH(x) ((x) << 12) -#define MTK_OUT_BUF_MIN_THRESH(x) ((x) << 0) -#define MTK_OUT_BUF_MAX_THRESH(x) ((x) << 4) -#define MTK_IN_TBUF_SIZE(x) (((x) >> 4) & GENMASK(3, 0)) -#define MTK_IN_DBUF_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) -#define MTK_OUT_DBUF_SIZE(x) (((x) >> 16) & GENMASK(3, 0)) -#define MTK_CMD_FIFO_SIZE(x) (((x) >> 8) & GENMASK(3, 0)) -#define MTK_RES_FIFO_SIZE(x) (((x) >> 12) & GENMASK(3, 0)) - -#define MTK_PE_TK_LOC_AVL BIT(2) -#define MTK_PE_PROC_HELD BIT(14) -#define MTK_PE_TK_TIMEOUT_EN BIT(22) -#define MTK_PE_INPUT_DMA_ERR BIT(0) -#define MTK_PE_OUTPUT_DMA_ERR BIT(1) -#define MTK_PE_PKT_PORC_ERR BIT(2) -#define MTK_PE_PKT_TIMEOUT BIT(3) -#define MTK_PE_FATAL_ERR BIT(14) -#define MTK_PE_INPUT_DMA_ERR_EN BIT(16) -#define MTK_PE_OUTPUT_DMA_ERR_EN BIT(17) -#define MTK_PE_PKT_PORC_ERR_EN BIT(18) -#define MTK_PE_PKT_TIMEOUT_EN BIT(19) -#define MTK_PE_FATAL_ERR_EN BIT(30) -#define MTK_PE_INT_OUT_EN BIT(31) - -#define MTK_HIA_SIGNATURE ((u16)0x35ca) -#define MTK_HIA_DATA_WIDTH(x) (((x) >> 25) & GENMASK(1, 0)) -#define MTK_HIA_DMA_LENGTH(x) (((x) >> 20) & GENMASK(4, 0)) -#define MTK_CDR_STAT_CLR GENMASK(4, 0) -#define MTK_RDR_STAT_CLR GENMASK(7, 0) - -#define MTK_AIC_INT_MSK GENMASK(5, 0) -#define MTK_AIC_VER_MSK (GENMASK(15, 0) | GENMASK(27, 20)) -#define MTK_AIC_VER11 0x011036c9 -#define MTK_AIC_VER12 0x012036c9 -#define MTK_AIC_G_CLR GENMASK(30, 20) - -/** - * EIP97 is an integrated security subsystem to accelerate cryptographic - * functions and protocols to offload the host processor. - * Some important hardware modules are briefly introduced below: - * - * Host Interface Adapter(HIA) - the main interface between the host - * system and the hardware subsystem. It is responsible for attaching - * processing engine to the specific host bus interface and provides a - * standardized software view for off loading tasks to the engine. - * - * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many - * CD the host has prepared in the CDR. It monitors the fill level of its - * CD-FIFO and if there's sufficient space for the next block of descriptors, - * then it fires off a DMA request to fetch a block of CDs. - * - * Data fetch engine(DFE) - It is responsible for parsing the CD and - * setting up the required control and packet data DMA transfers from - * system memory to the processing engine. - * - * Result Descriptor Ring Manager(RDR Manager) - same as CDR Manager, - * but target is result descriptors, Moreover, it also handles the RD - * updates under control of the DSE. For each packet data segment - * processed, the DSE triggers the RDR Manager to write the updated RD. 
- * If triggered to update, the RDR Manager sets up a DMA operation to - * copy the RD from the DSE to the correct location in the RDR. - * - * Data Store Engine(DSE) - It is responsible for parsing the prepared RD - * and setting up the required control and packet data DMA transfers from - * the processing engine to system memory. - * - * Advanced Interrupt Controllers(AICs) - receive interrupt request signals - * from various sources and combine them into one interrupt output. - * The AICs are used by: - * - One for the HIA global and processing engine interrupts. - * - The others for the descriptor ring interrupts. - */ - -/* Cryptographic engine capabilities */ -struct mtk_sys_cap { - /* host interface adapter */ - u32 hia_ver; - u32 hia_opt; - /* packet engine */ - u32 pkt_eng_opt; - /* global hardware */ - u32 hw_opt; -}; - -static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask) -{ - /* Assign rings to DFE/DSE thread and enable it */ - writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL); - writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL); -} - -static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp, - struct mtk_sys_cap *cap) -{ - u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2; - u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1; - u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len); - u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len); - u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len); - - writel(MTK_DFSE_MIN_DATA(ipbuf - 1) | - MTK_DFSE_MAX_DATA(ipbuf) | - MTK_DFE_MIN_CTRL(itbuf - 1) | - MTK_DFE_MAX_CTRL(itbuf), - cryp->base + DFE_CFG); - - writel(MTK_DFSE_MIN_DATA(opbuf - 1) | - MTK_DFSE_MAX_DATA(opbuf), - cryp->base + DSE_CFG); - - writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) | - MTK_IN_BUF_MAX_THRESH(ipbuf), - cryp->base + PE_IN_DBUF_THRESH); - - writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) | - MTK_IN_BUF_MAX_THRESH(itbuf), - cryp->base + PE_IN_TBUF_THRESH); - - writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) | - MTK_OUT_BUF_MAX_THRESH(opbuf), - cryp->base + PE_OUT_DBUF_THRESH); - - writel(0, cryp->base + PE_OUT_TBUF_THRESH); - writel(0, cryp->base + PE_OUT_BUF_CTRL); -} - -static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp) -{ - int ret = -EINVAL; - u32 val; - - /* Check for completion of all DMA transfers */ - val = readl(cryp->base + DFE_THR_STAT); - if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) { - val = readl(cryp->base + DSE_THR_STAT); - if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) - ret = 0; - } - - if (!ret) { - /* Take DFE/DSE thread out of reset */ - writel(0, cryp->base + DFE_THR_CTRL); - writel(0, cryp->base + DSE_THR_CTRL); - } else { - return -EBUSY; - } - - return 0; -} - -static int mtk_dfe_dse_reset(struct mtk_cryp *cryp) -{ - /* Reset DSE/DFE and correct system priorities for all rings. 
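A note on mtk_dfe_dse_state_check() above: the DFE/DSE reset is only released once both threads report the idle ring id in their status registers. The decode reduces to a small predicate (standalone sketch; in the driver the two values come from readl() of DFE_THR_STAT and DSE_THR_STAT):

#include <stdbool.h>
#include <stdint.h>

#define DFSE_IDLE	0xfu			/* GENMASK(3, 0) */
#define DFSE_RING_ID(x)	(((x) >> 12) & 0xfu)	/* bits 15:12 */

static bool engines_idle(uint32_t dfe_stat, uint32_t dse_stat)
{
	return DFSE_RING_ID(dfe_stat) == DFSE_IDLE &&
	       DFSE_RING_ID(dse_stat) == DFSE_IDLE;
}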
*/ - writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL); - writel(0, cryp->base + DFE_PRIO_0); - writel(0, cryp->base + DFE_PRIO_1); - writel(0, cryp->base + DFE_PRIO_2); - writel(0, cryp->base + DFE_PRIO_3); - - writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL); - writel(0, cryp->base + DSE_PRIO_0); - writel(0, cryp->base + DSE_PRIO_1); - writel(0, cryp->base + DSE_PRIO_2); - writel(0, cryp->base + DSE_PRIO_3); - - return mtk_dfe_dse_state_check(cryp); -} - -static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp, - int i, struct mtk_sys_cap *cap) -{ - /* Full descriptor that fits FIFO minus one */ - u32 count = - ((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1; - - /* Temporarily disable external triggering */ - writel(0, cryp->base + CDR_CFG(i)); - - /* Clear CDR count */ - writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i)); - writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i)); - - writel(0, cryp->base + CDR_PREP_PNTR(i)); - writel(0, cryp->base + CDR_PROC_PNTR(i)); - writel(0, cryp->base + CDR_DMA_CFG(i)); - - /* Configure CDR host address space */ - writel(0, cryp->base + CDR_BASE_ADDR_HI(i)); - writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i)); - - writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i)); - - /* Clear and disable all CDR interrupts */ - writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i)); - - /* - * Set command descriptor offset and enable additional - * token present in descriptor. - */ - writel(MTK_DESC_SIZE(MTK_DESC_SZ) | - MTK_DESC_OFFSET(MTK_DESC_OFF) | - MTK_DESC_ATP_PRESENT, - cryp->base + CDR_DESC_SIZE(i)); - - writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | - MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ), - cryp->base + CDR_CFG(i)); -} - -static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp, - int i, struct mtk_sys_cap *cap) -{ - u32 rndup = 2; - u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1; - - /* Temporarily disable external triggering */ - writel(0, cryp->base + RDR_CFG(i)); - - /* Clear RDR count */ - writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i)); - writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i)); - - writel(0, cryp->base + RDR_PREP_PNTR(i)); - writel(0, cryp->base + RDR_PROC_PNTR(i)); - writel(0, cryp->base + RDR_DMA_CFG(i)); - - /* Configure RDR host address space */ - writel(0, cryp->base + RDR_BASE_ADDR_HI(i)); - writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i)); - - writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i)); - writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i)); - - /* - * RDR manager generates update interrupts on a per-completed-packet, - * and the rd_proc_thresh_irq interrupt is fired when proc_pkt_count - * for the RDR exceeds the number of packets. - */ - writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE, - cryp->base + RDR_THRESH(i)); - - /* - * Configure a threshold and time-out value for the processed - * result descriptors (or complete packets) that are written to - * the RDR. - */ - writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF), - cryp->base + RDR_DESC_SIZE(i)); - - /* - * Configure HIA fetch size and fetch threshold that are used to - * fetch blocks of multiple descriptors. 
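The fetch sizing above deserves a worked example. The CDR fetch count is the number of whole descriptors that fit the command FIFO, minus one; the FIFO size field itself is read from HIA_OPTIONS at probe time, so the value below is an assumption for illustration:

#include <stdio.h>

#define DESC_SZ		6	/* fetched words per descriptor (MTK_DESC_SZ) */
#define DESC_OFF	8	/* descriptor stride in words (MTK_DESC_OFF) */

int main(void)
{
	unsigned int fifo_words = 1u << 8;	/* assumed FIFO field == 8 */
	unsigned int count = fifo_words / DESC_SZ - 1;

	/* Prints: count=41 fetch_size=328 fetch_thresh=246 */
	printf("count=%u fetch_size=%u fetch_thresh=%u\n",
	       count, count * DESC_OFF, count * DESC_SZ);
	return 0;
}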
- */ - writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) | - MTK_DESC_FETCH_THRESH(count * rndup) | - MTK_DESC_OVL_IRQ_EN, - cryp->base + RDR_CFG(i)); -} - -static int mtk_packet_engine_setup(struct mtk_cryp *cryp) -{ - struct mtk_sys_cap cap; - int i, err; - u32 val; - - cap.hia_ver = readl(cryp->base + HIA_VERSION); - cap.hia_opt = readl(cryp->base + HIA_OPTIONS); - cap.hw_opt = readl(cryp->base + EIP97_OPTIONS); - - if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE)) - return -EINVAL; - - /* Configure endianness conversion method for master (DMA) interface */ - writel(0, cryp->base + EIP97_MST_CTRL); - - /* Set HIA burst size */ - val = readl(cryp->base + HIA_MST_CTRL); - val &= ~MTK_BURST_SIZE_MSK; - val |= MTK_BURST_SIZE(5); - writel(val, cryp->base + HIA_MST_CTRL); - - err = mtk_dfe_dse_reset(cryp); - if (err) { - dev_err(cryp->dev, "Failed to reset DFE and DSE.\n"); - return err; - } - - mtk_dfe_dse_buf_setup(cryp, &cap); - - /* Enable the 4 rings for the packet engines. */ - mtk_desc_ring_link(cryp, 0xf); - - for (i = 0; i < MTK_RING_MAX; i++) { - mtk_cmd_desc_ring_setup(cryp, i, &cap); - mtk_res_desc_ring_setup(cryp, i, &cap); - } - - writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN, - cryp->base + PE_TOKEN_CTRL_STAT); - - /* Clear all pending interrupts */ - writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK); - writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR | - MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT | - MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN | - MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN | - MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN | - MTK_PE_INT_OUT_EN, - cryp->base + PE_INTERRUPT_CTRL_STAT); - - return 0; -} - -static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw) -{ - u32 val; - - if (hw == MTK_RING_MAX) - val = readl(cryp->base + AIC_G_VERSION); - else - val = readl(cryp->base + AIC_VERSION(hw)); - - val &= MTK_AIC_VER_MSK; - if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12) - return -ENXIO; - - if (hw == MTK_RING_MAX) - val = readl(cryp->base + AIC_G_OPTIONS); - else - val = readl(cryp->base + AIC_OPTIONS(hw)); - - val &= MTK_AIC_INT_MSK; - if (!val || val > 32) - return -ENXIO; - - return 0; -} - -static int mtk_aic_init(struct mtk_cryp *cryp, int hw) -{ - int err; - - err = mtk_aic_cap_check(cryp, hw); - if (err) - return err; - - /* Disable all interrupts and set initial configuration */ - if (hw == MTK_RING_MAX) { - writel(0, cryp->base + AIC_G_ENABLE_CTRL); - writel(0, cryp->base + AIC_G_POL_CTRL); - writel(0, cryp->base + AIC_G_TYPE_CTRL); - writel(0, cryp->base + AIC_G_ENABLE_SET); - } else { - writel(0, cryp->base + AIC_ENABLE_CTRL(hw)); - writel(0, cryp->base + AIC_POL_CTRL(hw)); - writel(0, cryp->base + AIC_TYPE_CTRL(hw)); - writel(0, cryp->base + AIC_ENABLE_SET(hw)); - } - - return 0; -} - -static int mtk_accelerator_init(struct mtk_cryp *cryp) -{ - int i, err; - - /* Initialize advanced interrupt controller(AIC) */ - for (i = 0; i < MTK_IRQ_NUM; i++) { - err = mtk_aic_init(cryp, i); - if (err) { - dev_err(cryp->dev, "Failed to initialize AIC.\n"); - return err; - } - } - - /* Initialize packet engine */ - err = mtk_packet_engine_setup(cryp); - if (err) { - dev_err(cryp->dev, "Failed to configure packet engine.\n"); - return err; - } - - return 0; -} - -static void mtk_desc_dma_free(struct mtk_cryp *cryp) -{ - int i; - - for (i = 0; i < MTK_RING_MAX; i++) { - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, - cryp->ring[i]->res_base, - cryp->ring[i]->res_dma); - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, - 
cryp->ring[i]->cmd_base, - cryp->ring[i]->cmd_dma); - kfree(cryp->ring[i]); - } -} - -static int mtk_desc_ring_alloc(struct mtk_cryp *cryp) -{ - struct mtk_ring **ring = cryp->ring; - int i; - - for (i = 0; i < MTK_RING_MAX; i++) { - ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL); - if (!ring[i]) - goto err_cleanup; - - ring[i]->cmd_base = dma_alloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->cmd_dma, - GFP_KERNEL); - if (!ring[i]->cmd_base) - goto err_cleanup; - - ring[i]->res_base = dma_alloc_coherent(cryp->dev, - MTK_DESC_RING_SZ, - &ring[i]->res_dma, - GFP_KERNEL); - if (!ring[i]->res_base) - goto err_cleanup; - - ring[i]->cmd_next = ring[i]->cmd_base; - ring[i]->res_next = ring[i]->res_base; - } - return 0; - -err_cleanup: - do { - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, - ring[i]->res_base, ring[i]->res_dma); - dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ, - ring[i]->cmd_base, ring[i]->cmd_dma); - kfree(ring[i]); - } while (i--); - return -ENOMEM; -} - -static int mtk_crypto_probe(struct platform_device *pdev) -{ - struct mtk_cryp *cryp; - int i, err; - - cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL); - if (!cryp) - return -ENOMEM; - - cryp->base = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(cryp->base)) - return PTR_ERR(cryp->base); - - for (i = 0; i < MTK_IRQ_NUM; i++) { - cryp->irq[i] = platform_get_irq(pdev, i); - if (cryp->irq[i] < 0) - return cryp->irq[i]; - } - - cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp"); - if (IS_ERR(cryp->clk_cryp)) - return -EPROBE_DEFER; - - cryp->dev = &pdev->dev; - pm_runtime_enable(cryp->dev); - pm_runtime_get_sync(cryp->dev); - - err = clk_prepare_enable(cryp->clk_cryp); - if (err) - goto err_clk_cryp; - - /* Allocate four command/result descriptor rings */ - err = mtk_desc_ring_alloc(cryp); - if (err) { - dev_err(cryp->dev, "Unable to allocate descriptor rings.\n"); - goto err_resource; - } - - /* Initialize hardware modules */ - err = mtk_accelerator_init(cryp); - if (err) { - dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n"); - goto err_engine; - } - - err = mtk_cipher_alg_register(cryp); - if (err) { - dev_err(cryp->dev, "Unable to register cipher algorithm.\n"); - goto err_cipher; - } - - err = mtk_hash_alg_register(cryp); - if (err) { - dev_err(cryp->dev, "Unable to register hash algorithm.\n"); - goto err_hash; - } - - platform_set_drvdata(pdev, cryp); - return 0; - -err_hash: - mtk_cipher_alg_release(cryp); -err_cipher: - mtk_dfe_dse_reset(cryp); -err_engine: - mtk_desc_dma_free(cryp); -err_resource: - clk_disable_unprepare(cryp->clk_cryp); -err_clk_cryp: - pm_runtime_put_sync(cryp->dev); - pm_runtime_disable(cryp->dev); - - return err; -} - -static int mtk_crypto_remove(struct platform_device *pdev) -{ - struct mtk_cryp *cryp = platform_get_drvdata(pdev); - - mtk_hash_alg_release(cryp); - mtk_cipher_alg_release(cryp); - mtk_desc_dma_free(cryp); - - clk_disable_unprepare(cryp->clk_cryp); - - pm_runtime_put_sync(cryp->dev); - pm_runtime_disable(cryp->dev); - platform_set_drvdata(pdev, NULL); - - return 0; -} - -static const struct of_device_id of_crypto_id[] = { - { .compatible = "mediatek,eip97-crypto" }, - {}, -}; -MODULE_DEVICE_TABLE(of, of_crypto_id); - -static struct platform_driver mtk_crypto_driver = { - .probe = mtk_crypto_probe, - .remove = mtk_crypto_remove, - .driver = { - .name = "mtk-crypto", - .of_match_table = of_crypto_id, - }, -}; -module_platform_driver(mtk_crypto_driver); - -MODULE_LICENSE("GPL"); -MODULE_AUTHOR("Ryder Lee "); -MODULE_DESCRIPTION("Cryptographic 
accelerator driver for EIP97"); diff --git a/drivers/crypto/mediatek/mtk-platform.h b/drivers/crypto/mediatek/mtk-platform.h deleted file mode 100644 index 47920c51abac..000000000000 --- a/drivers/crypto/mediatek/mtk-platform.h +++ /dev/null @@ -1,231 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Driver for EIP97 cryptographic accelerator. - * - * Copyright (c) 2016 Ryder Lee - */ - -#ifndef __MTK_PLATFORM_H_ -#define __MTK_PLATFORM_H_ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "mtk-regs.h" - -#define MTK_RDR_PROC_THRESH BIT(0) -#define MTK_RDR_PROC_MODE BIT(23) -#define MTK_CNT_RST BIT(31) -#define MTK_IRQ_RDR0 BIT(1) -#define MTK_IRQ_RDR1 BIT(3) -#define MTK_IRQ_RDR2 BIT(5) -#define MTK_IRQ_RDR3 BIT(7) - -#define SIZE_IN_WORDS(x) ((x) >> 2) - -/** - * Ring 0/1 are used by AES encrypt and decrypt. - * Ring 2/3 are used by SHA. - */ -enum { - MTK_RING0, - MTK_RING1, - MTK_RING2, - MTK_RING3, - MTK_RING_MAX -}; - -#define MTK_REC_NUM (MTK_RING_MAX / 2) -#define MTK_IRQ_NUM 5 - -/** - * struct mtk_desc - DMA descriptor - * @hdr: the descriptor control header - * @buf: DMA address of input buffer segment - * @ct: DMA address of command token that control operation flow - * @ct_hdr: the command token control header - * @tag: the user-defined field - * @tfm: DMA address of transform state - * @bound: align descriptors offset boundary - * - * Structure passed to the crypto engine to describe where source - * data needs to be fetched and how it needs to be processed. - */ -struct mtk_desc { - __le32 hdr; - __le32 buf; - __le32 ct; - __le32 ct_hdr; - __le32 tag; - __le32 tfm; - __le32 bound[2]; -}; - -#define MTK_DESC_NUM 512 -#define MTK_DESC_OFF SIZE_IN_WORDS(sizeof(struct mtk_desc)) -#define MTK_DESC_SZ (MTK_DESC_OFF - 2) -#define MTK_DESC_RING_SZ ((sizeof(struct mtk_desc) * MTK_DESC_NUM)) -#define MTK_DESC_CNT(x) ((MTK_DESC_OFF * (x)) << 2) -#define MTK_DESC_LAST cpu_to_le32(BIT(22)) -#define MTK_DESC_FIRST cpu_to_le32(BIT(23)) -#define MTK_DESC_BUF_LEN(x) cpu_to_le32(x) -#define MTK_DESC_CT_LEN(x) cpu_to_le32((x) << 24) - -/** - * struct mtk_ring - Descriptor ring - * @cmd_base: pointer to command descriptor ring base - * @cmd_next: pointer to the next command descriptor - * @cmd_dma: DMA address of command descriptor ring - * @res_base: pointer to result descriptor ring base - * @res_next: pointer to the next result descriptor - * @res_prev: pointer to the previous result descriptor - * @res_dma: DMA address of result descriptor ring - * - * A descriptor ring is a circular buffer that is used to manage - * one or more descriptors. There are two type of descriptor rings; - * the command descriptor ring and result descriptor ring. 
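With 4-byte __le32 fields, the descriptor geometry above works out as follows: struct mtk_desc is 8 words (32 bytes), so MTK_DESC_OFF is 8 and MTK_DESC_SZ is 6, i.e. the two bound[] words pad each descriptor to an aligned stride but are not part of the fetched descriptor, and a 512-entry ring occupies 16 KiB. A standalone check with a stand-in struct:

#include <stdint.h>
#include <stdio.h>

struct desc {			/* same layout as struct mtk_desc */
	uint32_t hdr, buf, ct, ct_hdr, tag, tfm, bound[2];
};

int main(void)
{
	size_t off = sizeof(struct desc) / 4;

	/* Prints: off=8 sz=6 ring=16384 */
	printf("off=%zu sz=%zu ring=%zu\n", off, off - 2,
	       sizeof(struct desc) * 512);
	return 0;
}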
- */ -struct mtk_ring { - struct mtk_desc *cmd_base; - struct mtk_desc *cmd_next; - dma_addr_t cmd_dma; - struct mtk_desc *res_base; - struct mtk_desc *res_next; - struct mtk_desc *res_prev; - dma_addr_t res_dma; -}; - -/** - * struct mtk_aes_dma - Structure that holds sg list info - * @sg: pointer to scatter-gather list - * @nents: number of entries in the sg list - * @remainder: remainder of sg list - * @sg_len: number of entries in the sg mapped list - */ -struct mtk_aes_dma { - struct scatterlist *sg; - int nents; - u32 remainder; - u32 sg_len; -}; - -struct mtk_aes_base_ctx; -struct mtk_aes_rec; -struct mtk_cryp; - -typedef int (*mtk_aes_fn)(struct mtk_cryp *cryp, struct mtk_aes_rec *aes); - -/** - * struct mtk_aes_rec - AES operation record - * @cryp: pointer to Cryptographic device - * @queue: crypto request queue - * @areq: pointer to async request - * @done_task: the tasklet used by the AES interrupt handler - * @queue_task: the tasklet used to dequeue requests - * @ctx: pointer to current context - * @src: the structure that holds source sg list info - * @dst: the structure that holds destination sg list info - * @aligned_sg: the scatterlist used for alignment - * @real_dst: pointer to the destination sg list - * @resume: pointer to resume function - * @total: request buffer length - * @buf: pointer to page buffer - * @id: the ring currently in use - * @flags: AES operation state flags - * @lock: the async queue lock - * - * Structure used to record AES execution state. - */ -struct mtk_aes_rec { - struct mtk_cryp *cryp; - struct crypto_queue queue; - struct crypto_async_request *areq; - struct tasklet_struct done_task; - struct tasklet_struct queue_task; - struct mtk_aes_base_ctx *ctx; - struct mtk_aes_dma src; - struct mtk_aes_dma dst; - - struct scatterlist aligned_sg; - struct scatterlist *real_dst; - - mtk_aes_fn resume; - - size_t total; - void *buf; - - u8 id; - unsigned long flags; - /* queue lock */ - spinlock_t lock; -}; - -/** - * struct mtk_sha_rec - SHA operation record - * @cryp: pointer to Cryptographic device - * @queue: crypto request queue - * @req: pointer to ahash request - * @done_task: the tasklet used by the SHA interrupt handler - * @queue_task: the tasklet used to dequeue requests - * @id: the ring currently in use - * @flags: SHA operation state flags - * @lock: the async queue lock - * - * Structure used to record SHA execution state. - */ -struct mtk_sha_rec { - struct mtk_cryp *cryp; - struct crypto_queue queue; - struct ahash_request *req; - struct tasklet_struct done_task; - struct tasklet_struct queue_task; - - u8 id; - unsigned long flags; - /* queue lock */ - spinlock_t lock; -}; - -/** - * struct mtk_cryp - Cryptographic device - * @base: pointer to mapped register I/O base - * @dev: pointer to device - * @clk_cryp: pointer to crypto clock - * @irq: global system and rings IRQ - * @ring: pointer to descriptor rings - * @aes: pointer to operation record of AES - * @sha: pointer to operation record of SHA - * @aes_list: device list of AES - * @sha_list: device list of SHA - * @rec: used to select the SHA record for a tfm - * - * Structure storing cryptographic device information.
- */ -struct mtk_cryp { - void __iomem *base; - struct device *dev; - struct clk *clk_cryp; - int irq[MTK_IRQ_NUM]; - - struct mtk_ring *ring[MTK_RING_MAX]; - struct mtk_aes_rec *aes[MTK_REC_NUM]; - struct mtk_sha_rec *sha[MTK_REC_NUM]; - - struct list_head aes_list; - struct list_head sha_list; - - bool rec; -}; - -int mtk_cipher_alg_register(struct mtk_cryp *cryp); -void mtk_cipher_alg_release(struct mtk_cryp *cryp); -int mtk_hash_alg_register(struct mtk_cryp *cryp); -void mtk_hash_alg_release(struct mtk_cryp *cryp); - -#endif /* __MTK_PLATFORM_H_ */ diff --git a/drivers/crypto/mediatek/mtk-regs.h b/drivers/crypto/mediatek/mtk-regs.h deleted file mode 100644 index d3defda7a750..000000000000 --- a/drivers/crypto/mediatek/mtk-regs.h +++ /dev/null @@ -1,190 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0-only */ -/* - * Support for MediaTek cryptographic accelerator. - * - * Copyright (c) 2016 MediaTek Inc. - * Author: Ryder Lee - */ - -#ifndef __MTK_REGS_H__ -#define __MTK_REGS_H__ - -/* HIA, Command Descriptor Ring Manager */ -#define CDR_BASE_ADDR_LO(x) (0x0 + ((x) << 12)) -#define CDR_BASE_ADDR_HI(x) (0x4 + ((x) << 12)) -#define CDR_DATA_BASE_ADDR_LO(x) (0x8 + ((x) << 12)) -#define CDR_DATA_BASE_ADDR_HI(x) (0xC + ((x) << 12)) -#define CDR_ACD_BASE_ADDR_LO(x) (0x10 + ((x) << 12)) -#define CDR_ACD_BASE_ADDR_HI(x) (0x14 + ((x) << 12)) -#define CDR_RING_SIZE(x) (0x18 + ((x) << 12)) -#define CDR_DESC_SIZE(x) (0x1C + ((x) << 12)) -#define CDR_CFG(x) (0x20 + ((x) << 12)) -#define CDR_DMA_CFG(x) (0x24 + ((x) << 12)) -#define CDR_THRESH(x) (0x28 + ((x) << 12)) -#define CDR_PREP_COUNT(x) (0x2C + ((x) << 12)) -#define CDR_PROC_COUNT(x) (0x30 + ((x) << 12)) -#define CDR_PREP_PNTR(x) (0x34 + ((x) << 12)) -#define CDR_PROC_PNTR(x) (0x38 + ((x) << 12)) -#define CDR_STAT(x) (0x3C + ((x) << 12)) - -/* HIA, Result Descriptor Ring Manager */ -#define RDR_BASE_ADDR_LO(x) (0x800 + ((x) << 12)) -#define RDR_BASE_ADDR_HI(x) (0x804 + ((x) << 12)) -#define RDR_DATA_BASE_ADDR_LO(x) (0x808 + ((x) << 12)) -#define RDR_DATA_BASE_ADDR_HI(x) (0x80C + ((x) << 12)) -#define RDR_ACD_BASE_ADDR_LO(x) (0x810 + ((x) << 12)) -#define RDR_ACD_BASE_ADDR_HI(x) (0x814 + ((x) << 12)) -#define RDR_RING_SIZE(x) (0x818 + ((x) << 12)) -#define RDR_DESC_SIZE(x) (0x81C + ((x) << 12)) -#define RDR_CFG(x) (0x820 + ((x) << 12)) -#define RDR_DMA_CFG(x) (0x824 + ((x) << 12)) -#define RDR_THRESH(x) (0x828 + ((x) << 12)) -#define RDR_PREP_COUNT(x) (0x82C + ((x) << 12)) -#define RDR_PROC_COUNT(x) (0x830 + ((x) << 12)) -#define RDR_PREP_PNTR(x) (0x834 + ((x) << 12)) -#define RDR_PROC_PNTR(x) (0x838 + ((x) << 12)) -#define RDR_STAT(x) (0x83C + ((x) << 12)) - -/* HIA, Ring AIC */ -#define AIC_POL_CTRL(x) (0xE000 - ((x) << 12)) -#define AIC_TYPE_CTRL(x) (0xE004 - ((x) << 12)) -#define AIC_ENABLE_CTRL(x) (0xE008 - ((x) << 12)) -#define AIC_RAW_STAL(x) (0xE00C - ((x) << 12)) -#define AIC_ENABLE_SET(x) (0xE00C - ((x) << 12)) -#define AIC_ENABLED_STAT(x) (0xE010 - ((x) << 12)) -#define AIC_ACK(x) (0xE010 - ((x) << 12)) -#define AIC_ENABLE_CLR(x) (0xE014 - ((x) << 12)) -#define AIC_OPTIONS(x) (0xE018 - ((x) << 12)) -#define AIC_VERSION(x) (0xE01C - ((x) << 12)) - -/* HIA, Global AIC */ -#define AIC_G_POL_CTRL 0xF800 -#define AIC_G_TYPE_CTRL 0xF804 -#define AIC_G_ENABLE_CTRL 0xF808 -#define AIC_G_RAW_STAT 0xF80C -#define AIC_G_ENABLE_SET 0xF80C -#define AIC_G_ENABLED_STAT 0xF810 -#define AIC_G_ACK 0xF810 -#define AIC_G_ENABLE_CLR 0xF814 -#define AIC_G_OPTIONS 0xF818 -#define AIC_G_VERSION 0xF81C - -/* HIA, Data Fetch Engine */ -#define DFE_CFG 0xF000 
-#define DFE_PRIO_0 0xF010 -#define DFE_PRIO_1 0xF014 -#define DFE_PRIO_2 0xF018 -#define DFE_PRIO_3 0xF01C - -/* HIA, Data Fetch Engine access monitoring for CDR */ -#define DFE_RING_REGION_LO(x) (0xF080 + ((x) << 3)) -#define DFE_RING_REGION_HI(x) (0xF084 + ((x) << 3)) - -/* HIA, Data Fetch Engine thread control and status for thread */ -#define DFE_THR_CTRL 0xF200 -#define DFE_THR_STAT 0xF204 -#define DFE_THR_DESC_CTRL 0xF208 -#define DFE_THR_DESC_DPTR_LO 0xF210 -#define DFE_THR_DESC_DPTR_HI 0xF214 -#define DFE_THR_DESC_ACDPTR_LO 0xF218 -#define DFE_THR_DESC_ACDPTR_HI 0xF21C - -/* HIA, Data Store Engine */ -#define DSE_CFG 0xF400 -#define DSE_PRIO_0 0xF410 -#define DSE_PRIO_1 0xF414 -#define DSE_PRIO_2 0xF418 -#define DSE_PRIO_3 0xF41C - -/* HIA, Data Store Engine access monitoring for RDR */ -#define DSE_RING_REGION_LO(x) (0xF480 + ((x) << 3)) -#define DSE_RING_REGION_HI(x) (0xF484 + ((x) << 3)) - -/* HIA, Data Store Engine thread control and status for thread */ -#define DSE_THR_CTRL 0xF600 -#define DSE_THR_STAT 0xF604 -#define DSE_THR_DESC_CTRL 0xF608 -#define DSE_THR_DESC_DPTR_LO 0xF610 -#define DSE_THR_DESC_DPTR_HI 0xF614 -#define DSE_THR_DESC_S_DPTR_LO 0xF618 -#define DSE_THR_DESC_S_DPTR_HI 0xF61C -#define DSE_THR_ERROR_STAT 0xF620 - -/* HIA Global */ -#define HIA_MST_CTRL 0xFFF4 -#define HIA_OPTIONS 0xFFF8 -#define HIA_VERSION 0xFFFC - -/* Processing Engine Input Side, Processing Engine */ -#define PE_IN_DBUF_THRESH 0x10000 -#define PE_IN_TBUF_THRESH 0x10100 - -/* Packet Engine Configuration / Status Registers */ -#define PE_TOKEN_CTRL_STAT 0x11000 -#define PE_FUNCTION_EN 0x11004 -#define PE_CONTEXT_CTRL 0x11008 -#define PE_INTERRUPT_CTRL_STAT 0x11010 -#define PE_CONTEXT_STAT 0x1100C -#define PE_OUT_TRANS_CTRL_STAT 0x11018 -#define PE_OUT_BUF_CTRL 0x1101C - -/* Packet Engine PRNG Registers */ -#define PE_PRNG_STAT 0x11040 -#define PE_PRNG_CTRL 0x11044 -#define PE_PRNG_SEED_L 0x11048 -#define PE_PRNG_SEED_H 0x1104C -#define PE_PRNG_KEY_0_L 0x11050 -#define PE_PRNG_KEY_0_H 0x11054 -#define PE_PRNG_KEY_1_L 0x11058 -#define PE_PRNG_KEY_1_H 0x1105C -#define PE_PRNG_RES_0 0x11060 -#define PE_PRNG_RES_1 0x11064 -#define PE_PRNG_RES_2 0x11068 -#define PE_PRNG_RES_3 0x1106C -#define PE_PRNG_LFSR_L 0x11070 -#define PE_PRNG_LFSR_H 0x11074 - -/* Packet Engine AIC */ -#define PE_EIP96_AIC_POL_CTRL 0x113C0 -#define PE_EIP96_AIC_TYPE_CTRL 0x113C4 -#define PE_EIP96_AIC_ENABLE_CTRL 0x113C8 -#define PE_EIP96_AIC_RAW_STAT 0x113CC -#define PE_EIP96_AIC_ENABLE_SET 0x113CC -#define PE_EIP96_AIC_ENABLED_STAT 0x113D0 -#define PE_EIP96_AIC_ACK 0x113D0 -#define PE_EIP96_AIC_ENABLE_CLR 0x113D4 -#define PE_EIP96_AIC_OPTIONS 0x113D8 -#define PE_EIP96_AIC_VERSION 0x113DC - -/* Packet Engine Options & Version Registers */ -#define PE_EIP96_OPTIONS 0x113F8 -#define PE_EIP96_VERSION 0x113FC - -/* Processing Engine Output Side */ -#define PE_OUT_DBUF_THRESH 0x11C00 -#define PE_OUT_TBUF_THRESH 0x11D00 - -/* Processing Engine Local AIC */ -#define PE_AIC_POL_CTRL 0x11F00 -#define PE_AIC_TYPE_CTRL 0x11F04 -#define PE_AIC_ENABLE_CTRL 0x11F08 -#define PE_AIC_RAW_STAT 0x11F0C -#define PE_AIC_ENABLE_SET 0x11F0C -#define PE_AIC_ENABLED_STAT 0x11F10 -#define PE_AIC_ENABLE_CLR 0x11F14 -#define PE_AIC_OPTIONS 0x11F18 -#define PE_AIC_VERSION 0x11F1C - -/* Processing Engine General Configuration and Version */ -#define PE_IN_FLIGHT 0x11FF0 -#define PE_OPTIONS 0x11FF8 -#define PE_VERSION 0x11FFC - -/* EIP-97 - Global */ -#define EIP97_CLOCK_STATE 0x1FFE4 -#define EIP97_FORCE_CLOCK_ON 0x1FFE8 -#define EIP97_FORCE_CLOCK_OFF 
0x1FFEC -#define EIP97_MST_CTRL 0x1FFF4 -#define EIP97_OPTIONS 0x1FFF8 -#define EIP97_VERSION 0x1FFFC -#endif /* __MTK_REGS_H__ */ diff --git a/drivers/crypto/mediatek/mtk-sha.c b/drivers/crypto/mediatek/mtk-sha.c deleted file mode 100644 index 3d5d7d68b03b..000000000000 --- a/drivers/crypto/mediatek/mtk-sha.c +++ /dev/null @@ -1,1352 +0,0 @@ -// SPDX-License-Identifier: GPL-2.0-only -/* - * Cryptographic API. - * - * Driver for EIP97 SHA1/SHA2(HMAC) acceleration. - * - * Copyright (c) 2016 Ryder Lee - * - * Some ideas are from atmel-sha.c and omap-sham.c drivers. - */ - -#include -#include -#include "mtk-platform.h" - -#define SHA_ALIGN_MSK (sizeof(u32) - 1) -#define SHA_QUEUE_SIZE 512 -#define SHA_BUF_SIZE ((u32)PAGE_SIZE) - -#define SHA_OP_UPDATE 1 -#define SHA_OP_FINAL 2 - -#define SHA_DATA_LEN_MSK cpu_to_le32(GENMASK(16, 0)) -#define SHA_MAX_DIGEST_BUF_SIZE 32 - -/* SHA command token */ -#define SHA_CT_SIZE 5 -#define SHA_CT_CTRL_HDR cpu_to_le32(0x02220000) -#define SHA_CMD0 cpu_to_le32(0x03020000) -#define SHA_CMD1 cpu_to_le32(0x21060000) -#define SHA_CMD2 cpu_to_le32(0xe0e63802) - -/* SHA transform information */ -#define SHA_TFM_HASH cpu_to_le32(0x2 << 0) -#define SHA_TFM_SIZE(x) cpu_to_le32((x) << 8) -#define SHA_TFM_START cpu_to_le32(0x1 << 4) -#define SHA_TFM_CONTINUE cpu_to_le32(0x1 << 5) -#define SHA_TFM_HASH_STORE cpu_to_le32(0x1 << 19) -#define SHA_TFM_SHA1 cpu_to_le32(0x2 << 23) -#define SHA_TFM_SHA256 cpu_to_le32(0x3 << 23) -#define SHA_TFM_SHA224 cpu_to_le32(0x4 << 23) -#define SHA_TFM_SHA512 cpu_to_le32(0x5 << 23) -#define SHA_TFM_SHA384 cpu_to_le32(0x6 << 23) -#define SHA_TFM_DIGEST(x) cpu_to_le32(((x) & GENMASK(3, 0)) << 24) - -/* SHA flags */ -#define SHA_FLAGS_BUSY BIT(0) -#define SHA_FLAGS_FINAL BIT(1) -#define SHA_FLAGS_FINUP BIT(2) -#define SHA_FLAGS_SG BIT(3) -#define SHA_FLAGS_ALGO_MSK GENMASK(8, 4) -#define SHA_FLAGS_SHA1 BIT(4) -#define SHA_FLAGS_SHA224 BIT(5) -#define SHA_FLAGS_SHA256 BIT(6) -#define SHA_FLAGS_SHA384 BIT(7) -#define SHA_FLAGS_SHA512 BIT(8) -#define SHA_FLAGS_HMAC BIT(9) -#define SHA_FLAGS_PAD BIT(10) - -/** - * mtk_sha_info - hardware information of SHA - * @cmd: command token, hardware instruction - * @tfm: transform state of the hash algorithm. - * @digest: contains keys and initial vectors.
- * - */ -struct mtk_sha_info { - __le32 ctrl[2]; - __le32 cmd[3]; - __le32 tfm[2]; - __le32 digest[SHA_MAX_DIGEST_BUF_SIZE]; -}; - -struct mtk_sha_reqctx { - struct mtk_sha_info info; - unsigned long flags; - unsigned long op; - - u64 digcnt; - size_t bufcnt; - dma_addr_t dma_addr; - - __le32 ct_hdr; - u32 ct_size; - dma_addr_t ct_dma; - dma_addr_t tfm_dma; - - /* Walk state */ - struct scatterlist *sg; - u32 offset; /* Offset in current sg */ - u32 total; /* Total request */ - size_t ds; - size_t bs; - - u8 *buffer; -}; - -struct mtk_sha_hmac_ctx { - struct crypto_shash *shash; - u8 ipad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); - u8 opad[SHA512_BLOCK_SIZE] __aligned(sizeof(u32)); -}; - -struct mtk_sha_ctx { - struct mtk_cryp *cryp; - unsigned long flags; - u8 id; - u8 buf[SHA_BUF_SIZE] __aligned(sizeof(u32)); - - struct mtk_sha_hmac_ctx base[]; -}; - -struct mtk_sha_drv { - struct list_head dev_list; - /* Device list lock */ - spinlock_t lock; -}; - -static struct mtk_sha_drv mtk_sha = { - .dev_list = LIST_HEAD_INIT(mtk_sha.dev_list), - .lock = __SPIN_LOCK_UNLOCKED(mtk_sha.lock), -}; - -static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, - struct ahash_request *req); - -static inline u32 mtk_sha_read(struct mtk_cryp *cryp, u32 offset) -{ - return readl_relaxed(cryp->base + offset); -} - -static inline void mtk_sha_write(struct mtk_cryp *cryp, - u32 offset, u32 value) -{ - writel_relaxed(value, cryp->base + offset); -} - -static inline void mtk_sha_ring_shift(struct mtk_ring *ring, - struct mtk_desc **cmd_curr, - struct mtk_desc **res_curr, - int *count) -{ - *cmd_curr = ring->cmd_next++; - *res_curr = ring->res_next++; - (*count)++; - - if (ring->cmd_next == ring->cmd_base + MTK_DESC_NUM) { - ring->cmd_next = ring->cmd_base; - ring->res_next = ring->res_base; - } -} - -static struct mtk_cryp *mtk_sha_find_dev(struct mtk_sha_ctx *tctx) -{ - struct mtk_cryp *cryp = NULL; - struct mtk_cryp *tmp; - - spin_lock_bh(&mtk_sha.lock); - if (!tctx->cryp) { - list_for_each_entry(tmp, &mtk_sha.dev_list, sha_list) { - cryp = tmp; - break; - } - tctx->cryp = cryp; - } else { - cryp = tctx->cryp; - } - - /* - * Assign a record id to the tfm in round-robin fashion; this - * binds the tfm to the corresponding descriptor rings. - */ - tctx->id = cryp->rec; - cryp->rec = !cryp->rec; - - spin_unlock_bh(&mtk_sha.lock); - - return cryp; -} - -static int mtk_sha_append_sg(struct mtk_sha_reqctx *ctx) -{ - size_t count; - - while ((ctx->bufcnt < SHA_BUF_SIZE) && ctx->total) { - count = min(ctx->sg->length - ctx->offset, ctx->total); - count = min(count, SHA_BUF_SIZE - ctx->bufcnt); - - if (count <= 0) { - /* - * Check if count <= 0 because the buffer is full or - * because the sg length is 0. In the latter case, - * check if there is another sg in the list; a 0 length - * sg doesn't necessarily mean the end of the sg list. - */ - if ((ctx->sg->length == 0) && !sg_is_last(ctx->sg)) { - ctx->sg = sg_next(ctx->sg); - continue; - } else { - break; - } - } - - scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, ctx->sg, - ctx->offset, count, 0); - - ctx->bufcnt += count; - ctx->offset += count; - ctx->total -= count; - - if (ctx->offset == ctx->sg->length) { - ctx->sg = sg_next(ctx->sg); - if (ctx->sg) - ctx->offset = 0; - else - ctx->total = 0; - } - } - - return 0; -} - -/* - * The purpose of this padding is to ensure that the padded message is a - * multiple of 512 bits (SHA1/SHA224/SHA256) or 1024 bits (SHA384/SHA512).
- * The bit "1" is appended at the end of the message followed by - * "padlen-1" zero bits. Then a 64-bit block (SHA1/SHA224/SHA256) or - * a 128-bit block (SHA384/SHA512) equal to the message length in bits - * is appended. - * - * For SHA1/SHA224/SHA256, padlen is calculated as follows: - * - if message length < 56 bytes then padlen = 56 - message length - * - else padlen = 64 + 56 - message length - * - * For SHA384/SHA512, padlen is calculated as follows: - * - if message length < 112 bytes then padlen = 112 - message length - * - else padlen = 128 + 112 - message length - */ -static void mtk_sha_fill_padding(struct mtk_sha_reqctx *ctx, u32 len) -{ - u32 index, padlen; - __be64 bits[2]; - u64 size = ctx->digcnt; - - size += ctx->bufcnt; - size += len; - - bits[1] = cpu_to_be64(size << 3); - bits[0] = cpu_to_be64(size >> 61); - - switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { - case SHA_FLAGS_SHA384: - case SHA_FLAGS_SHA512: - index = ctx->bufcnt & 0x7f; - padlen = (index < 112) ? (112 - index) : ((128 + 112) - index); - *(ctx->buffer + ctx->bufcnt) = 0x80; - memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); - memcpy(ctx->buffer + ctx->bufcnt + padlen, bits, 16); - ctx->bufcnt += padlen + 16; - ctx->flags |= SHA_FLAGS_PAD; - break; - - default: - index = ctx->bufcnt & 0x3f; - padlen = (index < 56) ? (56 - index) : ((64 + 56) - index); - *(ctx->buffer + ctx->bufcnt) = 0x80; - memset(ctx->buffer + ctx->bufcnt + 1, 0, padlen - 1); - memcpy(ctx->buffer + ctx->bufcnt + padlen, &bits[1], 8); - ctx->bufcnt += padlen + 8; - ctx->flags |= SHA_FLAGS_PAD; - break; - } -} - -/* Initialize basic transform information of SHA */ -static void mtk_sha_info_init(struct mtk_sha_reqctx *ctx) -{ - struct mtk_sha_info *info = &ctx->info; - - ctx->ct_hdr = SHA_CT_CTRL_HDR; - ctx->ct_size = SHA_CT_SIZE; - - info->tfm[0] = SHA_TFM_HASH | SHA_TFM_SIZE(SIZE_IN_WORDS(ctx->ds)); - - switch (ctx->flags & SHA_FLAGS_ALGO_MSK) { - case SHA_FLAGS_SHA1: - info->tfm[0] |= SHA_TFM_SHA1; - break; - case SHA_FLAGS_SHA224: - info->tfm[0] |= SHA_TFM_SHA224; - break; - case SHA_FLAGS_SHA256: - info->tfm[0] |= SHA_TFM_SHA256; - break; - case SHA_FLAGS_SHA384: - info->tfm[0] |= SHA_TFM_SHA384; - break; - case SHA_FLAGS_SHA512: - info->tfm[0] |= SHA_TFM_SHA512; - break; - - default: - /* Should not happen... */ - return; - } - - info->tfm[1] = SHA_TFM_HASH_STORE; - info->ctrl[0] = info->tfm[0] | SHA_TFM_CONTINUE | SHA_TFM_START; - info->ctrl[1] = info->tfm[1]; - - info->cmd[0] = SHA_CMD0; - info->cmd[1] = SHA_CMD1; - info->cmd[2] = SHA_CMD2 | SHA_TFM_DIGEST(SIZE_IN_WORDS(ctx->ds)); -} - -/* - * Update the input data length field of the transform information and - * map it to the DMA region.
- */ -static int mtk_sha_info_update(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha, - size_t len1, size_t len2) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - struct mtk_sha_info *info = &ctx->info; - - ctx->ct_hdr &= ~SHA_DATA_LEN_MSK; - ctx->ct_hdr |= cpu_to_le32(len1 + len2); - info->cmd[0] &= ~SHA_DATA_LEN_MSK; - info->cmd[0] |= cpu_to_le32(len1 + len2); - - /* Set SHA_TFM_START only for the first iteration */ - if (ctx->digcnt) - info->ctrl[0] &= ~SHA_TFM_START; - - ctx->digcnt += len1; - - ctx->ct_dma = dma_map_single(cryp->dev, info, sizeof(*info), - DMA_BIDIRECTIONAL); - if (unlikely(dma_mapping_error(cryp->dev, ctx->ct_dma))) { - dev_err(cryp->dev, "dma %zu bytes error\n", sizeof(*info)); - return -EINVAL; - } - - ctx->tfm_dma = ctx->ct_dma + sizeof(info->ctrl) + sizeof(info->cmd); - - return 0; -} - -/* - * Because of a hardware limitation, we would have to pre-calculate the - * inner and outer digests, which need to be processed first by the - * engine, and then apply the resulting digest to the input message. - * This complex hashing procedure limits HMAC performance, so we use a - * software fallback instead. - */ -static int mtk_sha_finish_hmac(struct ahash_request *req) -{ - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - struct mtk_sha_hmac_ctx *bctx = tctx->base; - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - SHASH_DESC_ON_STACK(shash, bctx->shash); - - shash->tfm = bctx->shash; - - return crypto_shash_init(shash) ?: - crypto_shash_update(shash, bctx->opad, ctx->bs) ?: - crypto_shash_finup(shash, req->result, ctx->ds, req->result); -} - -/* Initialize request context */ -static int mtk_sha_init(struct ahash_request *req) -{ - struct crypto_ahash *tfm = crypto_ahash_reqtfm(req); - struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm); - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - ctx->flags = 0; - ctx->ds = crypto_ahash_digestsize(tfm); - - switch (ctx->ds) { - case SHA1_DIGEST_SIZE: - ctx->flags |= SHA_FLAGS_SHA1; - ctx->bs = SHA1_BLOCK_SIZE; - break; - case SHA224_DIGEST_SIZE: - ctx->flags |= SHA_FLAGS_SHA224; - ctx->bs = SHA224_BLOCK_SIZE; - break; - case SHA256_DIGEST_SIZE: - ctx->flags |= SHA_FLAGS_SHA256; - ctx->bs = SHA256_BLOCK_SIZE; - break; - case SHA384_DIGEST_SIZE: - ctx->flags |= SHA_FLAGS_SHA384; - ctx->bs = SHA384_BLOCK_SIZE; - break; - case SHA512_DIGEST_SIZE: - ctx->flags |= SHA_FLAGS_SHA512; - ctx->bs = SHA512_BLOCK_SIZE; - break; - default: - return -EINVAL; - } - - ctx->bufcnt = 0; - ctx->digcnt = 0; - ctx->buffer = tctx->buf; - - if (tctx->flags & SHA_FLAGS_HMAC) { - struct mtk_sha_hmac_ctx *bctx = tctx->base; - - memcpy(ctx->buffer, bctx->ipad, ctx->bs); - ctx->bufcnt = ctx->bs; - ctx->flags |= SHA_FLAGS_HMAC; - } - - return 0; -} - -static int mtk_sha_xmit(struct mtk_cryp *cryp, struct mtk_sha_rec *sha, - dma_addr_t addr1, size_t len1, - dma_addr_t addr2, size_t len2) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - struct mtk_ring *ring = cryp->ring[sha->id]; - struct mtk_desc *cmd, *res; - int err, count = 0; - - err = mtk_sha_info_update(cryp, sha, len1, len2); - if (err) - return err; - - /* Fill in the command/result descriptors */ - mtk_sha_ring_shift(ring, &cmd, &res, &count); - - res->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1); - cmd->hdr = MTK_DESC_FIRST | MTK_DESC_BUF_LEN(len1) | - MTK_DESC_CT_LEN(ctx->ct_size); - cmd->buf = cpu_to_le32(addr1); - cmd->ct = cpu_to_le32(ctx->ct_dma); - cmd->ct_hdr = ctx->ct_hdr; - cmd->tfm = cpu_to_le32(ctx->tfm_dma); - - if (len2) { - mtk_sha_ring_shift(ring, &cmd,
&res, &count); - - res->hdr = MTK_DESC_BUF_LEN(len2); - cmd->hdr = MTK_DESC_BUF_LEN(len2); - cmd->buf = cpu_to_le32(addr2); - } - - cmd->hdr |= MTK_DESC_LAST; - res->hdr |= MTK_DESC_LAST; - - /* - * Make sure that all changes to the DMA ring are done before we - * start engine. - */ - wmb(); - /* Start DMA transfer */ - mtk_sha_write(cryp, RDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); - mtk_sha_write(cryp, CDR_PREP_COUNT(sha->id), MTK_DESC_CNT(count)); - - return -EINPROGRESS; -} - -static int mtk_sha_dma_map(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha, - struct mtk_sha_reqctx *ctx, - size_t count) -{ - ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, - SHA_BUF_SIZE, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { - dev_err(cryp->dev, "dma map error\n"); - return -EINVAL; - } - - ctx->flags &= ~SHA_FLAGS_SG; - - return mtk_sha_xmit(cryp, sha, ctx->dma_addr, count, 0, 0); -} - -static int mtk_sha_update_slow(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - size_t count; - u32 final; - - mtk_sha_append_sg(ctx); - - final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; - - dev_dbg(cryp->dev, "slow: bufcnt: %zu\n", ctx->bufcnt); - - if (final) { - sha->flags |= SHA_FLAGS_FINAL; - mtk_sha_fill_padding(ctx, 0); - } - - if (final || (ctx->bufcnt == SHA_BUF_SIZE && ctx->total)) { - count = ctx->bufcnt; - ctx->bufcnt = 0; - - return mtk_sha_dma_map(cryp, sha, ctx, count); - } - return 0; -} - -static int mtk_sha_update_start(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - u32 len, final, tail; - struct scatterlist *sg; - - if (!ctx->total) - return 0; - - if (ctx->bufcnt || ctx->offset) - return mtk_sha_update_slow(cryp, sha); - - sg = ctx->sg; - - if (!IS_ALIGNED(sg->offset, sizeof(u32))) - return mtk_sha_update_slow(cryp, sha); - - if (!sg_is_last(sg) && !IS_ALIGNED(sg->length, ctx->bs)) - /* size is not ctx->bs aligned */ - return mtk_sha_update_slow(cryp, sha); - - len = min(ctx->total, sg->length); - - if (sg_is_last(sg)) { - if (!(ctx->flags & SHA_FLAGS_FINUP)) { - /* not last sg must be ctx->bs aligned */ - tail = len & (ctx->bs - 1); - len -= tail; - } - } - - ctx->total -= len; - ctx->offset = len; /* offset where to start slow */ - - final = (ctx->flags & SHA_FLAGS_FINUP) && !ctx->total; - - /* Add padding */ - if (final) { - size_t count; - - tail = len & (ctx->bs - 1); - len -= tail; - ctx->total += tail; - ctx->offset = len; /* offset where to start slow */ - - sg = ctx->sg; - mtk_sha_append_sg(ctx); - mtk_sha_fill_padding(ctx, len); - - ctx->dma_addr = dma_map_single(cryp->dev, ctx->buffer, - SHA_BUF_SIZE, DMA_TO_DEVICE); - if (unlikely(dma_mapping_error(cryp->dev, ctx->dma_addr))) { - dev_err(cryp->dev, "dma map bytes error\n"); - return -EINVAL; - } - - sha->flags |= SHA_FLAGS_FINAL; - count = ctx->bufcnt; - ctx->bufcnt = 0; - - if (len == 0) { - ctx->flags &= ~SHA_FLAGS_SG; - return mtk_sha_xmit(cryp, sha, ctx->dma_addr, - count, 0, 0); - - } else { - ctx->sg = sg; - if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { - dev_err(cryp->dev, "dma_map_sg error\n"); - return -EINVAL; - } - - ctx->flags |= SHA_FLAGS_SG; - return mtk_sha_xmit(cryp, sha, sg_dma_address(ctx->sg), - len, ctx->dma_addr, count); - } - } - - if (!dma_map_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE)) { - dev_err(cryp->dev, "dma_map_sg error\n"); - return -EINVAL; - } - - ctx->flags |= SHA_FLAGS_SG; - - return mtk_sha_xmit(cryp, sha, 
sg_dma_address(ctx->sg), - len, 0, 0); -} - -static int mtk_sha_final_req(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - size_t count; - - mtk_sha_fill_padding(ctx, 0); - - sha->flags |= SHA_FLAGS_FINAL; - count = ctx->bufcnt; - ctx->bufcnt = 0; - - return mtk_sha_dma_map(cryp, sha, ctx, count); -} - -/* Copy ready hash (+ finalize hmac) */ -static int mtk_sha_finish(struct ahash_request *req) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - __le32 *digest = ctx->info.digest; - u32 *result = (u32 *)req->result; - int i; - - /* Get the hash from the digest buffer */ - for (i = 0; i < SIZE_IN_WORDS(ctx->ds); i++) - result[i] = le32_to_cpu(digest[i]); - - if (ctx->flags & SHA_FLAGS_HMAC) - return mtk_sha_finish_hmac(req); - - return 0; -} - -static void mtk_sha_finish_req(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha, - int err) -{ - if (likely(!err && (SHA_FLAGS_FINAL & sha->flags))) - err = mtk_sha_finish(sha->req); - - sha->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL); - - sha->req->base.complete(&sha->req->base, err); - - /* Handle new request */ - tasklet_schedule(&sha->queue_task); -} - -static int mtk_sha_handle_queue(struct mtk_cryp *cryp, u8 id, - struct ahash_request *req) -{ - struct mtk_sha_rec *sha = cryp->sha[id]; - struct crypto_async_request *async_req, *backlog; - struct mtk_sha_reqctx *ctx; - unsigned long flags; - int err = 0, ret = 0; - - spin_lock_irqsave(&sha->lock, flags); - if (req) - ret = ahash_enqueue_request(&sha->queue, req); - - if (SHA_FLAGS_BUSY & sha->flags) { - spin_unlock_irqrestore(&sha->lock, flags); - return ret; - } - - backlog = crypto_get_backlog(&sha->queue); - async_req = crypto_dequeue_request(&sha->queue); - if (async_req) - sha->flags |= SHA_FLAGS_BUSY; - spin_unlock_irqrestore(&sha->lock, flags); - - if (!async_req) - return ret; - - if (backlog) - backlog->complete(backlog, -EINPROGRESS); - - req = ahash_request_cast(async_req); - ctx = ahash_request_ctx(req); - - sha->req = req; - - mtk_sha_info_init(ctx); - - if (ctx->op == SHA_OP_UPDATE) { - err = mtk_sha_update_start(cryp, sha); - if (err != -EINPROGRESS && (ctx->flags & SHA_FLAGS_FINUP)) - /* No final() after finup() */ - err = mtk_sha_final_req(cryp, sha); - } else if (ctx->op == SHA_OP_FINAL) { - err = mtk_sha_final_req(cryp, sha); - } - - if (unlikely(err != -EINPROGRESS)) - /* Task will not finish it, so do it here */ - mtk_sha_finish_req(cryp, sha, err); - - return ret; -} - -static int mtk_sha_enqueue(struct ahash_request *req, u32 op) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(req->base.tfm); - - ctx->op = op; - - return mtk_sha_handle_queue(tctx->cryp, tctx->id, req); -} - -static void mtk_sha_unmap(struct mtk_cryp *cryp, struct mtk_sha_rec *sha) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(sha->req); - - dma_unmap_single(cryp->dev, ctx->ct_dma, sizeof(ctx->info), - DMA_BIDIRECTIONAL); - - if (ctx->flags & SHA_FLAGS_SG) { - dma_unmap_sg(cryp->dev, ctx->sg, 1, DMA_TO_DEVICE); - if (ctx->sg->length == ctx->offset) { - ctx->sg = sg_next(ctx->sg); - if (ctx->sg) - ctx->offset = 0; - } - if (ctx->flags & SHA_FLAGS_PAD) { - dma_unmap_single(cryp->dev, ctx->dma_addr, - SHA_BUF_SIZE, DMA_TO_DEVICE); - } - } else - dma_unmap_single(cryp->dev, ctx->dma_addr, - SHA_BUF_SIZE, DMA_TO_DEVICE); -} - -static void mtk_sha_complete(struct mtk_cryp *cryp, - struct mtk_sha_rec *sha) -{ - int err = 0; - - err = mtk_sha_update_start(cryp, sha); - if (err != 
-EINPROGRESS) - mtk_sha_finish_req(cryp, sha, err); -} - -static int mtk_sha_update(struct ahash_request *req) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - ctx->total = req->nbytes; - ctx->sg = req->src; - ctx->offset = 0; - - if ((ctx->bufcnt + ctx->total < SHA_BUF_SIZE) && - !(ctx->flags & SHA_FLAGS_FINUP)) - return mtk_sha_append_sg(ctx); - - return mtk_sha_enqueue(req, SHA_OP_UPDATE); -} - -static int mtk_sha_final(struct ahash_request *req) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - ctx->flags |= SHA_FLAGS_FINUP; - - if (ctx->flags & SHA_FLAGS_PAD) - return mtk_sha_finish(req); - - return mtk_sha_enqueue(req, SHA_OP_FINAL); -} - -static int mtk_sha_finup(struct ahash_request *req) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - int err1, err2; - - ctx->flags |= SHA_FLAGS_FINUP; - - err1 = mtk_sha_update(req); - if (err1 == -EINPROGRESS || - (err1 == -EBUSY && (ahash_request_flags(req) & - CRYPTO_TFM_REQ_MAY_BACKLOG))) - return err1; - /* - * final() has to be always called to cleanup resources - * even if update() failed - */ - err2 = mtk_sha_final(req); - - return err1 ?: err2; -} - -static int mtk_sha_digest(struct ahash_request *req) -{ - return mtk_sha_init(req) ?: mtk_sha_finup(req); -} - -static int mtk_sha_setkey(struct crypto_ahash *tfm, const u8 *key, - u32 keylen) -{ - struct mtk_sha_ctx *tctx = crypto_ahash_ctx(tfm); - struct mtk_sha_hmac_ctx *bctx = tctx->base; - size_t bs = crypto_shash_blocksize(bctx->shash); - size_t ds = crypto_shash_digestsize(bctx->shash); - int err, i; - - if (keylen > bs) { - err = crypto_shash_tfm_digest(bctx->shash, key, keylen, - bctx->ipad); - if (err) - return err; - keylen = ds; - } else { - memcpy(bctx->ipad, key, keylen); - } - - memset(bctx->ipad + keylen, 0, bs - keylen); - memcpy(bctx->opad, bctx->ipad, bs); - - for (i = 0; i < bs; i++) { - bctx->ipad[i] ^= HMAC_IPAD_VALUE; - bctx->opad[i] ^= HMAC_OPAD_VALUE; - } - - return 0; -} - -static int mtk_sha_export(struct ahash_request *req, void *out) -{ - const struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - memcpy(out, ctx, sizeof(*ctx)); - return 0; -} - -static int mtk_sha_import(struct ahash_request *req, const void *in) -{ - struct mtk_sha_reqctx *ctx = ahash_request_ctx(req); - - memcpy(ctx, in, sizeof(*ctx)); - return 0; -} - -static int mtk_sha_cra_init_alg(struct crypto_tfm *tfm, - const char *alg_base) -{ - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm); - struct mtk_cryp *cryp = NULL; - - cryp = mtk_sha_find_dev(tctx); - if (!cryp) - return -ENODEV; - - crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), - sizeof(struct mtk_sha_reqctx)); - - if (alg_base) { - struct mtk_sha_hmac_ctx *bctx = tctx->base; - - tctx->flags |= SHA_FLAGS_HMAC; - bctx->shash = crypto_alloc_shash(alg_base, 0, - CRYPTO_ALG_NEED_FALLBACK); - if (IS_ERR(bctx->shash)) { - pr_err("base driver %s could not be loaded.\n", - alg_base); - - return PTR_ERR(bctx->shash); - } - } - return 0; -} - -static int mtk_sha_cra_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, NULL); -} - -static int mtk_sha_cra_sha1_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, "sha1"); -} - -static int mtk_sha_cra_sha224_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, "sha224"); -} - -static int mtk_sha_cra_sha256_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, "sha256"); -} - -static int mtk_sha_cra_sha384_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, "sha384"); -} - -static int 
mtk_sha_cra_sha512_init(struct crypto_tfm *tfm) -{ - return mtk_sha_cra_init_alg(tfm, "sha512"); -} - -static void mtk_sha_cra_exit(struct crypto_tfm *tfm) -{ - struct mtk_sha_ctx *tctx = crypto_tfm_ctx(tfm); - - if (tctx->flags & SHA_FLAGS_HMAC) { - struct mtk_sha_hmac_ctx *bctx = tctx->base; - - crypto_free_shash(bctx->shash); - } -} - -static struct ahash_alg algs_sha1_sha224_sha256[] = { -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "sha1", - .cra_driver_name = "mtk-sha1", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "sha224", - .cra_driver_name = "mtk-sha224", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA224_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "sha256", - .cra_driver_name = "mtk-sha256", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .setkey = mtk_sha_setkey, - .halg.digestsize = SHA1_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "hmac(sha1)", - .cra_driver_name = "mtk-hmac-sha1", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA1_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + - sizeof(struct mtk_sha_hmac_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_sha1_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .setkey = mtk_sha_setkey, - .halg.digestsize = SHA224_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "hmac(sha224)", - .cra_driver_name = "mtk-hmac-sha224", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA224_BLOCK_SIZE, - 
.cra_ctxsize = sizeof(struct mtk_sha_ctx) + - sizeof(struct mtk_sha_hmac_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_sha224_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .setkey = mtk_sha_setkey, - .halg.digestsize = SHA256_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "hmac(sha256)", - .cra_driver_name = "mtk-hmac-sha256", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA256_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + - sizeof(struct mtk_sha_hmac_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_sha256_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -}; - -static struct ahash_alg algs_sha384_sha512[] = { -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "sha384", - .cra_driver_name = "mtk-sha384", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "sha512", - .cra_driver_name = "mtk-sha512", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC, - .cra_blocksize = SHA512_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .setkey = mtk_sha_setkey, - .halg.digestsize = SHA384_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "hmac(sha384)", - .cra_driver_name = "mtk-hmac-sha384", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA384_BLOCK_SIZE, - .cra_ctxsize = sizeof(struct mtk_sha_ctx) + - sizeof(struct mtk_sha_hmac_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_sha384_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -{ - .init = mtk_sha_init, - .update = mtk_sha_update, - .final = mtk_sha_final, - .finup = mtk_sha_finup, - .digest = mtk_sha_digest, - .export = mtk_sha_export, - .import = mtk_sha_import, - .setkey = mtk_sha_setkey, - .halg.digestsize = SHA512_DIGEST_SIZE, - .halg.statesize = sizeof(struct mtk_sha_reqctx), - .halg.base = { - .cra_name = "hmac(sha512)", - .cra_driver_name = "mtk-hmac-sha512", - .cra_priority = 400, - .cra_flags = CRYPTO_ALG_ASYNC | - CRYPTO_ALG_NEED_FALLBACK, - .cra_blocksize = SHA512_BLOCK_SIZE, - 
.cra_ctxsize = sizeof(struct mtk_sha_ctx) + - sizeof(struct mtk_sha_hmac_ctx), - .cra_alignmask = SHA_ALIGN_MSK, - .cra_module = THIS_MODULE, - .cra_init = mtk_sha_cra_sha512_init, - .cra_exit = mtk_sha_cra_exit, - } -}, -}; - -static void mtk_sha_queue_task(unsigned long data) -{ - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; - - mtk_sha_handle_queue(sha->cryp, sha->id - MTK_RING2, NULL); -} - -static void mtk_sha_done_task(unsigned long data) -{ - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)data; - struct mtk_cryp *cryp = sha->cryp; - - mtk_sha_unmap(cryp, sha); - mtk_sha_complete(cryp, sha); -} - -static irqreturn_t mtk_sha_irq(int irq, void *dev_id) -{ - struct mtk_sha_rec *sha = (struct mtk_sha_rec *)dev_id; - struct mtk_cryp *cryp = sha->cryp; - u32 val = mtk_sha_read(cryp, RDR_STAT(sha->id)); - - mtk_sha_write(cryp, RDR_STAT(sha->id), val); - - if (likely((SHA_FLAGS_BUSY & sha->flags))) { - mtk_sha_write(cryp, RDR_PROC_COUNT(sha->id), MTK_CNT_RST); - mtk_sha_write(cryp, RDR_THRESH(sha->id), - MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE); - - tasklet_schedule(&sha->done_task); - } else { - dev_warn(cryp->dev, "SHA interrupt when no active requests.\n"); - } - return IRQ_HANDLED; -} - -/* - * Two SHA records are used to get extra performance. - * This is similar to mtk_aes_record_init(). - */ -static int mtk_sha_record_init(struct mtk_cryp *cryp) -{ - struct mtk_sha_rec **sha = cryp->sha; - int i, err = -ENOMEM; - - for (i = 0; i < MTK_REC_NUM; i++) { - sha[i] = kzalloc(sizeof(**sha), GFP_KERNEL); - if (!sha[i]) - goto err_cleanup; - - sha[i]->cryp = cryp; - - spin_lock_init(&sha[i]->lock); - crypto_init_queue(&sha[i]->queue, SHA_QUEUE_SIZE); - - tasklet_init(&sha[i]->queue_task, mtk_sha_queue_task, - (unsigned long)sha[i]); - tasklet_init(&sha[i]->done_task, mtk_sha_done_task, - (unsigned long)sha[i]); - } - - /* Link to ring2 and ring3 respectively */ - sha[0]->id = MTK_RING2; - sha[1]->id = MTK_RING3; - - cryp->rec = 1; - - return 0; - -err_cleanup: - for (; i--; ) - kfree(sha[i]); - return err; -} - -static void mtk_sha_record_free(struct mtk_cryp *cryp) -{ - int i; - - for (i = 0; i < MTK_REC_NUM; i++) { - tasklet_kill(&cryp->sha[i]->done_task); - tasklet_kill(&cryp->sha[i]->queue_task); - - kfree(cryp->sha[i]); - } -} - -static void mtk_sha_unregister_algs(void) -{ - int i; - - for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) - crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]); - - for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) - crypto_unregister_ahash(&algs_sha384_sha512[i]); -} - -static int mtk_sha_register_algs(void) -{ - int err, i; - - for (i = 0; i < ARRAY_SIZE(algs_sha1_sha224_sha256); i++) { - err = crypto_register_ahash(&algs_sha1_sha224_sha256[i]); - if (err) - goto err_sha_224_256_algs; - } - - for (i = 0; i < ARRAY_SIZE(algs_sha384_sha512); i++) { - err = crypto_register_ahash(&algs_sha384_sha512[i]); - if (err) - goto err_sha_384_512_algs; - } - - return 0; - -err_sha_384_512_algs: - for (; i--; ) - crypto_unregister_ahash(&algs_sha384_sha512[i]); - i = ARRAY_SIZE(algs_sha1_sha224_sha256); -err_sha_224_256_algs: - for (; i--; ) - crypto_unregister_ahash(&algs_sha1_sha224_sha256[i]); - - return err; -} - -int mtk_hash_alg_register(struct mtk_cryp *cryp) -{ - int err; - - INIT_LIST_HEAD(&cryp->sha_list); - - /* Initialize two hash records */ - err = mtk_sha_record_init(cryp); - if (err) - goto err_record; - - err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING2], mtk_sha_irq, - 0, "mtk-sha", cryp->sha[0]); - if (err) { -
dev_err(cryp->dev, "unable to request sha irq0.\n"); - goto err_res; - } - - err = devm_request_irq(cryp->dev, cryp->irq[MTK_RING3], mtk_sha_irq, - 0, "mtk-sha", cryp->sha[1]); - if (err) { - dev_err(cryp->dev, "unable to request sha irq1.\n"); - goto err_res; - } - - /* Enable ring2 and ring3 interrupt for hash */ - mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING2), MTK_IRQ_RDR2); - mtk_sha_write(cryp, AIC_ENABLE_SET(MTK_RING3), MTK_IRQ_RDR3); - - spin_lock(&mtk_sha.lock); - list_add_tail(&cryp->sha_list, &mtk_sha.dev_list); - spin_unlock(&mtk_sha.lock); - - err = mtk_sha_register_algs(); - if (err) - goto err_algs; - - return 0; - -err_algs: - spin_lock(&mtk_sha.lock); - list_del(&cryp->sha_list); - spin_unlock(&mtk_sha.lock); -err_res: - mtk_sha_record_free(cryp); -err_record: - - dev_err(cryp->dev, "mtk-sha initialization failed.\n"); - return err; -} - -void mtk_hash_alg_release(struct mtk_cryp *cryp) -{ - spin_lock(&mtk_sha.lock); - list_del(&cryp->sha_list); - spin_unlock(&mtk_sha.lock); - - mtk_sha_unregister_algs(); - mtk_sha_record_free(cryp); -}
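A few illustrative notes on the removed code follow, for readers reviewing the removal. None of the snippets below are part of the patch or of the driver; they only restate logic from the deleted files in small standalone C programs, with hypothetical helper names.

First, the ring-banked register macros in the removed mtk-regs.h place each ring's command-descriptor (CDR) bank at a 4 KB stride from ring 0, the result-descriptor (RDR) bank 0x800 into the same 4 KB window, and grow the per-ring AIC blocks downwards from 0xE000. A minimal sketch of that arithmetic:

#include <assert.h>
#include <stdint.h>

/* Mirrors CDR_BASE_ADDR_LO(x): ring x's command bank at a 4 KB stride. */
static uint32_t cdr_base_addr_lo(uint32_t ring)
{
	return 0x0 + (ring << 12);
}

/* Mirrors RDR_BASE_ADDR_LO(x): result bank 0x800 into the same window. */
static uint32_t rdr_base_addr_lo(uint32_t ring)
{
	return 0x800 + (ring << 12);
}

/* Mirrors AIC_ENABLE_SET(x): per-ring AIC blocks grow downwards. */
static uint32_t aic_enable_set(uint32_t ring)
{
	return 0xE00C - (ring << 12);
}

int main(void)
{
	assert(cdr_base_addr_lo(3) == 0x3000);
	assert(rdr_base_addr_lo(2) == 0x2800);
	assert(aic_enable_set(1) == 0xD00C);
	return 0;
}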
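Second, mtk_sha_ring_shift() in the removed mtk-sha.c advances the command and result rings in lockstep and wraps both when cmd_next reaches the end of the MTK_DESC_NUM-entry ring, so a single check covers both rings. A freestanding sketch of the same wrap logic (demo types only, using slot indices instead of descriptor pointers):

#include <assert.h>

#define DEMO_DESC_NUM 512 /* mirrors MTK_DESC_NUM */

struct demo_ring {
	unsigned int cmd_next;
	unsigned int res_next;
};

/* Hand out the current slot in both rings, then advance and wrap. */
static unsigned int demo_ring_shift(struct demo_ring *ring)
{
	unsigned int slot = ring->cmd_next;

	ring->cmd_next++;
	ring->res_next++;
	if (ring->cmd_next == DEMO_DESC_NUM) {
		ring->cmd_next = 0;
		ring->res_next = 0;
	}
	return slot;
}

int main(void)
{
	struct demo_ring ring = {
		.cmd_next = DEMO_DESC_NUM - 1,
		.res_next = DEMO_DESC_NUM - 1,
	};

	assert(demo_ring_shift(&ring) == DEMO_DESC_NUM - 1);
	assert(ring.cmd_next == 0 && ring.res_next == 0); /* wrapped */
	return 0;
}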
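Third, the padding rules documented above the removed mtk_sha_fill_padding() can be checked numerically: message, 0x80 byte, zero padding and the length field must fill a whole number of blocks. A small sketch (hypothetical helper names) computing padlen for the two block sizes:

#include <assert.h>
#include <stddef.h>

/* SHA1/SHA224/SHA256: 64-byte blocks, 8-byte length field. */
static size_t sha256_padlen(size_t msglen)
{
	size_t index = msglen & 0x3f;

	return (index < 56) ? (56 - index) : (64 + 56 - index);
}

/* SHA384/SHA512: 128-byte blocks, 16-byte length field. */
static size_t sha512_padlen(size_t msglen)
{
	size_t index = msglen & 0x7f;

	return (index < 112) ? (112 - index) : (128 + 112 - index);
}

int main(void)
{
	/* 0-byte message: 56 pad bytes + 8 length bytes = one 64-byte block */
	assert(sha256_padlen(0) + 8 == 64);
	/* 56-byte message: padding spills into a second block */
	assert(sha256_padlen(56) == 64);
	assert((56 + sha256_padlen(56) + 8) % 64 == 0);
	assert((112 + sha512_padlen(112) + 16) % 128 == 0);
	return 0;
}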
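Finally, mtk_sha_setkey() in the removed driver prepares the classic HMAC inner/outer pads: a key longer than the block size is first hashed down to the digest size, then the (possibly shortened) key is zero-padded to the block size and XORed with the HMAC constants 0x36 and 0x5c (HMAC_IPAD_VALUE/HMAC_OPAD_VALUE in the kernel). A userspace sketch of just the pad preparation, assuming the caller has already reduced over-long keys:

#include <stdint.h>
#include <string.h>

#define HMAC_IPAD_VALUE 0x36
#define HMAC_OPAD_VALUE 0x5c

/* Zero-pad the key to the block size bs (keylen <= bs assumed),
 * then derive the two pads by XORing with the HMAC constants. */
static void hmac_prepare_pads(const uint8_t *key, size_t keylen, size_t bs,
			      uint8_t *ipad, uint8_t *opad)
{
	size_t i;

	memcpy(ipad, key, keylen);
	memset(ipad + keylen, 0, bs - keylen);
	memcpy(opad, ipad, bs);

	for (i = 0; i < bs; i++) {
		ipad[i] ^= HMAC_IPAD_VALUE;
		opad[i] ^= HMAC_OPAD_VALUE;
	}
}

int main(void)
{
	uint8_t ipad[64], opad[64];
	const uint8_t key[] = "demo-key";

	/* 64-byte block size, as for SHA1/SHA224/SHA256 */
	hmac_prepare_pads(key, sizeof(key) - 1, sizeof(ipad), ipad, opad);
	return 0;
}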