@@ -8,6 +8,9 @@
#include "adf_cfg.h"
#include "adf_common_drv.h"
+#define ADF_MAX_RING_THRESHOLD 80
+#define ADF_PERCENT(tot, percent) (((tot) * (percent)) / 100)
+
static inline u32 adf_modulo(u32 data, u32 shift)
{
u32 div = data >> shift;
@@ -77,6 +80,11 @@ static void adf_disable_ring_irq(struct adf_etr_bank_data *bank, u32 ring)
bank->irq_mask);
}
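+/* True if the number of requests in flight on this ring exceeds its threshold */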
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring)
+{
+ return atomic_read(ring->inflights) > ring->threshold;
+}
+
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg)
{
struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(ring->bank->accel_dev);
@@ -217,6 +225,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
struct adf_etr_bank_data *bank;
struct adf_etr_ring_data *ring;
char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];
+ int max_inflights;
u32 ring_num;
int ret;
@@ -263,6 +272,8 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
ring->ring_size = adf_verify_ring_size(msg_size, num_msgs);
ring->head = 0;
ring->tail = 0;
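+ /* Consider the ring nearly full above ADF_MAX_RING_THRESHOLD percent of its capacity */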
+ max_inflights = ADF_MAX_INFLIGHTS(ring->ring_size, ring->msg_size);
+ ring->threshold = ADF_PERCENT(max_inflights, ADF_MAX_RING_THRESHOLD);
atomic_set(ring->inflights, 0);
ret = adf_init_ring(ring);
if (ret)
@@ -14,6 +14,7 @@ int adf_create_ring(struct adf_accel_dev *accel_dev, const char *section,
const char *ring_name, adf_callback_fn callback,
int poll_mode, struct adf_etr_ring_data **ring_ptr);
+bool adf_ring_nearly_full(struct adf_etr_ring_data *ring);
int adf_send_message(struct adf_etr_ring_data *ring, u32 *msg);
void adf_remove_ring(struct adf_etr_ring_data *ring);
#endif
@@ -22,6 +22,7 @@ struct adf_etr_ring_data {
spinlock_t lock; /* protects ring data struct */
u16 head;
u16 tail;
+ u32 threshold; /* inflight count above which the ring is nearly full */
u8 ring_number;
u8 ring_size;
u8 msg_size;
@@ -940,14 +940,17 @@ void qat_alg_callback(void *resp)
}
static int qat_alg_send_sym_message(struct qat_crypto_request *qat_req,
- struct qat_crypto_instance *inst)
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
{
- struct qat_alg_req req;
+ struct qat_alg_req *req = &qat_req->job;
+ struct qat_instance_backlog *bl = &inst->backlog;
- req.fw_req = (u32 *)&qat_req->req;
- req.tx_ring = inst->sym_tx;
+ req->fw_req = (u32 *)&qat_req->req;
+ req->tx_ring = inst->sym_tx;
+ req->base = base;
- return qat_alg_send_message(&req);
+ return qat_alg_send_message(req, bl);
}
static int qat_alg_aead_dec(struct aead_request *areq)
@@ -987,7 +990,7 @@ static int qat_alg_aead_dec(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + cipher_param->cipher_length;
- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1031,7 +1034,7 @@ static int qat_alg_aead_enc(struct aead_request *areq)
auth_param->auth_off = 0;
auth_param->auth_len = areq->assoclen + areq->cryptlen;
- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &areq->base);
if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1212,7 +1215,7 @@ static int qat_alg_skcipher_encrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
@@ -1278,7 +1281,7 @@ static int qat_alg_skcipher_decrypt(struct skcipher_request *req)
qat_alg_set_req_iv(qat_req);
qat_alg_update_iv(qat_req);
- ret = qat_alg_send_sym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_sym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
qat_alg_free_bufl(ctx->inst, qat_req);
@@ -6,7 +6,7 @@
#define ADF_MAX_RETRIES 20
-int qat_alg_send_message(struct qat_alg_req *req)
+static int qat_alg_send_message_retry(struct qat_alg_req *req)
{
int ret = 0, ctr = 0;
@@ -19,3 +19,73 @@ int qat_alg_send_message(struct qat_alg_req *req)
return -EINPROGRESS;
}
+
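+/* Work handler: resubmit backlogged requests in order until the HW ring fills up again */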
+static void qat_alg_send_backlog(struct work_struct *work)
+{
+ struct qat_instance_backlog *backlog;
+ struct qat_alg_req *req, *tmp;
+
+ backlog = container_of(work, struct qat_instance_backlog, work);
+
+ spin_lock_bh(&backlog->lock);
+ list_for_each_entry_safe(req, tmp, &backlog->list, list) {
+ if (adf_send_message(req->tx_ring, req->fw_req)) {
+ /* Ring still full: stop draining and reschedule the worker to retry */
+ queue_work(backlog->wq, &backlog->work);
+ break;
+ }
+ list_del(&req->list);
+ req->base->complete(req->base, -EINPROGRESS);
+ }
+ spin_unlock_bh(&backlog->lock);
+}
+
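+/* Add a request to the instance backlog and make sure the drain worker is scheduled */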
+static void qat_alg_backlog_req(struct qat_alg_req *req,
+ struct qat_instance_backlog *backlog)
+{
+ spin_lock_bh(&backlog->lock);
+ if (list_empty(&backlog->list)) {
+ /* First element: the work cannot be pending here, so (re)initialise and schedule it */
+ INIT_WORK(&backlog->work, qat_alg_send_backlog);
+ queue_work(backlog->wq, &backlog->work);
+ }
+ list_add_tail(&req->list, &backlog->list);
+ spin_unlock_bh(&backlog->lock);
+}
+
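+/*
+ * Fast path: try to put the request straight onto the HW ring. It is added to
+ * the backlog and -EBUSY returned instead if requests are already backlogged,
+ * the ring is nearly full, or the submission fails.
+ */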
+static int qat_alg_send_message_maybacklog(struct qat_alg_req *req,
+ struct qat_instance_backlog *backlog)
+{
+ struct adf_etr_ring_data *tx_ring = req->tx_ring;
+ u32 *fw_req = req->fw_req;
+
+ /* If any request is already backlogged, then add to backlog list */
+ if (!list_empty(&backlog->list))
+ goto enqueue;
+
+ /* If ring is nearly full, then add to backlog list */
+ if (adf_ring_nearly_full(tx_ring))
+ goto enqueue;
+
+ /* If adding request to HW ring fails, then add to backlog list */
+ if (adf_send_message(tx_ring, fw_req))
+ goto enqueue;
+
+ return -EINPROGRESS;
+
+enqueue:
+ qat_alg_backlog_req(req, backlog);
+
+ return -EBUSY;
+}
+
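+/* Backlog-capable requests are deferred when busy; others fall back to inline retries */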
+int qat_alg_send_message(struct qat_alg_req *req, struct qat_instance_backlog *bl)
+{
+ u32 flags = req->base->flags;
+
+ if (flags & CRYPTO_TFM_REQ_MAY_BACKLOG)
+ return qat_alg_send_message_maybacklog(req, bl);
+ else
+ return qat_alg_send_message_retry(req);
+}
@@ -5,6 +5,6 @@
#include "qat_crypto.h"
-int qat_alg_send_message(struct qat_alg_req *req);
+int qat_alg_send_message(struct qat_alg_req *req, struct qat_instance_backlog *bl);
#endif
@@ -136,17 +136,21 @@ struct qat_asym_request {
} areq;
int err;
void (*cb)(struct icp_qat_fw_pke_resp *resp);
+ struct qat_alg_req job; /* persists while the request sits on the backlog */
} __aligned(64);
static int qat_alg_send_asym_message(struct qat_asym_request *qat_req,
- struct qat_crypto_instance *inst)
+ struct qat_crypto_instance *inst,
+ struct crypto_async_request *base)
{
- struct qat_alg_req req;
+ struct qat_alg_req *req = &qat_req->job;
+ struct qat_instance_backlog *bl = &inst->backlog;
- req.fw_req = (u32 *)&qat_req->req;
- req.tx_ring = inst->pke_tx;
+ req->fw_req = (u32 *)&qat_req->req;
+ req->tx_ring = inst->pke_tx;
+ req->base = base;
- return qat_alg_send_message(&req);
+ return qat_alg_send_message(req, bl);
}
static void qat_dh_cb(struct icp_qat_fw_pke_resp *resp)
@@ -350,7 +354,7 @@ static int qat_dh_compute_value(struct kpp_request *req)
msg->input_param_count = n_input_params;
msg->output_param_count = 1;
- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_asym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
@@ -745,7 +749,7 @@ static int qat_rsa_enc(struct akcipher_request *req)
msg->input_param_count = 3;
msg->output_param_count = 1;
- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_asym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
@@ -898,7 +902,7 @@ static int qat_rsa_dec(struct akcipher_request *req)
msg->output_param_count = 1;
- ret = qat_alg_send_asym_message(qat_req, ctx->inst);
+ ret = qat_alg_send_asym_message(qat_req, ctx->inst, &req->base);
if (ret == -ENOSPC)
goto unmap_all;
@@ -16,8 +16,31 @@
static struct service_hndl qat_crypto;
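+/* Set up the backlog list, its lock and the ordered workqueue that drains it */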
+static int qat_instance_backlog_init(struct qat_instance_backlog *bl,
+ int accel_id, int instance_id)
+{
+ bl->wq = alloc_ordered_workqueue("qat_bl_%d.%d", WQ_MEM_RECLAIM,
+ accel_id, instance_id);
+ if (!bl->wq)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&bl->list);
+ spin_lock_init(&bl->lock);
+
+ return 0;
+}
+
+static void qat_instance_backlog_free(struct qat_instance_backlog *bl)
+{
+ if (bl->wq) {
+ destroy_workqueue(bl->wq);
+ bl->wq = NULL;
+ }
+}
+
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
- atomic_dec(&inst->refctr);
+ /* Tear down the backlog only when the last user drops the instance */
+ if (atomic_dec_and_test(&inst->refctr))
+ qat_instance_backlog_free(&inst->backlog);
adf_dev_put(inst->accel_dev);
}
@@ -54,6 +77,7 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
struct qat_crypto_instance *inst = NULL, *tmp_inst;
unsigned long best = ~0;
+ int ret;
list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
unsigned long ctr;
@@ -96,8 +120,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
}
}
if (inst) {
if (adf_dev_get(accel_dev)) {
dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
return NULL;
}
- atomic_inc(&inst->refctr);
+ /* Set up the backlog only for the first user of this instance */
+ if (atomic_inc_return(&inst->refctr) == 1) {
+ ret = qat_instance_backlog_init(&inst->backlog,
+ accel_dev->accel_id, inst->id);
+ if (ret) {
+ atomic_dec(&inst->refctr);
+ adf_dev_put(accel_dev);
+ return NULL;
+ }
+ }
@@ -12,6 +12,15 @@
struct qat_alg_req {
u32 *fw_req;
struct adf_etr_ring_data *tx_ring;
+ struct crypto_async_request *base;
+ struct list_head list;
+};
+
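+/* Holds requests deferred while the TX ring was busy; drained in order by a worker */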
+struct qat_instance_backlog {
+ struct workqueue_struct *wq;
+ struct list_head list;
+ spinlock_t lock; /* protects backlog list */
+ struct work_struct work;
};
struct qat_crypto_instance {
@@ -24,6 +33,7 @@ struct qat_crypto_instance {
unsigned long state;
int id;
atomic_t refctr;
+ struct qat_instance_backlog backlog;
};
#define QAT_MAX_BUFF_DESC 4
@@ -82,6 +92,7 @@ struct qat_crypto_request {
u8 iv[AES_BLOCK_SIZE];
};
bool encryption;
+ struct qat_alg_req job; /* persists while the request sits on the backlog */
};
static inline bool adf_hw_dev_has_crypto(struct adf_accel_dev *accel_dev)