@@ -109,6 +109,9 @@
#define QEDN_TASK_CLEANUP_TMO 3000 /* 3 sec */
#define QEDN_DRAIN_TMO 1000 /* 1 sec */
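+/* Maximum outstanding async event (AER) commands per connection */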
+#define QEDN_MAX_OUTSTAND_ASYNC 32
+#define QEDN_INVALID_CCCID (-1)
+
enum qedn_state {
QEDN_STATE_CORE_PROBED = 0,
QEDN_STATE_CORE_OPEN,
@@ -196,6 +199,7 @@ struct qedn_ctx {
enum qedn_task_flags {
QEDN_TASK_IS_ICREQ,
+ QEDN_TASK_ASYNC,
QEDN_TASK_USED_BY_FW,
QEDN_TASK_WAIT_FOR_CLEANUP,
};
@@ -373,6 +377,10 @@ struct qedn_conn_ctx {
struct nvmetcp_icresp_hdr_psh icresp;
struct qedn_icreq_padding *icreq_pad;
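+ /* Pseudo CCCIDs currently in use by outstanding async event commands */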
+ DECLARE_BITMAP(async_cccid_idx_map, QEDN_MAX_OUTSTAND_ASYNC);
+ /* Spinlock for fetching pseudo CCCID for async request */
+ spinlock_t async_cccid_bitmap_lock;
+
/* "dummy" socket */
struct socket *sock;
};
@@ -328,6 +328,7 @@ static int qedn_create_queue(struct nvme_tcp_ofld_queue *queue, int qid, size_t
atomic_set(&conn_ctx->destroy_conn_indicator, 0);
spin_lock_init(&conn_ctx->conn_state_lock);
+ spin_lock_init(&conn_ctx->async_cccid_bitmap_lock);
INIT_WORK(&conn_ctx->nvme_req_fp_wq_entry, qedn_nvme_req_fp_wq_handler);
conn_ctx->nvme_req_fp_wq = qedn->nvme_req_fp_wq;
@@ -259,10 +259,45 @@ void qedn_common_clear_fw_sgl(struct storage_sgl_task_params *sgl_task_params)
sgl_task_params->num_sges = 0;
}
-inline void qedn_host_reset_cccid_itid_entry(struct qedn_conn_ctx *conn_ctx,
- u16 cccid)
+inline void qedn_host_reset_cccid_itid_entry(struct qedn_conn_ctx *conn_ctx, u16 cccid, bool async)
{
conn_ctx->host_cccid_itid[cccid].itid = cpu_to_le16(QEDN_INVALID_ITID);
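+ /* Async pseudo CCCIDs are offset by NVME_AQ_DEPTH; shift back to the bitmap index */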
+ if (unlikely(async))
+ clear_bit(cccid - NVME_AQ_DEPTH, conn_ctx->async_cccid_idx_map);
+}
+
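+/* Allocate a free pseudo CCCID index from the per-connection async bitmap */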
+static int qedn_get_free_idx(struct qedn_conn_ctx *conn_ctx, unsigned int size)
+{
+ int idx;
+
+ spin_lock(&conn_ctx->async_cccid_bitmap_lock);
+ idx = find_first_zero_bit(conn_ctx->async_cccid_idx_map, size);
+ if (unlikely(idx >= size))
+ idx = QEDN_INVALID_CCCID;
+ else
+ set_bit(idx, conn_ctx->async_cccid_idx_map);
+ spin_unlock(&conn_ctx->async_cccid_bitmap_lock);
+
+ return idx;
+}
+
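+/* Async pseudo CCCIDs start at NVME_AQ_DEPTH so they do not collide with tag-based CCCIDs */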
+int qedn_get_free_async_cccid(struct qedn_conn_ctx *conn_ctx)
+{
+ int async_cccid;
+
+ async_cccid = qedn_get_free_idx(conn_ctx, QEDN_MAX_OUTSTAND_ASYNC);
+ if (unlikely(async_cccid == QEDN_INVALID_CCCID))
+ pr_err("No free pseudo CCCID for async request\n");
+ else
+ async_cccid += NVME_AQ_DEPTH;
+
+ return async_cccid;
}
inline void qedn_host_set_cccid_itid_entry(struct qedn_conn_ctx *conn_ctx, u16 cccid, u16 itid)
@@ -363,10 +398,12 @@ void qedn_return_task_to_pool(struct qedn_conn_ctx *conn_ctx,
struct qedn_fp_queue *fp_q = conn_ctx->fp_q;
struct qedn_io_resources *io_resrc;
unsigned long lock_flags;
+ bool async;
io_resrc = &fp_q->host_resrc;
spin_lock_irqsave(&qedn_task->lock, lock_flags);
+ async = test_bit(QEDN_TASK_ASYNC, &qedn_task->flags);
qedn_task->valid = 0;
qedn_task->flags = 0;
qedn_clear_sgl(conn_ctx->qedn, qedn_task);
@@ -374,7 +411,7 @@ void qedn_return_task_to_pool(struct qedn_conn_ctx *conn_ctx,
spin_lock(&conn_ctx->task_list_lock);
list_del(&qedn_task->entry);
- qedn_host_reset_cccid_itid_entry(conn_ctx, qedn_task->cccid);
+ qedn_host_reset_cccid_itid_entry(conn_ctx, qedn_task->cccid, async);
spin_unlock(&conn_ctx->task_list_lock);
atomic_dec(&conn_ctx->num_active_tasks);
@@ -447,6 +484,67 @@ qedn_get_task_from_pool_insist(struct qedn_conn_ctx *conn_ctx, u16 cccid)
return qedn_task;
}
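+/* Build and post a FW task for an NVMe async event request (AER) */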
+void qedn_send_async_event_cmd(struct qedn_task_ctx *qedn_task,
+ struct qedn_conn_ctx *conn_ctx)
+{
+ struct nvme_tcp_ofld_req *async_req = qedn_task->req;
+ struct nvme_command *nvme_cmd = &async_req->nvme_cmd;
+ struct storage_sgl_task_params *sgl_task_params;
+ struct nvmetcp_task_params task_params;
+ struct nvmetcp_cmd_capsule_hdr cmd_hdr;
+ struct nvmetcp_conn_params conn_params;
+ struct nvmetcp_wqe *chain_sqe;
+ struct nvmetcp_wqe local_sqe;
+ int i;
+
+ set_bit(QEDN_TASK_ASYNC, &qedn_task->flags);
+ nvme_cmd->common.command_id = qedn_task->cccid;
+ qedn_task->task_size = 0;
+
+ /* Initialize sgl params */
+ sgl_task_params = &qedn_task->sgl_task_params;
+ sgl_task_params->total_buffer_size = 0;
+ sgl_task_params->num_sges = 0;
+ sgl_task_params->small_mid_sge = false;
+
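+ /* Encode the task pointer in the opaque field used to locate the task on completion */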
+ task_params.opq.lo = cpu_to_le32(((u64)(qedn_task)) & 0xffffffff);
+ task_params.opq.hi = cpu_to_le32(((u64)(qedn_task)) >> 32);
+
+ /* Initialize task params */
+ task_params.context = qedn_task->fw_task_ctx;
+ task_params.sqe = &local_sqe;
+ task_params.tx_io_size = 0;
+ task_params.rx_io_size = 0;
+ task_params.conn_icid = (u16)conn_ctx->conn_handle;
+ task_params.itid = qedn_task->itid;
+ task_params.cq_rss_number = conn_ctx->default_cq;
+ task_params.send_write_incapsule = 0;
+
+ /* Initialize conn params */
+ conn_params.max_burst_length = QEDN_MAX_IO_SIZE;
+
+ /* Internally, the async event command is sent as a zero-length read */
+ cmd_hdr.chdr.pdu_type = nvme_tcp_cmd;
+ cmd_hdr.chdr.flags = 0;
+ cmd_hdr.chdr.hlen = sizeof(cmd_hdr);
+ cmd_hdr.chdr.pdo = 0x0;
+ cmd_hdr.chdr.plen_swapped = cpu_to_le32(__swab32(cmd_hdr.chdr.hlen));
+
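+ /* Copy the NVMe command into the PDU PSH in byte-swapped form, as expected by the HW */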
+ for (i = 0; i < 16; i++)
+ cmd_hdr.pshdr.raw_swapped[i] = cpu_to_le32(__swab32(((u32 *)nvme_cmd)[i]));
+
+ qed_ops->init_read_io(&task_params, &conn_params, &cmd_hdr, &qedn_task->sgl_task_params);
+
+ set_bit(QEDN_TASK_USED_BY_FW, &qedn_task->flags);
+ atomic_inc(&conn_ctx->num_active_fw_tasks);
+
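+ /* Post the SQE on the FW SQ chain and ring the doorbell under the doorbell lock */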
+ spin_lock(&conn_ctx->ep.doorbell_lock);
+ chain_sqe = qed_chain_produce(&conn_ctx->ep.fw_sq_chain);
+ memcpy(chain_sqe, &local_sqe, sizeof(local_sqe));
+ qedn_ring_doorbell(conn_ctx);
+ spin_unlock(&conn_ctx->ep.doorbell_lock);
+}
+
int qedn_send_read_cmd(struct qedn_task_ctx *qedn_task, struct qedn_conn_ctx *conn_ctx)
{
struct nvme_command *nvme_cmd = &qedn_task->req->nvme_cmd;
@@ -580,6 +678,24 @@ static void qedn_fetch_request(struct qedn_conn_ctx *qedn_conn)
spin_unlock(&qedn_conn->nvme_req_lock);
}
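+/* Complete a request back to the offload layer with a host path error status */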
+static void qedn_return_error_req(struct nvme_tcp_ofld_req *req)
+{
+ __le16 status = cpu_to_le16(NVME_SC_HOST_PATH_ERROR << 1);
+ union nvme_result res = {};
+
+ if (!req)
+ return;
+
+ /* Call request done to complete the request */
+ if (req->done)
+ req->done(req, &res, status);
+ else
+ pr_err("request done not set\n");
+}
+
static bool qedn_process_req(struct qedn_conn_ctx *qedn_conn)
{
struct qedn_task_ctx *qedn_task;
@@ -595,9 +711,16 @@ static bool qedn_process_req(struct qedn_conn_ctx *qedn_conn)
req = qedn_conn->req;
rq = blk_mq_rq_from_pdu(req);
- /* Placeholder - async */
+ if (unlikely(req->async)) {
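+ /* Async event requests are not tag-based; allocate a pseudo CCCID for them */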
+ cccid = qedn_get_free_async_cccid(qedn_conn);
+ if (cccid == QEDN_INVALID_CCCID) {
+ qedn_return_error_req(req);
+ goto doorbell;
+ }
+ } else {
+ cccid = rq->tag;
+ }
- cccid = rq->tag;
qedn_task = qedn_get_task_from_pool_insist(qedn_conn, cccid);
if (unlikely(!qedn_task)) {
pr_err("Not able to allocate task context\n");
@@ -607,7 +730,10 @@ static bool qedn_process_req(struct qedn_conn_ctx *qedn_conn)
req->private_data = qedn_task;
qedn_task->req = req;
- /* Placeholder - handle (req->async) */
+ if (unlikely(req->async)) {
+ qedn_send_async_event_cmd(qedn_task, qedn_conn);
+ goto doorbell;
+ }
/* Check if there are physical segments in request to determine the task size.
* The logic of nvme_tcp_set_sg_null() will be implemented as part of
@@ -732,14 +858,26 @@ static inline int qedn_comp_valid_task(struct qedn_task_ctx *qedn_task,
int qedn_process_nvme_cqe(struct qedn_task_ctx *qedn_task, struct nvme_completion *cqe)
{
+ struct qedn_conn_ctx *conn_ctx = qedn_task->qedn_conn;
+ struct nvme_tcp_ofld_req *req;
int rc = 0;
+ bool async;
+
+ async = test_bit(QEDN_TASK_ASYNC, &qedn_task->flags);
/* cqe arrives swapped */
qedn_swap_bytes((u32 *)cqe, (sizeof(*cqe) / sizeof(u32)));
- /* Placeholder - async */
-
- rc = qedn_comp_valid_task(qedn_task, &cqe->result, cqe->status);
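+ /* Async events bypass the regular completion path and are completed via req->done */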
+ if (unlikely(async)) {
+ req = qedn_task->req;
+ qedn_return_task_to_pool(conn_ctx, qedn_task);
+ if (req->done)
+ req->done(req, &cqe->result, cqe->status);
+ else
+ pr_err("request done not set for async request\n");
+ } else {
+ rc = qedn_comp_valid_task(qedn_task, &cqe->result, cqe->status);
+ }
return rc;
}