@@ -113,6 +113,7 @@ int nvme_tcp_ofld_report_queue_err(struct nvme_tcp_ofld_queue *queue)
/**
* nvme_tcp_ofld_req_done() - NVMeTCP Offload request done callback
* function. Pointed to by nvme_tcp_ofld_req->done.
+ * Handles both NVME_TCP_F_DATA_SUCCESS completions and regular NVMe CQEs.
* @req: NVMeTCP offload request to complete.
* @result: The nvme_result.
* @status: The completion status.
@@ -125,7 +126,10 @@ nvme_tcp_ofld_req_done(struct nvme_tcp_ofld_req *req,
union nvme_result *result,
__le16 status)
{
- /* Placeholder - complete request with/without error */
+ struct request *rq = blk_mq_rq_from_pdu(req);
+
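+ /*
+ * The raw NVMe status code is shifted into CQE status-field layout
+ * (phase bit in bit 0), which is what nvme_try_complete_req() expects.
+ */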
+ if (!nvme_try_complete_req(rq, cpu_to_le16(status << 1), *result))
+ nvme_complete_rq(rq);
}
struct nvme_tcp_ofld_dev *
@@ -754,9 +758,10 @@ nvme_tcp_ofld_init_request(struct blk_mq_tag_set *set,
{
struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
struct nvme_tcp_ofld_ctrl *ctrl = set->driver_data;
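+ /*
+ * I/O requests (tag_set) map hctx_idx to offload queue hctx_idx + 1;
+ * admin requests always use queue 0.
+ */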
+ int qid = (set == &ctrl->tag_set) ? hctx_idx + 1 : 0;
- /* Placeholder - init request */
-
+ req->queue = &ctrl->queues[qid];
+ nvme_req(rq)->ctrl = &ctrl->nctrl;
req->done = nvme_tcp_ofld_req_done;
ctrl->dev->ops->init_req(req);
@@ -767,9 +772,32 @@ static blk_status_t
nvme_tcp_ofld_queue_rq(struct blk_mq_hw_ctx *hctx,
const struct blk_mq_queue_data *bd)
{
- /* Call nvme_setup_cmd(...) */
+ struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(bd->rq);
+ struct nvme_tcp_ofld_queue *queue = hctx->driver_data;
+ struct nvme_tcp_ofld_ctrl *ctrl = queue->ctrl;
+ struct nvme_ns *ns = hctx->queue->queuedata;
+ struct nvme_tcp_ofld_dev *dev = queue->dev;
+ struct nvme_tcp_ofld_ops *ops = dev->ops;
+ struct request *rq = bd->rq;
+ bool queue_ready;
+ int rc;
+
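+ /*
+ * Fence commands submitted while the queue is not live; the fabrics
+ * core will requeue or fail them based on the controller state.
+ */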
+ queue_ready = test_bit(NVME_TCP_OFLD_Q_LIVE, &queue->flags);
+ if (!nvmf_check_ready(&ctrl->nctrl, rq, queue_ready))
+ return nvmf_fail_nonready_command(&ctrl->nctrl, rq);
+
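+ /* Build the NVMe command (SQE) for this block request. */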
+ rc = nvme_setup_cmd(ns, rq, &req->nvme_cmd);
+ if (unlikely(rc))
+ return rc;
+
+ blk_mq_start_request(rq);
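+ /*
+ * Hand the request to the vendor offload driver: map the data SGL for
+ * the device, then post the command on the offloaded queue.
+ */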
- /* Call ops->map_sg(...) */
+ rc = ops->map_sg(dev, req);
+ if (unlikely(rc))
+ return rc;
+
+ rc = ops->send_req(req);
+ if (unlikely(rc))
+ return rc;
return BLK_STS_OK;
}
@@ -839,9 +867,47 @@ static int nvme_tcp_ofld_map_queues(struct blk_mq_tag_set *set)
static int nvme_tcp_ofld_poll(struct blk_mq_hw_ctx *hctx)
{
- /* Placeholder - Implement polling mechanism */
+ struct nvme_tcp_ofld_queue *queue = hctx->driver_data;
+ struct nvme_tcp_ofld_dev *dev = queue->dev;
+ struct nvme_tcp_ofld_ops *ops = dev->ops;
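+ /*
+ * Delegate polling to the vendor driver; blk-mq expects the number of
+ * completed requests as the return value.
+ */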
- return 0;
+ return ops->poll_queue(queue);
+}
+
+static enum blk_eh_timer_return
+nvme_tcp_ofld_timeout(struct request *rq, bool reserved)
+{
+ struct nvme_tcp_ofld_req *req = blk_mq_rq_to_pdu(rq);
+ struct nvme_tcp_ofld_ctrl *ctrl = req->queue->ctrl;
+
+ /*
+ * Restart the timer if a controller reset is already scheduled. Any
+ * timed out request would be handled before entering the connecting
+ * state.
+ */
+ if (ctrl->nctrl.state == NVME_CTRL_RESETTING)
+ return BLK_EH_RESET_TIMER;
+
+ dev_warn(ctrl->nctrl.device,
+ "queue %d: timeout request %#x type %d\n",
+ nvme_tcp_ofld_qid(req->queue), rq->tag,
+ req->nvme_cmd.common.opcode);
+
+ if (ctrl->nctrl.state != NVME_CTRL_LIVE) {
+ /*
+ * Tear down immediately if the controller times out while starting
+ * or if error recovery has already started. All outstanding requests
+ * are completed on shutdown, so we return BLK_EH_DONE.
+ */
+ flush_work(&ctrl->err_work);
+ nvme_tcp_ofld_teardown_io_queues(&ctrl->nctrl, false);
+ nvme_tcp_ofld_teardown_admin_queue(&ctrl->nctrl, false);
+ return BLK_EH_DONE;
+ }
+
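+ /*
+ * The controller is live: kick error recovery and rearm the timer; the
+ * recovery path cancels and completes outstanding requests during
+ * queue teardown.
+ */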
+ dev_warn(ctrl->nctrl.device, "starting error recovery\n");
+ nvme_tcp_ofld_error_recovery(&ctrl->nctrl);
+
+ return BLK_EH_RESET_TIMER;
}
static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
@@ -851,6 +917,7 @@ static struct blk_mq_ops nvme_tcp_ofld_mq_ops = {
.exit_request = nvme_tcp_ofld_exit_request,
.init_hctx = nvme_tcp_ofld_init_hctx,
.map_queues = nvme_tcp_ofld_map_queues,
+ .timeout = nvme_tcp_ofld_timeout,
.poll = nvme_tcp_ofld_poll,
};
@@ -860,6 +927,7 @@ static struct blk_mq_ops nvme_tcp_ofld_admin_mq_ops = {
.complete = nvme_complete_rq,
.exit_request = nvme_tcp_ofld_exit_request,
.init_hctx = nvme_tcp_ofld_init_hctx,
+ .timeout = nvme_tcp_ofld_timeout,
};
static const struct nvme_ctrl_ops nvme_tcp_ofld_ctrl_ops = {