diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c
--- a/drivers/nvme/target/admin-cmd.c
+++ b/drivers/nvme/target/admin-cmd.c
@@ -431,7 +431,8 @@ static void nvmet_execute_identify_ctrl(struct nvmet_req *req)
 	id->nn = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->mnan = cpu_to_le32(NVMET_MAX_NAMESPACES);
 	id->oncs = cpu_to_le16(NVME_CTRL_ONCS_DSM |
-			NVME_CTRL_ONCS_WRITE_ZEROES);
+			NVME_CTRL_ONCS_WRITE_ZEROES |
+			NVME_CTRL_ONCS_VERIFY);
 
 	/* XXX: don't report vwc if the underlying device is write through */
 	id->vwc = NVME_CTRL_VWC_PRESENT;
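For reference, the ONCS bit advertised above is what a host checks before issuing Verify. A minimal user-space sketch of that check, assuming the Verify bit is bit 7 of ONCS as in the NVMe base specification (the macro name and example values below are illustrative, not from this patch):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed position of the ONCS Verify bit (bit 7 in the NVMe base spec). */
#define ONCS_VERIFY	(1 << 7)

/*
 * Return non-zero if a controller advertises Verify, given the raw
 * little-endian ONCS word from the Identify Controller data structure.
 */
static int ctrl_supports_verify(uint16_t oncs_le)
{
	return (le16toh(oncs_le) & ONCS_VERIFY) != 0;
}

int main(void)
{
	/* Example value: DSM (bit 2), Write Zeroes (bit 3), Verify (bit 7). */
	uint16_t oncs = htole16((1 << 2) | (1 << 3) | (1 << 7));

	printf("verify supported: %d\n", ctrl_supports_verify(oncs));
	return 0;
}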
diff --git a/drivers/nvme/target/io-cmd-bdev.c b/drivers/nvme/target/io-cmd-bdev.c
--- a/drivers/nvme/target/io-cmd-bdev.c
+++ b/drivers/nvme/target/io-cmd-bdev.c
@@ -146,6 +146,7 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		switch (req->cmd->common.opcode) {
 		case nvme_cmd_dsm:
 		case nvme_cmd_write_zeroes:
+		case nvme_cmd_verify:
 			status = NVME_SC_ONCS_NOT_SUPPORTED | NVME_SC_DNR;
 			break;
 		default:
@@ -171,6 +172,9 @@ u16 blk_to_nvme_status(struct nvmet_req *req, blk_status_t blk_sts)
 		req->error_slba =
 			le64_to_cpu(req->cmd->write_zeroes.slba);
 		break;
+	case nvme_cmd_verify:
+		req->error_slba = le64_to_cpu(req->cmd->verify.slba);
+		break;
 	default:
 		req->error_slba = 0;
 	}
@@ -442,6 +446,37 @@ static void nvmet_bdev_execute_write_zeroes(struct nvmet_req *req)
 	}
 }
 
+static void nvmet_bdev_execute_verify(struct nvmet_req *req)
+{
+	struct nvme_verify_cmd *verify = &req->cmd->verify;
+	struct bio *bio = NULL;
+	sector_t nr_sector;
+	sector_t sector;
+	int ret;
+
+	if (!nvmet_check_transfer_len(req, 0))
+		return;
+
+	if (!bdev_verify_sectors(req->ns->bdev)) {
+		nvmet_req_complete(req, NVME_SC_INTERNAL | NVME_SC_DNR);
+		return;
+	}
+
+	sector = le64_to_cpu(verify->slba) << (req->ns->blksize_shift - 9);
+	nr_sector = (((sector_t)le16_to_cpu(verify->length) + 1) <<
+			(req->ns->blksize_shift - 9));
+
+	ret = __blkdev_issue_verify(req->ns->bdev, sector, nr_sector,
+			GFP_KERNEL, &bio);
+	if (bio) {
+		bio->bi_private = req;
+		bio->bi_end_io = nvmet_bio_done;
+		submit_bio(bio);
+	} else {
+		nvmet_req_complete(req, errno_to_nvme_status(req, ret));
+	}
+}
+
 u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 {
 	switch (req->cmd->common.opcode) {
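The shifts above convert the command's logical-block units into 512-byte sectors for the block layer, and the "+ 1" accounts for the zero's-based NLB field. A stand-alone sketch of the same arithmetic (function and variable names here are illustrative only, not from the patch):

#include <stdint.h>
#include <stdio.h>

/*
 * Convert an NVMe LBA range (zero's-based NLB) to a 512-byte sector
 * range, mirroring the shifts in nvmet_bdev_execute_verify().
 */
static void lba_range_to_sectors(uint64_t slba, uint16_t nlb0,
				 unsigned int blksize_shift,
				 uint64_t *sector, uint64_t *nr_sector)
{
	*sector = slba << (blksize_shift - 9);
	*nr_sector = ((uint64_t)nlb0 + 1) << (blksize_shift - 9);
}

int main(void)
{
	uint64_t sector, nr_sector;

	/* 4KiB logical blocks (blksize_shift = 12), verify LBAs 16..23. */
	lba_range_to_sectors(16, 7, 12, &sector, &nr_sector);

	/* Prints sector=128 nr_sector=64. */
	printf("sector=%llu nr_sector=%llu\n",
	       (unsigned long long)sector, (unsigned long long)nr_sector);
	return 0;
}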
@@ -460,6 +495,9 @@ u16 nvmet_bdev_parse_io_cmd(struct nvmet_req *req)
 	case nvme_cmd_write_zeroes:
 		req->execute = nvmet_bdev_execute_write_zeroes;
 		return 0;
+	case nvme_cmd_verify:
+		req->execute = nvmet_bdev_execute_verify;
+		return 0;
 	default:
 		return nvmet_report_invalid_opcode(req);
 	}
Add support for handling the Verify command on the NVMe target. Call into
__blkdev_issue_verify(), which the block layer expands into REQ_OP_VERIFY
bios covering the requested LBA range.

Signed-off-by: Chaitanya Kulkarni <kch@nvidia.com>
---
 drivers/nvme/target/admin-cmd.c   |  3 ++-
 drivers/nvme/target/io-cmd-bdev.c | 38 +++++++++++++++++++++++++++++++
 2 files changed, 40 insertions(+), 1 deletion(-)
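One note on the completion model: the target path above chains the bio to nvmet_bio_done() so the request completes asynchronously. If a synchronous caller were ever needed, it would follow the usual pattern of the __blkdev_issue_*() helpers. The sketch below assumes only the __blkdev_issue_verify() signature visible in this patch (the REQ_OP_VERIFY series is not upstream, and the wrapper name is hypothetical):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical synchronous wrapper: verify nr_sects 512-byte sectors
 * starting at 'sector', mirroring how blkdev_issue_zeroout() wraps its
 * chained __ variant in mainline.
 */
static int example_blkdev_verify_sync(struct block_device *bdev,
				      sector_t sector, sector_t nr_sects)
{
	struct bio *bio = NULL;
	int ret;

	ret = __blkdev_issue_verify(bdev, sector, nr_sects, GFP_KERNEL, &bio);
	if (ret == 0 && bio) {
		ret = submit_bio_wait(bio);
		bio_put(bio);
	}
	return ret;
}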