@@ -28,6 +28,7 @@ typedef struct NvmeRequest {
struct NvmeNamespace *ns;
BlockAIOCB *aiocb;
uint16_t status;
+ void *opaque;
NvmeCqe cqe;
NvmeCmd cmd;
BlockAcctCookie acct;
@@ -71,6 +72,7 @@ static inline const char *nvme_io_opc_str(uint8_t opc)
case NVME_CMD_WRITE: return "NVME_NVM_CMD_WRITE";
case NVME_CMD_READ: return "NVME_NVM_CMD_READ";
case NVME_CMD_WRITE_ZEROES: return "NVME_NVM_CMD_WRITE_ZEROES";
+ case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_ZONE_MGMT_SEND";
case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_ZONE_MGMT_RECV";
default: return "NVME_NVM_CMD_UNKNOWN";
}
@@ -480,6 +480,7 @@ enum NvmeIoCommands {
NVME_CMD_COMPARE = 0x05,
NVME_CMD_WRITE_ZEROES = 0x08,
NVME_CMD_DSM = 0x09,
+ NVME_CMD_ZONE_MGMT_SEND = 0x79,
NVME_CMD_ZONE_MGMT_RECV = 0x7a,
};
@@ -593,6 +594,32 @@ enum {
NVME_RW_PRINFO_PRCHK_REF = 1 << 10,
};
/*
 * Zone Management Send command (NVMe Zoned Namespace Command Set).
 *
 * On-the-wire submission queue entry layout; must remain exactly 64 bytes
 * (verified by the QEMU_BUILD_BUG_ON in _nvme_check_size()).
 */
typedef struct QEMU_PACKED NvmeZoneManagementSendCmd {
    uint8_t opcode;
    uint8_t flags;
    uint16_t cid;
    uint32_t nsid;
    uint32_t rsvd8[4];
    NvmeCmdDptr dptr;       /* data pointer; used by the Set ZDE action */
    uint64_t slba;          /* start LBA of the target zone */
    uint32_t rsvd48;
    uint8_t zsa;            /* Zone Send Action; see NvmeZoneManagementSendAction */
    uint8_t zsflags;        /* bit 0: Select All (action applies to all zones) */
    uint16_t rsvd54;
    uint32_t rsvd56[2];
} NvmeZoneManagementSendCmd;

/* True when the Select All bit is set; the SLBA field is then ignored */
#define NVME_CMD_ZONE_MGMT_SEND_SELECT_ALL(zsflags) ((zsflags) & 0x1)

/* Zone Send Action values for NvmeZoneManagementSendCmd.zsa */
typedef enum NvmeZoneManagementSendAction {
    NVME_CMD_ZONE_MGMT_SEND_CLOSE = 0x1,
    NVME_CMD_ZONE_MGMT_SEND_FINISH = 0x2,
    NVME_CMD_ZONE_MGMT_SEND_OPEN = 0x3,
    NVME_CMD_ZONE_MGMT_SEND_RESET = 0x4,
    NVME_CMD_ZONE_MGMT_SEND_OFFLINE = 0x5,
    NVME_CMD_ZONE_MGMT_SEND_SET_ZDE = 0x10,
} NvmeZoneManagementSendAction;
+
typedef struct QEMU_PACKED NvmeZoneManagementRecvCmd {
uint8_t opcode;
uint8_t flags;
@@ -1206,6 +1233,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeIdentify) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRwCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeDsmCmd) != 64);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeZoneManagementSendCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeZoneManagementRecvCmd) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeRangeType) != 64);
QEMU_BUILD_BUG_ON(sizeof(NvmeErrorLog) != 64);
@@ -163,6 +163,8 @@ static const NvmeEffectsLog nvme_effects[NVME_IOCS_MAX] = {
.iocs = {
NVME_EFFECTS_NVM_INITIALIZER,
[NVME_CMD_ZONE_MGMT_RECV] = NVME_EFFECTS_CSUPP,
+ [NVME_CMD_ZONE_MGMT_SEND] = NVME_EFFECTS_CSUPP |
+ NVME_EFFECTS_LBCC,
},
},
};
@@ -1080,6 +1082,12 @@ static uint16_t nvme_check_dulbe(NvmeNamespace *ns, uint64_t slba,
return NVME_SUCCESS;
}
/*
 * Rewind a zone's write pointer to the zone start LBA.
 *
 * zd->wp is assigned directly from zd->zslba (both are raw zone-descriptor
 * fields, presumably in the same wire byte order, so no swap is needed),
 * while the host-side wp_staging copy goes through the nvme_zslba()
 * accessor — NOTE(review): confirm nvme_zslba() converts to host order.
 */
static inline void nvme_zone_reset_wp(NvmeZone *zone)
{
    zone->zd->wp = zone->zd->zslba;
    zone->wp_staging = nvme_zslba(zone);
}
+
static uint16_t nvme_zrm_transition(NvmeNamespace *ns, NvmeZone *zone,
NvmeZoneState to)
{
@@ -1100,6 +1108,10 @@ static uint16_t nvme_zrm_transition(NvmeNamespace *ns, NvmeZone *zone,
case NVME_ZS_ZSEO:
switch (to) {
case NVME_ZS_ZSE:
+ nvme_zone_reset_wp(zone);
+
+ /* fallthrough */
+
case NVME_ZS_ZSO:
NVME_ZA_CLEAR_ALL(zd->za);
@@ -1120,6 +1132,10 @@ static uint16_t nvme_zrm_transition(NvmeNamespace *ns, NvmeZone *zone,
case NVME_ZS_ZSC:
switch (to) {
case NVME_ZS_ZSE:
+ nvme_zone_reset_wp(zone);
+
+ /* fallthrough */
+
case NVME_ZS_ZSO:
NVME_ZA_CLEAR_ALL(zd->za);
@@ -1152,8 +1168,12 @@ static uint16_t nvme_zrm_transition(NvmeNamespace *ns, NvmeZone *zone,
case NVME_ZS_ZSF:
switch (to) {
case NVME_ZS_ZSE:
+ nvme_zone_reset_wp(zone);
+
+ /* fallthrough */
+
case NVME_ZS_ZSO:
- NVME_ZA_CLEAR_ALL(zd->za);
+ NVME_ZA_CLEAR_ALL(zone->zd->za);
/* fallthrough */
@@ -1254,6 +1274,354 @@ static void nvme_rw_cb(void *opaque, int ret)
nvme_enqueue_req_completion(nvme_cq(req), req);
}
+static uint16_t nvme_zone_mgmt_send_close(NvmeCtrl *n, NvmeRequest *req,
+ NvmeZone *zone)
+{
+ NvmeNamespace *ns = req->ns;
+ uint16_t status;
+
+ trace_pci_nvme_zone_mgmt_send_close(nvme_cid(req), nvme_nsid(ns),
+ nvme_zslba(zone), nvme_zs_str(zone));
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSC:
+ return NVME_SUCCESS;
+
+ case NVME_ZS_ZSE:
+ /*
+ * The state machine in nvme_zrm_transition allows zones to transition
+ * from ZSE to ZSC. That transition is only valid if done as part Set
+ * Zone Descriptor, so do an early check here.
+ */
+ return NVME_INVALID_ZONE_STATE_TRANSITION | NVME_DNR;
+
+ default:
+ break;
+ }
+
+ status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSC);
+ if (status) {
+ return status;
+ }
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_zone_mgmt_send_finish(NvmeCtrl *n, NvmeRequest *req,
+ NvmeZone *zone)
+{
+ NvmeNamespace *ns = req->ns;
+ uint16_t status;
+
+ trace_pci_nvme_zone_mgmt_send_finish(nvme_cid(req), nvme_nsid(ns),
+ nvme_zslba(zone), nvme_zs_str(zone));
+
+ if (nvme_zs(zone) == NVME_ZS_ZSF) {
+ return NVME_SUCCESS;
+ }
+
+ status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSF);
+ if (status) {
+ return status;
+ }
+
+ return NVME_SUCCESS;
+}
+
+static uint16_t nvme_zone_mgmt_send_open(NvmeCtrl *n, NvmeRequest *req,
+ NvmeZone *zone)
+{
+ NvmeNamespace *ns = req->ns;
+ uint16_t status;
+
+ trace_pci_nvme_zone_mgmt_send_open(nvme_cid(req), nvme_nsid(ns),
+ nvme_zslba(zone), nvme_zs_str(zone));
+
+ if (nvme_zs(zone) == NVME_ZS_ZSEO) {
+ return NVME_SUCCESS;
+ }
+
+ status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSEO);
+ if (status) {
+ return status;
+ }
+
+ return NVME_SUCCESS;
+}
+
+static void nvme_aio_discard_cb(void *opaque, int ret)
+{
+ NvmeRequest *req = opaque;
+ int *count = req->opaque;
+
+ trace_pci_nvme_aio_discard_cb(nvme_cid(req));
+
+ if (ret) {
+ req->status = NVME_INTERNAL_DEV_ERROR;
+ trace_pci_nvme_err_aio(nvme_cid(req), strerror(ret),
+ req->status);
+ }
+
+ if (count && --(*count) > 0) {
+ return;
+ }
+
+ g_free(req->opaque);
+ req->opaque = NULL;
+
+ nvme_enqueue_req_completion(nvme_cq(req), req);
+}
+
+static uint16_t nvme_zone_mgmt_send_reset(NvmeCtrl *n, NvmeRequest *req,
+ NvmeZone *zone)
+{
+ NvmeNamespace *ns = req->ns;
+ uint64_t zslba = nvme_zslba(zone);
+ uint64_t zcap = nvme_zcap(zone);
+ int *count = req->opaque;
+
+ trace_pci_nvme_zone_mgmt_send_reset(nvme_cid(req), nvme_nsid(ns),
+ nvme_zslba(zone), nvme_zs_str(zone));
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSE:
+ return NVME_SUCCESS;
+
+ case NVME_ZS_ZSIO:
+ case NVME_ZS_ZSEO:
+ case NVME_ZS_ZSC:
+ case NVME_ZS_ZSF:
+ blk_aio_pdiscard(ns->blkconf.blk, nvme_l2b(ns, zslba),
+ nvme_l2b(ns, zcap), nvme_aio_discard_cb, req);
+
+ if (count) {
+ (*count)++;
+ }
+
+ nvme_zrm_transition(ns, zone, NVME_ZS_ZSE);
+
+ return NVME_NO_COMPLETE;
+
+ default:
+ break;
+ }
+
+ return NVME_INVALID_ZONE_STATE_TRANSITION | NVME_DNR;
+}
+
+static uint16_t nvme_zone_mgmt_send_offline(NvmeCtrl *n, NvmeRequest *req,
+ NvmeZone *zone)
+{
+ NvmeNamespace *ns = req->ns;
+
+ trace_pci_nvme_zone_mgmt_send_offline(nvme_cid(req), nvme_nsid(ns),
+ nvme_zslba(zone), nvme_zs_str(zone));
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSRO:
+ nvme_zrm_transition(ns, zone, NVME_ZS_ZSO);
+
+ /* fallthrough */
+
+ case NVME_ZS_ZSO:
+ return NVME_SUCCESS;
+
+ default:
+ break;
+ }
+
+ return NVME_INVALID_ZONE_STATE_TRANSITION | NVME_DNR;
+}
+
/*
 * Zone Management Send - Set Zone Descriptor Extension.
 *
 * Transfers the namespace's ZDES-sized descriptor extension from the host
 * into zone->zde, then moves the zone from Empty to Closed and sets the
 * ZDEV zone attribute. Only valid on a zone in the Empty state.
 */
static uint16_t nvme_zone_mgmt_send_set_zde(NvmeCtrl *n, NvmeRequest *req,
                                            NvmeZone *zone)
{
    NvmeNamespace *ns = req->ns;
    uint16_t status;

    trace_pci_nvme_zone_mgmt_send_set_zde(nvme_cid(req), nvme_nsid(ns),
                                          nvme_zslba(zone), nvme_zs_str(zone));

    /* the descriptor extension may only be set while the zone is empty */
    if (nvme_zs(zone) != NVME_ZS_ZSE) {
        trace_pci_nvme_err_invalid_zone_condition(nvme_cid(req),
                                                  nvme_zslba(zone),
                                                  nvme_zs(zone));
        return NVME_INVALID_ZONE_STATE_TRANSITION | NVME_DNR;
    }

    /* the transfer length is fixed at the namespace ZDES size */
    status = nvme_check_mdts(n, nvme_ns_zdes_bytes(ns));
    if (status) {
        return status;
    }

    status = nvme_dma(n, zone->zde, nvme_ns_zdes_bytes(ns),
                      DMA_DIRECTION_TO_DEVICE, req);
    if (status) {
        return status;
    }

    /* setting the extension implicitly closes the zone (ZSE -> ZSC) */
    status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSC);
    if (status) {
        return status;
    }

    NVME_ZA_SET(zone->zd->za, NVME_ZA_ZDEV);

    return NVME_SUCCESS;
}
+
+static uint16_t nvme_zone_mgmt_send_all(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeZoneManagementSendCmd *send = (NvmeZoneManagementSendCmd *) &req->cmd;
+ NvmeNamespace *ns = req->ns;
+ NvmeZone *zone;
+ int *countp = NULL;
+ uint16_t status = NVME_SUCCESS;
+
+ trace_pci_nvme_zone_mgmt_send_all(nvme_cid(req), nvme_nsid(ns), send->zsa);
+
+ switch (send->zsa) {
+ case NVME_CMD_ZONE_MGMT_SEND_SET_ZDE:
+ return NVME_INVALID_FIELD | NVME_DNR;
+
+ case NVME_CMD_ZONE_MGMT_SEND_CLOSE:
+ for (int i = 0; i < ns->zns.num_zones; i++) {
+ zone = &ns->zns.zones[i];
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSIO:
+ case NVME_ZS_ZSEO:
+ status = nvme_zone_mgmt_send_close(n, req, zone);
+ if (status) {
+ return status;
+ }
+
+ default:
+ continue;
+ }
+ }
+
+ break;
+
+ case NVME_CMD_ZONE_MGMT_SEND_FINISH:
+ for (int i = 0; i < ns->zns.num_zones; i++) {
+ zone = &ns->zns.zones[i];
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSIO:
+ case NVME_ZS_ZSEO:
+ case NVME_ZS_ZSC:
+ status = nvme_zone_mgmt_send_finish(n, req, zone);
+ if (status) {
+ return status;
+ }
+
+ default:
+ continue;
+ }
+ }
+
+ break;
+
+ case NVME_CMD_ZONE_MGMT_SEND_OPEN:
+ for (int i = 0; i < ns->zns.num_zones; i++) {
+ zone = &ns->zns.zones[i];
+
+ if (nvme_zs(zone) == NVME_ZS_ZSC) {
+ status = nvme_zone_mgmt_send_open(n, req, zone);
+ if (status) {
+ return status;
+ }
+ }
+ }
+
+ break;
+
+ case NVME_CMD_ZONE_MGMT_SEND_RESET:
+ countp = g_new0(int, 1);
+ req->opaque = countp;
+
+ for (int i = 0; i < ns->zns.num_zones; i++) {
+ zone = &ns->zns.zones[i];
+
+ switch (nvme_zs(zone)) {
+ case NVME_ZS_ZSIO:
+ case NVME_ZS_ZSEO:
+ case NVME_ZS_ZSC:
+ case NVME_ZS_ZSF:
+ nvme_zone_mgmt_send_reset(n, req, zone);
+ default:
+ continue;
+ }
+ }
+
+ if (*countp) {
+ return NVME_NO_COMPLETE;
+ }
+
+ break;
+
+ case NVME_CMD_ZONE_MGMT_SEND_OFFLINE:
+ for (int i = 0; i < ns->zns.num_zones; i++) {
+ zone = &ns->zns.zones[i];
+
+ if (nvme_zs(zone) == NVME_ZS_ZSRO) {
+ status = nvme_zone_mgmt_send_offline(n, req, zone);
+ if (status) {
+ return status;
+ }
+ }
+ }
+
+ break;
+ }
+
+ return status;
+}
+
+static uint16_t nvme_zone_mgmt_send(NvmeCtrl *n, NvmeRequest *req)
+{
+ NvmeZoneManagementSendCmd *send = (NvmeZoneManagementSendCmd *) &req->cmd;
+ NvmeZoneManagementSendAction zsa = send->zsa;
+ NvmeNamespace *ns = req->ns;
+ NvmeZone *zone;
+ uint64_t zslba = le64_to_cpu(send->slba);
+
+ if (!nvme_ns_zoned(ns)) {
+ return NVME_INVALID_OPCODE | NVME_DNR;
+ }
+
+ trace_pci_nvme_zone_mgmt_send(nvme_cid(req), ns->params.nsid, zslba, zsa,
+ send->zsflags);
+
+ if (NVME_CMD_ZONE_MGMT_SEND_SELECT_ALL(send->zsflags)) {
+ return nvme_zone_mgmt_send_all(n, req);
+ }
+
+ zone = nvme_ns_zone(ns, zslba);
+ if (!zone || zslba != nvme_zslba(zone)) {
+ trace_pci_nvme_err_invalid_zone(nvme_cid(req), zslba);
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ switch (zsa) {
+ case NVME_CMD_ZONE_MGMT_SEND_CLOSE:
+ return nvme_zone_mgmt_send_close(n, req, zone);
+ case NVME_CMD_ZONE_MGMT_SEND_FINISH:
+ return nvme_zone_mgmt_send_finish(n, req, zone);
+ case NVME_CMD_ZONE_MGMT_SEND_OPEN:
+ return nvme_zone_mgmt_send_open(n, req, zone);
+ case NVME_CMD_ZONE_MGMT_SEND_RESET:
+ return nvme_zone_mgmt_send_reset(n, req, zone);
+ case NVME_CMD_ZONE_MGMT_SEND_OFFLINE:
+ return nvme_zone_mgmt_send_offline(n, req, zone);
+ case NVME_CMD_ZONE_MGMT_SEND_SET_ZDE:
+ return nvme_zone_mgmt_send_set_zde(n, req, zone);
+ }
+
+ return NVME_INVALID_FIELD | NVME_DNR;
+}
+
static uint16_t nvme_zone_mgmt_recv(NvmeCtrl *n, NvmeRequest *req)
{
NvmeZoneManagementRecvCmd *recv;
@@ -1424,8 +1792,10 @@ static uint16_t nvme_write_zeroes(NvmeCtrl *n, NvmeRequest *req)
case NVME_ZS_ZSEO:
break;
default:
- nvme_zrm_transition(ns, zone, NVME_ZS_ZSIO);
- break;
+ status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSIO);
+ if (status) {
+ goto invalid;
+ }
}
zone->wp_staging += nlb;
@@ -1505,8 +1875,10 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeRequest *req)
case NVME_ZS_ZSEO:
break;
default:
- nvme_zrm_transition(ns, zone, NVME_ZS_ZSIO);
- break;
+ status = nvme_zrm_transition(ns, zone, NVME_ZS_ZSIO);
+ if (status) {
+ goto invalid;
+ }
}
zone->wp_staging += nlb;
@@ -1571,6 +1943,8 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
case NVME_CMD_WRITE:
case NVME_CMD_READ:
return nvme_rw(n, req);
+ case NVME_CMD_ZONE_MGMT_SEND:
+ return nvme_zone_mgmt_send(n, req);
case NVME_CMD_ZONE_MGMT_RECV:
return nvme_zone_mgmt_recv(n, req);
default:
@@ -43,7 +43,17 @@ pci_nvme_admin_cmd(uint16_t cid, uint16_t sqid, uint8_t opcode, const char *opna
pci_nvme_rw(uint16_t cid, const char *verb, uint32_t nsid, uint32_t nlb, uint64_t count, uint64_t lba) "cid %"PRIu16" opname '%s' nsid %"PRIu32" nlb %"PRIu32" count %"PRIu64" lba 0x%"PRIx64""
pci_nvme_rw_cb(uint16_t cid, const char *blkname) "cid %"PRIu16" blk '%s'"
pci_nvme_write_zeroes(uint16_t cid, uint32_t nsid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" nsid %"PRIu32" slba %"PRIu64" nlb %"PRIu32""
+pci_nvme_zone_mgmt_send(uint16_t cid, uint32_t nsid, uint64_t zslba, uint8_t zsa, uint8_t zsflags) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zsa 0x%"PRIx8" zsflags 0x%"PRIx8""
+pci_nvme_zone_mgmt_send_all(uint16_t cid, uint32_t nsid, uint8_t za) "cid %"PRIu16" nsid %"PRIu32" za 0x%"PRIx8""
+pci_nvme_zone_mgmt_send_close(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
+pci_nvme_zone_mgmt_send_finish(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
+pci_nvme_zone_mgmt_send_open(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
+pci_nvme_zone_mgmt_send_reset(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
+pci_nvme_zone_mgmt_send_reset_cb(uint16_t cid, uint32_t nsid) "cid %"PRIu16" nsid %"PRIu32""
+pci_nvme_zone_mgmt_send_offline(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
+pci_nvme_zone_mgmt_send_set_zde(uint16_t cid, uint32_t nsid, uint64_t zslba, const char *zc) "cid %"PRIu16" nsid %"PRIu32" zslba 0x%"PRIx64" zc \"%s\""
pci_nvme_zone_mgmt_recv(uint16_t cid, uint32_t nsid, uint64_t slba, uint64_t len, uint8_t zra, uint8_t zrasp, uint8_t zrasf) "cid %"PRIu16" nsid %"PRIu32" slba 0x%"PRIx64" len %"PRIu64" zra 0x%"PRIx8" zrasp 0x%"PRIx8" zrasf 0x%"PRIx8""
+pci_nvme_aio_discard_cb(uint16_t cid) "cid %"PRIu16""
pci_nvme_create_sq(uint64_t addr, uint16_t sqid, uint16_t cqid, uint16_t qsize, uint16_t qflags) "create submission queue, addr=0x%"PRIx64", sqid=%"PRIu16", cqid=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16""
pci_nvme_create_cq(uint64_t addr, uint16_t cqid, uint16_t vector, uint16_t size, uint16_t qflags, int ien) "create completion queue, addr=0x%"PRIx64", cqid=%"PRIu16", vector=%"PRIu16", qsize=%"PRIu16", qflags=%"PRIu16", ien=%d"
pci_nvme_del_sq(uint16_t qid) "deleting submission queue sqid=%"PRIu16""
@@ -131,6 +141,7 @@ pci_nvme_err_invalid_setfeat(uint32_t dw10) "invalid set features, dw10=0x%"PRIx
pci_nvme_err_invalid_log_page(uint16_t cid, uint16_t lid) "cid %"PRIu16" lid 0x%"PRIx16""
pci_nvme_err_invalid_zone(uint16_t cid, uint64_t lba) "cid %"PRIu16" lba 0x%"PRIx64""
pci_nvme_err_invalid_zone_condition(uint16_t cid, uint64_t zslba, uint8_t condition) "cid %"PRIu16" zslba 0x%"PRIx64" condition 0x%"PRIx8""
+pci_nvme_err_invalid_zslba(uint16_t cid, uint64_t zslba) "cid %"PRIu16" zslba 0x%"PRIx64""
pci_nvme_err_startfail_cq(void) "nvme_start_ctrl failed because there are non-admin completion queues"
pci_nvme_err_startfail_sq(void) "nvme_start_ctrl failed because there are non-admin submission queues"
pci_nvme_err_startfail_nbarasq(void) "nvme_start_ctrl failed because the admin submission queue address is null"