Message ID: 20201106234305.21339-7-dmitry.fomichev@wdc.com
State:      New
Series:     None
On Nov 7 08:42, Dmitry Fomichev wrote:
> From: Niklas Cassel <niklas.cassel@wdc.com>
>
> Many CNS commands have "allocated" command variants. These include
> a namespace as long as it is allocated; that is, a namespace is
> included regardless of whether it is active (attached) or not.
>
> While these commands are optional (they are mandatory for controllers
> supporting the namespace attachment command), our QEMU implementation
> is made more complete by actually providing support for these CNS
> values.
>
> However, since our QEMU model currently does not support the namespace
> attachment command, these new allocated CNS commands will return the
> same result as the active CNS command variants.
>
> In NVMe, a namespace is active if it exists and is attached to the
> controller.
>
> Add a new Boolean namespace flag, "attached", to provide the most
> basic namespace attachment support. The default value for this new
> flag is true. Also, implement the logic in the new CNS values to
> include/exclude namespaces based on this new property. The only thing
> missing is hooking up the actual Namespace Attachment command opcode,
> which will allow a user to toggle the "attached" flag per namespace.
>
> The reason for not hooking up this command completely is that the
> NVMe specification requires the namespace management command to be
> supported if the namespace attachment command is supported.
>
> Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
> Signed-off-by: Dmitry Fomichev <dmitry.fomichev@wdc.com>
> Reviewed-by: Keith Busch <kbusch@kernel.org>
> ---
>  hw/block/nvme-ns.h   |  1 +
>  include/block/nvme.h | 20 +++++++++++--------
>  hw/block/nvme-ns.c   |  1 +
>  hw/block/nvme.c      | 46 +++++++++++++++++++++++++++++++++-----------
>  4 files changed, 49 insertions(+), 19 deletions(-)
>
> diff --git a/hw/block/nvme-ns.h b/hw/block/nvme-ns.h
> index d795e44bab..2d9cd29d07 100644
> --- a/hw/block/nvme-ns.h
> +++ b/hw/block/nvme-ns.h
> @@ -31,6 +31,7 @@ typedef struct NvmeNamespace {
>      int64_t      size;
>      NvmeIdNs     id_ns;
>      const uint32_t *iocs;
> +    bool         attached;

Remove this too?
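(For reference, the "missing piece" described in the commit message, i.e. the command that would actually flip this flag, might eventually look roughly like the untested sketch below. The handler name is invented here; opcode 0x15 and the SEL field in CDW10 come from the NVMe spec, and the controller list payload that the real command carries is ignored for brevity.)

/* Hypothetical sketch, not part of this patch: Namespace Attachment
 * (opcode 0x15 in the NVMe spec), simplified to just toggle the flag. */
static uint16_t nvme_ns_attachment(NvmeCtrl *n, NvmeRequest *req)
{
    uint32_t nsid = le32_to_cpu(req->cmd.nsid);
    uint8_t sel = le32_to_cpu(req->cmd.cdw10) & 0xf;
    NvmeNamespace *ns = nvme_ns(n, nsid);

    if (!ns) {
        return NVME_INVALID_NSID | NVME_DNR;
    }

    if (sel > 0x1) {
        return NVME_INVALID_FIELD | NVME_DNR;
    }

    /* SEL 0x0 attaches the namespace, SEL 0x1 detaches it */
    ns->attached = (sel == 0x0);

    return NVME_SUCCESS;
}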
>      uint8_t      csi;
>
>      NvmeNamespaceParams params;
> diff --git a/include/block/nvme.h b/include/block/nvme.h
> index af23514713..394db19022 100644
> --- a/include/block/nvme.h
> +++ b/include/block/nvme.h
> @@ -806,14 +806,18 @@ typedef struct QEMU_PACKED NvmePSD {
>  #define NVME_IDENTIFY_DATA_SIZE 4096
>
>  enum NvmeIdCns {
> -    NVME_ID_CNS_NS             = 0x00,
> -    NVME_ID_CNS_CTRL           = 0x01,
> -    NVME_ID_CNS_NS_ACTIVE_LIST = 0x02,
> -    NVME_ID_CNS_NS_DESCR_LIST  = 0x03,
> -    NVME_ID_CNS_CS_NS          = 0x05,
> -    NVME_ID_CNS_CS_CTRL        = 0x06,
> -    NVME_ID_CNS_CS_NS_ACTIVE_LIST = 0x07,
> -    NVME_ID_CNS_IO_COMMAND_SET = 0x1c,
> +    NVME_ID_CNS_NS                 = 0x00,
> +    NVME_ID_CNS_CTRL               = 0x01,
> +    NVME_ID_CNS_NS_ACTIVE_LIST     = 0x02,
> +    NVME_ID_CNS_NS_DESCR_LIST      = 0x03,
> +    NVME_ID_CNS_CS_NS              = 0x05,
> +    NVME_ID_CNS_CS_CTRL            = 0x06,
> +    NVME_ID_CNS_CS_NS_ACTIVE_LIST  = 0x07,
> +    NVME_ID_CNS_NS_PRESENT_LIST    = 0x10,
> +    NVME_ID_CNS_NS_PRESENT         = 0x11,
> +    NVME_ID_CNS_CS_NS_PRESENT_LIST = 0x1a,
> +    NVME_ID_CNS_CS_NS_PRESENT      = 0x1b,
> +    NVME_ID_CNS_IO_COMMAND_SET     = 0x1c,
>  };
>
>  typedef struct QEMU_PACKED NvmeIdCtrl {
> diff --git a/hw/block/nvme-ns.c b/hw/block/nvme-ns.c
> index c0362426cc..e191ef9be0 100644
> --- a/hw/block/nvme-ns.c
> +++ b/hw/block/nvme-ns.c
> @@ -42,6 +42,7 @@ static void nvme_ns_init(NvmeNamespace *ns)
>      id_ns->nsze = cpu_to_le64(nvme_ns_nlbas(ns));
>
>      ns->csi = NVME_CSI_NVM;
> +    ns->attached = true;
>
>      /* no thin provisioning */
>      id_ns->ncap = id_ns->nsze;
> diff --git a/hw/block/nvme.c b/hw/block/nvme.c
> index bb82dd9975..7495cdb5ef 100644
> --- a/hw/block/nvme.c
> +++ b/hw/block/nvme.c
> @@ -1236,6 +1236,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
>      uint32_t trans_len;
>      NvmeNamespace *ns;
>      time_t current_ms;
> +    int i;

This change is unrelated to the patch.

>
>      if (off >= sizeof(smart)) {
>          return NVME_INVALID_FIELD | NVME_DNR;
> @@ -1246,10 +1247,7 @@ static uint16_t nvme_smart_info(NvmeCtrl *n, uint8_t rae, uint32_t buf_len,
>          if (!ns) {
>              return NVME_INVALID_NSID | NVME_DNR;
>          }
> -        nvme_set_blk_stats(ns, &stats);

Whoops, what happened here?

>      } else {
> -        int i;
> -
>          for (i = 1; i <= n->num_namespaces; i++) {
>              ns = nvme_ns(n, i);
>              if (!ns) {
> @@ -1552,7 +1550,8 @@ static uint16_t nvme_identify_ctrl_csi(NvmeCtrl *n, NvmeRequest *req)
>      return NVME_INVALID_FIELD | NVME_DNR;
>  }
>
> -static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
> +static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req,
> +                                 bool only_active)
>  {
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> @@ -1569,11 +1568,16 @@ static uint16_t nvme_identify_ns(NvmeCtrl *n, NvmeRequest *req)
>          return nvme_rpt_empty_id_struct(n, req);
>      }
>
> +    if (only_active && !ns->attached) {
> +        return nvme_rpt_empty_id_struct(n, req);
> +    }
> +

The only_active parameter and this condition should be removed. This
goes for the rest of the functions below as well.
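To make that concrete, a rough, untested sketch of how nvme_identify() could then dispatch the new CNS values while keeping the original single-argument handlers (the remaining CNS cases are elided here):

static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
{
    NvmeIdentify *c = (NvmeIdentify *)&req->cmd;

    switch (le32_to_cpu(c->cns)) {
    case NVME_ID_CNS_NS:
    case NVME_ID_CNS_NS_PRESENT:
        /* identical until namespace attachment is actually supported */
        return nvme_identify_ns(n, req);
    case NVME_ID_CNS_CS_NS:
    case NVME_ID_CNS_CS_NS_PRESENT:
        return nvme_identify_ns_csi(n, req);
    case NVME_ID_CNS_NS_ACTIVE_LIST:
    case NVME_ID_CNS_NS_PRESENT_LIST:
        return nvme_identify_nslist(n, req);
    case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
        return nvme_identify_nslist_csi(n, req);
    /* ... remaining CNS values handled as before ... */
    default:
        return NVME_INVALID_FIELD | NVME_DNR;
    }
}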
>      return nvme_dma(n, (uint8_t *)&ns->id_ns, sizeof(NvmeIdNs),
>                      DMA_DIRECTION_FROM_DEVICE, req);
>  }
>
> -static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
> +static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req,
> +                                     bool only_active)
>  {
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> @@ -1590,6 +1594,10 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
>          return nvme_rpt_empty_id_struct(n, req);
>      }
>
> +    if (only_active && !ns->attached) {
> +        return nvme_rpt_empty_id_struct(n, req);
> +    }
> +
>      if (c->csi == NVME_CSI_NVM) {
>          return nvme_rpt_empty_id_struct(n, req);
>      }
> @@ -1597,7 +1605,8 @@ static uint16_t nvme_identify_ns_csi(NvmeCtrl *n, NvmeRequest *req)
>      return NVME_INVALID_FIELD | NVME_DNR;
>  }
>
> -static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
> +static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req,
> +                                     bool only_active)
>  {
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> @@ -1627,6 +1636,9 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
>          if (ns->params.nsid <= min_nsid) {
>              continue;
>          }
> +        if (only_active && !ns->attached) {
> +            continue;
> +        }
>          list_ptr[j++] = cpu_to_le32(ns->params.nsid);
>          if (j == data_len / sizeof(uint32_t)) {
>              break;
> @@ -1636,7 +1648,8 @@ static uint16_t nvme_identify_nslist(NvmeCtrl *n, NvmeRequest *req)
>      return nvme_dma(n, list, data_len, DMA_DIRECTION_FROM_DEVICE, req);
>  }
>
> -static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
> +static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req,
> +                                         bool only_active)
>  {
>      NvmeNamespace *ns;
>      NvmeIdentify *c = (NvmeIdentify *)&req->cmd;
> @@ -1667,6 +1680,9 @@ static uint16_t nvme_identify_nslist_csi(NvmeCtrl *n, NvmeRequest *req)
>          if (ns->params.nsid <= min_nsid) {
>              continue;
>          }
> +        if (only_active && !ns->attached) {
> +            continue;
> +        }
>          list_ptr[j++] = cpu_to_le32(ns->params.nsid);
>          if (j == data_len / sizeof(uint32_t)) {
>              break;
> @@ -1740,17 +1756,25 @@ static uint16_t nvme_identify(NvmeCtrl *n, NvmeRequest *req)
>
>      switch (le32_to_cpu(c->cns)) {
>      case NVME_ID_CNS_NS:
> -        return nvme_identify_ns(n, req);
> +        /* fall through */
> +    case NVME_ID_CNS_NS_PRESENT:
> +        return nvme_identify_ns(n, req, true);
>      case NVME_ID_CNS_CS_NS:
> -        return nvme_identify_ns_csi(n, req);
> +        /* fall through */
> +    case NVME_ID_CNS_CS_NS_PRESENT:
> +        return nvme_identify_ns_csi(n, req, true);
>      case NVME_ID_CNS_CTRL:
>          return nvme_identify_ctrl(n, req);
>      case NVME_ID_CNS_CS_CTRL:
>          return nvme_identify_ctrl_csi(n, req);
>      case NVME_ID_CNS_NS_ACTIVE_LIST:
> -        return nvme_identify_nslist(n, req);
> +        /* fall through */
> +    case NVME_ID_CNS_NS_PRESENT_LIST:
> +        return nvme_identify_nslist(n, req, true);
>      case NVME_ID_CNS_CS_NS_ACTIVE_LIST:
> -        return nvme_identify_nslist_csi(n, req);
> +        /* fall through */
> +    case NVME_ID_CNS_CS_NS_PRESENT_LIST:
> +        return nvme_identify_nslist_csi(n, req, true);
>      case NVME_ID_CNS_NS_DESCR_LIST:
>          return nvme_identify_ns_descr_list(n, req);
>      case NVME_ID_CNS_IO_COMMAND_SET:
> --
> 2.21.0
>

--
One of us - No more doubt, silence or taboo about mental illness.