@@ -16,6 +16,10 @@ typedef struct NvmeParams {
uint32_t aer_max_queued;
uint8_t mdts;
bool use_intel_id;
+
+ struct {
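+ /* Zone Append Size Limit; max data size is (page_size << zasl) bytes, 0 means use MDTS */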
+ uint8_t zasl;
+ } zns;
} NvmeParams;
typedef struct NvmeAsyncEvent {
@@ -41,6 +45,7 @@ static inline bool nvme_req_is_write(NvmeRequest *req)
switch (req->cmd.opcode) {
case NVME_CMD_WRITE:
case NVME_CMD_WRITE_ZEROES:
+ case NVME_CMD_ZONE_APPEND:
return true;
default:
return false;
@@ -73,6 +78,7 @@ static inline const char *nvme_io_opc_str(uint8_t opc)
case NVME_CMD_WRITE_ZEROES: return "NVME_NVM_CMD_WRITE_ZEROES";
case NVME_CMD_ZONE_MGMT_SEND: return "NVME_ZONED_CMD_ZONE_MGMT_SEND";
case NVME_CMD_ZONE_MGMT_RECV: return "NVME_ZONED_CMD_ZONE_MGMT_RECV";
+ case NVME_CMD_ZONE_APPEND: return "NVME_ZONED_CMD_ZONE_APPEND";
default: return "NVME_NVM_CMD_UNKNOWN";
}
}
@@ -483,6 +483,7 @@ enum NvmeIoCommands {
NVME_CMD_DSM = 0x09,
NVME_CMD_ZONE_MGMT_SEND = 0x79,
NVME_CMD_ZONE_MGMT_RECV = 0x7a,
+ NVME_CMD_ZONE_APPEND = 0x7d,
};
typedef struct QEMU_PACKED NvmeDeleteQ {
@@ -1018,6 +1019,11 @@ enum NvmeIdCtrlLpa {
NVME_LPA_EXTENDED = 1 << 2,
};
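+/* Zoned Namespace Command Set specific Identify Controller data structure */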
+typedef struct QEMU_PACKED NvmeIdCtrlZns {
+ uint8_t zasl;
+ uint8_t rsvd1[4095];
+} NvmeIdCtrlZns;
+
#define NVME_CTRL_SQES_MIN(sqes) ((sqes) & 0xf)
#define NVME_CTRL_SQES_MAX(sqes) (((sqes) >> 4) & 0xf)
#define NVME_CTRL_CQES_MIN(cqes) ((cqes) & 0xf)
@@ -1242,6 +1248,7 @@ static inline void _nvme_check_size(void)
QEMU_BUILD_BUG_ON(sizeof(NvmeFwSlotInfoLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeSmartLog) != 512);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrl) != 4096);
+ QEMU_BUILD_BUG_ON(sizeof(NvmeIdCtrlZns) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsNvm) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeIdNsZns) != 4096);
QEMU_BUILD_BUG_ON(sizeof(NvmeSglDescriptor) != 16);
@@ -166,6 +166,8 @@ static const NvmeEffectsLog nvme_effects[NVME_IOCS_MAX] = {
[NVME_CMD_ZONE_MGMT_RECV] = NVME_EFFECTS_CSUPP,
[NVME_CMD_ZONE_MGMT_SEND] = NVME_EFFECTS_CSUPP |
NVME_EFFECTS_LBCC,
+ [NVME_CMD_ZONE_APPEND] = NVME_EFFECTS_CSUPP |
+ NVME_EFFECTS_LBCC,
},
},
};
@@ -1041,6 +1043,21 @@ static inline uint16_t nvme_check_mdts(NvmeCtrl *n, size_t len)
return NVME_SUCCESS;
}
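+/*
+ * Check a Zone Append data transfer against the Zone Append Size Limit
+ * (ZASL). A zasl of zero means no dedicated limit is configured and the
+ * regular MDTS check applies; otherwise the transfer may not exceed
+ * (page_size << zasl) bytes.
+ */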
+static inline uint16_t nvme_check_zasl(NvmeCtrl *n, size_t len)
+{
+ uint8_t zasl = n->params.zns.zasl;
+
+ if (!zasl) {
+ return nvme_check_mdts(n, len);
+ }
+
+ if (len > n->page_size << zasl) {
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ return NVME_SUCCESS;
+}
+
static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
uint64_t slba, uint32_t nlb)
{
@@ -1410,6 +1427,7 @@ static uint16_t nvme_do_aio(BlockBackend *blk, int64_t offset, size_t len,
break;
case NVME_CMD_WRITE:
+ case NVME_CMD_ZONE_APPEND:
is_write = true;
/* fallthrough */
@@ -1945,12 +1963,40 @@ static uint16_t nvme_rwz(NvmeCtrl *n, NvmeRequest *req)
uint32_t nlb = (uint32_t)le16_to_cpu(rw->nlb) + 1;
size_t len = nvme_l2b(ns, nlb);
- bool is_write = nvme_req_is_write(req);
+ bool is_append = false, is_write = nvme_req_is_write(req);
uint16_t status;
trace_pci_nvme_rwz(nvme_cid(req), nvme_io_opc_str(rw->opcode),
nvme_nsid(ns), nlb, len, slba);
+ if (req->cmd.opcode == NVME_CMD_ZONE_APPEND) {
+ uint64_t wp;
+ is_append = true;
+
+ zone = nvme_ns_get_zone(ns, slba);
+ if (!zone) {
+ trace_pci_nvme_err_invalid_zone(nvme_cid(req), slba);
+ status = NVME_INVALID_FIELD | NVME_DNR;
+ goto invalid;
+ }
+
+ wp = zone->wp_staging;
+
+ if (slba != nvme_zslba(zone)) {
+ trace_pci_nvme_err_invalid_zslba(nvme_cid(req), slba);
+ return NVME_INVALID_FIELD | NVME_DNR;
+ }
+
+ status = nvme_check_zasl(n, len);
+ if (status) {
+ trace_pci_nvme_err_zasl(nvme_cid(req), len);
+ goto invalid;
+ }
+
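+ /*
+ * Perform the append at the zone's staged write pointer and return
+ * the assigned LBA to the host in the completion entry.
+ */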
+ slba = wp;
+ rw->slba = req->cqe.qw0 = cpu_to_le64(wp);
+ }
+
status = nvme_check_bounds(n, ns, slba, nlb);
if (status) {
NvmeIdNsNvm *id_ns = nvme_ns_id_nvm(ns);
@@ -1959,11 +2005,13 @@ static uint16_t nvme_rwz(NvmeCtrl *n, NvmeRequest *req)
}
if (nvme_ns_zoned(ns)) {
- zone = nvme_ns_get_zone(ns, slba);
if (!zone) {
- trace_pci_nvme_err_invalid_zone(nvme_cid(req), slba);
- status = NVME_INVALID_FIELD | NVME_DNR;
- goto invalid;
+ zone = nvme_ns_get_zone(ns, slba);
+ if (!zone) {
+ trace_pci_nvme_err_invalid_zone(nvme_cid(req), slba);
+ status = NVME_INVALID_FIELD | NVME_DNR;
+ goto invalid;
+ }
}
status = nvme_check_zone(n, slba, nlb, req, zone);
@@ -1997,7 +2045,7 @@ static uint16_t nvme_rwz(NvmeCtrl *n, NvmeRequest *req)
if (is_write) {
if (zone) {
- if (zone->wp_staging != nvme_wp(zone)) {
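+ /*
+ * Zone Append writes at the staged write pointer, so pending writes
+ * in the zone are only an error for regular writes.
+ */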
+ if (!is_append && (zone->wp_staging != nvme_wp(zone))) {
trace_pci_nvme_err_zone_pending_writes(nvme_cid(req),
nvme_zslba(zone),
nvme_wp(zone),
@@ -2069,6 +2117,7 @@ static uint16_t nvme_io_cmd(NvmeCtrl *n, NvmeRequest *req)
case NVME_CMD_WRITE_ZEROES:
case NVME_CMD_WRITE:
case NVME_CMD_READ:
+ case NVME_CMD_ZONE_APPEND:
return nvme_rwz(n, req);
case NVME_CMD_ZONE_MGMT_SEND:
return nvme_zone_mgmt_send(n, req);
@@ -3753,6 +3802,11 @@ static void nvme_check_constraints(NvmeCtrl *n, Error **errp)
return;
}
+ if (params->zns.zasl && params->zns.zasl > params->mdts) {
+ error_setg(errp, "zns.zasl must be less than or equal to mdts");
+ return;
+ }
+
if (!n->params.cmb_size_mb && n->pmrdev) {
if (host_memory_backend_is_mapped(n->pmrdev)) {
error_setg(errp, "can't use already busy memdev: %s",
@@ -3929,12 +3983,16 @@ static void nvme_init_pci(NvmeCtrl *n, PCIDevice *pci_dev, Error **errp)
static void nvme_init_ctrl(NvmeCtrl *n, PCIDevice *pci_dev)
{
NvmeIdCtrl *id = &n->id_ctrl;
+ NvmeIdCtrlZns *id_zns;
uint8_t *pci_conf = pci_dev->config;
char *subnqn;
n->id_ctrl_iocss[NVME_IOCS_NVM] = g_new0(NvmeIdCtrl, 1);
n->id_ctrl_iocss[NVME_IOCS_ZONED] = g_new0(NvmeIdCtrl, 1);
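+ /* advertise the configured Zone Append Size Limit in the ZNS identify data */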
+ id_zns = n->id_ctrl_iocss[NVME_IOCS_ZONED];
+ id_zns->zasl = n->params.zns.zasl;
+
id->vid = cpu_to_le16(pci_get_word(pci_conf + PCI_VENDOR_ID));
id->ssvid = cpu_to_le16(pci_get_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID));
strpadcpy((char *)id->mn, sizeof(id->mn), "QEMU NVMe Ctrl", ' ');
@@ -4071,6 +4129,7 @@ static Property nvme_props[] = {
DEFINE_PROP_UINT32("aer_max_queued", NvmeCtrl, params.aer_max_queued, 64),
DEFINE_PROP_UINT8("mdts", NvmeCtrl, params.mdts, 7),
DEFINE_PROP_BOOL("use-intel-id", NvmeCtrl, params.use_intel_id, false),
+ DEFINE_PROP_UINT8("zns.zasl", NvmeCtrl, params.zns.zasl, 0),
DEFINE_PROP_END_OF_LIST(),
};
@@ -103,6 +103,7 @@ pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"
# nvme traces for error conditions
pci_nvme_err_mdts(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
+pci_nvme_err_zasl(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
pci_nvme_err_req_status(uint16_t cid, uint32_t nsid, uint16_t status, uint8_t opc) "cid %"PRIu16" nsid %"PRIu32" status 0x%"PRIx16" opc 0x%"PRIx8""
pci_nvme_err_dulbe(uint16_t cid, uint64_t slba, uint32_t nlb) "cid %"PRIu16" slba 0x%"PRIx64" nlb %"PRIu32""
pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""