@@ -140,14 +140,14 @@ static inline void *nvme_addr_to_cmb(NvmeCtrl *n, hwaddr addr)
return &n->cmbuf[addr - n->ctrl_mem.addr];
}

-static void nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
+static int nvme_addr_read(NvmeCtrl *n, hwaddr addr, void *buf, int size)
{
if (n->bar.cmbsz && nvme_addr_is_cmb(n, addr)) {
memcpy(buf, nvme_addr_to_cmb(n, addr), size);
- return;
+ return 0;
}

- pci_dma_read(&n->parent_obj, addr, buf, size);
+ return pci_dma_read(&n->parent_obj, addr, buf, size);
}

static int nvme_check_sqid(NvmeCtrl *n, uint16_t sqid)
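The nvme_addr_read() change above makes the helper report failure instead of hiding it: reads served out of the controller memory buffer are a plain memcpy and cannot fail (hence the unconditional 0), while DMA reads now return whatever pci_dma_read() returns rather than discarding it. Callers must treat any nonzero value as a failed transaction whose buffer contents are unusable. A minimal caller sketch, mirroring the pattern the later hunks adopt (addr and cmd stand in for the caller's own variables):

    NvmeCmd cmd;

    if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
        /* transaction failed; cmd holds indeterminate data */
        trace_pci_nvme_err_addr_read(addr);
        return NVME_DATA_TRAS_ERROR;
    }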
@@ -307,6 +307,7 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
int num_prps = (len >> n->page_bits) + 1;
uint16_t status;
bool prp_list_in_cmb = false;
+ int ret;

QEMUSGList *qsg = &req->qsg;
QEMUIOVector *iov = &req->iov;
@@ -347,7 +348,11 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,

nents = (len + n->page_size - 1) >> n->page_bits;
prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
- nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+ ret = nvme_addr_read(n, prp2, (void *)prp_list, prp_trans);
+ if (ret) {
+ trace_pci_nvme_err_addr_read(prp2);
+ return NVME_DATA_TRAS_ERROR;
+ }
while (len != 0) {
uint64_t prp_ent = le64_to_cpu(prp_list[i]);

@@ -364,8 +369,12 @@ static uint16_t nvme_map_prp(NvmeCtrl *n, uint64_t prp1, uint64_t prp2,
i = 0;
nents = (len + n->page_size - 1) >> n->page_bits;
prp_trans = MIN(n->max_prp_ents, nents) * sizeof(uint64_t);
- nvme_addr_read(n, prp_ent, (void *)prp_list,
- prp_trans);
+ ret = nvme_addr_read(n, prp_ent, (void *)prp_list,
+ prp_trans);
+ if (ret) {
+ trace_pci_nvme_err_addr_read(prp_ent);
+ return NVME_DATA_TRAS_ERROR;
+ }
prp_ent = le64_to_cpu(prp_list[i]);
}
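With the two hunks above, both PRP-list fetches, the initial list at prp2 and any chained list page, now fail the mapping with NVME_DATA_TRAS_ERROR (the generic NVMe "Data Transfer Error" status, 0x4) instead of walking a prp_list buffer that was never filled in. The status then travels the normal completion path; roughly, a sketch of the caller pattern (data_size and status are stand-ins, and the caller itself is untouched by this patch):

    status = nvme_map_prp(n, prp1, prp2, data_size, req);
    if (status) {
        /* the command completes with Data Transfer Error in its CQE */
        return status;
    }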
@@ -457,6 +466,7 @@ static void nvme_post_cqes(void *opaque)
NvmeCQueue *cq = opaque;
NvmeCtrl *n = cq->ctrl;
NvmeRequest *req, *next;
+ int ret;

QTAILQ_FOREACH_SAFE(req, &cq->req_list, entry, next) {
NvmeSQueue *sq;
@@ -466,15 +476,21 @@ static void nvme_post_cqes(void *opaque)
break;
}

- QTAILQ_REMOVE(&cq->req_list, req, entry);
sq = req->sq;
req->cqe.status = cpu_to_le16((req->status << 1) | cq->phase);
req->cqe.sq_id = cpu_to_le16(sq->sqid);
req->cqe.sq_head = cpu_to_le16(sq->head);
addr = cq->dma_addr + cq->tail * n->cqe_size;
+ ret = pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
+ sizeof(req->cqe));
+ if (ret) {
+ trace_pci_nvme_err_addr_write(addr);
+ trace_pci_nvme_err_cfs();
+ n->bar.csts = NVME_CSTS_FAILED;
+ break;
+ }
+ QTAILQ_REMOVE(&cq->req_list, req, entry);
nvme_inc_cq_tail(cq);
- pci_dma_write(&n->parent_obj, addr, (void *)&req->cqe,
- sizeof(req->cqe));
nvme_req_exit(req);
QTAILQ_INSERT_TAIL(&sq->req_list, req, entry);
}
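Note the reordering in this hunk: QTAILQ_REMOVE now runs only after the CQE has actually reached guest memory, so a failed pci_dma_write() leaves the request on cq->req_list instead of silently dropping its completion. Because there is no way to report a status code when the completion path itself is broken, the controller escalates to CSTS.CFS (Controller Fatal Status) and stops posting. A loose host-driver-side sketch of how the guest observes this (not QEMU code; bar0 is an assumed mapping of the controller's BAR0, where CSTS sits at offset 0x1c and CFS is bit 1):

    uint32_t csts = readl(bar0 + 0x1c);

    if (csts & (1 << 1)) {
        /* controller fatal status: only a controller reset can recover */
    }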
@@ -1611,7 +1627,12 @@ static void nvme_process_sq(void *opaque)

while (!(nvme_sq_empty(sq) || QTAILQ_EMPTY(&sq->req_list))) {
addr = sq->dma_addr + sq->head * n->sqe_size;
- nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd));
+ if (nvme_addr_read(n, addr, (void *)&cmd, sizeof(cmd))) {
+ trace_pci_nvme_err_addr_read(addr);
+ trace_pci_nvme_err_cfs();
+ n->bar.csts = NVME_CSTS_FAILED;
+ break;
+ }
nvme_inc_sq_head(sq);

req = QTAILQ_FIRST(&sq->req_list);
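The same treatment on the fetch side keeps the controller from executing an uninitialized NvmeCmd when the SQE read fails: log the address, raise CSTS.CFS, and stop consuming the queue. Recovery from CFS is the standard controller reset, which a guest driver performs roughly as follows (again a loose sketch, not QEMU code; bar0 as above, CC at offset 0x14 with EN as bit 0, CSTS.RDY as bit 0):

    writel(readl(bar0 + 0x14) & ~1u, bar0 + 0x14);  /* clear CC.EN */
    while (readl(bar0 + 0x1c) & 1u) {
        /* wait for CSTS.RDY to drop before reprogramming the controller */
    }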
--- a/hw/block/trace-events
+++ b/hw/block/trace-events
@@ -86,6 +86,9 @@ pci_nvme_mmio_shutdown_cleared(void) "shutdown bit cleared"

# nvme traces for error conditions
pci_nvme_err_mdts(uint16_t cid, size_t len) "cid %"PRIu16" len %zu"
+pci_nvme_err_addr_read(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_addr_write(uint64_t addr) "addr 0x%"PRIx64""
+pci_nvme_err_cfs(void) "controller fatal status"
pci_nvme_err_invalid_dma(void) "PRP/SGL is too small for transfer size"
pci_nvme_err_invalid_prplist_ent(uint64_t prplist) "PRP list entry is null or not page aligned: 0x%"PRIx64""
pci_nvme_err_invalid_prp2_align(uint64_t prp2) "PRP2 is not page aligned: 0x%"PRIx64""
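The three new trace events give these failure paths a visible footprint. They can be enabled at runtime with QEMU's tracing switch, for example -trace "pci_nvme_err_*", which makes it easy to tell whether a guest-visible CSTS.CFS was preceded by a failed read or a failed write, and at which bus address.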