@@ -513,16 +513,20 @@ static void nvme_rw_cb(void *opaque, int ret)
         block_acct_failed(blk_get_stats(n->conf.blk), &req->acct);
         req->status = NVME_INTERNAL_DEV_ERROR;
     }
-    if (req->has_sg) {
+
+    if (req->qsg.nalloc) {
         qemu_sglist_destroy(&req->qsg);
     }
+    if (req->iov.nalloc) {
+        qemu_iovec_destroy(&req->iov);
+    }
+
     nvme_enqueue_req_completion(cq, req);
 }
 
 static uint16_t nvme_flush(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     NvmeRequest *req)
 {
-    req->has_sg = false;
     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                      BLOCK_ACCT_FLUSH);
     req->aiocb = blk_aio_flush(n->conf.blk, nvme_rw_cb, req);
@@ -548,7 +552,6 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
         return NVME_LBA_RANGE | NVME_DNR;
     }
 
-    req->has_sg = false;
     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
                      BLOCK_ACCT_WRITE);
     req->aiocb = blk_aio_pwrite_zeroes(n->conf.blk, offset, count,
@@ -586,7 +589,6 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     }
 
     if (req->qsg.nsg > 0) {
-        req->has_sg = true;
         block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->qsg.size,
                          acct);
         req->aiocb = is_write ?
@@ -595,7 +597,6 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
             dma_blk_read(n->conf.blk, &req->qsg, data_offset, BDRV_SECTOR_SIZE,
                          nvme_rw_cb, req);
     } else {
-        req->has_sg = false;
         block_acct_start(blk_get_stats(n->conf.blk), &req->acct, req->iov.size,
                          acct);
         req->aiocb = is_write ?
diff --git a/hw/block/nvme.h b/hw/block/nvme.h
--- a/hw/block/nvme.h
+++ b/hw/block/nvme.h
@@ -22,7 +22,6 @@ typedef struct NvmeRequest {
     struct NvmeSQueue       *sq;
     BlockAIOCB              *aiocb;
     uint16_t                status;
-    bool                    has_sg;
     NvmeCqe                 cqe;
     BlockAcctCookie         acct;
     QEMUSGList              qsg;
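
Note for reviewers: both QEMUSGList and QEMUIOVector record their allocation state in an nalloc field, which is non-zero once the structure has been initialized and is zeroed again when it is destroyed, so the completion callback can infer what needs tearing down from the objects themselves instead of a shadow has_sg flag. Below is a minimal, standalone C sketch of that pattern; the SGList and Request types and their helpers are hypothetical stand-ins for illustration, not the QEMU API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for QEMUSGList/QEMUIOVector: nalloc is
 * non-zero exactly while the structure owns an allocation. */
typedef struct {
    void **entries;
    int nalloc;
} SGList;

typedef struct {
    SGList qsg;
} Request;

static void sglist_init(SGList *sg, int n)
{
    sg->entries = calloc(n, sizeof(*sg->entries));
    sg->nalloc = n;
}

static void sglist_destroy(SGList *sg)
{
    free(sg->entries);
    memset(sg, 0, sizeof(*sg));    /* nalloc drops back to 0 */
}

/* Completion path: destroy whatever was actually set up; no
 * separate bookkeeping flag is needed. */
static void request_complete(Request *req)
{
    if (req->qsg.nalloc) {
        sglist_destroy(&req->qsg);
    }
}

int main(void)
{
    Request req = {0};

    request_complete(&req);    /* no-op: nothing initialized yet */
    sglist_init(&req.qsg, 4);
    request_complete(&req);    /* frees the list, re-zeroes nalloc */
    printf("nalloc after teardown: %d\n", req.qsg.nalloc);
    return 0;
}

Because destroy re-zeroes the structure, the check stays correct even if a request object is recycled or the completion path runs with only one of the two mappings initialized.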