@@ -499,6 +499,22 @@ static void nvme_clear_events(NvmeCtrl *n, uint8_t event_type)
     }
 }
 
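+/* check that the LBA range [slba, slba + nlb) is within the namespace */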
+static inline uint16_t nvme_check_bounds(NvmeCtrl *n, NvmeNamespace *ns,
+                                         uint64_t slba, uint32_t nlb,
+                                         NvmeRequest *req)
+{
+    uint64_t nsze = le64_to_cpu(ns->id_ns.nsze);
+
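+    /* the first clause catches a wrapping slba + nlb before the size test */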
+    if (unlikely(UINT64_MAX - slba < nlb || slba + nlb > nsze)) {
+        trace_nvme_dev_err_invalid_lba_range(slba, nlb, nsze);
+        return NVME_LBA_RANGE | NVME_DNR;
+    }
+
+    return NVME_SUCCESS;
+}
+
 static void nvme_rw_cb(void *opaque, int ret)
 {
     NvmeRequest *req = opaque;
@@ -546,12 +560,13 @@ static uint16_t nvme_write_zeros(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint32_t nlb = le16_to_cpu(rw->nlb) + 1;
     uint64_t offset = slba << data_shift;
     uint32_t count = nlb << data_shift;
+    uint16_t status;
 
     trace_nvme_dev_write_zeroes(nvme_cid(req), slba, nlb);
 
-    if (unlikely(slba + nlb > ns->id_ns.nsze)) {
-        trace_nvme_dev_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
-        return NVME_LBA_RANGE | NVME_DNR;
+    status = nvme_check_bounds(n, ns, slba, nlb, req);
+    if (status) {
+        return status;
     }
 
     block_acct_start(blk_get_stats(n->conf.blk), &req->acct, 0,
@@ -574,13 +589,15 @@ static uint16_t nvme_rw(NvmeCtrl *n, NvmeNamespace *ns, NvmeCmd *cmd,
     uint64_t data_offset = slba << data_shift;
     int is_write = rw->opcode == NVME_CMD_WRITE ? 1 : 0;
     enum BlockAcctType acct = is_write ? BLOCK_ACCT_WRITE : BLOCK_ACCT_READ;
+    uint16_t status;
 
     trace_nvme_dev_rw(is_write ? "write" : "read", nlb, data_size, slba);
 
-    if (unlikely((slba + nlb) > ns->id_ns.nsze)) {
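+    /* a failed bounds check still accounts the request as an invalid I/O */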
+    status = nvme_check_bounds(n, ns, slba, nlb, req);
+    if (status) {
         block_acct_invalid(blk_get_stats(n->conf.blk), acct);
-        trace_nvme_dev_err_invalid_lba_range(slba, nlb, ns->id_ns.nsze);
-        return NVME_LBA_RANGE | NVME_DNR;
+        return status;
     }
 
     if (nvme_map(n, cmd, &req->qsg, &req->iov, data_size, req)) {