| Field | Value | |
|---|---|---|
| Message ID | 20201015115252.15582-3-eric.auger@redhat.com | |
| State | Superseded | |
| Headers | show | |
| Series | NVMe passthrough: Support 64kB page host | expand |
On 10/15/20 1:52 PM, Eric Auger wrote: > In preparation of 64kB host page support, let's change the size > and alignment of the IDENTIFY command response buffer so that > the VFIO DMA MAP succeeds. We align on the host page size. > > Signed-off-by: Eric Auger <eric.auger@redhat.com> > --- > block/nvme.c | 10 ++++++---- > 1 file changed, 6 insertions(+), 4 deletions(-) > > diff --git a/block/nvme.c b/block/nvme.c > index e3d96f20d0..088ff1825a 100644 > --- a/block/nvme.c > +++ b/block/nvme.c > @@ -513,19 +513,21 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp) > .opcode = NVME_ADM_CMD_IDENTIFY, > .cdw10 = cpu_to_le32(0x1), > }; > + size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size); Do we really need to ALIGN_UP if we then call qemu_try_memalign()? > > - id = qemu_try_memalign(s->page_size, sizeof(*id)); > + id = qemu_try_memalign(qemu_real_host_page_size, id_size); > if (!id) { > error_setg(errp, "Cannot allocate buffer for identify response"); > goto out; > } > - r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova); > + r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova); > if (r) { > error_setg(errp, "Cannot map buffer for DMA"); > goto out; > } > > - memset(id, 0, sizeof(*id)); > + memset(id, 0, id_size); > + > cmd.dptr.prp1 = cpu_to_le64(iova); > if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) { > error_setg(errp, "Failed to identify controller"); > @@ -547,7 +549,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp) > s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES); > s->supports_discard = !!(oncs & NVME_ONCS_DSM); > > - memset(id, 0, sizeof(*id)); > + memset(id, 0, id_size); > cmd.cdw10 = 0; > cmd.nsid = cpu_to_le32(namespace); > if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) { >
On 10/20/20 12:59 PM, Philippe Mathieu-Daudé wrote: > On 10/15/20 1:52 PM, Eric Auger wrote: >> In preparation of 64kB host page support, let's change the size >> and alignment of the IDENTIFY command response buffer so that >> the VFIO DMA MAP succeeds. We align on the host page size. >> >> Signed-off-by: Eric Auger <eric.auger@redhat.com> >> --- >> block/nvme.c | 10 ++++++---- >> 1 file changed, 6 insertions(+), 4 deletions(-) >> >> diff --git a/block/nvme.c b/block/nvme.c >> index e3d96f20d0..088ff1825a 100644 >> --- a/block/nvme.c >> +++ b/block/nvme.c >> @@ -513,19 +513,21 @@ static void nvme_identify(BlockDriverState *bs, >> int namespace, Error **errp) >> .opcode = NVME_ADM_CMD_IDENTIFY, >> .cdw10 = cpu_to_le32(0x1), >> }; >> + size_t id_size = QEMU_ALIGN_UP(sizeof(*id), >> qemu_real_host_page_size); > > Do we really need to ALIGN_UP if we then call qemu_try_memalign()? Ah, we need to ALIGN_UP for qemu_vfio_dma_map(), OK. Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com> > >> - id = qemu_try_memalign(s->page_size, sizeof(*id)); >> + id = qemu_try_memalign(qemu_real_host_page_size, id_size); >> if (!id) { >> error_setg(errp, "Cannot allocate buffer for identify >> response"); >> goto out; >> } >> - r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova); >> + r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova); >> if (r) { >> error_setg(errp, "Cannot map buffer for DMA"); >> goto out; >> } >> - memset(id, 0, sizeof(*id)); >> + memset(id, 0, id_size); >> + >> cmd.dptr.prp1 = cpu_to_le64(iova); >> if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) { >> error_setg(errp, "Failed to identify controller"); >> @@ -547,7 +549,7 @@ static void nvme_identify(BlockDriverState *bs, >> int namespace, Error **errp) >> s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES); >> s->supports_discard = !!(oncs & NVME_ONCS_DSM); >> - memset(id, 0, sizeof(*id)); >> + memset(id, 0, id_size); >> cmd.cdw10 = 0; >> cmd.nsid = cpu_to_le32(namespace); >> if 
(nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) { >> >
diff --git a/block/nvme.c b/block/nvme.c index e3d96f20d0..088ff1825a 100644 --- a/block/nvme.c +++ b/block/nvme.c @@ -513,19 +513,21 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp) .opcode = NVME_ADM_CMD_IDENTIFY, .cdw10 = cpu_to_le32(0x1), }; + size_t id_size = QEMU_ALIGN_UP(sizeof(*id), qemu_real_host_page_size); - id = qemu_try_memalign(s->page_size, sizeof(*id)); + id = qemu_try_memalign(qemu_real_host_page_size, id_size); if (!id) { error_setg(errp, "Cannot allocate buffer for identify response"); goto out; } - r = qemu_vfio_dma_map(s->vfio, id, sizeof(*id), true, &iova); + r = qemu_vfio_dma_map(s->vfio, id, id_size, true, &iova); if (r) { error_setg(errp, "Cannot map buffer for DMA"); goto out; } - memset(id, 0, sizeof(*id)); + memset(id, 0, id_size); + cmd.dptr.prp1 = cpu_to_le64(iova); if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) { error_setg(errp, "Failed to identify controller"); @@ -547,7 +549,7 @@ static void nvme_identify(BlockDriverState *bs, int namespace, Error **errp) s->supports_write_zeroes = !!(oncs & NVME_ONCS_WRITE_ZEROES); s->supports_discard = !!(oncs & NVME_ONCS_DSM); - memset(id, 0, sizeof(*id)); + memset(id, 0, id_size); cmd.cdw10 = 0; cmd.nsid = cpu_to_le32(namespace); if (nvme_cmd_sync(bs, s->queues[INDEX_ADMIN], &cmd)) {
In preparation of 64kB host page support, let's change the size and alignment of the IDENTIFY command response buffer so that the VFIO DMA MAP succeeds. We align on the host page size. Signed-off-by: Eric Auger <eric.auger@redhat.com> --- block/nvme.c | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-)