Message ID | 20200629070404.10969-3-eric.auger@redhat.com
---|---
State | Superseded
Series | None
On Mon, Jun 29, 2020 at 09:04:01AM +0200, Eric Auger wrote:
> This patch implements the PROBE request. At the moment,
> only the RESV_MEM property is handled. The first goal is
> to report iommu wide reserved regions such as the MSI regions
> set by the machine code. On x86 this will be the IOAPIC MSI
> region, [0xFEE00000 - 0xFEEFFFFF], on ARM this may be the ITS
> doorbell.
>
> In the future we may introduce per device reserved regions.
> This will be useful when protecting host assigned devices
> which may expose their own reserved regions.
>
> Signed-off-by: Eric Auger <eric.auger@redhat.com>
> Reviewed-by: Jean-Philippe Brucker <jean-philippe@linaro.org>
>
> ---
>
> v6 -> v7:
> - put the assert again to make it clear there is no risk
>   of truncation
>
> v5 -> v6:
> - removed validation of s->reserved_regions[i].type in the
>   probe request as it should rather happen in the realize()
>
> v4 -> v5:
> - assert if reserved region type is different from RESERVED or
>   MSI
>
> v3 -> v4:
> - removed any reference to the NONE property that does not
>   exist anymore.
>
> v2 -> v3:
> - on probe, do not fill the remainder of the buffer with zeroes
>   as the buffer was already zero initialized (Bharat)
>
> v1 -> v2:
> - move the unlock back to the same place
> - remove the push label and factorize the code after the out label
> - fix a bunch of cpu_to_leX according to the latest spec revision
> - do not remove sizeof(last) from free space
> - check the ep exists
> ---
>  include/hw/virtio/virtio-iommu.h |  2 +
>  hw/virtio/virtio-iommu.c         | 94 ++++++++++++++++++++++++++++++--
>  hw/virtio/trace-events           |  1 +
>  3 files changed, 93 insertions(+), 4 deletions(-)
>
> diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
> index e653004d7c..49eb105cd8 100644
> --- a/include/hw/virtio/virtio-iommu.h
> +++ b/include/hw/virtio/virtio-iommu.h
> @@ -53,6 +53,8 @@ typedef struct VirtIOIOMMU {
>      GHashTable *as_by_busptr;
>      IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
>      PCIBus *primary_bus;
> +    ReservedRegion *reserved_regions;
> +    uint32_t nb_reserved_regions;
>      GTree *domains;
>      QemuMutex mutex;
>      GTree *endpoints;
> diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
> index 483883ec1d..2cdaa1969b 100644
> --- a/hw/virtio/virtio-iommu.c
> +++ b/hw/virtio/virtio-iommu.c
> @@ -38,6 +38,7 @@
>
>  /* Max size */
>  #define VIOMMU_DEFAULT_QUEUE_SIZE 256
> +#define VIOMMU_PROBE_SIZE 512
>
>  typedef struct VirtIOIOMMUDomain {
>      uint32_t id;
> @@ -378,6 +379,65 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
>      return ret;
>  }
>
> +static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
> +                                               uint8_t *buf, size_t free)
> +{
> +    struct virtio_iommu_probe_resv_mem prop = {};
> +    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
> +    int i;
> +
> +    total = size * s->nb_reserved_regions;
> +
> +    if (total > free) {
> +        return -ENOSPC;
> +    }
> +
> +    for (i = 0; i < s->nb_reserved_regions; i++) {
> +        unsigned subtype = s->reserved_regions[i].type;
> +
> +        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
> +               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
> +        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
> +        prop.head.length = cpu_to_le16(length);
> +        prop.subtype = subtype;
> +        prop.start = cpu_to_le64(s->reserved_regions[i].low);
> +        prop.end = cpu_to_le64(s->reserved_regions[i].high);
> +
> +        memcpy(buf, &prop, size);
> +
> +        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
> +                                              prop.start, prop.end);
> +        buf += size;
> +    }
> +    return total;
> +}
> +
> +/**
> + * virtio_iommu_probe - Fill the probe request buffer with
> + * the properties the device is able to return
> + */
> +static int virtio_iommu_probe(VirtIOIOMMU *s,
> +                              struct virtio_iommu_req_probe *req,
> +                              uint8_t *buf)
> +{
> +    uint32_t ep_id = le32_to_cpu(req->endpoint);
> +    size_t free = VIOMMU_PROBE_SIZE;
> +    ssize_t count;
> +
> +    if (!virtio_iommu_mr(s, ep_id)) {
> +        return VIRTIO_IOMMU_S_NOENT;
> +    }
> +
> +    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
> +    if (count < 0) {
> +        return VIRTIO_IOMMU_S_INVAL;
> +    }
> +    buf += count;
> +    free -= count;
> +
> +    return VIRTIO_IOMMU_S_OK;
> +}
> +
>  static int virtio_iommu_iov_to_req(struct iovec *iov,
>                                     unsigned int iov_cnt,
>                                     void *req, size_t req_sz)
> @@ -407,15 +467,27 @@ virtio_iommu_handle_req(detach)
>  virtio_iommu_handle_req(map)
>  virtio_iommu_handle_req(unmap)
>
> +static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
> +                                     struct iovec *iov,
> +                                     unsigned int iov_cnt,
> +                                     uint8_t *buf)
> +{
> +    struct virtio_iommu_req_probe req;
> +    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
> +
> +    return ret ? ret : virtio_iommu_probe(s, &req, buf);
> +}
> +
>  static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
>  {
>      VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
>      struct virtio_iommu_req_head head;
>      struct virtio_iommu_req_tail tail = {};
> +    size_t output_size = sizeof(tail), sz;
>      VirtQueueElement *elem;
>      unsigned int iov_cnt;
>      struct iovec *iov;
> -    size_t sz;
> +    void *buf = NULL;
>
>      for (;;) {
>          elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
> @@ -452,6 +524,17 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
>          case VIRTIO_IOMMU_T_UNMAP:
>              tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
>              break;
> +        case VIRTIO_IOMMU_T_PROBE:
> +        {
> +            struct virtio_iommu_req_tail *ptail;
> +
> +            output_size = s->config.probe_size + sizeof(tail);
> +            buf = g_malloc0(output_size);
> +
> +            ptail = (struct virtio_iommu_req_tail *)
> +                        (buf + s->config.probe_size);
> +            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
> +        }
>          default:
>              tail.status = VIRTIO_IOMMU_S_UNSUPP;
>          }
> @@ -459,12 +542,13 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
>
>      out:
>          sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
> -                          &tail, sizeof(tail));
> -        assert(sz == sizeof(tail));
> +                          buf ? buf : &tail, output_size);
> +        assert(sz == output_size);
>
> -        virtqueue_push(vq, elem, sizeof(tail));
> +        virtqueue_push(vq, elem, sz);
>          virtio_notify(vdev, vq);
>          g_free(elem);
> +        g_free(buf);
>      }
>  }
>
> @@ -667,6 +751,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
>      s->config.page_size_mask = TARGET_PAGE_MASK;
>      s->config.input_range.end = -1UL;
>      s->config.domain_range.end = 32;
> +    s->config.probe_size = VIOMMU_PROBE_SIZE;
>
>      virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
>      virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
> @@ -676,6 +761,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
>      virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
>      virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
>      virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
> +    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
>

Don't we need to disable this for existing machine types?

>      qemu_mutex_init(&s->mutex);
>
> diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
> index 6427a0047d..23109f69bb 100644
> --- a/hw/virtio/trace-events
> +++ b/hw/virtio/trace-events
> @@ -74,3 +74,4 @@ virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
>  virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
>  virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
>  virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
> +virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d start=0x%"PRIx64" end=0x%"PRIx64
> --
> 2.20.1
Please ignore, my mail is acting up and serving me old patches. Sorry about
the noise!

--
MST
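As background on the machine-type question raised in the review above: the usual QEMU pattern for keeping a newly advertised virtio feature bit off on pre-existing machine types is to guard it behind a device property and flip that property through a machine-compat entry. The sketch below is illustrative only and is not part of the patch; the "x-probe" property name, the probe_enabled field, and the choice of hw_compat array are assumptions made for this example.

/* Sketch only -- not from the patch. Gate VIRTIO_IOMMU_F_PROBE behind a
 * hypothetical "x-probe" bool property (backed by an assumed
 * 'probe_enabled' field added to VirtIOIOMMU) so that older machine types
 * can keep the feature disabled through a compat entry.
 */

/* hw/virtio/virtio-iommu.c */
static Property virtio_iommu_properties[] = {
    DEFINE_PROP_BOOL("x-probe", VirtIOIOMMU, probe_enabled, true),
    DEFINE_PROP_END_OF_LIST(),
};
/* registered from the class init via device_class_set_props(dc, ...) */

/* in virtio_iommu_device_realize(): only advertise the feature when enabled */
    if (s->probe_enabled) {
        s->config.probe_size = VIOMMU_PROBE_SIZE;
        virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
    }

/* hw/core/machine.c: keep it off on machine types that predate the feature,
 * e.g. by appending to the relevant hw_compat_* array:
 */
    { "virtio-iommu-device", "x-probe", "off" },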
diff --git a/include/hw/virtio/virtio-iommu.h b/include/hw/virtio/virtio-iommu.h
index e653004d7c..49eb105cd8 100644
--- a/include/hw/virtio/virtio-iommu.h
+++ b/include/hw/virtio/virtio-iommu.h
@@ -53,6 +53,8 @@ typedef struct VirtIOIOMMU {
     GHashTable *as_by_busptr;
     IOMMUPciBus *iommu_pcibus_by_bus_num[PCI_BUS_MAX];
     PCIBus *primary_bus;
+    ReservedRegion *reserved_regions;
+    uint32_t nb_reserved_regions;
     GTree *domains;
     QemuMutex mutex;
     GTree *endpoints;
diff --git a/hw/virtio/virtio-iommu.c b/hw/virtio/virtio-iommu.c
index 483883ec1d..2cdaa1969b 100644
--- a/hw/virtio/virtio-iommu.c
+++ b/hw/virtio/virtio-iommu.c
@@ -38,6 +38,7 @@

 /* Max size */
 #define VIOMMU_DEFAULT_QUEUE_SIZE 256
+#define VIOMMU_PROBE_SIZE 512

 typedef struct VirtIOIOMMUDomain {
     uint32_t id;
@@ -378,6 +379,65 @@ static int virtio_iommu_unmap(VirtIOIOMMU *s,
     return ret;
 }

+static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
+                                               uint8_t *buf, size_t free)
+{
+    struct virtio_iommu_probe_resv_mem prop = {};
+    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
+    int i;
+
+    total = size * s->nb_reserved_regions;
+
+    if (total > free) {
+        return -ENOSPC;
+    }
+
+    for (i = 0; i < s->nb_reserved_regions; i++) {
+        unsigned subtype = s->reserved_regions[i].type;
+
+        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
+               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
+        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
+        prop.head.length = cpu_to_le16(length);
+        prop.subtype = subtype;
+        prop.start = cpu_to_le64(s->reserved_regions[i].low);
+        prop.end = cpu_to_le64(s->reserved_regions[i].high);
+
+        memcpy(buf, &prop, size);
+
+        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
+                                              prop.start, prop.end);
+        buf += size;
+    }
+    return total;
+}
+
+/**
+ * virtio_iommu_probe - Fill the probe request buffer with
+ * the properties the device is able to return
+ */
+static int virtio_iommu_probe(VirtIOIOMMU *s,
+                              struct virtio_iommu_req_probe *req,
+                              uint8_t *buf)
+{
+    uint32_t ep_id = le32_to_cpu(req->endpoint);
+    size_t free = VIOMMU_PROBE_SIZE;
+    ssize_t count;
+
+    if (!virtio_iommu_mr(s, ep_id)) {
+        return VIRTIO_IOMMU_S_NOENT;
+    }
+
+    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
+    if (count < 0) {
+        return VIRTIO_IOMMU_S_INVAL;
+    }
+    buf += count;
+    free -= count;
+
+    return VIRTIO_IOMMU_S_OK;
+}
+
 static int virtio_iommu_iov_to_req(struct iovec *iov,
                                    unsigned int iov_cnt,
                                    void *req, size_t req_sz)
@@ -407,15 +467,27 @@ virtio_iommu_handle_req(detach)
 virtio_iommu_handle_req(map)
 virtio_iommu_handle_req(unmap)

+static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
+                                     struct iovec *iov,
+                                     unsigned int iov_cnt,
+                                     uint8_t *buf)
+{
+    struct virtio_iommu_req_probe req;
+    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));
+
+    return ret ? ret : virtio_iommu_probe(s, &req, buf);
+}
+
 static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
 {
     VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
     struct virtio_iommu_req_head head;
     struct virtio_iommu_req_tail tail = {};
+    size_t output_size = sizeof(tail), sz;
     VirtQueueElement *elem;
     unsigned int iov_cnt;
     struct iovec *iov;
-    size_t sz;
+    void *buf = NULL;

     for (;;) {
         elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
@@ -452,6 +524,17 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
         case VIRTIO_IOMMU_T_UNMAP:
             tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
             break;
+        case VIRTIO_IOMMU_T_PROBE:
+        {
+            struct virtio_iommu_req_tail *ptail;
+
+            output_size = s->config.probe_size + sizeof(tail);
+            buf = g_malloc0(output_size);
+
+            ptail = (struct virtio_iommu_req_tail *)
+                        (buf + s->config.probe_size);
+            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
+        }
         default:
             tail.status = VIRTIO_IOMMU_S_UNSUPP;
         }
@@ -459,12 +542,13 @@ static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)

     out:
         sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
-                          &tail, sizeof(tail));
-        assert(sz == sizeof(tail));
+                          buf ? buf : &tail, output_size);
+        assert(sz == output_size);

-        virtqueue_push(vq, elem, sizeof(tail));
+        virtqueue_push(vq, elem, sz);
         virtio_notify(vdev, vq);
         g_free(elem);
+        g_free(buf);
     }
 }

@@ -667,6 +751,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
     s->config.page_size_mask = TARGET_PAGE_MASK;
     s->config.input_range.end = -1UL;
     s->config.domain_range.end = 32;
+    s->config.probe_size = VIOMMU_PROBE_SIZE;

     virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
     virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
@@ -676,6 +761,7 @@ static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
     virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
     virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
     virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
+    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);

     qemu_mutex_init(&s->mutex);

diff --git a/hw/virtio/trace-events b/hw/virtio/trace-events
index 6427a0047d..23109f69bb 100644
--- a/hw/virtio/trace-events
+++ b/hw/virtio/trace-events
@@ -74,3 +74,4 @@ virtio_iommu_get_domain(uint32_t domain_id) "Alloc domain=%d"
 virtio_iommu_put_domain(uint32_t domain_id) "Free domain=%d"
 virtio_iommu_translate_out(uint64_t virt_addr, uint64_t phys_addr, uint32_t sid) "0x%"PRIx64" -> 0x%"PRIx64 " for sid=%d"
 virtio_iommu_report_fault(uint8_t reason, uint32_t flags, uint32_t endpoint, uint64_t addr) "FAULT reason=%d flags=%d endpoint=%d address =0x%"PRIx64
+virtio_iommu_fill_resv_property(uint32_t devid, uint8_t subtype, uint64_t start, uint64_t end) "dev= %d, type=%d start=0x%"PRIx64" end=0x%"PRIx64
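For illustration of what a guest driver gets back: virtio_iommu_fill_resv_mem_prop() above packs one type/length property per reserved region into a zero-initialized reply buffer of config.probe_size bytes, so a consumer can walk the buffer until it reaches a property type of 0 (NONE). The following standalone sketch shows such a walk; the local struct definitions mirror the probe property layout from the virtio-iommu spec, while the names (probe_resv_mem, parse_probe_buf, ...) and the use of glibc's <endian.h> helpers are choices made for this example only.

/* probe_walk_sketch.c -- illustrative only, not QEMU or Linux driver code. */
#include <endian.h>     /* le16toh()/le64toh()/htole*(): glibc/Linux */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Property type values as defined by the virtio-iommu spec. */
#define PROBE_T_NONE      0
#define PROBE_T_RESV_MEM  1

/* Wire layout of a property head and of a RESV_MEM property (mirrors
 * virtio_iommu_probe_property / virtio_iommu_probe_resv_mem). */
struct probe_prop_head {
    uint16_t type;               /* little-endian on the wire */
    uint16_t length;             /* payload length, head excluded */
} __attribute__((packed));

struct probe_resv_mem {
    struct probe_prop_head head;
    uint8_t  subtype;            /* RESERVED (0) or MSI (1) */
    uint8_t  reserved[3];
    uint64_t start;              /* little-endian */
    uint64_t end;                /* little-endian */
} __attribute__((packed));

/* Walk the probe reply; the device zero-fills the unused tail, so a
 * property type of 0 (NONE) terminates the walk. */
static void parse_probe_buf(const uint8_t *buf, size_t probe_size)
{
    size_t off = 0;

    while (off + sizeof(struct probe_prop_head) <= probe_size) {
        struct probe_prop_head head;

        memcpy(&head, buf + off, sizeof(head));
        if (le16toh(head.type) == PROBE_T_NONE) {
            break;
        }
        if (le16toh(head.type) == PROBE_T_RESV_MEM &&
            off + sizeof(struct probe_resv_mem) <= probe_size) {
            struct probe_resv_mem prop;

            memcpy(&prop, buf + off, sizeof(prop));
            printf("resv region subtype=%u [0x%" PRIx64 " - 0x%" PRIx64 "]\n",
                   (unsigned)prop.subtype,
                   le64toh(prop.start), le64toh(prop.end));
        }
        /* Unknown property types are skipped by their advertised length. */
        off += sizeof(head) + le16toh(head.length);
    }
}

int main(void)
{
    uint8_t buf[512] = { 0 };    /* same size as VIOMMU_PROBE_SIZE above */
    struct probe_resv_mem msi = {
        .head.type   = htole16(PROBE_T_RESV_MEM),
        .head.length = htole16(sizeof(msi) - sizeof(msi.head)),
        .subtype     = 1,        /* MSI */
        .start       = htole64(0xFEE00000),
        .end         = htole64(0xFEEFFFFF),
    };

    memcpy(buf, &msi, sizeof(msi));   /* what the device would have written */
    parse_probe_buf(buf, sizeof(buf));
    return 0;
}

Run against this example buffer, the walk prints a single MSI region matching the x86 IOAPIC range mentioned in the commit message.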