@@ -1228,6 +1228,75 @@ static int vfio_init_container(VFIOContainer *container, int group_fd,
     return 0;
 }
 
+static int vfio_get_iommu_info(VFIOContainer *container,
+                               struct vfio_iommu_type1_info **info)
+{
+
+    size_t argsz = sizeof(struct vfio_iommu_type1_info);
+
+    *info = g_new0(struct vfio_iommu_type1_info, 1);
+again:
+    (*info)->argsz = argsz;
+
+    if (ioctl(container->fd, VFIO_IOMMU_GET_INFO, *info)) {
+        g_free(*info);
+        *info = NULL;
+        return -errno;
+    }
+
+    if ((*info)->argsz > argsz) {
+        argsz = (*info)->argsz;
+        *info = g_realloc(*info, argsz);
+        goto again;
+    }
+
+    return 0;
+}
+
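The helper above implements the usual VFIO argsz handshake: when the reply (including any capability chain) does not fit, the kernel writes the size it needs back into argsz, and the helper reallocates and retries; on failure it frees the buffer and leaves *info NULL. A minimal caller sketch under those assumptions (use_info() is a hypothetical consumer, not part of this patch):

    struct vfio_iommu_type1_info *info;
    int ret = vfio_get_iommu_info(container, &info);

    if (ret) {
        return ret;     /* helper already freed the buffer, info is NULL */
    }
    use_info(info);     /* hypothetical consumer; info->argsz bytes are valid */
    g_free(info);       /* on success the caller owns the buffer */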
+static struct vfio_info_cap_header *
+vfio_get_iommu_info_cap(struct vfio_iommu_type1_info *info, uint16_t id)
+{
+    struct vfio_info_cap_header *hdr;
+    void *ptr = info;
+
+    if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
+        return NULL;
+    }
+
+    for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
+        if (hdr->id == id) {
+            return hdr;
+        }
+    }
+
+    return NULL;
+}
+
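Capabilities sit in the tail of the info buffer as a chain of struct vfio_info_cap_header entries; each header's next field is a byte offset from the start of the buffer, and a next of 0 makes hdr land back on ptr, which is what terminates the loop above. A hedged debug sketch walking the same chain (vfio_dump_iommu_caps is hypothetical, written as it would sit next to the code above in hw/vfio/common.c):

    static void vfio_dump_iommu_caps(struct vfio_iommu_type1_info *info)
    {
        struct vfio_info_cap_header *hdr;
        void *ptr = info;

        if (!(info->flags & VFIO_IOMMU_INFO_CAPS)) {
            return;    /* kernel attached no capability chain */
        }

        for (hdr = ptr + info->cap_offset; hdr != ptr; hdr = ptr + hdr->next) {
            printf("cap id %u version %u at offset %zu\n",
                   (unsigned)hdr->id, (unsigned)hdr->version,
                   (size_t)((char *)hdr - (char *)ptr));
        }
    }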
+static void vfio_get_iommu_info_migration(VFIOContainer *container,
+                                          struct vfio_iommu_type1_info *info)
+{
+    struct vfio_info_cap_header *hdr;
+    struct vfio_iommu_type1_info_cap_migration *cap_mig;
+
+    hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
+    if (!hdr) {
+        return;
+    }
+
+    cap_mig = container_of(hdr, struct vfio_iommu_type1_info_cap_migration,
+                           header);
+
+    /*
+     * cpu_physical_memory_set_dirty_lebitmap() expects the dirty bitmap at
+     * TARGET_PAGE_SIZE granularity, so only enable dirty page tracking if
+     * the kernel can report dirty pages at that page size.
+     */
+    if (cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE) {
+        container->dirty_pages_supported = true;
+        container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
+        container->dirty_pgsizes = cap_mig->pgsize_bitmap;
+    }
+}
+
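The capability check relies on pgsize_bitmap encoding every supported dirty-tracking granule as a set bit whose value is the page size itself, so cap_mig->pgsize_bitmap & TARGET_PAGE_SIZE tests a single bit (bit 12 for a 4 KiB target page). An illustration with assumed values:

    /* Assumed example: kernel offers 4 KiB and 2 MiB tracking granules. */
    uint64_t pgsize_bitmap = (1ULL << 12) | (1ULL << 21);

    g_assert(pgsize_bitmap & 4096);            /* 4096 == 1 << 12: supported */
    g_assert(!(pgsize_bitmap & (1ULL << 16))); /* 64 KiB granule: not offered */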
 static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
                                   Error **errp)
 {
@@ -1297,6 +1366,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     container->space = space;
     container->fd = fd;
     container->error = NULL;
+    container->dirty_pages_supported = false;
     QLIST_INIT(&container->giommu_list);
     QLIST_INIT(&container->hostwin_list);
 
@@ -1309,7 +1379,7 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
     case VFIO_TYPE1v2_IOMMU:
     case VFIO_TYPE1_IOMMU:
     {
-        struct vfio_iommu_type1_info info;
+        struct vfio_iommu_type1_info *info;
 
         /*
          * FIXME: This assumes that a Type1 IOMMU can map any 64-bit
@@ -1318,15 +1388,20 @@ static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
          * existing Type1 IOMMUs generally support any IOVA we're
          * going to actually try in practice.
          */
-        info.argsz = sizeof(info);
-        ret = ioctl(fd, VFIO_IOMMU_GET_INFO, &info);
-        /* Ignore errors */
-        if (ret || !(info.flags & VFIO_IOMMU_INFO_PGSIZES)) {
+        ret = vfio_get_iommu_info(container, &info);
+
+        if (ret || !(info->flags & VFIO_IOMMU_INFO_PGSIZES)) {
             /* Assume 4k IOVA page size */
-            info.iova_pgsizes = 4096;
+            container->pgsizes = 4096;
+        } else {
+            container->pgsizes = info->iova_pgsizes;
         }
-        vfio_host_win_add(container, 0, (hwaddr)-1, info.iova_pgsizes);
-        container->pgsizes = info.iova_pgsizes;
+        vfio_host_win_add(container, 0, (hwaddr)-1, container->pgsizes);
+
+        if (!ret) {
+            vfio_get_iommu_info_migration(container, info);
+        }
+        g_free(info);
         break;
     }
     case VFIO_SPAPR_TCE_v2_IOMMU:
@@ -832,9 +832,14 @@ err:
 
 int vfio_migration_probe(VFIODevice *vbasedev, Error **errp)
 {
+    VFIOContainer *container = vbasedev->group->container;
     struct vfio_region_info *info = NULL;
     Error *local_err = NULL;
-    int ret;
+    int ret = -ENOTSUP;
+
+    if (!container->dirty_pages_supported) {
+        goto add_blocker;
+    }
 
     ret = vfio_get_dev_region_info(vbasedev, VFIO_REGION_TYPE_MIGRATION,
                                    VFIO_REGION_SUBTYPE_MIGRATION, &info);
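The goto targets the function's existing add_blocker label, whose body is outside this hunk; in QEMU it registers a migration blocker so a guest with such a device refuses to migrate rather than migrating incorrectly. A hedged sketch of what that path conventionally looks like (the exact body is not shown in this diff):

    add_blocker:
        error_setg(&vbasedev->migration_blocker,
                   "VFIO device doesn't support migration");
        ret = migrate_add_blocker(vbasedev->migration_blocker, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            error_free(vbasedev->migration_blocker);
            vbasedev->migration_blocker = NULL;
        }
        return ret;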
@@ -84,6 +84,9 @@ typedef struct VFIOContainer {
     unsigned iommu_type;
     Error *error;
     bool initialized;
+    bool dirty_pages_supported;
+    uint64_t dirty_pgsizes;
+    uint64_t max_dirty_bitmap_size;
     unsigned long pgsizes;
     QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
     QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
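The three new fields are consumed when the dirty bitmap is actually queried: dirty_pages_supported gates the request, dirty_pgsizes records the granules the kernel offers, and max_dirty_bitmap_size bounds a single query. A hedged sketch of such a query, built on the VFIO_IOMMU_DIRTY_PAGES ioctl and its structures from the kernel UAPI (linux/vfio.h); vfio_query_dirty_bitmap is a hypothetical helper, not part of this patch:

    static int vfio_query_dirty_bitmap(VFIOContainer *container, uint64_t iova,
                                       uint64_t size, uint64_t *bitmap_out)
    {
        struct vfio_iommu_type1_dirty_bitmap *dbitmap;
        struct vfio_iommu_type1_dirty_bitmap_get *range;
        int ret;

        if (!container->dirty_pages_supported) {
            return -ENOTSUP;    /* capability was never reported */
        }

        dbitmap = g_malloc0(sizeof(*dbitmap) + sizeof(*range));
        dbitmap->argsz = sizeof(*dbitmap) + sizeof(*range);
        dbitmap->flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_GET_BITMAP;
        range = (void *)&dbitmap->data;

        range->iova = iova;
        range->size = size;
        /* One bit per TARGET_PAGE_SIZE page, as validated at container setup. */
        range->bitmap.pgsize = TARGET_PAGE_SIZE;
        range->bitmap.size = DIV_ROUND_UP(size / TARGET_PAGE_SIZE,
                                          BITS_PER_BYTE);
        range->bitmap.data = (__u64 *)bitmap_out;

        if (range->bitmap.size > container->max_dirty_bitmap_size) {
            g_free(dbitmap);
            return -E2BIG;      /* caller would have to split the range */
        }

        ret = ioctl(container->fd, VFIO_IOMMU_DIRTY_PAGES, dbitmap);
        g_free(dbitmap);
        return ret ? -errno : 0;
    }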