[v2,05/13] iommufd: Add IOMMUFD_OBJ_EVENTQ_VIRQ and IOMMUFD_CMD_VIRQ_ALLOC

Message ID 7f5f7adc2493c7bca7edf76ca15b377c8dc0d397.1733263737.git.nicolinc@nvidia.com
State New

Commit Message

Nicolin Chen Dec. 3, 2024, 10:10 p.m. UTC
Allow a vIOMMU object to allocate vIRQ event queues, with the restriction
that each vIOMMU can have only one vIRQ event queue per type.

Add iommufd_eventq_virq_alloc with an iommufd_eventq_virq_ops for this new
ioctl.
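
A rough sketch of the expected user-space flow (purely illustrative; it
assumes an iommufd fd and a viommu_id obtained earlier via
IOMMU_VIOMMU_ALLOC, plus a driver-specific @type that a later patch would
add to enum iommu_virq_type, since IOMMU_VIRQ_TYPE_NONE is rejected):

	/* Hypothetical helper, not part of this series */
	static int virq_alloc(int iommufd, __u32 viommu_id, __u32 type)
	{
		struct iommu_virq_alloc cmd = {
			.size = sizeof(cmd),
			.viommu_id = viommu_id,
			.type = type,
		};

		if (ioctl(iommufd, IOMMU_VIRQ_ALLOC, &cmd))
			return -1;
		/*
		 * vIRQ records can then be read() from cmd.out_virq_fd; user
		 * space must close() the fd once it is done with it.
		 */
		return cmd.out_virq_fd;
	}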

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/iommufd/iommufd_private.h |  59 ++++++++++
 include/linux/iommufd.h                 |   3 +
 include/uapi/linux/iommufd.h            |  31 ++++++
 drivers/iommu/iommufd/eventq.c          | 138 ++++++++++++++++++++++++
 drivers/iommu/iommufd/main.c            |   6 ++
 drivers/iommu/iommufd/viommu.c          |   2 +
 6 files changed, 239 insertions(+)

Comments

Tian, Kevin Dec. 11, 2024, 7:55 a.m. UTC | #1
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: Wednesday, December 4, 2024 6:10 AM
> +
> +/* An iommufd_virq represents a vIOMMU interrupt in an eventq_virq queue */
> +struct iommufd_virq {
> +	struct iommufd_eventq_virq *eventq_virq;
> +	struct list_head node;
> +	ssize_t irq_len;
> +	void *irq_data;
> +};

Looks like the only use of eventq_virq is below:

> +
> +static inline int iommufd_eventq_virq_handler(struct iommufd_virq *virq)
> +{
> +	return iommufd_eventq_notify(&virq->eventq_virq->common, &virq->node);
> +}

If there are no other intended uses of that field, it's simpler to
remove it and pass the pointer in directly when the handler is called.
In any case, iommufd_viommu_report_irq() needs to find the eventq first
before calling it.
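
Something along these lines, perhaps (untested, just to sketch the idea):

	struct iommufd_virq {
		struct list_head node;
		ssize_t irq_len;
		void *irq_data;
	};

	static inline int
	iommufd_eventq_virq_handler(struct iommufd_eventq_virq *eventq_virq,
				    struct iommufd_virq *virq)
	{
		return iommufd_eventq_notify(&eventq_virq->common, &virq->node);
	}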

> +/**
> + * struct iommu_virq_alloc - ioctl(IOMMU_VIRQ_ALLOC)
> + * @size: sizeof(struct iommu_virq_alloc)
> + * @flags: Must be 0
> + * @viommu: virtual IOMMU ID to associate the virtual IRQ with
> + * @type: Type of the virtual IRQ. Must be defined in enum iommu_virq_type
> + * @out_virq_id: The ID of the new virtual IRQ
> + * @out_fault_fd: The fd of the new virtual IRQ. User space must close the
> + *                successfully returned fd after using it

s/out_fault_fd/out_virq_fd/

> + *
> + * Explicitly allocate a virtual IRQ handler for a vIOMMU. A vIOMMU can have
> + * multiple FDs for different @type, but is confined to one FD per @type.
> + */

s/handler/interface/

> +
> +	eventq_virq->irq_wq = alloc_workqueue("viommu_irq/%d", WQ_UNBOUND, 0,
> +					      eventq_virq->common.obj.id);
> +	if (!eventq_virq->irq_wq) {
> +		rc = -ENOMEM;
> +		goto out_put_fdno;
> +	}

there is no use of this wq

> @@ -335,6 +335,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
>  	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
>  	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_eventq_iopf_alloc,
>  		 struct iommu_fault_alloc, out_fault_fd),
> +	IOCTL_OP(IOMMU_VIRQ_ALLOC, iommufd_eventq_virq_alloc,
> +		 struct iommu_virq_alloc, out_virq_fd),

sort it in alphabetical order.
Nicolin Chen Dec. 12, 2024, 9:20 p.m. UTC | #2
On Wed, Dec 11, 2024 at 07:55:53AM +0000, Tian, Kevin wrote:
> > From: Nicolin Chen <nicolinc@nvidia.com>
> > Sent: Wednesday, December 4, 2024 6:10 AM
> > +
> > +/* An iommufd_virq represents a vIOMMU interrupt in an eventq_virq queue */
> > +struct iommufd_virq {
> > +	struct iommufd_eventq_virq *eventq_virq;
> > +	struct list_head node;
> > +	ssize_t irq_len;
> > +	void *irq_data;
> > +};
> 
> Looks like the only use of eventq_virq is below:
> 
> > +
> > +static inline int iommufd_eventq_virq_handler(struct iommufd_virq *virq)
> > +{
> > +	return iommufd_eventq_notify(&virq->eventq_virq->common, &virq->node);
> > +}
> 
> If there are no other intended uses of that field, it's simpler to
> remove it and pass the pointer in directly when the handler is called.
> In any case, iommufd_viommu_report_irq() needs to find the eventq first
> before calling it.

OK.

> > +/**
> > + * struct iommu_virq_alloc - ioctl(IOMMU_VIRQ_ALLOC)
> > + * @size: sizeof(struct iommu_virq_alloc)
> > + * @flags: Must be 0
> > + * @viommu: virtual IOMMU ID to associate the virtual IRQ with
> > + * @type: Type of the virtual IRQ. Must be defined in enum iommu_virq_type
> > + * @out_virq_id: The ID of the new virtual IRQ
> > + * @out_fault_fd: The fd of the new virtual IRQ. User space must close the
> > + *                successfully returned fd after using it
> 
> s/out_fault_fd/out_virq_fd/
> 
> > + *
> > + * Explicitly allocate a virtual IRQ handler for a vIOMMU. A vIOMMU can have
> > + * multiple FDs for different @type, but is confined to one FD per @type.
> > + */
> 
> s/handler/interface/
> 
> > +
> > +	eventq_virq->irq_wq = alloc_workqueue("viommu_irq/%d", WQ_UNBOUND, 0,
> > +					      eventq_virq->common.obj.id);
> > +	if (!eventq_virq->irq_wq) {
> > +		rc = -ENOMEM;
> > +		goto out_put_fdno;
> > +	}
> 
> there is no use of this wq

Oops. Looks like I forgot to clean it up.

> > @@ -335,6 +335,8 @@ static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
> >  	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
> >  	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_eventq_iopf_alloc,
> >  		 struct iommu_fault_alloc, out_fault_fd),
> > +	IOCTL_OP(IOMMU_VIRQ_ALLOC, iommufd_eventq_virq_alloc,
> > +		 struct iommu_virq_alloc, out_virq_fd),
> 
> sort it in alphabetical order.

Ack.

Thanks
Nic

Patch

diff --git a/drivers/iommu/iommufd/iommufd_private.h b/drivers/iommu/iommufd/iommufd_private.h
index 1c9a101cc435..fd0b87707967 100644
--- a/drivers/iommu/iommufd/iommufd_private.h
+++ b/drivers/iommu/iommufd/iommufd_private.h
@@ -548,6 +548,51 @@  static inline int iommufd_hwpt_replace_device(struct iommufd_device *idev,
 	return iommu_group_replace_domain(idev->igroup->group, hwpt->domain);
 }
 
+/*
+ * An iommufd_eventq_virq object represents a queue to deliver vIOMMU interrupts
+ * to the user space. These objects are created/destroyed by the user space and
+ * associated with vIOMMU object(s) during the allocations.
+ */
+struct iommufd_eventq_virq {
+	struct iommufd_eventq common;
+	struct iommufd_viommu *viommu;
+	struct workqueue_struct *irq_wq;
+	struct list_head node;
+
+	unsigned int type;
+};
+
+static inline struct iommufd_eventq_virq *
+to_eventq_virq(struct iommufd_eventq *eventq)
+{
+	return container_of(eventq, struct iommufd_eventq_virq, common);
+}
+
+static inline struct iommufd_eventq_virq *
+iommufd_get_eventq_virq(struct iommufd_ucmd *ucmd, u32 id)
+{
+	return container_of(iommufd_get_object(ucmd->ictx, id,
+					       IOMMUFD_OBJ_EVENTQ_VIRQ),
+			    struct iommufd_eventq_virq, common.obj);
+}
+
+int iommufd_eventq_virq_alloc(struct iommufd_ucmd *ucmd);
+void iommufd_eventq_virq_destroy(struct iommufd_object *obj);
+void iommufd_eventq_virq_abort(struct iommufd_object *obj);
+
+/* An iommufd_virq represents a vIOMMU interrupt in an eventq_virq queue */
+struct iommufd_virq {
+	struct iommufd_eventq_virq *eventq_virq;
+	struct list_head node;
+	ssize_t irq_len;
+	void *irq_data;
+};
+
+static inline int iommufd_eventq_virq_handler(struct iommufd_virq *virq)
+{
+	return iommufd_eventq_notify(&virq->eventq_virq->common, &virq->node);
+}
+
 static inline struct iommufd_viommu *
 iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
 {
@@ -556,6 +601,20 @@  iommufd_get_viommu(struct iommufd_ucmd *ucmd, u32 id)
 			    struct iommufd_viommu, obj);
 }
 
+static inline struct iommufd_eventq_virq *
+iommufd_viommu_find_eventq_virq(struct iommufd_viommu *viommu, u32 type)
+{
+	struct iommufd_eventq_virq *eventq_virq, *next;
+
+	lockdep_assert_held(&viommu->virqs_rwsem);
+
+	list_for_each_entry_safe(eventq_virq, next, &viommu->virqs, node) {
+		if (eventq_virq->type == type)
+			return eventq_virq;
+	}
+	return NULL;
+}
+
 int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd);
 void iommufd_viommu_destroy(struct iommufd_object *obj);
 int iommufd_vdevice_alloc_ioctl(struct iommufd_ucmd *ucmd);
diff --git a/include/linux/iommufd.h b/include/linux/iommufd.h
index 7ad105ab8090..40cc9bbb1d24 100644
--- a/include/linux/iommufd.h
+++ b/include/linux/iommufd.h
@@ -32,6 +32,7 @@  enum iommufd_object_type {
 	IOMMUFD_OBJ_IOAS,
 	IOMMUFD_OBJ_ACCESS,
 	IOMMUFD_OBJ_EVENTQ_IOPF,
+	IOMMUFD_OBJ_EVENTQ_VIRQ,
 	IOMMUFD_OBJ_VIOMMU,
 	IOMMUFD_OBJ_VDEVICE,
 #ifdef CONFIG_IOMMUFD_TEST
@@ -93,6 +94,8 @@  struct iommufd_viommu {
 	const struct iommufd_viommu_ops *ops;
 
 	struct xarray vdevs;
+	struct list_head virqs;
+	struct rw_semaphore virqs_rwsem;
 
 	unsigned int type;
 };
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index 34810f6ae2b5..d9319f5b7c69 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -55,6 +55,7 @@  enum {
 	IOMMUFD_CMD_VIOMMU_ALLOC = 0x90,
 	IOMMUFD_CMD_VDEVICE_ALLOC = 0x91,
 	IOMMUFD_CMD_IOAS_CHANGE_PROCESS = 0x92,
+	IOMMUFD_CMD_VIRQ_ALLOC = 0x93,
 };
 
 /**
@@ -1012,4 +1013,34 @@  struct iommu_ioas_change_process {
 #define IOMMU_IOAS_CHANGE_PROCESS \
 	_IO(IOMMUFD_TYPE, IOMMUFD_CMD_IOAS_CHANGE_PROCESS)
 
+/**
+ * enum iommu_virq_type - Virtual IRQ Type
+ * @IOMMU_VIRQ_TYPE_NONE: INVALID type
+ */
+enum iommu_virq_type {
+	IOMMU_VIRQ_TYPE_NONE = 0,
+};
+
+/**
+ * struct iommu_virq_alloc - ioctl(IOMMU_VIRQ_ALLOC)
+ * @size: sizeof(struct iommu_virq_alloc)
+ * @flags: Must be 0
+ * @viommu: virtual IOMMU ID to associate the virtual IRQ with
+ * @type: Type of the virtual IRQ. Must be defined in enum iommu_virq_type
+ * @out_virq_id: The ID of the new virtual IRQ
+ * @out_fault_fd: The fd of the new virtual IRQ. User space must close the
+ *                successfully returned fd after using it
+ *
+ * Explicitly allocate a virtual IRQ handler for a vIOMMU. A vIOMMU can have
+ * multiple FDs for different @type, but is confined to one FD per @type.
+ */
+struct iommu_virq_alloc {
+	__u32 size;
+	__u32 flags;
+	__u32 viommu_id;
+	__u32 type;
+	__u32 out_virq_id;
+	__u32 out_virq_fd;
+};
+#define IOMMU_VIRQ_ALLOC _IO(IOMMUFD_TYPE, IOMMUFD_CMD_VIRQ_ALLOC)
 #endif
diff --git a/drivers/iommu/iommufd/eventq.c b/drivers/iommu/iommufd/eventq.c
index 3674961a45c2..cf07b3c21f1e 100644
--- a/drivers/iommu/iommufd/eventq.c
+++ b/drivers/iommu/iommufd/eventq.c
@@ -346,6 +346,75 @@  static const struct iommufd_eventq_ops iommufd_eventq_iopf_ops = {
 	.write = &iommufd_eventq_iopf_fops_write,
 };
 
+/* IOMMUFD_OBJ_EVENTQ_VIRQ Functions */
+
+void iommufd_eventq_virq_abort(struct iommufd_object *obj)
+{
+	struct iommufd_eventq *eventq =
+		container_of(obj, struct iommufd_eventq, obj);
+	struct iommufd_eventq_virq *eventq_virq = to_eventq_virq(eventq);
+	struct iommufd_viommu *viommu = eventq_virq->viommu;
+	struct iommufd_virq *virq, *next;
+
+	lockdep_assert_held_write(&viommu->virqs_rwsem);
+
+	list_for_each_entry_safe(virq, next, &eventq->deliver, node) {
+		list_del(&virq->node);
+		kfree(virq);
+	}
+
+	if (eventq_virq->irq_wq)
+		destroy_workqueue(eventq_virq->irq_wq);
+	refcount_dec(&viommu->obj.users);
+	mutex_destroy(&eventq->mutex);
+	list_del(&eventq_virq->node);
+}
+
+void iommufd_eventq_virq_destroy(struct iommufd_object *obj)
+{
+	struct iommufd_eventq_virq *eventq_virq =
+		to_eventq_virq(container_of(obj, struct iommufd_eventq, obj));
+
+	down_write(&eventq_virq->viommu->virqs_rwsem);
+	iommufd_eventq_virq_abort(obj);
+	up_write(&eventq_virq->viommu->virqs_rwsem);
+}
+
+static ssize_t iommufd_eventq_virq_fops_read(struct iommufd_eventq *eventq,
+					     char __user *buf, size_t count,
+					     loff_t *ppos)
+{
+	size_t done = 0;
+	int rc = 0;
+
+	if (*ppos)
+		return -ESPIPE;
+
+	mutex_lock(&eventq->mutex);
+	while (!list_empty(&eventq->deliver) && count > done) {
+		struct iommufd_virq *virq = list_first_entry(
+			&eventq->deliver, struct iommufd_virq, node);
+
+		if (virq->irq_len > count - done)
+			break;
+
+		if (copy_to_user(buf + done, virq->irq_data, virq->irq_len)) {
+			rc = -EFAULT;
+			break;
+		}
+		done += virq->irq_len;
+		list_del(&virq->node);
+		kfree(virq);
+	}
+	mutex_unlock(&eventq->mutex);
+
+	return done == 0 ? rc : done;
+}
+
+static const struct iommufd_eventq_ops iommufd_eventq_virq_ops = {
+	.read = &iommufd_eventq_virq_fops_read,
+};
+
 /* Common Event Queue Functions */
 
 static ssize_t iommufd_eventq_fops_read(struct file *filep, char __user *buf,
@@ -472,3 +541,72 @@  int iommufd_eventq_iopf_alloc(struct iommufd_ucmd *ucmd)
 
 	return rc;
 }
+
+int iommufd_eventq_virq_alloc(struct iommufd_ucmd *ucmd)
+{
+	struct iommu_virq_alloc *cmd = ucmd->cmd;
+	struct iommufd_eventq_virq *eventq_virq;
+	struct iommufd_viommu *viommu;
+	int fdno;
+	int rc;
+
+	if (cmd->flags || cmd->type == IOMMU_VIRQ_TYPE_NONE)
+		return -EOPNOTSUPP;
+
+	viommu = iommufd_get_viommu(ucmd, cmd->viommu_id);
+	if (IS_ERR(viommu))
+		return PTR_ERR(viommu);
+	down_write(&viommu->virqs_rwsem);
+
+	if (iommufd_viommu_find_eventq_virq(viommu, cmd->type)) {
+		rc = -EEXIST;
+		goto out_unlock_virqs;
+	}
+
+	eventq_virq = __iommufd_object_alloc(
+		ucmd->ictx, eventq_virq, IOMMUFD_OBJ_EVENTQ_VIRQ, common.obj);
+	if (IS_ERR(eventq_virq)) {
+		rc = PTR_ERR(eventq_virq);
+		goto out_unlock_virqs;
+	}
+
+	eventq_virq->type = cmd->type;
+	eventq_virq->viommu = viommu;
+	refcount_inc(&viommu->obj.users);
+	list_add_tail(&eventq_virq->node, &viommu->virqs);
+
+	fdno = iommufd_eventq_init(&eventq_virq->common, "[iommufd-viommu-irq]",
+				 ucmd->ictx, &iommufd_eventq_virq_ops);
+	if (fdno < 0) {
+		rc = fdno;
+		goto out_abort;
+	}
+
+	eventq_virq->irq_wq = alloc_workqueue("viommu_irq/%d", WQ_UNBOUND, 0,
+					      eventq_virq->common.obj.id);
+	if (!eventq_virq->irq_wq) {
+		rc = -ENOMEM;
+		goto out_put_fdno;
+	}
+
+	cmd->out_virq_id = eventq_virq->common.obj.id;
+	cmd->out_virq_fd = fdno;
+
+	rc = iommufd_ucmd_respond(ucmd, sizeof(*cmd));
+	if (rc)
+		goto out_put_fdno;
+
+	iommufd_object_finalize(ucmd->ictx, &eventq_virq->common.obj);
+	fd_install(fdno, eventq_virq->common.filep);
+	goto out_unlock_virqs;
+
+out_put_fdno:
+	put_unused_fd(fdno);
+	fput(eventq_virq->common.filep);
+out_abort:
+	iommufd_object_abort_and_destroy(ucmd->ictx, &eventq_virq->common.obj);
+out_unlock_virqs:
+	up_write(&viommu->virqs_rwsem);
+	iommufd_put_object(ucmd->ictx, &viommu->obj);
+	return rc;
+}
diff --git a/drivers/iommu/iommufd/main.c b/drivers/iommu/iommufd/main.c
index 539c24ada6d0..89e8ac56f4ce 100644
--- a/drivers/iommu/iommufd/main.c
+++ b/drivers/iommu/iommufd/main.c
@@ -335,6 +335,8 @@  static const struct iommufd_ioctl_op iommufd_ioctl_ops[] = {
 	IOCTL_OP(IOMMU_DESTROY, iommufd_destroy, struct iommu_destroy, id),
 	IOCTL_OP(IOMMU_FAULT_QUEUE_ALLOC, iommufd_eventq_iopf_alloc,
 		 struct iommu_fault_alloc, out_fault_fd),
+	IOCTL_OP(IOMMU_VIRQ_ALLOC, iommufd_eventq_virq_alloc,
+		 struct iommu_virq_alloc, out_virq_fd),
 	IOCTL_OP(IOMMU_GET_HW_INFO, iommufd_get_hw_info, struct iommu_hw_info,
 		 __reserved),
 	IOCTL_OP(IOMMU_HWPT_ALLOC, iommufd_hwpt_alloc, struct iommu_hwpt_alloc,
@@ -504,6 +506,10 @@  static const struct iommufd_object_ops iommufd_object_ops[] = {
 	[IOMMUFD_OBJ_EVENTQ_IOPF] = {
 		.destroy = iommufd_eventq_iopf_destroy,
 	},
+	[IOMMUFD_OBJ_EVENTQ_VIRQ] = {
+		.destroy = iommufd_eventq_virq_destroy,
+		.abort = iommufd_eventq_virq_abort,
+	},
 	[IOMMUFD_OBJ_VIOMMU] = {
 		.destroy = iommufd_viommu_destroy,
 	},
diff --git a/drivers/iommu/iommufd/viommu.c b/drivers/iommu/iommufd/viommu.c
index 69b88e8c7c26..075b6aed79bc 100644
--- a/drivers/iommu/iommufd/viommu.c
+++ b/drivers/iommu/iommufd/viommu.c
@@ -59,6 +59,8 @@  int iommufd_viommu_alloc_ioctl(struct iommufd_ucmd *ucmd)
 	viommu->ictx = ucmd->ictx;
 	viommu->hwpt = hwpt_paging;
 	refcount_inc(&viommu->hwpt->common.obj.users);
+	INIT_LIST_HEAD(&viommu->virqs);
+	init_rwsem(&viommu->virqs_rwsem);
 	/*
 	 * It is the most likely case that a physical IOMMU is unpluggable. A
 	 * pluggable IOMMU instance (if exists) is responsible for refcounting