[v2,13/13] iommu/arm-smmu-v3: Report IRQs that belong to devices attached to vIOMMU

Message ID d335936fdeccfcf785589800b7e5d9b1b26a766d.1733263737.git.nicolinc@nvidia.com

Commit Message

Nicolin Chen Dec. 3, 2024, 10:10 p.m. UTC
Aside from the IOPF framework, iommufd provides an additional pathway for
reporting hardware events or IRQs to user space, via the vIRQ infrastructure
of a vIOMMU.

Define an iommu_virq_arm_smmuv3 uAPI structure, and report stage-1 faults
in the threaded IRQ handler.
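
For illustration, a VMM receiving such a record can route it by decoding only
the first 64-bit word, since the StreamID field already carries the virtual
device ID. A hypothetical user-space sketch (the struct mirror, helper name
and le64toh() below are illustration only; the field offsets follow the
SMMUv3 Event record layout):

#include <stdint.h>
#include <endian.h>

/* Mirrors the uAPI struct iommu_virq_arm_smmuv3: a raw 256-bit event record */
struct virq_arm_smmuv3 {
	uint64_t evt[4];
};

static void vmm_handle_virq(const struct virq_arm_smmuv3 *virq)
{
	uint64_t evt0 = le64toh(virq->evt[0]);	/* record words are little-endian */
	uint8_t type = evt0 & 0xff;		/* event ID, bits [7:0] */
	uint32_t vsid = evt0 >> 32;		/* virtual StreamID, bits [63:32] */

	/*
	 * Look up the vDEVICE by vsid and inject all four words into the
	 * guest's virtual event queue.
	 */
}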

Signed-off-by: Nicolin Chen <nicolinc@nvidia.com>
---
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h   |  7 +++
 include/uapi/linux/iommufd.h                  | 14 +++++
 .../arm/arm-smmu-v3/arm-smmu-v3-iommufd.c     | 16 +++++
 drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c   | 62 ++++++++++---------
 4 files changed, 71 insertions(+), 28 deletions(-)

Comments

Tian, Kevin Dec. 11, 2024, 8:21 a.m. UTC | #1
> From: Nicolin Chen <nicolinc@nvidia.com>
> Sent: Wednesday, December 4, 2024 6:10 AM
> 
> +
> +/**
> + * struct iommu_virq_arm_smmuv3 - ARM SMMUv3 Virtual IRQ
> + *                                (IOMMU_VIRQ_TYPE_ARM_SMMUV3)
> + * @evt: 256-bit ARM SMMUv3 Event record, little-endian.
> + *
> + * StreamID field reports a virtual device ID. To receive a virtual IRQ for a
> + * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC.
> + */

similar to what's provided for iommu_hw_info_arm_smmuv3, it'd be
good to refer to a section in smmu spec for bit definitions.

> @@ -1779,33 +1779,6 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
>  		return -EOPNOTSUPP;
>  	}
> 
> -	if (!(evt[1] & EVTQ_1_STALL))
> -		return -EOPNOTSUPP;
> -
> -	if (evt[1] & EVTQ_1_RnW)
> -		perm |= IOMMU_FAULT_PERM_READ;
> -	else
> -		perm |= IOMMU_FAULT_PERM_WRITE;
> -
> -	if (evt[1] & EVTQ_1_InD)
> -		perm |= IOMMU_FAULT_PERM_EXEC;
> -
> -	if (evt[1] & EVTQ_1_PnU)
> -		perm |= IOMMU_FAULT_PERM_PRIV;
> -
> -	flt->type = IOMMU_FAULT_PAGE_REQ;
> -	flt->prm = (struct iommu_fault_page_request) {
> -		.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
> -		.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
> -		.perm = perm,
> -		.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
> -	};
> -
> -	if (ssid_valid) {
> -		flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> -		flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
> -	}
> -
>  	mutex_lock(&smmu->streams_mutex);
>  	master = arm_smmu_find_master(smmu, sid);
>  	if (!master) {
> @@ -1813,7 +1786,40 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
>  		goto out_unlock;
>  	}
> 
> -	ret = iommu_report_device_fault(master->dev, &fault_evt);
> +	down_read(&master->vmaster_rwsem);

this lock is not required if event is EVTQ_1_STALL?

> +	if (evt[1] & EVTQ_1_STALL) {
> +		if (evt[1] & EVTQ_1_RnW)
> +			perm |= IOMMU_FAULT_PERM_READ;
> +		else
> +			perm |= IOMMU_FAULT_PERM_WRITE;
> +
> +		if (evt[1] & EVTQ_1_InD)
> +			perm |= IOMMU_FAULT_PERM_EXEC;
> +
> +		if (evt[1] & EVTQ_1_PnU)
> +			perm |= IOMMU_FAULT_PERM_PRIV;
> +
> +		flt->type = IOMMU_FAULT_PAGE_REQ;
> +		flt->prm = (struct iommu_fault_page_request){
> +			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
> +			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
> +			.perm = perm,
> +			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
> +		};
> +
> +		if (ssid_valid) {
> +			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> +			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
> +		}
> +
> +		ret = iommu_report_device_fault(master->dev, &fault_evt);
> +	} else if (master->vmaster && !(evt[1] & EVTQ_1_S2)) {
> +		ret = arm_vmaster_report_event(master->vmaster, evt);
> +	} else {
> +		/* Unhandled events should be pinned */
> +		ret = -EFAULT;
> +	}
> +	up_read(&master->vmaster_rwsem);
>  out_unlock:
>  	mutex_unlock(&smmu->streams_mutex);
>  	return ret;
> --
> 2.43.0
Nicolin Chen Dec. 12, 2024, 9:34 p.m. UTC | #2
On Wed, Dec 11, 2024 at 08:21:42AM +0000, Tian, Kevin wrote:
> > From: Nicolin Chen <nicolinc@nvidia.com>
> > Sent: Wednesday, December 4, 2024 6:10 AM
> > 
> > +
> > +/**
> > + * struct iommu_virq_arm_smmuv3 - ARM SMMUv3 Virtual IRQ
> > + *                                (IOMMU_VIRQ_TYPE_ARM_SMMUV3)
> > + * @evt: 256-bit ARM SMMUv3 Event record, little-endian.
> > + *
> > + * StreamID field reports a virtual device ID. To receive a virtual IRQ for a
> > + * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC.
> > + */
> 
> similar to what's provided for iommu_hw_info_arm_smmuv3, it'd be
> good to refer to a section in smmu spec for bit definitions.

Ack.
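
Something along these lines for v3, perhaps (sketch only; I still need to
double-check the exact chapter number in the spec):

 * @evt: 256-bit ARM SMMUv3 Event record, little-endian. Reported in its raw
 *       form; refer to the "Event records" chapter in the SMMUv3 architecture
 *       spec (ARM IHI 0070) for the bit definitions of each event type.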

> >  	mutex_lock(&smmu->streams_mutex);
> >  	master = arm_smmu_find_master(smmu, sid);
> >  	if (!master) {
> > @@ -1813,7 +1786,40 @@ static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
> >  		goto out_unlock;
> >  	}
> > 
> > -	ret = iommu_report_device_fault(master->dev, &fault_evt);
> > +	down_read(&master->vmaster_rwsem);
> 
> this lock is not required if event is EVTQ_1_STALL?

No. It only protects master->vmaster. Perhaps I can rewrite this
piece to exclude the lock from the EVTQ_1_STALL chunk.
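
Something like this, maybe (untested sketch, keeping the rwsem only around
the vmaster path):

	if (evt[1] & EVTQ_1_STALL) {
		/* ... build flt->prm and report as today ... */
		ret = iommu_report_device_fault(master->dev, &fault_evt);
	} else {
		down_read(&master->vmaster_rwsem);
		if (master->vmaster && !(evt[1] & EVTQ_1_S2))
			ret = arm_vmaster_report_event(master->vmaster, evt);
		else
			ret = -EFAULT;	/* Unhandled events should be pinned */
		up_read(&master->vmaster_rwsem);
	}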

> > +	if (evt[1] & EVTQ_1_STALL) {
> > +		if (evt[1] & EVTQ_1_RnW)
> > +			perm |= IOMMU_FAULT_PERM_READ;
> > +		else
> > +			perm |= IOMMU_FAULT_PERM_WRITE;
> > +
> > +		if (evt[1] & EVTQ_1_InD)
> > +			perm |= IOMMU_FAULT_PERM_EXEC;
> > +
> > +		if (evt[1] & EVTQ_1_PnU)
> > +			perm |= IOMMU_FAULT_PERM_PRIV;
> > +
> > +		flt->type = IOMMU_FAULT_PAGE_REQ;
> > +		flt->prm = (struct iommu_fault_page_request){
> > +			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
> > +			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
> > +			.perm = perm,
> > +			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
> > +		};
> > +
> > +		if (ssid_valid) {
> > +			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
> > +			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
> > +		}
> > +
> > +		ret = iommu_report_device_fault(master->dev, &fault_evt);
> > +	} else if (master->vmaster && !(evt[1] & EVTQ_1_S2)) {
> > +		ret = arm_vmaster_report_event(master->vmaster, evt);

Thanks
Nic

Patch

diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
index ec7cff33a0b1..05915f141eb8 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.h
@@ -1037,6 +1037,7 @@  struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 int arm_smmu_attach_prepare_vmaster(struct arm_smmu_attach_state *state,
 				    struct iommu_domain *domain);
 void arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state);
+int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt);
 #else
 #define arm_smmu_hw_info NULL
 #define arm_vsmmu_alloc NULL
@@ -1052,6 +1053,12 @@  static inline void
 arm_smmu_attach_commit_vmaster(struct arm_smmu_attach_state *state)
 {
 }
+
+static inline int
+arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
+{
+	return -EOPNOTSUPP;
+}
 #endif /* CONFIG_ARM_SMMU_V3_IOMMUFD */
 
 #endif /* _ARM_SMMU_V3_H */
diff --git a/include/uapi/linux/iommufd.h b/include/uapi/linux/iommufd.h
index d9319f5b7c69..164920d7f0ab 100644
--- a/include/uapi/linux/iommufd.h
+++ b/include/uapi/linux/iommufd.h
@@ -1016,9 +1016,23 @@  struct iommu_ioas_change_process {
 /**
  * enum iommu_virq_type - Virtual IRQ Type
  * @IOMMU_VIRQ_TYPE_NONE: INVALID type
+ * @IOMMU_VIRQ_TYPE_ARM_SMMUV3: ARM SMMUv3 Virtual Event
  */
 enum iommu_virq_type {
 	IOMMU_VIRQ_TYPE_NONE = 0,
+	IOMMU_VIRQ_TYPE_ARM_SMMUV3 = 1,
+};
+
+/**
+ * struct iommu_virq_arm_smmuv3 - ARM SMMUv3 Virtual IRQ
+ *                                (IOMMU_VIRQ_TYPE_ARM_SMMUV3)
+ * @evt: 256-bit ARM SMMUv3 Event record, little-endian.
+ *
+ * StreamID field reports a virtual device ID. To receive a virtual IRQ for a
+ * device, a vDEVICE must be allocated via IOMMU_VDEVICE_ALLOC.
+ */
+struct iommu_virq_arm_smmuv3 {
+	__aligned_le64 evt[4];
 };
 
 /**
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
index 3a77eca949e6..e3ef77e0bffd 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3-iommufd.c
@@ -447,4 +447,20 @@  struct iommufd_viommu *arm_vsmmu_alloc(struct device *dev,
 	return &vsmmu->core;
 }
 
+int arm_vmaster_report_event(struct arm_smmu_vmaster *vmaster, u64 *evt)
+{
+	struct iommu_virq_arm_smmuv3 virq_data =
+		*(struct iommu_virq_arm_smmuv3 *)evt;
+
+	virq_data.evt[0] &= ~EVTQ_0_SID;
+	virq_data.evt[0] |= FIELD_PREP(EVTQ_0_SID, vmaster->vsid);
+
+	virq_data.evt[0] = cpu_to_le64(virq_data.evt[0]);
+	virq_data.evt[1] = cpu_to_le64(virq_data.evt[1]);
+
+	return iommufd_viommu_report_irq(&vmaster->vsmmu->core,
+					 IOMMU_VIRQ_TYPE_ARM_SMMUV3, &virq_data,
+					 sizeof(virq_data));
+}
+
 MODULE_IMPORT_NS(IOMMUFD);
diff --git a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
index 6a6113b36360..215c2d811eb7 100644
--- a/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
+++ b/drivers/iommu/arm/arm-smmu-v3/arm-smmu-v3.c
@@ -1779,33 +1779,6 @@  static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 		return -EOPNOTSUPP;
 	}
 
-	if (!(evt[1] & EVTQ_1_STALL))
-		return -EOPNOTSUPP;
-
-	if (evt[1] & EVTQ_1_RnW)
-		perm |= IOMMU_FAULT_PERM_READ;
-	else
-		perm |= IOMMU_FAULT_PERM_WRITE;
-
-	if (evt[1] & EVTQ_1_InD)
-		perm |= IOMMU_FAULT_PERM_EXEC;
-
-	if (evt[1] & EVTQ_1_PnU)
-		perm |= IOMMU_FAULT_PERM_PRIV;
-
-	flt->type = IOMMU_FAULT_PAGE_REQ;
-	flt->prm = (struct iommu_fault_page_request) {
-		.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
-		.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
-		.perm = perm,
-		.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
-	};
-
-	if (ssid_valid) {
-		flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
-		flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
-	}
-
 	mutex_lock(&smmu->streams_mutex);
 	master = arm_smmu_find_master(smmu, sid);
 	if (!master) {
@@ -1813,7 +1786,40 @@  static int arm_smmu_handle_evt(struct arm_smmu_device *smmu, u64 *evt)
 		goto out_unlock;
 	}
 
-	ret = iommu_report_device_fault(master->dev, &fault_evt);
+	down_read(&master->vmaster_rwsem);
+	if (evt[1] & EVTQ_1_STALL) {
+		if (evt[1] & EVTQ_1_RnW)
+			perm |= IOMMU_FAULT_PERM_READ;
+		else
+			perm |= IOMMU_FAULT_PERM_WRITE;
+
+		if (evt[1] & EVTQ_1_InD)
+			perm |= IOMMU_FAULT_PERM_EXEC;
+
+		if (evt[1] & EVTQ_1_PnU)
+			perm |= IOMMU_FAULT_PERM_PRIV;
+
+		flt->type = IOMMU_FAULT_PAGE_REQ;
+		flt->prm = (struct iommu_fault_page_request){
+			.flags = IOMMU_FAULT_PAGE_REQUEST_LAST_PAGE,
+			.grpid = FIELD_GET(EVTQ_1_STAG, evt[1]),
+			.perm = perm,
+			.addr = FIELD_GET(EVTQ_2_ADDR, evt[2]),
+		};
+
+		if (ssid_valid) {
+			flt->prm.flags |= IOMMU_FAULT_PAGE_REQUEST_PASID_VALID;
+			flt->prm.pasid = FIELD_GET(EVTQ_0_SSID, evt[0]);
+		}
+
+		ret = iommu_report_device_fault(master->dev, &fault_evt);
+	} else if (master->vmaster && !(evt[1] & EVTQ_1_S2)) {
+		ret = arm_vmaster_report_event(master->vmaster, evt);
+	} else {
+		/* Unhandled events should be pinned */
+		ret = -EFAULT;
+	}
+	up_read(&master->vmaster_rwsem);
 out_unlock:
 	mutex_unlock(&smmu->streams_mutex);
 	return ret;