diff --git a/drivers/iommu/intel/dmar.c b/drivers/iommu/intel/dmar.c
--- a/drivers/iommu/intel/dmar.c
+++ b/drivers/iommu/intel/dmar.c
@@ -1267,7 +1267,8 @@ static void qi_dump_fault(struct intel_iommu *iommu, u32 fault)
(unsigned long long)desc->qw1);
}

-static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
+static int qi_check_fault(struct intel_iommu *iommu, int index,
+ int wait_index, u32 *fsts)
{
u32 fault;
int head, tail;
@@ -1278,8 +1279,12 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
return -EAGAIN;

fault = readl(iommu->reg + DMAR_FSTS_REG);
- if (fault & (DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE))
+ fault &= DMA_FSTS_IQE | DMA_FSTS_ITE | DMA_FSTS_ICE;
+ if (fault) {
+ if (fsts)
+ *fsts |= fault;
qi_dump_fault(iommu, fault);
+ }

/*
* If IQE happens, the head points to the descriptor associated
@@ -1342,9 +1347,11 @@ static int qi_check_fault(struct intel_iommu *iommu, int index, int wait_index)
* time, a wait descriptor will be appended to each submission to ensure
* hardware has completed the invalidation before return. Wait descriptors
* can be part of the submission but it will not be polled for completion.
+ * Callers interested in the QI faults that occur while their requests are
+ * handled can pass a non-NULL @fault; the faults are accumulated there.
*/
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
- unsigned int count, unsigned long options)
+ unsigned int count, unsigned long options, u32 *fault)
{
struct q_inval *qi = iommu->qi;
s64 devtlb_start_ktime = 0;
@@ -1430,7 +1437,7 @@ int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
* a deadlock where the interrupt context can wait indefinitely
* for free slots in the queue.
*/
- rc = qi_check_fault(iommu, index, wait_index);
+ rc = qi_check_fault(iommu, index, wait_index, fault);
if (rc)
break;
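
For illustration, here is a minimal sketch of how a caller could consume the new @fault argument; the wrapper name and the errno mapping are hypothetical, only qi_submit_sync() and the DMA_FSTS_* bits come from this patch:

/*
 * Hypothetical caller (illustration only, not part of this patch).
 * @fault must start out as zero: qi_check_fault() only ORs bits into
 * it, so the recorded faults survive the -EAGAIN retries inside
 * qi_submit_sync().
 */
static int example_submit_reporting_faults(struct intel_iommu *iommu,
					   struct qi_desc *desc)
{
	u32 fault = 0;
	int ret;

	ret = qi_submit_sync(iommu, desc, 1, 0, &fault);
	if (ret)
		return ret;

	/* Only the DMA_FSTS_IQE/ITE/ICE bits are ever recorded here. */
	if (fault & DMA_FSTS_ITE)
		return -ETIMEDOUT;

	return fault ? -EIO : 0;
}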
@@ -1476,7 +1483,7 @@ void qi_global_iec(struct intel_iommu *iommu)
desc.qw3 = 0;

/* should never fail */
- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
@@ -1490,7 +1497,7 @@ void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid, u8 fm,
desc.qw2 = 0;
desc.qw3 = 0;

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
@@ -1514,7 +1521,7 @@ void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
desc.qw2 = 0;
desc.qw3 = 0;

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
@@ -1545,7 +1552,7 @@ void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw2 = 0;
desc.qw3 = 0;

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

/* PASID-based IOTLB invalidation */
@@ -1586,7 +1593,7 @@ void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
QI_EIOTLB_AM(mask);
}

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

/* PASID-based device IOTLB Invalidate */
@@ -1639,7 +1646,7 @@ void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
desc.qw1 |= QI_DEV_EIOTLB_SIZE;
}

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
@@ -1649,7 +1656,7 @@ void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did,
desc.qw0 = QI_PC_PASID(pasid) | QI_PC_DID(did) |
QI_PC_GRAN(granu) | QI_PC_TYPE;

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

/*
diff --git a/drivers/iommu/intel/iommu.h b/drivers/iommu/intel/iommu.h
--- a/drivers/iommu/intel/iommu.h
+++ b/drivers/iommu/intel/iommu.h
@@ -881,7 +881,7 @@ void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
u32 pasid);
int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
- unsigned int count, unsigned long options);
+ unsigned int count, unsigned long options, u32 *fault);

/*
* Options used in qi_submit_sync:
* QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
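
All existing call sites below pass NULL and keep their old behavior. A caller that combines options with fault reporting might look like the following sketch; the function and its pr_warn() reporting are hypothetical, while qi_submit_sync(), QI_OPT_WAIT_DRAIN and struct qi_desc come from the tree:

/*
 * Hypothetical illustration (not part of this patch): submit a batch
 * of descriptors with PRQ-drain semantics and log any queue faults
 * observed while the batch was handled.
 */
static void example_batched_submit(struct intel_iommu *iommu,
				   struct qi_desc *desc, unsigned int count)
{
	u32 fault = 0;

	if (qi_submit_sync(iommu, desc, count, QI_OPT_WAIT_DRAIN, &fault))
		pr_warn("QI submission failed\n");
	else if (fault)
		pr_warn("QI fault(s) during invalidation: %#x\n", fault);
}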
diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c
--- a/drivers/iommu/intel/irq_remapping.c
+++ b/drivers/iommu/intel/irq_remapping.c
@@ -153,7 +153,7 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
desc.qw2 = 0;
desc.qw3 = 0;

- return qi_submit_sync(iommu, &desc, 1, 0);
+ return qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

static int modify_irte(struct irq_2_iommu *irq_iommu,
diff --git a/drivers/iommu/intel/pasid.c b/drivers/iommu/intel/pasid.c
--- a/drivers/iommu/intel/pasid.c
+++ b/drivers/iommu/intel/pasid.c
@@ -467,7 +467,7 @@ pasid_cache_invalidation_with_pasid(struct intel_iommu *iommu,
desc.qw2 = 0;
desc.qw3 = 0;

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

static void
diff --git a/drivers/iommu/intel/svm.c b/drivers/iommu/intel/svm.c
--- a/drivers/iommu/intel/svm.c
+++ b/drivers/iommu/intel/svm.c
@@ -543,7 +543,7 @@ void intel_drain_pasid_prq(struct device *dev, u32 pasid)
QI_DEV_IOTLB_PFSID(info->pfsid);
qi_retry:
reinit_completion(&iommu->prq_complete);
- qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN);
+ qi_submit_sync(iommu, desc, 3, QI_OPT_WAIT_DRAIN, NULL);
if (readl(iommu->reg + DMAR_PRS_REG) & DMA_PRS_PRO) {
wait_for_completion(&iommu->prq_complete);
goto qi_retry;
@@ -646,7 +646,7 @@ static void handle_bad_prq_event(struct intel_iommu *iommu,
desc.qw3 = 0;
}

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}

static irqreturn_t prq_event_thread(int irq, void *d)
@@ -811,7 +811,7 @@ int intel_svm_page_response(struct device *dev,
ktime_to_ns(ktime_get()) - prm->private_data[0]);
}

- qi_submit_sync(iommu, &desc, 1, 0);
+ qi_submit_sync(iommu, &desc, 1, 0, NULL);
}
out:
return ret;