@@ -1869,13 +1869,6 @@
bypassed by not enabling DMAR with this option. In
this case, gfx device will use physical address for
DMA.
- forcedac [X86-64]
- With this option iommu will not optimize to look
- for io virtual address below 32-bit forcing dual
- address cycle on pci bus for cards supporting greater
- than 32-bit addressing. The default is to look
- for translation below 32-bit and if not available
- then look in the higher range.
strict [Default Off]
With this option on every unmap_single operation will
result in a hardware IOTLB flush operation as opposed
@@ -1964,6 +1957,14 @@
nobypass [PPC/POWERNV]
Disable IOMMU bypass, using IOMMU for PCI devices.
+ iommu.forcedac= [ARM64, X86] Control IOVA allocation for PCI devices.
+ Format: { "0" | "1" }
+ 0 - Try to allocate a 32-bit DMA address first, before
+ falling back to the full range if needed.
+ 1 - Allocate directly from the full usable range,
+ forcing Dual Address Cycle for PCI cards supporting
+ greater than 32-bit addressing.
+
iommu.strict= [ARM64] Configure TLB invalidation behaviour
Format: { "0" | "1" }
0 - Lazy mode.
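
As a usage illustration (not part of the patch): the new option is simply appended to the kernel command line, for example via the bootloader configuration, to make the DMA layer allocate IOVAs from the full usable range:

	iommu.forcedac=1

Omitting the option, or passing iommu.forcedac=0, keeps the default SAC-first behaviour of trying a 32-bit IOVA before falling back to the full range.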
@@ -52,6 +52,17 @@ struct iommu_dma_cookie {
};
static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
+bool iommu_dma_forcedac __read_mostly;
+
+static int __init iommu_dma_forcedac_setup(char *str)
+{
+	int ret = kstrtobool(str, &iommu_dma_forcedac);
+
+	if (!ret && iommu_dma_forcedac)
+		pr_info("Forcing DAC for PCI devices\n");
+	return ret;
+}
+early_param("iommu.forcedac", iommu_dma_forcedac_setup);
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain)
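
A note on parsing (an observation, not something this hunk changes): because the handler uses kstrtobool(), the usual boolean spellings are accepted in addition to the documented "0"/"1", e.g.

	iommu.forcedac=y
	iommu.forcedac=on

Registering it with early_param() means the flag is set before the IOMMU DMA layer hands out any IOVAs, and it defaults to false when the option is absent.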
@@ -444,7 +455,7 @@ static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
	dma_limit = min(dma_limit, (u64)domain->geometry.aperture_end);
	/* Try to get PCI devices a SAC address */
-	if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
+	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);
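
For context, a sketch of the resulting allocation policy, assuming the remainder of iommu_dma_alloc_iova() matches mainline at the time of this change (the fallback below is outside the hunk above):

	/* Sketch only: SAC-first unless iommu.forcedac=1 */
	if (dma_limit > DMA_BIT_MASK(32) && !iommu_dma_forcedac && dev_is_pci(dev))
		iova = alloc_iova_fast(iovad, iova_len,
				       DMA_BIT_MASK(32) >> shift, false);

	if (!iova)	/* forcedac, non-PCI device, or 32-bit space exhausted */
		iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift, true);

With forcedac enabled, the first branch is skipped entirely, so capable PCI devices get IOVAs from the full usable range straight away, at the cost of Dual Address Cycle on the bus.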
@@ -360,7 +360,6 @@ int intel_iommu_enabled = 0;
EXPORT_SYMBOL_GPL(intel_iommu_enabled);
static int dmar_map_gfx = 1;
-static int dmar_forcedac;
static int intel_iommu_strict;
static int intel_iommu_superpage = 1;
static int iommu_identity_mapping;
@@ -451,8 +450,8 @@ static int __init intel_iommu_setup(char *str)
			dmar_map_gfx = 0;
			pr_info("Disable GFX device mapping\n");
		} else if (!strncmp(str, "forcedac", 8)) {
-			pr_info("Forcing DAC for PCI devices\n");
-			dmar_forcedac = 1;
+			pr_warn("intel_iommu=forcedac deprecated; use iommu.forcedac instead\n");
+			iommu_dma_forcedac = true;
		} else if (!strncmp(str, "strict", 6)) {
			pr_info("Disable batched IOTLB flush\n");
			intel_iommu_strict = 1;
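
With the shim above, the legacy Intel-specific spelling keeps working but now only feeds the common flag. For illustration (not part of the patch), on x86 the following two command lines should behave the same, the first additionally printing the deprecation warning:

	intel_iommu=on,forcedac
	intel_iommu=on iommu.forcedac=1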
@@ -40,6 +40,8 @@ void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
void iommu_dma_free_cpu_cached_iovas(unsigned int cpu,
struct iommu_domain *domain);
+extern bool iommu_dma_forcedac;
+
#else /* CONFIG_IOMMU_DMA */
struct iommu_domain;