@@ -245,6 +245,7 @@ void __init plat_swiotlb_setup(void)
panic("%s: Failed to allocate %zu bytes align=%lx\n",
__func__, swiotlbsize, PAGE_SIZE);
- if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs, 1) == -ENOMEM)
+ if (swiotlb_init_with_tbl(octeon_swiotlb, swiotlb_nslabs,
+ SWIOTLB_LO, 1) == -ENOMEM)
panic("Cannot allocate SWIOTLB buffer");
}
@@ -20,7 +20,7 @@ void __init swiotlb_detect_4g(void)
static int __init check_swiotlb_enabled(void)
{
if (ppc_swiotlb_enable)
- swiotlb_print_info();
+ swiotlb_print_info(SWIOTLB_LO);
else
swiotlb_exit();
@@ -52,7 +52,7 @@ void __init svm_swiotlb_init(void)
bytes = io_tlb_nslabs << IO_TLB_SHIFT;
vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, false))
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs, SWIOTLB_LO, false))
return;
if (io_tlb_start[SWIOTLB_LO])
@@ -67,12 +67,15 @@ void __init pci_swiotlb_init(void)
void __init pci_swiotlb_late_init(void)
{
+ int i;
+
/* An IOMMU turned us off. */
if (!swiotlb)
swiotlb_exit();
else {
printk(KERN_INFO "PCI-DMA: "
"Using software bounce buffering for IO (SWIOTLB)\n");
- swiotlb_print_info();
+ for (i = 0; i < swiotlb_nr; i++)
+ swiotlb_print_info(i);
}
}
@@ -57,7 +57,7 @@ static void sta2x11_new_instance(struct pci_dev *pdev)
int size = STA2X11_SWIOTLB_SIZE;
/* First instance: register your own swiotlb area */
dev_info(&pdev->dev, "Using SWIOTLB (size %i)\n", size);
- if (swiotlb_late_init_with_default_size(size))
+ if (swiotlb_late_init_with_default_size(size, SWIOTLB_LO))
dev_emerg(&pdev->dev, "init swiotlb failed\n");
}
list_add(&instance->list, &sta2x11_instance_list);
@@ -42,10 +42,10 @@ static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
max_order = MAX_ORDER;
#ifdef CONFIG_SWIOTLB
- if (swiotlb_nr_tbl()) {
+ if (swiotlb_nr_tbl(SWIOTLB_LO)) {
unsigned int max_segment;
- max_segment = swiotlb_max_segment();
+ max_segment = swiotlb_max_segment(SWIOTLB_LO);
if (max_segment) {
max_segment = max_t(unsigned int, max_segment,
PAGE_SIZE) >> PAGE_SHIFT;
@@ -118,7 +118,7 @@ static inline unsigned int i915_sg_page_sizes(struct scatterlist *sg)
static inline unsigned int i915_sg_segment_size(void)
{
- unsigned int size = swiotlb_max_segment();
+ unsigned int size = swiotlb_max_segment(SWIOTLB_LO);
if (size == 0)
size = UINT_MAX;
@@ -321,7 +321,7 @@ nouveau_ttm_init(struct nouveau_drm *drm)
}
#if IS_ENABLED(CONFIG_SWIOTLB) && IS_ENABLED(CONFIG_X86)
- need_swiotlb = !!swiotlb_nr_tbl();
+ need_swiotlb = !!swiotlb_nr_tbl(SWIOTLB_LO);
#endif
ret = ttm_bo_device_init(&drm->ttm.bdev, &nouveau_bo_driver,
@@ -4582,7 +4582,7 @@ int sdhci_setup_host(struct sdhci_host *host)
mmc->max_segs = SDHCI_MAX_SEGS;
} else if (host->flags & SDHCI_USE_SDMA) {
mmc->max_segs = 1;
- if (swiotlb_max_segment()) {
+ if (swiotlb_max_segment(SWIOTLB_LO)) {
unsigned int max_req_size = (1 << IO_TLB_SHIFT) *
IO_TLB_SEGSIZE;
mmc->max_req_size = min(mmc->max_req_size,
@@ -693,7 +693,7 @@ static int pcifront_connect_and_init_dma(struct pcifront_device *pdev)
spin_unlock(&pcifront_dev_lock);
- if (!err && !swiotlb_nr_tbl()) {
+ if (!err && !swiotlb_nr_tbl(SWIOTLB_LO)) {
err = pci_xen_swiotlb_init_late();
if (err)
dev_err(&pdev->xdev->dev, "Could not setup SWIOTLB!\n");
@@ -184,7 +184,7 @@ int __ref xen_swiotlb_init(int verbose, bool early)
enum xen_swiotlb_err m_ret = XEN_SWIOTLB_UNKNOWN;
unsigned int repeat = 3;
- xen_io_tlb_nslabs = swiotlb_nr_tbl();
+ xen_io_tlb_nslabs = swiotlb_nr_tbl(SWIOTLB_LO);
retry:
bytes = xen_set_nslabs(xen_io_tlb_nslabs);
order = get_order(xen_io_tlb_nslabs << IO_TLB_SHIFT);
@@ -245,16 +245,17 @@ int __ref xen_swiotlb_init(int verbose, bool early)
}
if (early) {
if (swiotlb_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs,
- verbose))
+ SWIOTLB_LO, verbose))
panic("Cannot allocate SWIOTLB buffer");
rc = 0;
} else
- rc = swiotlb_late_init_with_tbl(xen_io_tlb_start, xen_io_tlb_nslabs);
+ rc = swiotlb_late_init_with_tbl(xen_io_tlb_start,
+ xen_io_tlb_nslabs, SWIOTLB_LO);
end:
xen_io_tlb_end = xen_io_tlb_start + bytes;
if (!rc)
- swiotlb_set_max_segment(PAGE_SIZE);
+ swiotlb_set_max_segment(PAGE_SIZE, SWIOTLB_LO);
return rc;
error:
@@ -43,11 +43,14 @@ extern int swiotlb_nr;
#define IO_TLB_DEFAULT_SIZE (64UL<<20)
extern void swiotlb_init(int verbose);
-int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose);
-extern unsigned long swiotlb_nr_tbl(void);
+int swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
+ enum swiotlb_t type, int verbose);
+extern unsigned long swiotlb_nr_tbl(enum swiotlb_t type);
unsigned long swiotlb_size_or_default(void);
-extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs);
-extern int swiotlb_late_init_with_default_size(size_t default_size);
+extern int swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs,
+ enum swiotlb_t type);
+extern int swiotlb_late_init_with_default_size(size_t default_size,
+ enum swiotlb_t type);
extern void __init swiotlb_update_mem_attributes(void);
/*
@@ -83,8 +86,13 @@ extern phys_addr_t io_tlb_start[], io_tlb_end[];
static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
- return paddr >= io_tlb_start[SWIOTLB_LO] &&
- paddr < io_tlb_end[SWIOTLB_LO];
+ int i;
+
+ for (i = 0; i < swiotlb_nr; i++)
+ if (paddr >= io_tlb_start[i] && paddr < io_tlb_end[i])
+ return true;
+
+ return false;
}
static inline int swiotlb_get_type(phys_addr_t paddr)
@@ -99,7 +107,7 @@ static inline int swiotlb_get_type(phys_addr_t paddr)
}
void __init swiotlb_exit(void);
-unsigned int swiotlb_max_segment(void);
+unsigned int swiotlb_max_segment(enum swiotlb_t type);
size_t swiotlb_max_mapping_size(struct device *dev);
bool is_swiotlb_active(void);
void __init swiotlb_adjust_size(unsigned long new_size);
@@ -112,7 +120,7 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
static inline void swiotlb_exit(void)
{
}
-static inline unsigned int swiotlb_max_segment(void)
+static inline unsigned int swiotlb_max_segment(enum swiotlb_t type)
{
return 0;
}
@@ -131,7 +139,7 @@ static inline void swiotlb_adjust_size(unsigned long new_size)
}
#endif /* CONFIG_SWIOTLB */
-extern void swiotlb_print_info(void);
-extern void swiotlb_set_max_segment(unsigned int);
+extern void swiotlb_print_info(enum swiotlb_t type);
+extern void swiotlb_set_max_segment(unsigned int val, enum swiotlb_t type);
#endif /* __LINUX_SWIOTLB_H */
@@ -111,6 +111,11 @@ static int late_alloc;
int swiotlb_nr = 1;
+static const char * const swiotlb_name[] = {
+ "Low Mem",
+ "High Mem"
+};
+
static int __init
setup_io_tlb_npages(char *str)
{
@@ -121,11 +126,25 @@ setup_io_tlb_npages(char *str)
}
if (*str == ',')
++str;
+
+ if (isdigit(*str)) {
+ io_tlb_nslabs[SWIOTLB_HI] = simple_strtoul(str, &str, 0);
+ /* avoid tail segment of size < IO_TLB_SEGSIZE */
+ io_tlb_nslabs[SWIOTLB_HI] = ALIGN(io_tlb_nslabs[SWIOTLB_HI], IO_TLB_SEGSIZE);
+
+ swiotlb_nr = 2;
+ }
+
+ if (*str == ',')
+ ++str;
+
if (!strcmp(str, "force")) {
swiotlb_force = SWIOTLB_FORCE;
} else if (!strcmp(str, "noforce")) {
swiotlb_force = SWIOTLB_NO_FORCE;
io_tlb_nslabs[SWIOTLB_LO] = 1;
+ if (swiotlb_nr > 1)
+ io_tlb_nslabs[SWIOTLB_HI] = 1;
}
return 0;
@@ -134,24 +153,24 @@ early_param("swiotlb", setup_io_tlb_npages);
static bool no_iotlb_memory[SWIOTLB_MAX];
-unsigned long swiotlb_nr_tbl(void)
+unsigned long swiotlb_nr_tbl(enum swiotlb_t type)
{
- return unlikely(no_iotlb_memory[SWIOTLB_LO]) ? 0 : io_tlb_nslabs[SWIOTLB_LO];
+ return unlikely(no_iotlb_memory[type]) ? 0 : io_tlb_nslabs[type];
}
EXPORT_SYMBOL_GPL(swiotlb_nr_tbl);
-unsigned int swiotlb_max_segment(void)
+unsigned int swiotlb_max_segment(enum swiotlb_t type)
{
- return unlikely(no_iotlb_memory[SWIOTLB_LO]) ? 0 : max_segment[SWIOTLB_LO];
+ return unlikely(no_iotlb_memory[type]) ? 0 : max_segment[type];
}
EXPORT_SYMBOL_GPL(swiotlb_max_segment);
-void swiotlb_set_max_segment(unsigned int val)
+void swiotlb_set_max_segment(unsigned int val, enum swiotlb_t type)
{
if (swiotlb_force == SWIOTLB_FORCE)
- max_segment[SWIOTLB_LO] = 1;
+ max_segment[type] = 1;
else
- max_segment[SWIOTLB_LO] = rounddown(val, PAGE_SIZE);
+ max_segment[type] = rounddown(val, PAGE_SIZE);
}
unsigned long swiotlb_size_or_default(void)
@@ -181,18 +200,18 @@ void __init swiotlb_adjust_size(unsigned long new_size)
}
}
-void swiotlb_print_info(void)
+void swiotlb_print_info(enum swiotlb_t type)
{
- unsigned long bytes = io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT;
+ unsigned long bytes = io_tlb_nslabs[type] << IO_TLB_SHIFT;
- if (no_iotlb_memory[SWIOTLB_LO]) {
- pr_warn("No low mem\n");
+ if (no_iotlb_memory[type]) {
+ pr_warn("No low mem with %s\n", swiotlb_name[type]);
return;
}
- pr_info("mapped [mem %pa-%pa] (%luMB)\n",
- &io_tlb_start[SWIOTLB_LO], &io_tlb_end[SWIOTLB_LO],
- bytes >> 20);
+ pr_info("%s mapped [mem %pa-%pa] (%luMB)\n",
+ swiotlb_name[type],
+ &io_tlb_start[type], &io_tlb_end[type], bytes >> 20);
}
/*
@@ -205,88 +224,104 @@ void __init swiotlb_update_mem_attributes(void)
{
void *vaddr;
unsigned long bytes;
+ int i;
- if (no_iotlb_memory[SWIOTLB_LO] || late_alloc)
- return;
+ for (i = 0; i < swiotlb_nr; i++) {
+ if (no_iotlb_memory[i] || late_alloc)
+ continue;
- vaddr = phys_to_virt(io_tlb_start[SWIOTLB_LO]);
- bytes = PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT);
- set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
- memset(vaddr, 0, bytes);
+ vaddr = phys_to_virt(io_tlb_start[i]);
+ bytes = PAGE_ALIGN(io_tlb_nslabs[i] << IO_TLB_SHIFT);
+ set_memory_decrypted((unsigned long)vaddr, bytes >> PAGE_SHIFT);
+ memset(vaddr, 0, bytes);
+ }
}
-int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
+int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs,
+ enum swiotlb_t type, int verbose)
{
unsigned long i, bytes;
size_t alloc_size;
bytes = nslabs << IO_TLB_SHIFT;
- io_tlb_nslabs[SWIOTLB_LO] = nslabs;
- io_tlb_start[SWIOTLB_LO] = __pa(tlb);
- io_tlb_end[SWIOTLB_LO] = io_tlb_start[SWIOTLB_LO] + bytes;
+ io_tlb_nslabs[type] = nslabs;
+ io_tlb_start[type] = __pa(tlb);
+ io_tlb_end[type] = io_tlb_start[type] + bytes;
/*
* Allocate and initialize the free list array. This array is used
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- alloc_size = PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] * sizeof(int));
- io_tlb_list[SWIOTLB_LO] = memblock_alloc(alloc_size, PAGE_SIZE);
- if (!io_tlb_list[SWIOTLB_LO])
+ alloc_size = PAGE_ALIGN(io_tlb_nslabs[type] * sizeof(int));
+ io_tlb_list[type] = memblock_alloc(alloc_size, PAGE_SIZE);
+ if (!io_tlb_list[type])
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE);
- alloc_size = PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] * sizeof(phys_addr_t));
- io_tlb_orig_addr[SWIOTLB_LO] = memblock_alloc(alloc_size, PAGE_SIZE);
- if (!io_tlb_orig_addr[SWIOTLB_LO])
+ alloc_size = PAGE_ALIGN(io_tlb_nslabs[type] * sizeof(phys_addr_t));
+ io_tlb_orig_addr[type] = memblock_alloc(alloc_size, PAGE_SIZE);
+ if (!io_tlb_orig_addr[type])
panic("%s: Failed to allocate %zu bytes align=0x%lx\n",
__func__, alloc_size, PAGE_SIZE);
- for (i = 0; i < io_tlb_nslabs[SWIOTLB_LO]; i++) {
- io_tlb_list[SWIOTLB_LO][i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[SWIOTLB_LO][i] = INVALID_PHYS_ADDR;
+ for (i = 0; i < io_tlb_nslabs[type]; i++) {
+ io_tlb_list[type][i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[type][i] = INVALID_PHYS_ADDR;
}
- io_tlb_index[SWIOTLB_LO] = 0;
- no_iotlb_memory[SWIOTLB_LO] = false;
+ io_tlb_index[type] = 0;
+ no_iotlb_memory[type] = false;
if (verbose)
- swiotlb_print_info();
+ swiotlb_print_info(type);
- swiotlb_set_max_segment(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT);
+ swiotlb_set_max_segment(io_tlb_nslabs[type] << IO_TLB_SHIFT, type);
return 0;
}
-/*
- * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the DMA API.
- */
-void __init
-swiotlb_init(int verbose)
+static void __init
+swiotlb_init_type(enum swiotlb_t type, int verbose)
{
size_t default_size = IO_TLB_DEFAULT_SIZE;
unsigned char *vstart;
unsigned long bytes;
- if (!io_tlb_nslabs[SWIOTLB_LO]) {
- io_tlb_nslabs[SWIOTLB_LO] = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs[SWIOTLB_LO] = ALIGN(io_tlb_nslabs[SWIOTLB_LO], IO_TLB_SEGSIZE);
+ if (!io_tlb_nslabs[type]) {
+ io_tlb_nslabs[type] = (default_size >> IO_TLB_SHIFT);
+ io_tlb_nslabs[type] = ALIGN(io_tlb_nslabs[type], IO_TLB_SEGSIZE);
}
- bytes = io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT;
+ bytes = io_tlb_nslabs[type] << IO_TLB_SHIFT;
+
+ if (type == SWIOTLB_LO)
+ vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
+ else
+ vstart = memblock_alloc(PAGE_ALIGN(bytes), PAGE_SIZE);
- /* Get IO TLB memory from the low pages */
- vstart = memblock_alloc_low(PAGE_ALIGN(bytes), PAGE_SIZE);
- if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs[SWIOTLB_LO], verbose))
+ if (vstart && !swiotlb_init_with_tbl(vstart, io_tlb_nslabs[type], type, verbose))
return;
- if (io_tlb_start[SWIOTLB_LO]) {
- memblock_free_early(io_tlb_start[SWIOTLB_LO],
- PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT));
- io_tlb_start[SWIOTLB_LO] = 0;
+ if (io_tlb_start[type]) {
+ memblock_free_early(io_tlb_start[type],
+ PAGE_ALIGN(io_tlb_nslabs[type] << IO_TLB_SHIFT));
+ io_tlb_start[type] = 0;
}
- pr_warn("Cannot allocate buffer");
- no_iotlb_memory[SWIOTLB_LO] = true;
+ pr_warn("Cannot allocate buffer %s\n", swiotlb_name[type]);
+ no_iotlb_memory[type] = true;
+}
+
+/*
+ * Statically reserve bounce buffer space and initialize bounce buffer data
+ * structures for the software IO TLB used to implement the DMA API.
+ */
+void __init
+swiotlb_init(int verbose)
+{
+ int i;
+
+ for (i = 0; i < swiotlb_nr; i++)
+ swiotlb_init_type(i, verbose);
}
/*
@@ -295,67 +330,68 @@ swiotlb_init(int verbose)
* This should be just like above, but with some error catching.
*/
int
-swiotlb_late_init_with_default_size(size_t default_size)
+swiotlb_late_init_with_default_size(size_t default_size, enum swiotlb_t type)
{
- unsigned long bytes, req_nslabs = io_tlb_nslabs[SWIOTLB_LO];
+ unsigned long bytes, req_nslabs = io_tlb_nslabs[type];
unsigned char *vstart = NULL;
unsigned int order;
int rc = 0;
+ gfp_t gfp_mask = (type == SWIOTLB_LO) ? GFP_DMA | __GFP_NOWARN :
+ GFP_KERNEL | __GFP_NOWARN;
- if (!io_tlb_nslabs[SWIOTLB_LO]) {
- io_tlb_nslabs[SWIOTLB_LO] = (default_size >> IO_TLB_SHIFT);
- io_tlb_nslabs[SWIOTLB_LO] = ALIGN(io_tlb_nslabs[SWIOTLB_LO], IO_TLB_SEGSIZE);
+ if (!io_tlb_nslabs[type]) {
+ io_tlb_nslabs[type] = (default_size >> IO_TLB_SHIFT);
+ io_tlb_nslabs[type] = ALIGN(io_tlb_nslabs[type], IO_TLB_SEGSIZE);
}
/*
* Get IO TLB memory from the low pages
*/
- order = get_order(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT);
- io_tlb_nslabs[SWIOTLB_LO] = SLABS_PER_PAGE << order;
- bytes = io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT;
+ order = get_order(io_tlb_nslabs[type] << IO_TLB_SHIFT);
+ io_tlb_nslabs[type] = SLABS_PER_PAGE << order;
+ bytes = io_tlb_nslabs[type] << IO_TLB_SHIFT;
while ((SLABS_PER_PAGE << order) > IO_TLB_MIN_SLABS) {
- vstart = (void *)__get_free_pages(GFP_DMA | __GFP_NOWARN,
- order);
+ vstart = (void *)__get_free_pages(gfp_mask, order);
if (vstart)
break;
order--;
}
if (!vstart) {
- io_tlb_nslabs[SWIOTLB_LO] = req_nslabs;
+ io_tlb_nslabs[type] = req_nslabs;
return -ENOMEM;
}
if (order != get_order(bytes)) {
pr_warn("only able to allocate %ld MB\n",
(PAGE_SIZE << order) >> 20);
- io_tlb_nslabs[SWIOTLB_LO] = SLABS_PER_PAGE << order;
+ io_tlb_nslabs[type] = SLABS_PER_PAGE << order;
}
- rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs[SWIOTLB_LO]);
+ rc = swiotlb_late_init_with_tbl(vstart, io_tlb_nslabs[type], type);
if (rc)
free_pages((unsigned long)vstart, order);
return rc;
}
-static void swiotlb_cleanup(void)
+static void swiotlb_cleanup(enum swiotlb_t type)
{
- io_tlb_end[SWIOTLB_LO] = 0;
- io_tlb_start[SWIOTLB_LO] = 0;
- io_tlb_nslabs[SWIOTLB_LO] = 0;
- max_segment[SWIOTLB_LO] = 0;
+ io_tlb_end[type] = 0;
+ io_tlb_start[type] = 0;
+ io_tlb_nslabs[type] = 0;
+ max_segment[type] = 0;
}
int
-swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
+swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs, enum swiotlb_t type)
{
unsigned long i, bytes;
bytes = nslabs << IO_TLB_SHIFT;
- io_tlb_nslabs[SWIOTLB_LO] = nslabs;
- io_tlb_start[SWIOTLB_LO] = virt_to_phys(tlb);
- io_tlb_end[SWIOTLB_LO] = io_tlb_start[SWIOTLB_LO] + bytes;
+ io_tlb_nslabs[type] = nslabs;
+ io_tlb_start[type] = virt_to_phys(tlb);
+ io_tlb_end[type] = io_tlb_start[type] + bytes;
set_memory_decrypted((unsigned long)tlb, bytes >> PAGE_SHIFT);
memset(tlb, 0, bytes);
@@ -365,63 +401,67 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
* to find contiguous free memory regions of size up to IO_TLB_SEGSIZE
* between io_tlb_start and io_tlb_end.
*/
- io_tlb_list[SWIOTLB_LO] = (unsigned int *)__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs[SWIOTLB_LO] * sizeof(int)));
- if (!io_tlb_list[SWIOTLB_LO])
+ io_tlb_list[type] = (unsigned int *)__get_free_pages(GFP_KERNEL,
+ get_order(io_tlb_nslabs[type] * sizeof(int)));
+ if (!io_tlb_list[type])
goto cleanup3;
- io_tlb_orig_addr[SWIOTLB_LO] = (phys_addr_t *)
+ io_tlb_orig_addr[type] = (phys_addr_t *)
__get_free_pages(GFP_KERNEL,
- get_order(io_tlb_nslabs[SWIOTLB_LO] *
+ get_order(io_tlb_nslabs[type] *
sizeof(phys_addr_t)));
- if (!io_tlb_orig_addr[SWIOTLB_LO])
+ if (!io_tlb_orig_addr[type])
goto cleanup4;
- for (i = 0; i < io_tlb_nslabs[SWIOTLB_LO]; i++) {
- io_tlb_list[SWIOTLB_LO][i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
- io_tlb_orig_addr[SWIOTLB_LO][i] = INVALID_PHYS_ADDR;
+ for (i = 0; i < io_tlb_nslabs[type]; i++) {
+ io_tlb_list[type][i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+ io_tlb_orig_addr[type][i] = INVALID_PHYS_ADDR;
}
- io_tlb_index[SWIOTLB_LO] = 0;
- no_iotlb_memory[SWIOTLB_LO] = false;
+ io_tlb_index[type] = 0;
+ no_iotlb_memory[type] = false;
- swiotlb_print_info();
+ swiotlb_print_info(type);
late_alloc = 1;
- swiotlb_set_max_segment(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT);
+ swiotlb_set_max_segment(io_tlb_nslabs[type] << IO_TLB_SHIFT, type);
return 0;
cleanup4:
- free_pages((unsigned long)io_tlb_list[SWIOTLB_LO], get_order(io_tlb_nslabs[SWIOTLB_LO] *
+ free_pages((unsigned long)io_tlb_list[type], get_order(io_tlb_nslabs[type] *
sizeof(int)));
- io_tlb_list[SWIOTLB_LO] = NULL;
+ io_tlb_list[type] = NULL;
cleanup3:
- swiotlb_cleanup();
+ swiotlb_cleanup(type);
return -ENOMEM;
}
void __init swiotlb_exit(void)
{
- if (!io_tlb_orig_addr[SWIOTLB_LO])
- return;
+ int i;
- if (late_alloc) {
- free_pages((unsigned long)io_tlb_orig_addr[SWIOTLB_LO],
- get_order(io_tlb_nslabs[SWIOTLB_LO] * sizeof(phys_addr_t)));
- free_pages((unsigned long)io_tlb_list[SWIOTLB_LO],
- get_order(io_tlb_nslabs[SWIOTLB_LO] * sizeof(int)));
- free_pages((unsigned long)phys_to_virt(io_tlb_start[SWIOTLB_LO]),
- get_order(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT));
- } else {
- memblock_free_late(__pa(io_tlb_orig_addr[SWIOTLB_LO]),
- PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] * sizeof(phys_addr_t)));
- memblock_free_late(__pa(io_tlb_list[SWIOTLB_LO]),
- PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] * sizeof(int)));
- memblock_free_late(io_tlb_start[SWIOTLB_LO],
- PAGE_ALIGN(io_tlb_nslabs[SWIOTLB_LO] << IO_TLB_SHIFT));
+ for (i = 0; i < swiotlb_nr; i++) {
+ if (!io_tlb_orig_addr[i])
+ continue;
+
+ if (late_alloc) {
+ free_pages((unsigned long)io_tlb_orig_addr[i],
+ get_order(io_tlb_nslabs[i] * sizeof(phys_addr_t)));
+ free_pages((unsigned long)io_tlb_list[i],
+ get_order(io_tlb_nslabs[i] * sizeof(int)));
+ free_pages((unsigned long)phys_to_virt(io_tlb_start[i]),
+ get_order(io_tlb_nslabs[i] << IO_TLB_SHIFT));
+ } else {
+ memblock_free_late(__pa(io_tlb_orig_addr[i]),
+ PAGE_ALIGN(io_tlb_nslabs[i] * sizeof(phys_addr_t)));
+ memblock_free_late(__pa(io_tlb_list[i]),
+ PAGE_ALIGN(io_tlb_nslabs[i] * sizeof(int)));
+ memblock_free_late(io_tlb_start[i],
+ PAGE_ALIGN(io_tlb_nslabs[i] << IO_TLB_SHIFT));
+ }
+ swiotlb_cleanup(i);
}
- swiotlb_cleanup();
}
/*
@@ -468,7 +508,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
size_t mapping_size, size_t alloc_size,
enum dma_data_direction dir, unsigned long attrs)
{
- dma_addr_t tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start[SWIOTLB_LO]);
+ dma_addr_t tbl_dma_addr;
unsigned long flags;
phys_addr_t tlb_addr;
unsigned int nslots, stride, index, wrap;
@@ -477,8 +517,16 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
unsigned long offset_slots;
unsigned long max_slots;
unsigned long tmp_io_tlb_used;
+ unsigned long dma_mask = min_not_zero(*hwdev->dma_mask,
+ hwdev->bus_dma_limit);
+ int type;
- if (no_iotlb_memory[SWIOTLB_LO])
+ if (swiotlb_nr > 1 && dma_mask == DMA_BIT_MASK(64))
+ type = SWIOTLB_HI;
+ else
+ type = SWIOTLB_LO;
+
+ if (no_iotlb_memory[type])
panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
if (mem_encrypt_active())
@@ -490,6 +538,8 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
return (phys_addr_t)DMA_MAPPING_ERROR;
}
+ tbl_dma_addr = phys_to_dma_unencrypted(hwdev, io_tlb_start[type]);
+
mask = dma_get_seg_boundary(hwdev);
tbl_dma_addr &= mask;
@@ -521,11 +571,11 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
*/
spin_lock_irqsave(&io_tlb_lock, flags);
- if (unlikely(nslots > io_tlb_nslabs[SWIOTLB_LO] - io_tlb_used[SWIOTLB_LO]))
+ if (unlikely(nslots > io_tlb_nslabs[type] - io_tlb_used[type]))
goto not_found;
- index = ALIGN(io_tlb_index[SWIOTLB_LO], stride);
- if (index >= io_tlb_nslabs[SWIOTLB_LO])
+ index = ALIGN(io_tlb_index[type], stride);
+ if (index >= io_tlb_nslabs[type])
index = 0;
wrap = index;
@@ -533,7 +583,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
while (iommu_is_span_boundary(index, nslots, offset_slots,
max_slots)) {
index += stride;
- if (index >= io_tlb_nslabs[SWIOTLB_LO])
+ if (index >= io_tlb_nslabs[type])
index = 0;
if (index == wrap)
goto not_found;
@@ -544,42 +594,42 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
* contiguous buffers, we allocate the buffers from that slot
* and mark the entries as '0' indicating unavailable.
*/
- if (io_tlb_list[SWIOTLB_LO][index] >= nslots) {
+ if (io_tlb_list[type][index] >= nslots) {
int count = 0;
for (i = index; i < (int) (index + nslots); i++)
- io_tlb_list[SWIOTLB_LO][i] = 0;
+ io_tlb_list[type][i] = 0;
for (i = index - 1;
(OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
- io_tlb_list[SWIOTLB_LO][i];
+ io_tlb_list[type][i];
i--)
- io_tlb_list[SWIOTLB_LO][i] = ++count;
- tlb_addr = io_tlb_start[SWIOTLB_LO] + (index << IO_TLB_SHIFT);
+ io_tlb_list[type][i] = ++count;
+ tlb_addr = io_tlb_start[type] + (index << IO_TLB_SHIFT);
/*
* Update the indices to avoid searching in the next
* round.
*/
- io_tlb_index[SWIOTLB_LO] = ((index + nslots) < io_tlb_nslabs[SWIOTLB_LO]
+ io_tlb_index[type] = ((index + nslots) < io_tlb_nslabs[type]
? (index + nslots) : 0);
goto found;
}
index += stride;
- if (index >= io_tlb_nslabs[SWIOTLB_LO])
+ if (index >= io_tlb_nslabs[type])
index = 0;
} while (index != wrap);
not_found:
- tmp_io_tlb_used = io_tlb_used[SWIOTLB_LO];
+ tmp_io_tlb_used = io_tlb_used[type];
spin_unlock_irqrestore(&io_tlb_lock, flags);
if (!(attrs & DMA_ATTR_NO_WARN) && printk_ratelimit())
- dev_warn(hwdev, "swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
- alloc_size, io_tlb_nslabs[SWIOTLB_LO], tmp_io_tlb_used);
+ dev_warn(hwdev, "%s swiotlb buffer is full (sz: %zd bytes), total %lu (slots), used %lu (slots)\n",
+ swiotlb_name[type], alloc_size, io_tlb_nslabs[type], tmp_io_tlb_used);
return (phys_addr_t)DMA_MAPPING_ERROR;
found:
- io_tlb_used[SWIOTLB_LO] += nslots;
+ io_tlb_used[type] += nslots;
spin_unlock_irqrestore(&io_tlb_lock, flags);
/*
@@ -588,7 +638,7 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev, phys_addr_t orig_addr,
* needed.
*/
for (i = 0; i < nslots; i++)
- io_tlb_orig_addr[SWIOTLB_LO][index+i] = orig_addr + (i << IO_TLB_SHIFT);
+ io_tlb_orig_addr[type][index+i] = orig_addr + (i << IO_TLB_SHIFT);
if (!(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
swiotlb_bounce(orig_addr, tlb_addr, mapping_size, DMA_TO_DEVICE);
@@ -605,8 +655,9 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
{
unsigned long flags;
int i, count, nslots = ALIGN(alloc_size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
- int index = (tlb_addr - io_tlb_start[SWIOTLB_LO]) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[SWIOTLB_LO][index];
+ int type = swiotlb_get_type(tlb_addr);
+ int index = (tlb_addr - io_tlb_start[type]) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[type][index];
/*
* First, sync the memory before unmapping the entry
@@ -625,14 +676,14 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
spin_lock_irqsave(&io_tlb_lock, flags);
{
count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
- io_tlb_list[SWIOTLB_LO][index + nslots] : 0);
+ io_tlb_list[type][index + nslots] : 0);
/*
* Step 1: return the slots to the free list, merging the
* slots with superceeding slots
*/
for (i = index + nslots - 1; i >= index; i--) {
- io_tlb_list[SWIOTLB_LO][i] = ++count;
- io_tlb_orig_addr[SWIOTLB_LO][i] = INVALID_PHYS_ADDR;
+ io_tlb_list[type][i] = ++count;
+ io_tlb_orig_addr[type][i] = INVALID_PHYS_ADDR;
}
/*
* Step 2: merge the returned slots with the preceding slots,
@@ -640,11 +691,11 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
*/
for (i = index - 1;
(OFFSET(i, IO_TLB_SEGSIZE) != IO_TLB_SEGSIZE - 1) &&
- io_tlb_list[SWIOTLB_LO][i];
+ io_tlb_list[type][i];
i--)
- io_tlb_list[SWIOTLB_LO][i] = ++count;
+ io_tlb_list[type][i] = ++count;
- io_tlb_used[SWIOTLB_LO] -= nslots;
+ io_tlb_used[type] -= nslots;
}
spin_unlock_irqrestore(&io_tlb_lock, flags);
}
@@ -653,8 +704,9 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target)
{
- int index = (tlb_addr - io_tlb_start[SWIOTLB_LO]) >> IO_TLB_SHIFT;
- phys_addr_t orig_addr = io_tlb_orig_addr[SWIOTLB_LO][index];
+ int type = swiotlb_get_type(tlb_addr);
+ int index = (tlb_addr - io_tlb_start[type]) >> IO_TLB_SHIFT;
+ phys_addr_t orig_addr = io_tlb_orig_addr[type][index];
if (orig_addr == INVALID_PHYS_ADDR)
return;
@@ -737,6 +789,15 @@ static int __init swiotlb_create_debugfs(void)
root = debugfs_create_dir("swiotlb", NULL);
debugfs_create_ulong("io_tlb_nslabs", 0400, root, &io_tlb_nslabs[SWIOTLB_LO]);
debugfs_create_ulong("io_tlb_used", 0400, root, &io_tlb_used[SWIOTLB_LO]);
+
+ if (swiotlb_nr == 1)
+ return 0;
+
+ debugfs_create_ulong("io_tlb_nslabs-highmem", 0400,
+ root, &io_tlb_nslabs[SWIOTLB_HI]);
+ debugfs_create_ulong("io_tlb_used-highmem", 0400,
+ root, &io_tlb_used[SWIOTLB_HI]);
+
return 0;
}
This patch enables a 64-bit swiotlb buffer.

The current swiotlb implementation pre-allocates <=32-bit memory in order to meet the DMA mask requirement of some 32-bit legacy devices. Considering that most devices nowadays support 64-bit DMA and an IOMMU is available, the swiotlb buffer is unused most of the time, except:

1. The xen PVM domain requires DMA addresses to be both (1) below the DMA mask and (2) contiguous in machine address. Therefore, a 64-bit device may still require swiotlb in a PVM domain.

2. From the source code, AMD SME/SEV enables SWIOTLB_FORCE. As a result, allocations always come from the swiotlb buffer even when the device DMA mask is 64-bit:

sme_early_init()
-> if (sev_active())
       swiotlb_force = SWIOTLB_FORCE;

Therefore, this patch introduces a 2nd swiotlb buffer for 64-bit DMA access. For instance, swiotlb_tbl_map_single() allocates from the 2nd buffer if the effective device DMA mask, min_not_zero(*hwdev->dma_mask, hwdev->bus_dma_limit), equals DMA_BIT_MASK(64).

An example configuration of the 64-bit swiotlb is "swiotlb=65536,524288,force" or "swiotlb=,524288,force", where 524288 is the number of slabs of the 64-bit buffer.

With this patch, the kernel is able to allocate a >4GB swiotlb buffer. The patch covers only swiotlb, not xen-swiotlb.

Cc: Joe Jin <joe.jin@oracle.com>
Signed-off-by: Dongli Zhang <dongli.zhang@oracle.com>
---
 arch/mips/cavium-octeon/dma-octeon.c         |   3 +-
 arch/powerpc/kernel/dma-swiotlb.c            |   2 +-
 arch/powerpc/platforms/pseries/svm.c         |   2 +-
 arch/x86/kernel/pci-swiotlb.c                |   5 +-
 arch/x86/pci/sta2x11-fixup.c                 |   2 +-
 drivers/gpu/drm/i915/gem/i915_gem_internal.c |   4 +-
 drivers/gpu/drm/i915/i915_scatterlist.h      |   2 +-
 drivers/gpu/drm/nouveau/nouveau_ttm.c        |   2 +-
 drivers/mmc/host/sdhci.c                     |   2 +-
 drivers/pci/xen-pcifront.c                   |   2 +-
 drivers/xen/swiotlb-xen.c                    |   9 +-
 include/linux/swiotlb.h                      |  28 +-
 kernel/dma/swiotlb.c                         | 339 +++++++++++--------
 13 files changed, 238 insertions(+), 164 deletions(-)
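For illustration only (not part of the patch), below is a minimal, self-contained sketch of the buffer-selection policy that swiotlb_tbl_map_single() gains above. Kernel types are simplified to stdint, and DMA_BIT_MASK()/min_not_zero() are re-stated to match their kernel definitions:

    #include <stdint.h>

    #define DMA_BIT_MASK(n)    (((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))
    /* Like the kernel helper: minimum of x and y, treating 0 as "unset". */
    #define min_not_zero(x, y) ((x) == 0 ? (y) : \
                               ((y) == 0 ? (x) : ((x) < (y) ? (x) : (y))))

    enum swiotlb_t { SWIOTLB_LO, SWIOTLB_HI, SWIOTLB_MAX };

    static int swiotlb_nr = 2;  /* e.g. booted with swiotlb=65536,524288 */

    /* Mirror of the check added to swiotlb_tbl_map_single(). */
    static enum swiotlb_t swiotlb_select(uint64_t dev_dma_mask,
                                         uint64_t bus_dma_limit)
    {
            uint64_t dma_mask = min_not_zero(dev_dma_mask, bus_dma_limit);

            /* Only a fully 64-bit-capable device may use the high buffer,
             * since that buffer can sit anywhere, even above 4GB. */
            if (swiotlb_nr > 1 && dma_mask == DMA_BIT_MASK(64))
                    return SWIOTLB_HI;

            return SWIOTLB_LO;  /* limited devices stay in <=32-bit memory */
    }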
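With two buffers configured, the new debugfs entries added in the last hunk can be used to watch the high buffer fill up, assuming debugfs is mounted at the usual /sys/kernel/debug:

    # cat /sys/kernel/debug/swiotlb/io_tlb_nslabs-highmem
    # cat /sys/kernel/debug/swiotlb/io_tlb_used-highmem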