@@ -118,10 +118,11 @@ static void __init arch_reserve_crashkernel(void)
* limit. If DRAM starts above 32-bit, expand the zone to the maximum
* available memory, otherwise cap it at 32-bit.
*/
-static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits,
+ phys_addr_t zone_off)
{
phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
- phys_addr_t phys_start = memblock_start_of_DRAM();
+ phys_addr_t phys_start = memblock_start_of_DRAM() - zone_off;
if (phys_start > U32_MAX)
zone_mask = PHYS_ADDR_MAX;
@@ -137,14 +138,19 @@ static void __init zone_sizes_init(void)
unsigned int __maybe_unused acpi_zone_dma_bits;
unsigned int __maybe_unused dt_zone_dma_bits;
phys_addr_t __maybe_unused max_cpu_address;
- phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);
+ phys_addr_t __maybe_unused min_cpu_address;
+ phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32, 0);
#ifdef CONFIG_ZONE_DMA
acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
- of_dma_get_cpu_limits(NULL, &max_cpu_address, NULL);
- dt_zone_dma_bits = fls64(max_cpu_address);
+ of_dma_get_cpu_limits(NULL, &max_cpu_address, &min_cpu_address);
+ dt_zone_dma_bits = fls64(max_cpu_address - min_cpu_address);
zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
- arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
+ zone_dma_off = min_cpu_address;
+ arm64_dma_phys_limit = max_zone_phys(zone_dma_bits, zone_dma_off)
+ + zone_dma_off;
+ if (zone_dma_off > U32_MAX)
+ dma32_phys_limit = arm64_dma_phys_limit;
max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
Commit 791ab8b2e3db ("arm64: Ignore any DMA offsets in the max_zone_phys() calculation") made DMA/DMA32 zones span the entire RAM when RAM starts above 32-bits. This breaks hardware with a DMA area that starts above 32-bits. But the commit log says that "we haven't noticed any such hardware". It turns out that such hardware does exist. One such platform has RAM starting at 32GB with an internal bus that has the following DMA limits: #address-cells = <2>; #size-cells = <2>; dma-ranges = <0x00 0xc0000000 0x08 0x00000000 0x00 0x40000000>; Devices under this bus can see 1GB of DMA range between 3GB-4GB in each device address space. This range is mapped to CPU memory at 32GB-33GB. With the current code, DMA allocations for devices under this bus are not limited to the DMA area, leading to run-time allocation failure. Modify the 'zone_dma_bits' calculation (via dt_zone_dma_bits) to only cover the actual DMA area starting at 'zone_dma_off'. Use the newly introduced 'min' parameter of of_dma_get_cpu_limits() to set 'zone_dma_off'. The DMA32 zone is useless in this configuration, so make its limit the same as the DMA zone when the lower DMA limit is higher than 32-bits. The result is a DMA zone that properly reflects the hardware constraints as follows: [ 0.000000] Zone ranges: [ 0.000000] DMA [mem 0x0000000800000000-0x000000083fffffff] [ 0.000000] DMA32 empty [ 0.000000] Normal [mem 0x0000000840000000-0x0000000bffffffff] Suggested-by: Catalin Marinas <catalin.marinas@arm.com> Signed-off-by: Baruch Siach <baruch@tkos.co.il> --- arch/arm64/mm/init.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-)