diff mbox

[v3,7/7] arm64: allow kernel Image to be loaded anywhere in physical memory

Message ID 1447672998-20981-8-git-send-email-ard.biesheuvel@linaro.org
State New
Headers show

Commit Message

Ard Biesheuvel Nov. 16, 2015, 11:23 a.m. UTC
This relaxes the kernel Image placement requirements, so that it
may be placed at any 2 MB aligned offset in physical memory.

This is accomplished by ignoring PHYS_OFFSET when installing
memblocks, and accounting for the apparent virtual offset of
the kernel Image (in addition to the 64 MB that it is moved
below PAGE_OFFSET). As a result, virtual address references
below PAGE_OFFSET are correctly mapped onto physical references
into the kernel Image regardless of where it sits in memory.

Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>

---
 Documentation/arm64/booting.txt | 12 ++---
 arch/arm64/include/asm/memory.h |  9 ++--
 arch/arm64/mm/init.c            | 49 +++++++++++++++++++-
 arch/arm64/mm/mmu.c             | 29 ++++++++++--
 4 files changed, 83 insertions(+), 16 deletions(-)

-- 
1.9.1


_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel

Comments

Catalin Marinas Dec. 7, 2015, 3:30 p.m. UTC | #1
On Mon, Nov 16, 2015 at 12:23:18PM +0100, Ard Biesheuvel wrote:
> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h

> index 3148691bc80a..d6a237bda1f9 100644

> --- a/arch/arm64/include/asm/memory.h

> +++ b/arch/arm64/include/asm/memory.h

> @@ -120,13 +120,10 @@ extern phys_addr_t		memstart_addr;

>  extern u64 kernel_va_offset;

>  

>  /*

> - * The maximum physical address that the linear direct mapping

> - * of system RAM can cover. (PAGE_OFFSET can be interpreted as

> - * a 2's complement signed quantity and negated to derive the

> - * maximum size of the linear mapping.)

> + * Allow all memory at the discovery stage. We will clip it later.

>   */

> -#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })

> -#define MIN_MEMBLOCK_ADDR	__pa(KIMAGE_VADDR)

> +#define MIN_MEMBLOCK_ADDR	0

> +#define MAX_MEMBLOCK_ADDR	U64_MAX


Just in case we get some random memblock information, shall we cap the
maximum at PHYS_MASK?

> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c

> index b3b0175d7135..29a7dc5327b6 100644

> --- a/arch/arm64/mm/init.c

> +++ b/arch/arm64/mm/init.c

> @@ -158,9 +159,55 @@ static int __init early_mem(char *p)

>  }

>  early_param("mem", early_mem);

>  

> +static void __init enforce_memory_limit(void)

> +{

> +	const phys_addr_t kbase = round_down(__pa(_text), MIN_KIMG_ALIGN);

> +	u64 to_remove = memblock_phys_mem_size() - memory_limit;

> +	phys_addr_t max_addr = 0;

> +	struct memblock_region *r;

> +

> +	if (memory_limit == (phys_addr_t)ULLONG_MAX)

> +		return;

> +

> +	/*

> +	 * The kernel may be high up in physical memory, so try to apply the

> +	 * limit below the kernel first, and only let the generic handling

> +	 * take over if it turns out we haven't clipped enough memory yet.

> +	 */

> +	for_each_memblock(memory, r) {

> +		if (r->base + r->size > kbase) {

> +			u64 rem = min(to_remove, kbase - r->base);

> +

> +			max_addr = r->base + rem;

> +			to_remove -= rem;

> +			break;

> +		}

> +		if (to_remove <= r->size) {

> +			max_addr = r->base + to_remove;

> +			to_remove = 0;

> +			break;

> +		}

> +		to_remove -= r->size;

> +	}

> +

> +	memblock_remove(0, max_addr);


I don't fully get the reason for this function. Do you want to keep the
kernel around in memblock? How do we guarantee that the call below
wouldn't remove it anyway?

> +

> +	if (to_remove)

> +		memblock_enforce_memory_limit(memory_limit);


Shouldn't this be memblock_enforce_memory_limit(to_remove)?

> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c

> index 526eeb7e1e97..1b9d7e48ba1e 100644

> --- a/arch/arm64/mm/mmu.c

> +++ b/arch/arm64/mm/mmu.c


> @@ -481,11 +482,33 @@ static void __init bootstrap_linear_mapping(unsigned long va_offset)

>  static void __init map_mem(void)

>  {

>  	struct memblock_region *reg;

> +	u64 new_memstart_addr;

> +	u64 new_va_offset;

>  

> -	bootstrap_linear_mapping(KIMAGE_OFFSET);

> +	/*

> +	 * Select a suitable value for the base of physical memory.

> +	 * This should be equal to or below the lowest usable physical

> +	 * memory address, and aligned to PUD/PMD size so that we can map

> +	 * it efficiently.

> +	 */

> +	new_memstart_addr = round_down(memblock_start_of_DRAM(), SZ_1G);


With this trick, we can no longer assume we have a mapping at
PAGE_OFFSET. I don't think we break any expectations but we probably
don't free the unused memmap at the beginning. We can probably set
prev_end to this rounded down address in free_unused_memmap().

-- 
Catalin

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Ard Biesheuvel Dec. 7, 2015, 3:40 p.m. UTC | #2
On 7 December 2015 at 16:30, Catalin Marinas <catalin.marinas@arm.com> wrote:
> On Mon, Nov 16, 2015 at 12:23:18PM +0100, Ard Biesheuvel wrote:

>> diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h

>> index 3148691bc80a..d6a237bda1f9 100644

>> --- a/arch/arm64/include/asm/memory.h

>> +++ b/arch/arm64/include/asm/memory.h

>> @@ -120,13 +120,10 @@ extern phys_addr_t              memstart_addr;

>>  extern u64 kernel_va_offset;

>>

>>  /*

>> - * The maximum physical address that the linear direct mapping

>> - * of system RAM can cover. (PAGE_OFFSET can be interpreted as

>> - * a 2's complement signed quantity and negated to derive the

>> - * maximum size of the linear mapping.)

>> + * Allow all memory at the discovery stage. We will clip it later.

>>   */

>> -#define MAX_MEMBLOCK_ADDR    ({ memstart_addr - PAGE_OFFSET - 1; })

>> -#define MIN_MEMBLOCK_ADDR    __pa(KIMAGE_VADDR)

>> +#define MIN_MEMBLOCK_ADDR    0

>> +#define MAX_MEMBLOCK_ADDR    U64_MAX

>

> Just in case we get some random memblock information, shall we cap the

> maximum to PHYS_MASK?

>


Yes, that makes sense.

>> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c

>> index b3b0175d7135..29a7dc5327b6 100644

>> --- a/arch/arm64/mm/init.c

>> +++ b/arch/arm64/mm/init.c

>> @@ -158,9 +159,55 @@ static int __init early_mem(char *p)

>>  }

>>  early_param("mem", early_mem);

>>

>> +static void __init enforce_memory_limit(void)

>> +{

>> +     const phys_addr_t kbase = round_down(__pa(_text), MIN_KIMG_ALIGN);

>> +     u64 to_remove = memblock_phys_mem_size() - memory_limit;

>> +     phys_addr_t max_addr = 0;

>> +     struct memblock_region *r;

>> +

>> +     if (memory_limit == (phys_addr_t)ULLONG_MAX)

>> +             return;

>> +

>> +     /*

>> +      * The kernel may be high up in physical memory, so try to apply the

>> +      * limit below the kernel first, and only let the generic handling

>> +      * take over if it turns out we haven't clipped enough memory yet.

>> +      */

>> +     for_each_memblock(memory, r) {

>> +             if (r->base + r->size > kbase) {

>> +                     u64 rem = min(to_remove, kbase - r->base);

>> +

>> +                     max_addr = r->base + rem;

>> +                     to_remove -= rem;

>> +                     break;

>> +             }

>> +             if (to_remove <= r->size) {

>> +                     max_addr = r->base + to_remove;

>> +                     to_remove = 0;

>> +                     break;

>> +             }

>> +             to_remove -= r->size;

>> +     }

>> +

>> +     memblock_remove(0, max_addr);

>

> I don't fully get the reason for this function. Do you want to keep the

> kernel around in memblock? How do we guarantee that the call below

> wouldn't remove it anyway?

>


The problem is that the ordinary memblock_enforce_memory_limit()
removes memory from the top, which means it will happily remove the
memory that covers your kernel image if it happens to be loaded high
up in physical memory.

>> +

>> +     if (to_remove)

>> +             memblock_enforce_memory_limit(memory_limit);

>

> Shouldn't this be memblock_enforce_memory_limit(to_remove)?

>


No, it takes the memory limit as input. At this point, 'to_remove +
memory_limit' is exactly the memory that remains.

>> diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c

>> index 526eeb7e1e97..1b9d7e48ba1e 100644

>> --- a/arch/arm64/mm/mmu.c

>> +++ b/arch/arm64/mm/mmu.c

>

>> @@ -481,11 +482,33 @@ static void __init bootstrap_linear_mapping(unsigned long va_offset)

>>  static void __init map_mem(void)

>>  {

>>       struct memblock_region *reg;

>> +     u64 new_memstart_addr;

>> +     u64 new_va_offset;

>>

>> -     bootstrap_linear_mapping(KIMAGE_OFFSET);

>> +     /*

>> +      * Select a suitable value for the base of physical memory.

>> +      * This should be equal to or below the lowest usable physical

>> +      * memory address, and aligned to PUD/PMD size so that we can map

>> +      * it efficiently.

>> +      */

>> +     new_memstart_addr = round_down(memblock_start_of_DRAM(), SZ_1G);

>

> With this trick, we can no longer assume we have a mapping at

> PAGE_OFFSET. I don't think we break any expectations but we probably

> don't free the unused memmap at the beginning. We can probably set

> prev_end to this rounded down address in free_unused_memmap().

>


I will look into that.

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
Catalin Marinas Dec. 7, 2015, 4:43 p.m. UTC | #3
On Mon, Dec 07, 2015 at 04:40:20PM +0100, Ard Biesheuvel wrote:
> On 7 December 2015 at 16:30, Catalin Marinas <catalin.marinas@arm.com> wrote:

> > On Mon, Nov 16, 2015 at 12:23:18PM +0100, Ard Biesheuvel wrote:

> >> diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c

> >> index b3b0175d7135..29a7dc5327b6 100644

> >> --- a/arch/arm64/mm/init.c

> >> +++ b/arch/arm64/mm/init.c

> >> @@ -158,9 +159,55 @@ static int __init early_mem(char *p)

> >>  }

> >>  early_param("mem", early_mem);

> >>

> >> +static void __init enforce_memory_limit(void)

> >> +{

> >> +     const phys_addr_t kbase = round_down(__pa(_text), MIN_KIMG_ALIGN);

> >> +     u64 to_remove = memblock_phys_mem_size() - memory_limit;

> >> +     phys_addr_t max_addr = 0;

> >> +     struct memblock_region *r;

> >> +

> >> +     if (memory_limit == (phys_addr_t)ULLONG_MAX)

> >> +             return;

> >> +

> >> +     /*

> >> +      * The kernel may be high up in physical memory, so try to apply the

> >> +      * limit below the kernel first, and only let the generic handling

> >> +      * take over if it turns out we haven't clipped enough memory yet.

> >> +      */

> >> +     for_each_memblock(memory, r) {

> >> +             if (r->base + r->size > kbase) {

> >> +                     u64 rem = min(to_remove, kbase - r->base);

> >> +

> >> +                     max_addr = r->base + rem;

> >> +                     to_remove -= rem;

> >> +                     break;

> >> +             }

> >> +             if (to_remove <= r->size) {

> >> +                     max_addr = r->base + to_remove;

> >> +                     to_remove = 0;

> >> +                     break;

> >> +             }

> >> +             to_remove -= r->size;

> >> +     }

> >> +

> >> +     memblock_remove(0, max_addr);

> >

> > I don't fully get the reason for this function. Do you want to keep the

> > kernel around in memblock? How do we guarantee that the call below

> > wouldn't remove it anyway?

> 

> The problem is that the ordinary memblock_enforce_memory_limit()

> removes memory from the top, which means it will happily remove the

> memory that covers your kernel image if it happens to be loaded high

> up in physical memory.


We could fix the memblock_reserve() call on the kernel image but apart
from that we don't care about memblock's knowledge of the kernel text. A
potential problem is freeing the init memory which assumes it's present
in the linear mapping, though we could add additional checks here as
well. Is memblock_end_of_DRAM() adjusted to the new maximum address
after memblock_enforce_memory_limit()?

> >> +

> >> +     if (to_remove)

> >> +             memblock_enforce_memory_limit(memory_limit);

> >

> > Shouldn't this be memblock_enforce_memory_limit(to_remove)?

> 

> No, it takes the memory limit as input. 'to_remove + memory_limit'

> will be exactly the remaining memory at this point.


You are right, I thought it's the memory to remove.

-- 
Catalin

_______________________________________________
linux-arm-kernel mailing list
linux-arm-kernel@lists.infradead.org
http://lists.infradead.org/mailman/listinfo/linux-arm-kernel
diff mbox

Patch

diff --git a/Documentation/arm64/booting.txt b/Documentation/arm64/booting.txt
index 701d39d3171a..f190e708bb9b 100644
--- a/Documentation/arm64/booting.txt
+++ b/Documentation/arm64/booting.txt
@@ -117,14 +117,14 @@  Header notes:
   depending on selected features, and is effectively unbound.
 
 The Image must be placed text_offset bytes from a 2MB aligned base
-address near the start of usable system RAM and called there. Memory
-below that base address is currently unusable by Linux, and therefore it
-is strongly recommended that this location is the start of system RAM.
-The region between the 2 MB aligned base address and the start of the
-image has no special significance to the kernel, and may be used for
-other purposes.
+address anywhere in usable system RAM and called there. The region
+between the 2 MB aligned base address and the start of the image has no
+special significance to the kernel, and may be used for other purposes.
 At least image_size bytes from the start of the image must be free for
 use by the kernel.
+NOTE: versions prior to v4.5 cannot make use of memory below the
+physical offset of the Image so it is recommended that the Image be
+placed as close as possible to the start of system RAM.
 
 Any memory described to the kernel (even that below the start of the
 image) which is not marked as reserved from the kernel (e.g., with a
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 3148691bc80a..d6a237bda1f9 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -120,13 +120,10 @@  extern phys_addr_t		memstart_addr;
 extern u64 kernel_va_offset;
 
 /*
- * The maximum physical address that the linear direct mapping
- * of system RAM can cover. (PAGE_OFFSET can be interpreted as
- * a 2's complement signed quantity and negated to derive the
- * maximum size of the linear mapping.)
+ * Allow all memory at the discovery stage. We will clip it later.
  */
-#define MAX_MEMBLOCK_ADDR	({ memstart_addr - PAGE_OFFSET - 1; })
-#define MIN_MEMBLOCK_ADDR	__pa(KIMAGE_VADDR)
+#define MIN_MEMBLOCK_ADDR	0
+#define MAX_MEMBLOCK_ADDR	U64_MAX
 
 /*
  * PFNs are used to describe any physical page; this means
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index b3b0175d7135..29a7dc5327b6 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -35,6 +35,7 @@ 
 #include <linux/efi.h>
 #include <linux/swiotlb.h>
 
+#include <asm/boot.h>
 #include <asm/fixmap.h>
 #include <asm/kernel-pgtable.h>
 #include <asm/memory.h>
@@ -158,9 +159,55 @@  static int __init early_mem(char *p)
 }
 early_param("mem", early_mem);
 
+static void __init enforce_memory_limit(void)
+{
+	const phys_addr_t kbase = round_down(__pa(_text), MIN_KIMG_ALIGN);
+	u64 to_remove = memblock_phys_mem_size() - memory_limit;
+	phys_addr_t max_addr = 0;
+	struct memblock_region *r;
+
+	if (memory_limit == (phys_addr_t)ULLONG_MAX)
+		return;
+
+	/*
+	 * The kernel may be high up in physical memory, so try to apply the
+	 * limit below the kernel first, and only let the generic handling
+	 * take over if it turns out we haven't clipped enough memory yet.
+	 */
+	for_each_memblock(memory, r) {
+		if (r->base + r->size > kbase) {
+			u64 rem = min(to_remove, kbase - r->base);
+
+			max_addr = r->base + rem;
+			to_remove -= rem;
+			break;
+		}
+		if (to_remove <= r->size) {
+			max_addr = r->base + to_remove;
+			to_remove = 0;
+			break;
+		}
+		to_remove -= r->size;
+	}
+
+	memblock_remove(0, max_addr);
+
+	if (to_remove)
+		memblock_enforce_memory_limit(memory_limit);
+}
+
 void __init arm64_memblock_init(void)
 {
-	memblock_enforce_memory_limit(memory_limit);
+	/*
+	 * Remove the memory that we will not be able to cover
+	 * with the linear mapping.
+	 */
+	const s64 linear_region_size = -(s64)PAGE_OFFSET;
+
+	memblock_remove(round_down(memblock_start_of_DRAM(), SZ_1G) +
+			linear_region_size, ULLONG_MAX);
+
+	enforce_memory_limit();
 
 	/*
 	 * Register the kernel text, kernel data, initrd, and initial
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 526eeb7e1e97..1b9d7e48ba1e 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -21,6 +21,7 @@ 
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
+#include <linux/initrd.h>
 #include <linux/libfdt.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
@@ -481,11 +482,33 @@  static void __init bootstrap_linear_mapping(unsigned long va_offset)
 static void __init map_mem(void)
 {
 	struct memblock_region *reg;
+	u64 new_memstart_addr;
+	u64 new_va_offset;
 
-	bootstrap_linear_mapping(KIMAGE_OFFSET);
+	/*
+	 * Select a suitable value for the base of physical memory.
+	 * This should be equal to or below the lowest usable physical
+	 * memory address, and aligned to PUD/PMD size so that we can map
+	 * it efficiently.
+	 */
+	new_memstart_addr = round_down(memblock_start_of_DRAM(), SZ_1G);
+
+	/*
+	 * Calculate the offset between the kernel text mapping that exists
+	 * outside of the linear mapping, and its mapping in the linear region.
+	 */
+	new_va_offset = memstart_addr - new_memstart_addr;
+
+	bootstrap_linear_mapping(new_va_offset);
 
-	kernel_va_offset = KIMAGE_OFFSET;
-	memstart_addr -= KIMAGE_OFFSET;
+	kernel_va_offset = new_va_offset;
+	memstart_addr = new_memstart_addr;
+
+	/* Recalculate virtual addresses of initrd region */
+	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && initrd_start) {
+		initrd_start += new_va_offset;
+		initrd_end += new_va_offset;
+	}
 
 	/* map all the memory banks */
 	for_each_memblock(memory, reg) {