
[RFC,v2,07/26] KVM: arm64: Introduce a BSS section for use at Hyp

Message ID 20210108121524.656872-8-qperret@google.com
State New
Series KVM/arm64: A stage 2 for the host

Commit Message

Quentin Perret Jan. 8, 2021, 12:15 p.m. UTC
Currently, the hyp code cannot make full use of a BSS, as the kernel's
.bss section is mapped read-only.

While this mapping could simply be changed to read-write, it would
intermingle hyp and kernel state even more than they currently are.
Instead, introduce a __hyp_bss section, which uses reserved pages, and
create the appropriate RW hyp mappings during KVM init.

Signed-off-by: Quentin Perret <qperret@google.com>
---
 arch/arm64/include/asm/sections.h |  1 +
 arch/arm64/kernel/vmlinux.lds.S   |  7 +++++++
 arch/arm64/kvm/arm.c              | 11 +++++++++++
 arch/arm64/kvm/hyp/nvhe/hyp.lds.S |  1 +
 4 files changed, 20 insertions(+)
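
For illustration, a minimal sketch of what this enables on the hyp side; the
file, variable and function names below are hypothetical. An uninitialized
static in nVHE hyp code is emitted into .bss, collected into .hyp.bss by the
HYP_SECTION(.bss) rule added to hyp.lds.S, placed at the start of the kernel
.bss by vmlinux.lds.S, and mapped read-write at EL2 by init_hyp_mode().

/* Hypothetical example, e.g. arch/arm64/kvm/hyp/nvhe/example.c */

/*
 * Zero-initialized, so it lands in .bss and hence in .hyp.bss once the
 * nVHE objects are linked with hyp.lds.S.
 */
static unsigned long hyp_example_counter;

void hyp_example_record_event(void)
{
	/* This store works because the page is now mapped RW at hyp. */
	hyp_example_counter++;
}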

Comments

Will Deacon Feb. 1, 2021, 6:32 p.m. UTC | #1
On Fri, Jan 08, 2021 at 12:15:05PM +0000, Quentin Perret wrote:
> Currently, the hyp code cannot make full use of a BSS, as the kernel's
> .bss section is mapped read-only.
> 
> While this mapping could simply be changed to read-write, it would
> intermingle hyp and kernel state even more than they currently are.
> Instead, introduce a __hyp_bss section, which uses reserved pages, and
> create the appropriate RW hyp mappings during KVM init.
> 
> Signed-off-by: Quentin Perret <qperret@google.com>
> ---
>  arch/arm64/include/asm/sections.h |  1 +
>  arch/arm64/kernel/vmlinux.lds.S   |  7 +++++++
>  arch/arm64/kvm/arm.c              | 11 +++++++++++
>  arch/arm64/kvm/hyp/nvhe/hyp.lds.S |  1 +
>  4 files changed, 20 insertions(+)
> 
> diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
> index 8ff579361731..f58cf493de16 100644
> --- a/arch/arm64/include/asm/sections.h
> +++ b/arch/arm64/include/asm/sections.h
> @@ -12,6 +12,7 @@ extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
>  extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
>  extern char __hyp_text_start[], __hyp_text_end[];
>  extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
> +extern char __hyp_bss_start[], __hyp_bss_end[];
>  extern char __idmap_text_start[], __idmap_text_end[];
>  extern char __initdata_begin[], __initdata_end[];
>  extern char __inittext_begin[], __inittext_end[];
> diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
> index 43af13968dfd..3eca35d5a7cf 100644
> --- a/arch/arm64/kernel/vmlinux.lds.S
> +++ b/arch/arm64/kernel/vmlinux.lds.S
> @@ -8,6 +8,13 @@
>  #define RO_EXCEPTION_TABLE_ALIGN	8
>  #define RUNTIME_DISCARD_EXIT
>  
> +#define BSS_FIRST_SECTIONS				\
> +	. = ALIGN(PAGE_SIZE);				\
> +	__hyp_bss_start = .;				\
> +	*(.hyp.bss)					\

Use HYP_SECTION_NAME() here?
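
A sketch of how that could look, assuming HYP_SECTION_NAME() from
asm/hyp_image.h (which prefixes the section name with .hyp, so
HYP_SECTION_NAME(.bss) expands to .hyp.bss). The header is only included a
few lines further down, which is fine since the macro expands where
BSS_FIRST_SECTIONS is used:

#define BSS_FIRST_SECTIONS				\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_bss_start = .;				\
	*(HYP_SECTION_NAME(.bss))			\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_bss_end = .;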

> +	. = ALIGN(PAGE_SIZE);				\
> +	__hyp_bss_end = .;

Should this be gated on CONFIG_KVM like the other hyp sections are? In fact,
it might be nice to define all of those together. Yeah, it means moving
things higher up in the file, but I think it will be easier to read.
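
A sketch of the suggested gating; the empty #else definition is simply an
explicit fallback for !CONFIG_KVM builds:

#ifdef CONFIG_KVM
#define BSS_FIRST_SECTIONS				\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_bss_start = .;				\
	*(HYP_SECTION_NAME(.bss))			\
	. = ALIGN(PAGE_SIZE);				\
	__hyp_bss_end = .;
#else
#define BSS_FIRST_SECTIONS
#endif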

>  #include <asm-generic/vmlinux.lds.h>
>  #include <asm/cache.h>
>  #include <asm/hyp_image.h>
> diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
> index 3ac0f3425833..51b53ca36dc5 100644
> --- a/arch/arm64/kvm/arm.c
> +++ b/arch/arm64/kvm/arm.c
> @@ -1770,7 +1770,18 @@ static int init_hyp_mode(void)
>  		goto out_err;
>  	}
>  
> +	/*
> +	 * .hyp.bss is placed at the beginning of the .bss section, so map that
> +	 * part RW, and the rest RO as the hyp shouldn't be touching it.
> +	 */
>  	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),

I think it would be clearer to refer to __hyp_bss_start here ^^.
You could always add an ASSERT in the linker script if you want to catch
anybody adding something before the hyp bss in future.
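
A sketch of both suggestions, assuming the assertion sits alongside the
existing ones at the end of arch/arm64/kernel/vmlinux.lds.S:

	/* arch/arm64/kvm/arm.c: map only the hyp part of .bss read-write. */
	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start),
				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);

/* arch/arm64/kernel/vmlinux.lds.S: catch anything placed before .hyp.bss. */
ASSERT(__hyp_bss_start == __bss_start, "HYP bss must be at the start of .bss")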

Will

Patch

diff --git a/arch/arm64/include/asm/sections.h b/arch/arm64/include/asm/sections.h
index 8ff579361731..f58cf493de16 100644
--- a/arch/arm64/include/asm/sections.h
+++ b/arch/arm64/include/asm/sections.h
@@ -12,6 +12,7 @@  extern char __hibernate_exit_text_start[], __hibernate_exit_text_end[];
 extern char __hyp_idmap_text_start[], __hyp_idmap_text_end[];
 extern char __hyp_text_start[], __hyp_text_end[];
 extern char __hyp_data_ro_after_init_start[], __hyp_data_ro_after_init_end[];
+extern char __hyp_bss_start[], __hyp_bss_end[];
 extern char __idmap_text_start[], __idmap_text_end[];
 extern char __initdata_begin[], __initdata_end[];
 extern char __inittext_begin[], __inittext_end[];
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 43af13968dfd..3eca35d5a7cf 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -8,6 +8,13 @@ 
 #define RO_EXCEPTION_TABLE_ALIGN	8
 #define RUNTIME_DISCARD_EXIT
 
+#define BSS_FIRST_SECTIONS				\
+	. = ALIGN(PAGE_SIZE);				\
+	__hyp_bss_start = .;				\
+	*(.hyp.bss)					\
+	. = ALIGN(PAGE_SIZE);				\
+	__hyp_bss_end = .;
+
 #include <asm-generic/vmlinux.lds.h>
 #include <asm/cache.h>
 #include <asm/hyp_image.h>
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index 3ac0f3425833..51b53ca36dc5 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -1770,7 +1770,18 @@  static int init_hyp_mode(void)
 		goto out_err;
 	}
 
+	/*
+	 * .hyp.bss is placed at the beginning of the .bss section, so map that
+	 * part RW, and the rest RO as the hyp shouldn't be touching it.
+	 */
 	err = create_hyp_mappings(kvm_ksym_ref(__bss_start),
+				  kvm_ksym_ref(__hyp_bss_end), PAGE_HYP);
+	if (err) {
+		kvm_err("Cannot map hyp bss section: %d\n", err);
+		goto out_err;
+	}
+
+	err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end),
 				  kvm_ksym_ref(__bss_stop), PAGE_HYP_RO);
 	if (err) {
 		kvm_err("Cannot map bss section\n");
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
index 5d76ff2ba63e..dc281d90063e 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
+++ b/arch/arm64/kvm/hyp/nvhe/hyp.lds.S
@@ -17,4 +17,5 @@  SECTIONS {
 		PERCPU_INPUT(L1_CACHE_BYTES)
 	}
 	HYP_SECTION(.data..ro_after_init)
+	HYP_SECTION(.bss)
 }