
[v2,2/3] arm: asm/cache.c: Introduce arm_reserve_mmu

Message ID 20200329175741.26297-3-ovpanait@gmail.com
State Accepted
Commit 6184858b859f6fcea4b23f76cfb7988882a3c8a7
Series common/board_f: Make reserve_mmu generic

Commit Message

Ovidiu Panait March 29, 2020, 5:57 p.m. UTC
In preparation for turning reserve_mmu into an arch-specific variant,
introduce arm_reserve_mmu on ARM. It implements the default routine for
reserving memory for the MMU TLB table and is weakly defined so that
machines can override it.

Without this decoupling, introducing arch_reserve_mmu would leave two
weak definitions of it: one in common/board_f.c and one in
arch/arm/lib/cache.c.

Signed-off-by: Ovidiu Panait <ovpanait at gmail.com>
---
 arch/arm/include/asm/cache.h | 11 +++++++++++
 arch/arm/lib/cache.c         |  5 +++++
 arch/arm/mach-versal/cpu.c   |  3 ++-
 arch/arm/mach-zynqmp/cpu.c   |  3 ++-
 4 files changed, 20 insertions(+), 2 deletions(-)
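
The decoupling relies on ordinary strong-vs-weak linking: any machine that provides its own arm_reserve_mmu() replaces the weak default in arch/arm/lib/cache.c, exactly as the Versal and ZynqMP hunks below do. A minimal sketch of such an override, loosely modelled on those hunks (MY_MACH_PGTABLE_ADDR is an invented placeholder, not a symbol from this series):

#include <common.h>
#include <asm/armv8/mmu.h>	/* PGTABLE_SIZE */
#include <asm/cache.h>

DECLARE_GLOBAL_DATA_PTR;

/* Hypothetical fixed location for the page table, e.g. on-chip RAM */
#define MY_MACH_PGTABLE_ADDR	0x7ff00000UL

/*
 * Strong definition: overrides the weak arm_reserve_mmu() from
 * arch/arm/lib/cache.c, so the MMU page table is placed at a fixed,
 * machine-chosen address instead of being carved out of gd->relocaddr.
 */
int arm_reserve_mmu(void)
{
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->arch.tlb_addr = MY_MACH_PGTABLE_ADDR;

	return 0;
}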

Comments

Simon Glass March 30, 2020, 11:57 p.m. UTC | #1
Hi Ovidiu,

On Sun, 29 Mar 2020 at 11:59, Ovidiu Panait <ovpanait at gmail.com> wrote:
>
> In preparation for turning reserve_mmu into an arch-specific variant,
> introduce arm_reserve_mmu on ARM. It implements the default routine for
> reserving memory for the MMU TLB table and is weakly defined so that
> machines can override it.
>
> Without this decoupling, introducing arch_reserve_mmu would leave two
> weak definitions of it: one in common/board_f.c and one in
> arch/arm/lib/cache.c.
>
> Signed-off-by: Ovidiu Panait <ovpanait at gmail.com>
> ---
>  arch/arm/include/asm/cache.h | 11 +++++++++++
>  arch/arm/lib/cache.c         |  5 +++++
>  arch/arm/mach-versal/cpu.c   |  3 ++-
>  arch/arm/mach-zynqmp/cpu.c   |  3 ++-
>  4 files changed, 20 insertions(+), 2 deletions(-)
>

I'm not a fan of weak functions, and here we have two layers of them.
But I'm not sure what else to suggest, other than a Kconfig option to
enable/disable the generic arm_reserve_mmu().
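
As a rough illustration only, a Kconfig-gated default could look like the snippet below in arch/arm/lib/cache.c. The ARM_GENERIC_RESERVE_MMU symbol is invented here and is not part of this series, and the function body is abridged from the existing default whose tail is trimmed in the hunks quoted below.

/*
 * Sketch only: ARM_GENERIC_RESERVE_MMU is a hypothetical Kconfig symbol.
 * With it, the generic default would be compiled out (rather than weakly
 * overridden) on machines that supply their own arm_reserve_mmu().
 */
#if CONFIG_IS_ENABLED(ARM_GENERIC_RESERVE_MMU)
int arm_reserve_mmu(void)
{
#if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
	/* reserve TLB table just below the relocation address */
	gd->arch.tlb_size = PGTABLE_SIZE;
	gd->relocaddr -= gd->arch.tlb_size;

	/* round down to next 64 kB limit */
	gd->relocaddr &= ~(0x10000 - 1);

	gd->arch.tlb_addr = gd->relocaddr;
#endif

	return 0;
}
#endif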

Reviewed-by: Simon Glass <sjg at chromium.org>


> diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
> index 950ec1e793..c20e05ec7f 100644
> --- a/arch/arm/include/asm/cache.h
> +++ b/arch/arm/include/asm/cache.h
> @@ -49,4 +49,15 @@ void dram_bank_mmu_setup(int bank);
>   */
>  #define ARCH_DMA_MINALIGN      CONFIG_SYS_CACHELINE_SIZE
>
> +/*
> + * arm_reserve_mmu() - Reserve memory for MMU TLB table
> + *
> + * Default implementation for reserving memory for MMU TLB table. It is used
> + * during generic board init sequence in common/board_f.c. Weakly defined, so
> + * that machines can override it if needed.
> + *
> + * Return: 0 if OK
> + */
> +int arm_reserve_mmu(void);
> +
>  #endif /* _ASM_CACHE_H */
> diff --git a/arch/arm/lib/cache.c b/arch/arm/lib/cache.c
> index b8e1e340a1..3cbed602eb 100644
> --- a/arch/arm/lib/cache.c
> +++ b/arch/arm/lib/cache.c
> @@ -122,6 +122,11 @@ void invalidate_l2_cache(void)
>  #endif
>
>  __weak int reserve_mmu(void)
> +{
> +       return arm_reserve_mmu();
> +}
> +
> +__weak int arm_reserve_mmu(void)
>  {
>  #if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
>         /* reserve TLB table */
> diff --git a/arch/arm/mach-versal/cpu.c b/arch/arm/mach-versal/cpu.c
> index 6ee6cd43ec..c14c5bb39c 100644
> --- a/arch/arm/mach-versal/cpu.c
> +++ b/arch/arm/mach-versal/cpu.c
> @@ -9,6 +9,7 @@
>  #include <asm/io.h>
>  #include <asm/arch/hardware.h>
>  #include <asm/arch/sys_proto.h>
> +#include <asm/cache.h>
>
>  DECLARE_GLOBAL_DATA_PTR;
>
> @@ -98,7 +99,7 @@ u64 get_page_table_size(void)
>  }
>
>  #if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU)
> -int reserve_mmu(void)
> +int arm_reserve_mmu(void)
>  {
>         tcm_init(TCM_LOCK);
>         gd->arch.tlb_size = PGTABLE_SIZE;
> diff --git a/arch/arm/mach-zynqmp/cpu.c b/arch/arm/mach-zynqmp/cpu.c
> index 442427bc11..811684a9f8 100644
> --- a/arch/arm/mach-zynqmp/cpu.c
> +++ b/arch/arm/mach-zynqmp/cpu.c
> @@ -11,6 +11,7 @@
>  #include <asm/armv8/mmu.h>
>  #include <asm/io.h>
>  #include <zynqmp_firmware.h>
> +#include <asm/cache.h>
>
>  #define ZYNQ_SILICON_VER_MASK  0xF000
>  #define ZYNQ_SILICON_VER_SHIFT 12
> @@ -116,7 +117,7 @@ void tcm_init(u8 mode)
>  #endif
>
>  #ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
> -int reserve_mmu(void)
> +int arm_reserve_mmu(void)
>  {
>         tcm_init(TCM_LOCK);
>         gd->arch.tlb_size = PGTABLE_SIZE;
> --
> 2.17.1
>

Patch

diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 950ec1e793..c20e05ec7f 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -49,4 +49,15 @@  void dram_bank_mmu_setup(int bank);
  */
 #define ARCH_DMA_MINALIGN	CONFIG_SYS_CACHELINE_SIZE
 
+/*
+ * arm_reserve_mmu() - Reserve memory for MMU TLB table
+ *
+ * Default implementation for reserving memory for MMU TLB table. It is used
+ * during generic board init sequence in common/board_f.c. Weakly defined, so
+ * that machines can override it if needed.
+ *
+ * Return: 0 if OK
+ */
+int arm_reserve_mmu(void);
+
 #endif /* _ASM_CACHE_H */
diff --git a/arch/arm/lib/cache.c b/arch/arm/lib/cache.c
index b8e1e340a1..3cbed602eb 100644
--- a/arch/arm/lib/cache.c
+++ b/arch/arm/lib/cache.c
@@ -122,6 +122,11 @@  void invalidate_l2_cache(void)
 #endif
 
 __weak int reserve_mmu(void)
+{
+	return arm_reserve_mmu();
+}
+
+__weak int arm_reserve_mmu(void)
 {
 #if !(CONFIG_IS_ENABLED(SYS_ICACHE_OFF) && CONFIG_IS_ENABLED(SYS_DCACHE_OFF))
 	/* reserve TLB table */
diff --git a/arch/arm/mach-versal/cpu.c b/arch/arm/mach-versal/cpu.c
index 6ee6cd43ec..c14c5bb39c 100644
--- a/arch/arm/mach-versal/cpu.c
+++ b/arch/arm/mach-versal/cpu.c
@@ -9,6 +9,7 @@ 
 #include <asm/io.h>
 #include <asm/arch/hardware.h>
 #include <asm/arch/sys_proto.h>
+#include <asm/cache.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -98,7 +99,7 @@  u64 get_page_table_size(void)
 }
 
 #if defined(CONFIG_SYS_MEM_RSVD_FOR_MMU)
-int reserve_mmu(void)
+int arm_reserve_mmu(void)
 {
 	tcm_init(TCM_LOCK);
 	gd->arch.tlb_size = PGTABLE_SIZE;
diff --git a/arch/arm/mach-zynqmp/cpu.c b/arch/arm/mach-zynqmp/cpu.c
index 442427bc11..811684a9f8 100644
--- a/arch/arm/mach-zynqmp/cpu.c
+++ b/arch/arm/mach-zynqmp/cpu.c
@@ -11,6 +11,7 @@ 
 #include <asm/armv8/mmu.h>
 #include <asm/io.h>
 #include <zynqmp_firmware.h>
+#include <asm/cache.h>
 
 #define ZYNQ_SILICON_VER_MASK	0xF000
 #define ZYNQ_SILICON_VER_SHIFT	12
@@ -116,7 +117,7 @@  void tcm_init(u8 mode)
 #endif
 
 #ifdef CONFIG_SYS_MEM_RSVD_FOR_MMU
-int reserve_mmu(void)
+int arm_reserve_mmu(void)
 {
 	tcm_init(TCM_LOCK);
 	gd->arch.tlb_size = PGTABLE_SIZE;
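
For readers following the series as a whole, the commit message implies the layering sketched below once arch_reserve_mmu is introduced; this is reconstructed from the description in this thread, not quoted from the later patch.

/* common/board_f.c: weak generic hook, a no-op for architectures that
 * do not reserve memory for an MMU table */
__weak int arch_reserve_mmu(void)
{
	return 0;
}

/* arch/arm/lib/cache.c: ARM's hook, delegating to the weak arm_reserve_mmu()
 * added by this patch, which machines such as Versal and ZynqMP override */
int arch_reserve_mmu(void)
{
	return arm_reserve_mmu();
}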