
[2/3] x86: add wrapper functions for mtrr functions handling also pat

Message ID 20220715142549.25223-3-jgross@suse.com
State New
Series x86: make pat and mtrr independent from each other

Commit Message

Jürgen Groß July 15, 2022, 2:25 p.m. UTC
There are several MTRR functions which also do PAT handling. In order
to support PAT handling without MTRR in the future, add some wrappers
for those functions.

Cc: <stable@vger.kernel.org> # 5.17
Fixes: bdd8b6c98239 ("drm/i915: replace X86_FEATURE_PAT with pat_enabled()")
Signed-off-by: Juergen Gross <jgross@suse.com>
---
 arch/x86/include/asm/mtrr.h      |  2 --
 arch/x86/include/asm/processor.h |  7 +++++
 arch/x86/kernel/cpu/common.c     | 44 +++++++++++++++++++++++++++++++-
 arch/x86/kernel/cpu/mtrr/mtrr.c  | 25 +++---------------
 arch/x86/kernel/setup.c          |  5 +---
 arch/x86/kernel/smpboot.c        |  8 +++---
 arch/x86/power/cpu.c             |  2 +-
 7 files changed, 59 insertions(+), 34 deletions(-)

Comments

Borislav Petkov July 19, 2022, 10:47 a.m. UTC | #1
Drop stable.

On Fri, Jul 15, 2022 at 04:25:48PM +0200, Juergen Gross wrote:
> diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
> index 5c934b922450..e2140204fb7e 100644
> --- a/arch/x86/include/asm/processor.h
> +++ b/arch/x86/include/asm/processor.h
> @@ -865,7 +865,14 @@ bool arch_is_platform_page(u64 paddr);
>  #define arch_is_platform_page arch_is_platform_page
>  #endif
>  
> +extern bool cache_aps_delayed_init;
> +
>  void cache_disable(void);
>  void cache_enable(void);
> +void cache_bp_init(void);
> +void cache_ap_init(void);
> +void cache_set_aps_delayed_init(void);
> +void cache_aps_init(void);
> +void cache_bp_restore(void);
>  
>  #endif /* _ASM_X86_PROCESSOR_H */

Use arch/x86/include/asm/cacheinfo.h instead.
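
I.e., something like this (sketch only; existing contents of cacheinfo.h
unchanged, just the declarations added by this patch moved there):

/* arch/x86/include/asm/cacheinfo.h (sketch, surrounding content elided) */
extern bool cache_aps_delayed_init;

void cache_bp_init(void);
void cache_ap_init(void);
void cache_set_aps_delayed_init(void);
void cache_aps_init(void);
void cache_bp_restore(void);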

> diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
> index e43322f8a4ef..0a1bd14f7966 100644
> --- a/arch/x86/kernel/cpu/common.c
> +++ b/arch/x86/kernel/cpu/common.c
> @@ -1929,7 +1929,7 @@ void identify_secondary_cpu(struct cpuinfo_x86 *c)
>  #ifdef CONFIG_X86_32
>  	enable_sep_cpu();
>  #endif
> -	mtrr_ap_init();
> +	cache_ap_init();
>  	validate_apic_and_package_id(c);
>  	x86_spec_ctrl_setup_ap();
>  	update_srbds_msr();
> @@ -2403,3 +2403,45 @@ void cache_enable(void) __releases(cache_disable_lock)
>  
>  	raw_spin_unlock(&cache_disable_lock);
>  }
> +
> +void __init cache_bp_init(void)
> +{
> +	if (IS_ENABLED(CONFIG_MTRR))
> +		mtrr_bp_init();
> +	else
> +		pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
> +}
> +
> +void cache_ap_init(void)
> +{
> +	if (cache_aps_delayed_init)
> +		return;
> +
> +	mtrr_ap_init();
> +}
> +
> +bool cache_aps_delayed_init;
> +
> +void cache_set_aps_delayed_init(void)
> +{
> +	cache_aps_delayed_init = true;
> +}

What's the point of a variable and a setter function?

You can either make this var __ro_after_init and then use it everywhere
or make it static and use a setter and getter.
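
E.g. (sketch only; the getter name and the bool parameter are
illustrative, not something from this patch):

#include <linux/types.h>	/* bool */

static bool cache_aps_delayed_init;

void cache_set_aps_delayed_init(bool val)
{
	cache_aps_delayed_init = val;
}

bool get_cache_aps_delayed_init(void)
{
	return cache_aps_delayed_init;
}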

> +
> +void cache_aps_init(void)
> +{
> +	/*
> +	 * Check if someone has requested the delay of AP cache initialization,
> +	 * by doing cache_set_aps_delayed_init(), prior to this point. If not,
> +	 * then we are done.
> +	 */
> +	if (!cache_aps_delayed_init)
> +		return;
> +
> +	mtrr_aps_init();
> +	cache_aps_delayed_init = false;
> +}
> +
> +void cache_bp_restore(void)
> +{
> +	mtrr_bp_restore();
> +}
> diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
> index 2746cac9d8a9..c1593cfae641 100644
> --- a/arch/x86/kernel/cpu/mtrr/mtrr.c
> +++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
> @@ -69,7 +69,6 @@ unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
>  static DEFINE_MUTEX(mtrr_mutex);
>  
>  u64 size_or_mask, size_and_mask;
> -static bool mtrr_aps_delayed_init;
>  
>  static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
>  
> @@ -176,7 +175,8 @@ static int mtrr_rendezvous_handler(void *info)
>  	if (data->smp_reg != ~0U) {
>  		mtrr_if->set(data->smp_reg, data->smp_base,
>  			     data->smp_size, data->smp_type);
> -	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
> +	} else if ((use_intel() && cache_aps_delayed_init) ||

What's the use_intel() for?

> +		   !cpu_online(smp_processor_id())) {
>  		mtrr_if->set_all();
>  	}
>  	return 0;
> @@ -789,7 +789,7 @@ void mtrr_ap_init(void)
>  	if (!mtrr_enabled())
>  		return;
>  
> -	if (!use_intel() || mtrr_aps_delayed_init)
> +	if (!use_intel())

And here you remove the mtrr_aps_delayed_init check but you have the
corresponding check of cache_aps_delayed_init in the caller. Hmm.

So it looks like you're pushing some of the logic into the cache_*
functions, one level up.

But it is really hard to follow what you're doing here.

And that mtrr_aps_delayed_init thing is not making it any easier. It
gets set during init unconditionally and once the APs have been set up,
it gets cleared.

And, AFAICT, it is used so that the MTRRs are not set when single APs
get onlined but it is all done in one fell swoop in mtrr_aps_init() and
then that delayed_init var gets cleared.
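
IOW the intended sequence, modeled in a tiny standalone sketch (not
kernel code, the names just mirror the ones in the patch):

#include <stdbool.h>
#include <stdio.h>

static bool aps_delayed_init;

/* per-AP hook: skipped while delayed init is requested */
static void ap_init(int cpu)
{
	if (aps_delayed_init)
		return;
	printf("CPU%d: set MTRRs/PAT now\n", cpu);
}

/* BP side: one rendezvous for all APs once they are up */
static void aps_init(void)
{
	if (!aps_delayed_init)
		return;
	printf("all CPUs: set MTRRs/PAT in one go\n");
	aps_delayed_init = false;
}

int main(void)
{
	aps_delayed_init = true;	/* native_smp_prepare_cpus() */
	for (int cpu = 1; cpu < 4; cpu++)
		ap_init(cpu);		/* identify_secondary_cpu() per AP */
	aps_init();			/* native_smp_cpus_done() */
	return 0;
}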

But then I don't understand what the point is of that pushing of
cache_aps_delayed_init up into the cache_* functions.

/me greps a while longer...

Ah, ok, I think I see where this is going. The delayed thing is relevant
for PAT too because pat_init() happens also as part of the ->set_all()
rendezvous dance.
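
(The Intel ->set_all() handler is generic_set_all() in
arch/x86/kernel/cpu/mtrr/generic.c; trimmed from memory, the
smp_changes_mask bookkeeping at the end elided:)

static void generic_set_all(void)
{
	unsigned long flags;

	local_irq_save(flags);
	prepare_set();

	/* Actually set the MTRR state */
	set_mtrr_state();

	/* also set PAT - this is the part that matters here */
	pat_init();

	post_set();
	local_irq_restore(flags);
}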

Right, so, this patch needs a *lot* more commit message text. You need
to explain why you're doing what you're doing and explain it in detail.

Perhaps even split the patch further into one adding the cache_* helpers
and another converting to them.

And, also, you probably should stick the small fix for the whole deal
in front of the patchset so that we have a stable backport - I wouldn't
want to backport all that more involved rework to stable.

Thx.

Patch

diff --git a/arch/x86/include/asm/mtrr.h b/arch/x86/include/asm/mtrr.h
index 12a16caed395..900083ac9f60 100644
--- a/arch/x86/include/asm/mtrr.h
+++ b/arch/x86/include/asm/mtrr.h
@@ -43,7 +43,6 @@  extern int mtrr_del(int reg, unsigned long base, unsigned long size);
 extern int mtrr_del_page(int reg, unsigned long base, unsigned long size);
 extern void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi);
 extern void mtrr_ap_init(void);
-extern void set_mtrr_aps_delayed_init(void);
 extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
@@ -86,7 +85,6 @@  static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
 #define mtrr_ap_init() do {} while (0)
-#define set_mtrr_aps_delayed_init() do {} while (0)
 #define mtrr_aps_init() do {} while (0)
 #define mtrr_bp_restore() do {} while (0)
 #define mtrr_disable() do {} while (0)
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5c934b922450..e2140204fb7e 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -865,7 +865,14 @@  bool arch_is_platform_page(u64 paddr);
 #define arch_is_platform_page arch_is_platform_page
 #endif
 
+extern bool cache_aps_delayed_init;
+
 void cache_disable(void);
 void cache_enable(void);
+void cache_bp_init(void);
+void cache_ap_init(void);
+void cache_set_aps_delayed_init(void);
+void cache_aps_init(void);
+void cache_bp_restore(void);
 
 #endif /* _ASM_X86_PROCESSOR_H */
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index e43322f8a4ef..0a1bd14f7966 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1929,7 +1929,7 @@  void identify_secondary_cpu(struct cpuinfo_x86 *c)
 #ifdef CONFIG_X86_32
 	enable_sep_cpu();
 #endif
-	mtrr_ap_init();
+	cache_ap_init();
 	validate_apic_and_package_id(c);
 	x86_spec_ctrl_setup_ap();
 	update_srbds_msr();
@@ -2403,3 +2403,45 @@  void cache_enable(void) __releases(cache_disable_lock)
 
 	raw_spin_unlock(&cache_disable_lock);
 }
+
+void __init cache_bp_init(void)
+{
+	if (IS_ENABLED(CONFIG_MTRR))
+		mtrr_bp_init();
+	else
+		pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
+}
+
+void cache_ap_init(void)
+{
+	if (cache_aps_delayed_init)
+		return;
+
+	mtrr_ap_init();
+}
+
+bool cache_aps_delayed_init;
+
+void cache_set_aps_delayed_init(void)
+{
+	cache_aps_delayed_init = true;
+}
+
+void cache_aps_init(void)
+{
+	/*
+	 * Check if someone has requested the delay of AP cache initialization,
+	 * by doing cache_set_aps_delayed_init(), prior to this point. If not,
+	 * then we are done.
+	 */
+	if (!cache_aps_delayed_init)
+		return;
+
+	mtrr_aps_init();
+	cache_aps_delayed_init = false;
+}
+
+void cache_bp_restore(void)
+{
+	mtrr_bp_restore();
+}
diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.c b/arch/x86/kernel/cpu/mtrr/mtrr.c
index 2746cac9d8a9..c1593cfae641 100644
--- a/arch/x86/kernel/cpu/mtrr/mtrr.c
+++ b/arch/x86/kernel/cpu/mtrr/mtrr.c
@@ -69,7 +69,6 @@  unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
 
 u64 size_or_mask, size_and_mask;
-static bool mtrr_aps_delayed_init;
 
 static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __ro_after_init;
 
@@ -176,7 +175,8 @@  static int mtrr_rendezvous_handler(void *info)
 	if (data->smp_reg != ~0U) {
 		mtrr_if->set(data->smp_reg, data->smp_base,
 			     data->smp_size, data->smp_type);
-	} else if (mtrr_aps_delayed_init || !cpu_online(smp_processor_id())) {
+	} else if ((use_intel() && cache_aps_delayed_init) ||
+		   !cpu_online(smp_processor_id())) {
 		mtrr_if->set_all();
 	}
 	return 0;
@@ -789,7 +789,7 @@  void mtrr_ap_init(void)
 	if (!mtrr_enabled())
 		return;
 
-	if (!use_intel() || mtrr_aps_delayed_init)
+	if (!use_intel())
 		return;
 
 	/*
@@ -823,16 +823,6 @@  void mtrr_save_state(void)
 	smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
 }
 
-void set_mtrr_aps_delayed_init(void)
-{
-	if (!mtrr_enabled())
-		return;
-	if (!use_intel())
-		return;
-
-	mtrr_aps_delayed_init = true;
-}
-
 /*
  * Delayed MTRR initialization for all AP's
  */
@@ -841,16 +831,7 @@  void mtrr_aps_init(void)
 	if (!use_intel() || !mtrr_enabled())
 		return;
 
-	/*
-	 * Check if someone has requested the delay of AP MTRR initialization,
-	 * by doing set_mtrr_aps_delayed_init(), prior to this point. If not,
-	 * then we are done.
-	 */
-	if (!mtrr_aps_delayed_init)
-		return;
-
 	set_mtrr(~0U, 0, 0, 0);
-	mtrr_aps_delayed_init = false;
 }
 
 void mtrr_bp_restore(void)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index bd6c6fd373ae..27d61f73c68a 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1001,10 +1001,7 @@  void __init setup_arch(char **cmdline_p)
 	max_pfn = e820__end_of_ram_pfn();
 
 	/* update e820 for memory not covered by WB MTRRs */
-	if (IS_ENABLED(CONFIG_MTRR))
-		mtrr_bp_init();
-	else
-		pat_disable("PAT support disabled because CONFIG_MTRR is disabled in the kernel.");
+	cache_bp_init();
 
 	if (mtrr_trim_uncached_memory(max_pfn))
 		max_pfn = e820__end_of_ram_pfn();
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5e7f9532a10d..535d73a47062 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -1432,7 +1432,7 @@  void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 	uv_system_init();
 
-	set_mtrr_aps_delayed_init();
+	cache_set_aps_delayed_init();
 
 	smp_quirk_init_udelay();
 
@@ -1443,12 +1443,12 @@  void __init native_smp_prepare_cpus(unsigned int max_cpus)
 
 void arch_thaw_secondary_cpus_begin(void)
 {
-	set_mtrr_aps_delayed_init();
+	cache_set_aps_delayed_init();
 }
 
 void arch_thaw_secondary_cpus_end(void)
 {
-	mtrr_aps_init();
+	cache_aps_init();
 }
 
 /*
@@ -1491,7 +1491,7 @@  void __init native_smp_cpus_done(unsigned int max_cpus)
 
 	nmi_selftest();
 	impress_friends();
-	mtrr_aps_init();
+	cache_aps_init();
 }
 
 static int __initdata setup_possible_cpus = -1;
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index bb176c72891c..21e014715322 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -261,7 +261,7 @@  static void notrace __restore_processor_state(struct saved_context *ctxt)
 	do_fpu_end();
 	tsc_verify_tsc_adjust(true);
 	x86_platform.restore_sched_clock_state();
-	mtrr_bp_restore();
+	cache_bp_restore();
 	perf_restore_debug_store();
 
 	c = &cpu_data(smp_processor_id());