@@ -44,7 +44,7 @@ static inline unsigned int brs_to(int idx)
static __always_inline void set_debug_extn_cfg(u64 val)
{
/* bits[4:3] must always be set to 11b */
- __wrmsr(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3, val >> 32);
+ native_wrmsrq(MSR_AMD_DBG_EXTN_CFG, val | 3ULL << 3);
}

static __always_inline u64 get_debug_extn_cfg(void)
@@ -214,7 +214,7 @@ static inline void native_apic_msr_write(u32 reg, u32 v)

static inline void native_apic_msr_eoi(void)
{
- __wrmsr(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK, 0);
+ native_wrmsrq(APIC_BASE_MSR + (APIC_EOI >> 4), APIC_EOI_ACK);
}

static inline u32 native_apic_msr_read(u32 reg)
@@ -149,10 +149,12 @@ static inline u64 native_read_msr_safe(u32 msr, int *err)

static inline void notrace
native_write_msr(u32 msr, u32 low, u32 high)
{
- __wrmsr(msr, low, high);
+ u64 val = (u64)high << 32 | low;
+
+ native_wrmsrq(msr, val);
if (tracepoint_enabled(write_msr))
- do_trace_write_msr(msr, ((u64)high << 32 | low), 0);
+ do_trace_write_msr(msr, val, 0);
}

/* Can be uninlined because referenced by paravirt */
@@ -1306,7 +1306,7 @@ static noinstr bool mce_check_crashing_cpu(void)
}

if (mcgstatus & MCG_STATUS_RIPV) {
- __wrmsr(MSR_IA32_MCG_STATUS, 0, 0);
+ native_wrmsrq(MSR_IA32_MCG_STATUS, 0);
return true;
}
}
@@ -483,7 +483,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* cache.
*/
saved_msr = __rdmsr(MSR_MISC_FEATURE_CONTROL);
- __wrmsr(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits, 0x0);
+ native_wrmsrq(MSR_MISC_FEATURE_CONTROL, prefetch_disable_bits);
closid_p = this_cpu_read(pqr_state.cur_closid);
rmid_p = this_cpu_read(pqr_state.cur_rmid);
mem_r = plr->kmem;
@@ -495,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);

/*
* Cache was flushed earlier. Now access kernel memory to read it
@@ -532,7 +532,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
*/
- __wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+ native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);

/* Re-enable the hardware prefetcher(s) */
wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
__wrmsr() is the lowest-level primitive MSR write API, and its direct
use is NOT preferred.  Use its wrapper function native_wrmsrq()
instead.

No functional change intended.

This change also prepares for using the alternatives mechanism to
access MSRs: uses of native_wrmsr{,q}() don't need to change, but how
they perform MSR operations is binary patched at boot time, depending
on which MSR instructions are available.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
---
Change in v2:
* Use native_wrmsr() where the natural [rmid_p, closid_p] low/high
  parameter pair can be used, without the shift-uglification (Ingo).
---
 arch/x86/events/amd/brs.c                 | 2 +-
 arch/x86/include/asm/apic.h               | 2 +-
 arch/x86/include/asm/msr.h                | 6 ++++--
 arch/x86/kernel/cpu/mce/core.c            | 2 +-
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 6 +++---
 5 files changed, 10 insertions(+), 8 deletions(-)
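For context on the wrapper layering the log refers to: native_wrmsrq()
and native_wrmsr() are thin inlines over the __wrmsr() primitive. A
minimal sketch of that layering follows; it is an illustration of the
shape of the msr.h definitions this series targets, not the verbatim
in-tree code:

/*
 * Illustrative sketch, not verbatim from arch/x86/include/asm/msr.h:
 * native_wrmsrq() splits a u64 into the EAX/EDX halves that the
 * __wrmsr() primitive takes, and native_wrmsr() composes a u64 from a
 * low/high pair and defers to native_wrmsrq().
 */
static __always_inline void native_wrmsrq(u32 msr, u64 val)
{
	__wrmsr(msr, (u32)val, (u32)(val >> 32));
}

static __always_inline void native_wrmsr(u32 msr, u32 low, u32 high)
{
	native_wrmsrq(msr, (u64)high << 32 | low);
}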
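To see what the v2 change note buys at the MSR_IA32_PQR_ASSOC call
sites: the RMID naturally lands in the low half of that MSR and the
CLOSID in the high half, so the two-argument form avoids composing the
u64 by hand. Roughly, the two equivalent spellings are:

/* v2: natural low/high form, as used in pseudo_lock.c above */
native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);

/* equivalent single-u64 form, with the shift-uglification avoided above */
native_wrmsrq(MSR_IA32_PQR_ASSOC, (u64)plr->closid << 32 | rmid_p);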