@@ -367,9 +367,9 @@ static __always_inline int rdmsrq_safe(u32 msr, u64 *val)
* / \ |
* / \ |
* native_wrmsrq_no_trace() native_write_msr_safe() |
- * / \ |
- * / \ |
- * native_wrmsr() native_write_msr() |
+ * / \ |
+ * / \ |
+ * native_wrmsr_no_trace() native_write_msr() |
* |
* |
* |
@@ -467,7 +467,7 @@ static __always_inline void native_wrmsrq_no_trace(u32 msr, u64 val)
__native_wrmsrq(msr, val, EX_TYPE_WRMSR);
}

-static __always_inline void native_wrmsr(u32 msr, u32 low, u32 high)
+static __always_inline void native_wrmsr_no_trace(u32 msr, u32 low, u32 high)
{
native_wrmsrq_no_trace(msr, (u64)high << 32 | low);
}
diff --git a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
--- a/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
+++ b/arch/x86/kernel/cpu/resctrl/pseudo_lock.c
@@ -495,7 +495,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* pseudo-locked followed by reading of kernel memory to load it
* into the cache.
*/
- native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);
+ native_wrmsr_no_trace(MSR_IA32_PQR_ASSOC, rmid_p, plr->closid);

/*
* Cache was flushed earlier. Now access kernel memory to read it
@@ -532,7 +532,7 @@ int resctrl_arch_pseudo_lock_fn(void *_plr)
* Critical section end: restore closid with capacity bitmask that
* does not overlap with pseudo-locked region.
*/
- native_wrmsr(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);
+ native_wrmsr_no_trace(MSR_IA32_PQR_ASSOC, rmid_p, closid_p);

/* Re-enable the hardware prefetcher(s) */
wrmsrq(MSR_MISC_FEATURE_CONTROL, saved_msr);
native_wrmsr() doesn't do tracing and thus can be used in noinstr context;
rename it to native_wrmsr_no_trace() to make that explicit.

Signed-off-by: Xin Li (Intel) <xin@zytor.com>
---
 arch/x86/include/asm/msr.h                | 8 ++++----
 arch/x86/kernel/cpu/resctrl/pseudo_lock.c | 4 ++--
 2 files changed, 6 insertions(+), 6 deletions(-)
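For illustration only (not part of this patch): a minimal sketch of the kind
of noinstr call site the new name is meant to make self-documenting. The
helper function below is made up for the example; only
native_wrmsr_no_trace() and MSR_IA32_PQR_ASSOC come from the patch above.

#include <asm/msr.h>

/* Hypothetical noinstr helper: must not call traced/instrumented code. */
static noinstr void example_write_pqr_assoc(u32 rmid, u32 closid)
{
	/*
	 * The _no_trace variant issues WRMSR without the MSR write
	 * tracepoint, so it is safe to call from noinstr context.
	 * Low 32 bits carry the RMID, high 32 bits the CLOSID, as in
	 * the pseudo_lock.c call sites above.
	 */
	native_wrmsr_no_trace(MSR_IA32_PQR_ASSOC, rmid, closid);
}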