[redhat-7.3.x] perf/x86/intel/cqm: Make sure events without RMID are always in the tail of cache_groups

Message ID 1493801688-58971-1-git-send-email-wangkefeng.wang@huawei.com
State New

Commit Message

Kefeng Wang May 3, 2017, 8:54 a.m. UTC
From: Zefan Li <lizefan@huawei.com>


euler inclusion
category: bugfix
bugzilla: NA
DTS: DTS2017030810544
CVE: NA

-------------------------------------------------

It is assumed that the head of cache_groups always has a valid RMID,
which isn't true.

Currently, when we deallocate RMIDs from conflicting events, we don't
move those events to the tail of cache_groups, and one of them may
happen to be the head. Another case is that intel_cqm_sched_in_event()
allocates RMIDs for all the events except the head event.
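
(Illustrative sketch, not the kernel source: intel_cqm_sched_in_event()
walks the groups with list_for_each_entry_continue(), which starts at
the entry *after* the one passed in, so a head event holding
INVALID_RMID is never offered the freed RMID. The small userspace
program below mimics that "start after the first element" walk; the
array and names are made up for illustration.)

/* Illustrative only: a walk that starts after the list head, the way
 * list_for_each_entry_continue() does, never examines the head even
 * when the head is the one missing a resource. */
#include <stdio.h>
#include <stdbool.h>

#define INVALID_RMID 0u

struct group {
	const char *name;
	unsigned int rmid;		/* INVALID_RMID means "none assigned" */
};

static struct group groups[] = {
	{ "head", INVALID_RMID },	/* the head lost its RMID */
	{ "ev1",  5 },
	{ "ev2",  7 },
};

/* Hand the freed RMID to the first group that lacks one. */
static bool sched_in_event(unsigned int rmid)
{
	/* starts at index 1: the head (index 0) is skipped */
	for (size_t i = 1; i < sizeof(groups) / sizeof(groups[0]); i++) {
		if (groups[i].rmid != INVALID_RMID)
			continue;
		groups[i].rmid = rmid;
		return true;
	}
	return false;			/* the head was never considered */
}

int main(void)
{
	printf("rmid handed out: %s\n", sched_in_event(9) ? "yes" : "no");
	printf("head rmid: %u\n", groups[0].rmid);	/* still INVALID_RMID */
	return 0;
}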

Besides, there is another bug: we retry rotating without resetting
nr_needed and start in __intel_cqm_rmid_rotate().
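
(Again an illustrative userspace sketch, not the kernel code: when the
retry is done with a goto back to a label, per-pass counters that are
only initialized at their declarations keep the totals from the
previous pass, so on retry nr_needed is over-counted and start may
point at a group that already got an RMID.)

/* Illustrative only: counters feeding a "goto again" retry must be
 * reset on every pass.  With the reset commented out, as in the old
 * code, the second pass reports twice the real number of needy
 * groups. */
#include <stdio.h>

int main(void)
{
	int needs_rmid[] = { 1, 0, 1 };	/* 1 = group still needs an RMID */
	unsigned int nr_needed = 0;	/* buggy: initialized only once */
	int pass = 0;

again:
	/* nr_needed = 0; */		/* the fix resets it here, per pass */
	for (int i = 0; i < 3; i++)
		if (needs_rmid[i])
			nr_needed++;
	printf("pass %d: nr_needed = %u\n", ++pass, nr_needed);

	if (pass < 2)
		goto again;		/* stands in for the rotation retry */

	return 0;
}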

Those bugs combined lead to the following oops.

WARNING: at arch/x86/kernel/cpu/perf_event_intel_cqm.c:186 __put_rmid+0x28/0x80()
...
 [<ffffffff8103a578>] __put_rmid+0x28/0x80
 [<ffffffff8103a74a>] intel_cqm_rmid_rotate+0xba/0x440
 [<ffffffff8109d8cb>] process_one_work+0x17b/0x470
 [<ffffffff8109e69b>] worker_thread+0x11b/0x400
...
BUG: unable to handle kernel NULL pointer dereference at           (null)
...
 [<ffffffff8103a74a>] intel_cqm_rmid_rotate+0xba/0x440
 [<ffffffff8109d8cb>] process_one_work+0x17b/0x470
 [<ffffffff8109e69b>] worker_thread+0x11b/0x400

Cc: stable@vger.kernel.org
Signed-off-by: Zefan Li <lizefan@huawei.com>

[ kf: adjust file path and name ]
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

---
 arch/x86/events/intel/cqm.c | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

-- 
1.8.3.1
Patch

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 0626f29..d15a0a0 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -568,6 +568,12 @@  static bool intel_cqm_sched_in_event(u32 rmid)
 
 	leader = list_first_entry(&cache_groups, struct perf_event,
 				  hw.cqm_groups_entry);
+
+	if (!list_empty(&cache_groups) && !__rmid_valid(leader->hw.cqm_rmid)) {
+		intel_cqm_xchg_rmid(leader, rmid);
+		return true;
+	}
+
 	event = leader;
 
 	list_for_each_entry_continue(event, &cache_groups,
@@ -736,6 +742,7 @@  static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
 {
 	struct perf_event *group, *g;
 	u32 rmid;
+	LIST_HEAD(conflicting_groups);
 
 	lockdep_assert_held(&cache_mutex);
 
@@ -759,6 +766,7 @@  static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
 
 		intel_cqm_xchg_rmid(group, INVALID_RMID);
 		__put_rmid(rmid);
+		list_move_tail(&group->hw.cqm_groups_entry, &conflicting_groups);
 	}
 }
 
@@ -788,9 +796,9 @@  static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
  */
 static bool __intel_cqm_rmid_rotate(void)
 {
-	struct perf_event *group, *start = NULL;
+	struct perf_event *group, *start;
 	unsigned int threshold_limit;
-	unsigned int nr_needed = 0;
+	unsigned int nr_needed;
 	unsigned int nr_available;
 	bool rotated = false;
 
@@ -804,6 +812,8 @@  again:
 	if (list_empty(&cache_groups) && list_empty(&cqm_rmid_limbo_lru))
 		goto out;
 
+	nr_needed = 0;
+	start = NULL;
 	list_for_each_entry(group, &cache_groups, hw.cqm_groups_entry) {
 		if (!__rmid_valid(group->hw.cqm_rmid)) {
 			if (!start)