[4.19,015/247] jump_label/lockdep: Assert we hold the hotplug lock for _cpuslocked() operations

Message ID 20210301161032.435478568@linuxfoundation.org
State New
Series None

Commit Message

Greg KH March 1, 2021, 4:10 p.m. UTC
From: Peter Zijlstra <peterz@infradead.org>

commit cb538267ea1e9e025ec692577c9ae75797261889 upstream.

Weirdly we seem to have forgotten this...

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Will McVicker <willmcvicker@google.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
 kernel/jump_label.c |    5 +++++
 1 file changed, 5 insertions(+)
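
A minimal usage sketch of the calling convention these assertions enforce; my_key and example_enable() are made-up names for illustration only, not part of this patch. The _cpuslocked() variants expect the caller to already hold the CPU hotplug lock for reading, which lockdep_assert_cpus_held() now checks when lockdep is enabled:

  #include <linux/cpu.h>
  #include <linux/jump_label.h>

  static DEFINE_STATIC_KEY_FALSE(my_key);

  static void example_enable(void)
  {
  	cpus_read_lock();                          /* hotplug lock held for reading */
  	static_branch_enable_cpuslocked(&my_key);  /* satisfies the new assertion */
  	cpus_read_unlock();
  }

Callers that do not hold the lock themselves keep using the plain static_key_enable()/static_key_slow_inc() variants, which take and drop the hotplug lock internally.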
Patch

--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -83,6 +83,7 @@  void static_key_slow_inc_cpuslocked(stru
 	int v, v1;
 
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	/*
 	 * Careful if we get concurrent static_key_slow_inc() calls;
@@ -128,6 +129,7 @@  EXPORT_SYMBOL_GPL(static_key_slow_inc);
 void static_key_enable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	if (atomic_read(&key->enabled) > 0) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
@@ -158,6 +160,7 @@  EXPORT_SYMBOL_GPL(static_key_enable);
 void static_key_disable_cpuslocked(struct static_key *key)
 {
 	STATIC_KEY_CHECK_USE(key);
+	lockdep_assert_cpus_held();
 
 	if (atomic_read(&key->enabled) != 1) {
 		WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
@@ -183,6 +186,8 @@  static void __static_key_slow_dec_cpuslo
 					   unsigned long rate_limit,
 					   struct delayed_work *work)
 {
+	lockdep_assert_cpus_held();
+
 	/*
 	 * The negative count check is valid even when a negative
 	 * key->enabled is in use by static_key_slow_inc(); a