@@ -2139,6 +2139,7 @@ static int rcu_nocb_toggle(void *arg)
 	do {
 		r = torture_random(&rand);
 		cpu = (r >> 1) % (maxcpu + 1);
+		cpus_read_lock();
 		if (r & 0x1) {
 			rcu_nocb_cpumask_update(cpumask_of(cpu), true);
 			atomic_long_inc(&n_nocb_offload);
@@ -2146,6 +2147,7 @@ static int rcu_nocb_toggle(void *arg)
 			rcu_nocb_cpumask_update(cpumask_of(cpu), false);
 			atomic_long_inc(&n_nocb_deoffload);
 		}
+		cpus_read_unlock();
 		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
 		set_current_state(TASK_INTERRUPTIBLE);
 		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
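For reference, the rcutorture toggle loop reads as follows once the two hunks above are applied. This is reconstructed from the diff; the surrounding declarations are elided, and the closing !torture_must_stop() condition is assumed from rcutorture's usual loop pattern rather than shown in the hunks. The point of the change is that cpus_read_lock() now brackets both the offload and the deoffload paths, holding off CPU hotplug for the full update:

	do {
		r = torture_random(&rand);
		cpu = (r >> 1) % (maxcpu + 1);
		cpus_read_lock();	/* Hold off CPU hotplug across the toggle. */
		if (r & 0x1) {
			rcu_nocb_cpumask_update(cpumask_of(cpu), true);
			atomic_long_inc(&n_nocb_offload);
		} else {
			rcu_nocb_cpumask_update(cpumask_of(cpu), false);
			atomic_long_inc(&n_nocb_deoffload);
		}
		cpus_read_unlock();
		toggle_delay = torture_random(&rand) % toggle_fuzz + toggle_interval;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_hrtimeout(&toggle_delay, HRTIMER_MODE_REL);
		/* ... remainder of the loop body elided ... */
	} while (!torture_must_stop());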
@@ -1301,12 +1301,13 @@ int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
 	int err_cpu;
 	cpumask_var_t saved_nocb_mask;
 
+	lockdep_assert_cpus_held();
+
 	if (!alloc_cpumask_var(&saved_nocb_mask, GFP_KERNEL))
 		return -ENOMEM;
 
 	cpumask_copy(saved_nocb_mask, rcu_nocb_mask);
 
-	cpus_read_lock();
 	mutex_lock(&rcu_state.barrier_mutex);
 	for_each_cpu(cpu, cpumask) {
 		if (offload) {
@@ -1340,7 +1341,6 @@ int rcu_nocb_cpumask_update(const struct cpumask *cpumask, bool offload)
 	}
 
 	mutex_unlock(&rcu_state.barrier_mutex);
-	cpus_read_unlock();
 
 	free_cpumask_var(saved_nocb_mask);
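Taken together, the hunks change rcu_nocb_cpumask_update()'s locking contract: it no longer takes the CPU-hotplug read lock itself, so every caller must wrap the call in cpus_read_lock()/cpus_read_unlock(), and lockdep_assert_cpus_held() enforces this when lockdep is enabled. A minimal caller sketch under that contract (example_nocb_offload_cpu() is a hypothetical helper, not part of the patch):

	/* Hypothetical example: offload one CPU's RCU callbacks under the new rule. */
	static int example_nocb_offload_cpu(int cpu)
	{
		int err;

		cpus_read_lock();	/* Callee now asserts this lock is held. */
		err = rcu_nocb_cpumask_update(cpumask_of(cpu), true);
		cpus_read_unlock();

		return err;
	}

One upside of pushing the lock to the callers is that a caller can batch several mask updates, or combine an update with other hotplug-sensitive work, under a single cpus_read_lock() critical section instead of acquiring the lock once per call.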