[ANNOUNCE] v5.14-rc7-rt13

Message ID 20210823160407.5e6xkvnfrz5f3ygw@linutronix.de

Sebastian Andrzej Siewior Aug. 23, 2021, 4:04 p.m. UTC
Dear RT folks!

I'm pleased to announce the v5.14-rc7-rt13 patch set. 

Changes since v5.14-rc7-rt12:

  - A change to the ww_mutex code to avoid a crash on non-RT kernels.
    Reported by Guenter Roeck; a sketch of the changed hunk follows this
    list.

  - The patch for rcu-torture has been updated to address review
    comments.
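
For reference, here is a minimal sketch of the changed waiter setup in
__mutex_lock_common(), trimmed from the delta appended below. The
explanatory comments are mine, not part of the patch, and reflect my
reading of the hunk:

    debug_mutex_lock_common(lock, &waiter);
    waiter.task = current;
    /*
     * Key the assignment on use_ww_ctx (is this a ww_mutex acquisition
     * at all?) rather than on ww_ctx, which may legitimately be NULL
     * for ww_mutex_lock(lock, NULL), so that waiter.ww_ctx does not
     * stay uninitialised in that case.
     */
    if (use_ww_ctx)
            waiter.ww_ctx = ww_ctx;

    lock_contended(&lock->dep_map, ip);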

Known issues
     - netconsole triggers WARN.

     - The "Memory controller" (CONFIG_MEMCG) has been disabled.

     - An RCU and ARM64 warning has been fixed by Valentin Schneider. It is
       still not clear if the RCU-related change is correct.

     - Clark Williams reported issues in i915 (execlists_dequeue_irq()).

     - Clark Williams reported issues with kcov enabled.

     - Valentin Schneider reported a few splats on ARM64, see
          https://lkml.kernel.org/r/20210810134127.1394269-1-valentin.schneider@arm.com/

The delta patch against v5.14-rc7-rt12 is appended below and can be found here:
 
     https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/incr/patch-5.14-rc7-rt12-rt13.patch.xz

You can get this release via the git tree at:

    git://git.kernel.org/pub/scm/linux/kernel/git/rt/linux-rt-devel.git v5.14-rc7-rt13

The RT patch against v5.14-rc7 can be found here:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patch-5.14-rc7-rt13.patch.xz

The split quilt queue is available at:

    https://cdn.kernel.org/pub/linux/kernel/projects/rt/5.14/older/patches-5.14-rc7-rt13.tar.xz

Sebastian

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index 05b68931622d1..2c70213934cd4 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -614,7 +614,7 @@  __mutex_lock_common(struct mutex *lock, unsigned int state, unsigned int subclas
 
 	debug_mutex_lock_common(lock, &waiter);
 	waiter.task = current;
-	if (ww_ctx)
+	if (use_ww_ctx)
 		waiter.ww_ctx = ww_ctx;
 
 	lock_contended(&lock->dep_map, ip);
diff --git a/kernel/rcu/rcutorture.c b/kernel/rcu/rcutorture.c
index f01368a05374d..d2ef535530b10 100644
--- a/kernel/rcu/rcutorture.c
+++ b/kernel/rcu/rcutorture.c
@@ -61,13 +61,10 @@  MODULE_AUTHOR("Paul E. McKenney <paulmck@linux.ibm.com> and Josh Triplett <josh@
 #define RCUTORTURE_RDR_RBH	 0x08	/*  ... rcu_read_lock_bh(). */
 #define RCUTORTURE_RDR_SCHED	 0x10	/*  ... rcu_read_lock_sched(). */
 #define RCUTORTURE_RDR_RCU	 0x20	/*  ... entering another RCU reader. */
-#define RCUTORTURE_RDR_ATOM_BH	 0x40	/*  ... disabling bh while atomic */
-#define RCUTORTURE_RDR_ATOM_RBH	 0x80	/*  ... RBH while atomic */
-#define RCUTORTURE_RDR_NBITS	 8	/* Number of bits defined above. */
+#define RCUTORTURE_RDR_NBITS	 6	/* Number of bits defined above. */
 #define RCUTORTURE_MAX_EXTEND	 \
 	(RCUTORTURE_RDR_BH | RCUTORTURE_RDR_IRQ | RCUTORTURE_RDR_PREEMPT | \
-	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED | \
-	 RCUTORTURE_RDR_ATOM_BH | RCUTORTURE_RDR_ATOM_RBH)
+	 RCUTORTURE_RDR_RBH | RCUTORTURE_RDR_SCHED)
 #define RCUTORTURE_RDR_MAX_LOOPS 0x7	/* Maximum reader extensions. */
 					/* Must be power of two minus one. */
 #define RCUTORTURE_RDR_MAX_SEGS (RCUTORTURE_RDR_MAX_LOOPS + 3)
@@ -1432,11 +1429,7 @@  static void rcutorture_one_extend(int *readstate, int newstate,
 	WARN_ON_ONCE((idxold >> RCUTORTURE_RDR_SHIFT) > 1);
 	rtrsp->rt_readstate = newstate;
 
-	/*
-	 * First, put new protection in place to avoid critical-section gap.
-	 * Disable preemption around the ATOM disables to ensure that
-	 * in_atomic() is true.
-	 */
+	/* First, put new protection in place to avoid critical-section gap. */
 	if (statesnew & RCUTORTURE_RDR_BH)
 		local_bh_disable();
 	if (statesnew & RCUTORTURE_RDR_RBH)
@@ -1447,29 +1440,18 @@  static void rcutorture_one_extend(int *readstate, int newstate,
 		preempt_disable();
 	if (statesnew & RCUTORTURE_RDR_SCHED)
 		rcu_read_lock_sched();
-	preempt_disable();
-	if (statesnew & RCUTORTURE_RDR_ATOM_BH)
-		local_bh_disable();
-	if (statesnew & RCUTORTURE_RDR_ATOM_RBH)
-		rcu_read_lock_bh();
-	preempt_enable();
 	if (statesnew & RCUTORTURE_RDR_RCU)
 		idxnew = cur_ops->readlock() << RCUTORTURE_RDR_SHIFT;
 
 	/*
 	 * Next, remove old protection, in decreasing order of strength
 	 * to avoid unlock paths that aren't safe in the stronger
-	 * context.  Disable preemption around the ATOM enables in
-	 * case the context was only atomic due to IRQ disabling.
+	 * context. Namely: BH can not be enabled with disabled interrupts.
+	 * Additionally PREEMPT_RT requires that BH is enabled in preemptible
+	 * context.
 	 */
-	preempt_disable();
 	if (statesold & RCUTORTURE_RDR_IRQ)
 		local_irq_enable();
-	if (statesold & RCUTORTURE_RDR_ATOM_BH)
-		local_bh_enable();
-	if (statesold & RCUTORTURE_RDR_ATOM_RBH)
-		rcu_read_unlock_bh();
-	preempt_enable();
 	if (statesold & RCUTORTURE_RDR_PREEMPT)
 		preempt_enable();
 	if (statesold & RCUTORTURE_RDR_SCHED)
@@ -1478,7 +1460,6 @@  static void rcutorture_one_extend(int *readstate, int newstate,
 		local_bh_enable();
 	if (statesold & RCUTORTURE_RDR_RBH)
 		rcu_read_unlock_bh();
-
 	if (statesold & RCUTORTURE_RDR_RCU) {
 		bool lockit = !statesnew && !(torture_random(trsp) & 0xffff);
 
@@ -1523,10 +1504,7 @@  rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 	unsigned long randmask2 = randmask1 >> 3;
 	unsigned long preempts = RCUTORTURE_RDR_PREEMPT | RCUTORTURE_RDR_SCHED;
 	unsigned long preempts_irq = preempts | RCUTORTURE_RDR_IRQ;
-	unsigned long nonatomic_bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
-	unsigned long atomic_bhs = RCUTORTURE_RDR_ATOM_BH |
-				   RCUTORTURE_RDR_ATOM_RBH;
-	unsigned long tmp;
+	unsigned long bhs = RCUTORTURE_RDR_BH | RCUTORTURE_RDR_RBH;
 
 	WARN_ON_ONCE(mask >> RCUTORTURE_RDR_SHIFT);
 	/* Mostly only one bit (need preemption!), sometimes lots of bits. */
@@ -1538,9 +1516,8 @@  rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 	/*
 	 * Can't enable bh w/irq disabled.
 	 */
-	tmp = atomic_bhs | nonatomic_bhs;
 	if (mask & RCUTORTURE_RDR_IRQ)
-		mask |= oldmask & tmp;
+		mask |= oldmask & bhs;
 
 	/*
 	 * Ideally these sequences would be detected in debug builds
@@ -1548,30 +1525,11 @@  rcutorture_extend_mask(int oldmask, struct torture_random_state *trsp)
 	 * them on non-RT.
 	 */
 	if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
-		/*
-		 * Can't disable bh in atomic context if bh was already
-		 * disabled by another task on the same CPU. Instead of
-		 * attempting to track this, just avoid disabling bh in atomic
-		 * context.
-		 */
-		mask &= ~atomic_bhs;
-		/*
-		 * Can't release the outermost rcu lock in an irq disabled
-		 * section without preemption also being disabled, if irqs
-		 * had ever been enabled during this RCU critical section
-		 * (could leak a special flag and delay reporting the qs).
-		 */
-		if ((oldmask & RCUTORTURE_RDR_RCU) &&
-		    (mask & RCUTORTURE_RDR_IRQ) &&
-		    !(mask & preempts))
-			mask |= RCUTORTURE_RDR_RCU;
-
-		/* Can't modify non-atomic bh in atomic context */
-		tmp = nonatomic_bhs;
+		/* Can't modify BH in atomic context */
 		if (oldmask & preempts_irq)
-			mask &= ~tmp;
+			mask &= ~bhs;
 		if ((oldmask | mask) & preempts_irq)
-			mask |= oldmask & tmp;
+			mask |= oldmask & bhs;
 	}
 
 	return mask ?: RCUTORTURE_RDR_RCU;
diff --git a/localversion-rt b/localversion-rt
index 6e44e540b927b..9f7d0bdbffb18 100644
--- a/localversion-rt
+++ b/localversion-rt
@@ -1 +1 @@ 
--rt12
+-rt13