@@ -53,7 +53,7 @@ static inline struct tpm_tis_tcg_phy *to_tpm_tis_tcg_phy(struct tpm_tis_data *da
#ifdef CONFIG_PREEMPT_RT
/*
* Flushes previous write operations to chip so that a subsequent
- * ioread*()s won't stall a cpu.
+ * ioread*() won't stall a CPU.
*/
static inline void tpm_tis_flush(void __iomem *iobase)
{
@@ -490,21 +490,21 @@ static inline bool console_is_registered(const struct console *con)
hlist_for_each_entry(con, &console_list, node)
#ifdef CONFIG_PRINTK
+extern enum cons_prio cons_atomic_enter(enum cons_prio prio);
+extern void cons_atomic_exit(enum cons_prio prio, enum cons_prio prev_prio);
extern bool console_can_proceed(struct cons_write_context *wctxt);
extern bool console_enter_unsafe(struct cons_write_context *wctxt);
extern bool console_exit_unsafe(struct cons_write_context *wctxt);
extern bool console_try_acquire(struct cons_write_context *wctxt);
extern bool console_release(struct cons_write_context *wctxt);
-extern enum cons_prio cons_atomic_enter(enum cons_prio prio);
-extern void cons_atomic_exit(enum cons_prio prio, enum cons_prio prev_prio);
#else
+static inline enum cons_prio cons_atomic_enter(enum cons_prio prio) { return CONS_PRIO_NONE; }
+static inline void cons_atomic_exit(enum cons_prio prio, enum cons_prio prev_prio) { }
static inline bool console_can_proceed(struct cons_write_context *wctxt) { return false; }
static inline bool console_enter_unsafe(struct cons_write_context *wctxt) { return false; }
static inline bool console_exit_unsafe(struct cons_write_context *wctxt) { return false; }
static inline bool console_try_acquire(struct cons_write_context *wctxt) { return false; }
static inline bool console_release(struct cons_write_context *wctxt) { return false; }
-static inline enum cons_prio cons_atomic_enter(enum cons_prio prio) { return CONS_PRIO_NONE; }
-static inline void cons_atomic_exit(enum cons_prio prio, enum cons_prio prev_prio) { }
#endif
extern int console_set_on_cmdline;
@@ -23,6 +23,7 @@
#include <linux/sched/rt.h>
#include <linux/sched/wake_q.h>
#include <linux/ww_mutex.h>
+#include <linux/blkdev.h>
#include <trace/events/lock.h>
@@ -1700,6 +1701,12 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
return 0;
+ /*
+ * If we are going to sleep and we have plugged IO queued, make sure to
+ * submit it to avoid deadlocks.
+ */
+ blk_flush_plug(current->plug, true);
+
return rt_mutex_slowlock(lock, NULL, state);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
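
For context, a minimal sketch (editor's illustration, not part of the patch) of the situation the new blk_flush_plug() calls address; example_plugged_writer() and its lock are made-up names, and the same reasoning applies to the rwbase and ww_mutex slow paths changed below:

#include <linux/blkdev.h>
#include <linux/rtmutex.h>

static void example_plugged_writer(struct rt_mutex *example_lock)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	/* submit_bio() calls issued here are queued behind current->plug */

	/*
	 * Blocking on the lock may put the task to sleep.  Without the
	 * blk_flush_plug() in the slow path, the plugged IO would stay
	 * unsubmitted while we sleep, which can deadlock if the lock
	 * owner is waiting for that IO to complete.
	 */
	rt_mutex_lock(example_lock);
	/* ... critical section ... */
	rt_mutex_unlock(example_lock);

	blk_finish_plug(&plug);
}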
@@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
int ret;
raw_spin_lock_irq(&rtm->wait_lock);
- /*
- * Allow readers, as long as the writer has not completely
- * acquired the semaphore for write.
- */
- if (atomic_read(&rwb->readers) != WRITER_BIAS) {
- atomic_inc(&rwb->readers);
- raw_spin_unlock_irq(&rtm->wait_lock);
- return 0;
- }
/*
* Call into the slow lock path with the rtmutex->wait_lock
@@ -143,6 +134,14 @@ static __always_inline int rwbase_read_lock(struct rwbase_rt *rwb,
if (rwbase_read_trylock(rwb))
return 0;
+ if (state != TASK_RTLOCK_WAIT) {
+ /*
+ * If we are going to sleep and we have plugged IO queued,
+ * make sure to submit it to avoid deadlocks.
+ */
+ blk_flush_plug(current->plug, true);
+ }
+
return __rwbase_read_lock(rwb, state);
}
@@ -67,6 +67,11 @@ __ww_rt_mutex_lock(struct ww_mutex *lock, struct ww_acquire_ctx *ww_ctx,
ww_mutex_set_context_fastpath(lock, ww_ctx);
return 0;
}
+ /*
+ * If we are going to sleep and we have plugged IO queued, make sure to
+ * submit it to avoid deadlocks.
+ */
+ blk_flush_plug(current->plug, true);
ret = rt_mutex_slowlock(&rtm->rtmutex, ww_ctx, state);
@@ -58,6 +58,16 @@ __printf(1, 0) int vprintk_deferred(const char *fmt, va_list args);
bool printk_percpu_data_ready(void);
+/*
+ * The printk_safe_enter()/_exit() macros mark code blocks that take locks
+ * which would lead to deadlock if an interrupting context called printk()
+ * while the interrupted context was inside such a code block.
+ *
+ * When a CPU is in such a code block, an interrupting context calling
+ * printk() will only log the new message to the lockless ringbuffer and
+ * then trigger console printing using irqwork.
+ */
+
#define printk_safe_enter_irqsave(flags) \
do { \
__printk_safe_enter(&flags); \
@@ -318,10 +318,6 @@ static int __down_trylock_console_sem(unsigned long ip)
int lock_failed;
unsigned long flags;
- /* Semaphores are not NMI-safe. */
- if (in_nmi())
- return 1;
-
/*
* Here and in __up_console_sem() we need to be in safe mode,
* because spindump/WARN/etc from under console ->lock will
@@ -3150,6 +3146,10 @@ void console_unblank(void)
* In that case, attempt a trylock as best-effort.
*/
if (oops_in_progress) {
+ /* Semaphores are not NMI-safe. */
+ if (in_nmi())
+ return;
+
if (down_trylock_console_sem() != 0)
return;
} else
@@ -3210,8 +3210,12 @@ void console_flush_on_panic(enum con_flush_mode mode)
* that messages are flushed out. As this can be called from any
* context and we don't want to get preempted while flushing,
* ensure may_schedule is cleared.
+ *
+ * Since semaphores are not NMI-safe, the console lock must be
+ * ignored if the panic is in NMI context.
*/
- console_trylock();
+ if (!in_nmi())
+ console_trylock();
console_may_schedule = 0;
if (mode == CONSOLE_REPLAY_ALL) {
@@ -3226,7 +3230,8 @@ void console_flush_on_panic(enum con_flush_mode mode)
}
console_srcu_read_unlock(cookie);
}
- console_unlock();
+ if (!in_nmi())
+ console_unlock();
}
/*
@@ -24,6 +24,7 @@ static DEFINE_PER_CPU(struct printk_context, printk_context) = {
/* Can be preempted by NMI. */
void __printk_safe_enter(unsigned long *flags)
{
+ WARN_ON_ONCE(in_nmi());
local_lock_irqsave(&printk_context.cpu, *flags);
this_cpu_inc(printk_context.recursion);
}
@@ -31,6 +32,7 @@ void __printk_safe_enter(unsigned long *flags)
/* Can be preempted by NMI. */
void __printk_safe_exit(unsigned long *flags)
{
+ WARN_ON_ONCE(in_nmi());
this_cpu_dec(printk_context.recursion);
local_unlock_irqrestore(&printk_context.cpu, *flags);
}
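
As a side note (editor's sketch, not part of the patch), use of these helpers normally goes through the printk_safe_enter_irqsave()/printk_safe_exit_irqrestore() wrappers described in the comment added above; example_touch_console_state() is a made-up name, and with the new WARN_ON_ONCE() such a section must not be entered from NMI context:

static void example_touch_console_state(void)
{
	unsigned long flags;

	/*
	 * Mark a section that takes locks the printk() path may also
	 * need: an interrupting printk() then only fills the lockless
	 * ringbuffer and defers console output to irqwork.
	 */
	printk_safe_enter_irqsave(flags);
	/* ... manipulate state shared with the console/printk path ... */
	printk_safe_exit_irqrestore(flags);
}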
@@ -1 +1 @@
--rt4
+-rt5