@@ -480,10 +480,12 @@ static void execute_on_irq_stack(void *func, unsigned long param1)
*irq_stack_in_use = 1;
}
+#ifndef CONFIG_PREEMPT_RT
void do_softirq_own_stack(void)
{
execute_on_irq_stack(__do_softirq, 0);
}
+#endif
#endif /* CONFIG_IRQSTACKS */
/* ONLY called from entry.S:intr_extint() */
@@ -5,9 +5,10 @@
#include <asm/lowcore.h>
#include <asm/stacktrace.h>
+#ifndef CONFIG_PREEMPT_RT
static inline void do_softirq_own_stack(void)
{
call_on_stack(0, S390_lowcore.async_stack, void, __do_softirq);
}
-
+#endif
#endif /* __ASM_S390_SOFTIRQ_STACK_H */
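
Both arch hunks above (parisc and s390) compile do_softirq_own_stack() out on PREEMPT_RT: the RT softirq implementation processes pending softirqs in task context and never asks the architecture to switch to a dedicated IRQ stack, so the helper would be dead code there. For orientation, a simplified sketch of the !RT-only generic caller in kernel/softirq.c (abridged, not verbatim) looks like this:

#ifndef CONFIG_PREEMPT_RT
asmlinkage __visible void do_softirq(void)
{
	unsigned long flags;

	if (in_interrupt())
		return;

	local_irq_save(flags);
	if (local_softirq_pending())
		/* Only the !RT build ever reaches the arch helper. */
		do_softirq_own_stack();
	local_irq_restore(flags);
}
#endif /* !CONFIG_PREEMPT_RT */
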
@@ -2085,14 +2085,10 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
return;
if (!async && !(hctx->flags & BLK_MQ_F_BLOCKING)) {
- migrate_disable();
if (cpumask_test_cpu(raw_smp_processor_id(), hctx->cpumask)) {
__blk_mq_run_hw_queue(hctx);
- migrate_enable();
return;
}
-
- migrate_enable();
}
kblockd_mod_delayed_work_on(blk_mq_hctx_next_cpu(hctx), &hctx->run_work,
@@ -3345,20 +3345,22 @@ unsigned long wait_task_inactive(struct task_struct *p, unsigned int match_state
* is actually now running somewhere else!
*/
while (task_running(rq, p)) {
+
if (match_state) {
+ bool mismatch = false;
+#ifndef CONFIG_PREEMPT_RT
+ if (READ_ONCE(p->__state) != match_state)
+ mismatch = true;
+#else
unsigned long flags;
- bool missmatch = false;
raw_spin_lock_irqsave(&p->pi_lock, flags);
-#ifdef CONFIG_PREEMPT_RT
- if ((READ_ONCE(p->__state) != match_state) &&
- (READ_ONCE(p->saved_state) != match_state))
-#else
- if (READ_ONCE(p->__state) != match_state)
-#endif
- missmatch = true;
+ if (READ_ONCE(p->__state) != match_state &&
+ READ_ONCE(p->saved_state) != match_state)
+ mismatch = true;
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- if (missmatch)
+#endif
+ if (mismatch)
return 0;
}
cpu_relax();
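
The hunk above reworks the state check in wait_task_inactive(): on PREEMPT_RT a task blocking on a sleeping spinlock stashes its original state in task_struct::saved_state, so the caller's match_state may live in either field, and pi_lock is taken so the two fields are read consistently. Factored out as a standalone predicate, the patched logic reads as follows (hypothetical helper for illustration only; the patch keeps it inline):

/* Hypothetical helper, equivalent to the inline code added above. */
static bool state_mismatch(struct task_struct *p, unsigned int match_state)
{
	bool mismatch = false;
#ifndef CONFIG_PREEMPT_RT
	if (READ_ONCE(p->__state) != match_state)
		mismatch = true;
#else
	unsigned long flags;

	raw_spin_lock_irqsave(&p->pi_lock, flags);
	/* On RT the original state may be parked in ->saved_state. */
	if (READ_ONCE(p->__state) != match_state &&
	    READ_ONCE(p->saved_state) != match_state)
		mismatch = true;
	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
#endif
	return mismatch;
}
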
@@ -2294,8 +2294,16 @@ static int ptrace_stop(int exit_code, int why, unsigned long message,
if (gstop_done && (!current->ptrace || ptrace_reparented(current)))
do_notify_parent_cldstop(current, false, why);
+ /*
+ * Don't want to allow preemption here, because
+ * sys_ptrace() needs this task to be inactive.
+ */
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_disable();
read_unlock(&tasklist_lock);
cgroup_enter_frozen();
+ if (!IS_ENABLED(CONFIG_PREEMPT_RT))
+ preempt_enable_no_resched();
freezable_schedule();
cgroup_leave_frozen(true);
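
The preempt_disable()/preempt_enable_no_resched() pair keeps the tracee from being preempted between dropping tasklist_lock and scheduling, which sys_ptrace() relies on; it is skipped on PREEMPT_RT because cgroup_enter_frozen() takes a spinlock_t, which is a sleeping lock on RT and must not be acquired with preemption disabled. Since IS_ENABLED(CONFIG_PREEMPT_RT) is a compile-time constant, the guards cost nothing at runtime; on an RT kernel the sequence reduces to the following (annotated sketch, not code from the tree):

	/*
	 * CONFIG_PREEMPT_RT=y: IS_ENABLED(CONFIG_PREEMPT_RT) == 1, so both
	 * "if (!IS_ENABLED(...))" branches are constant-folded away.
	 */
	read_unlock(&tasklist_lock);
	cgroup_enter_frozen();		/* may sleep on RT (spinlock_t -> rtmutex) */
	freezable_schedule();
	cgroup_leave_frozen(true);
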
@@ -1 +1 @@
--rt4
+-rt5
@@ -360,6 +360,9 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
unsigned long flags;
struct slab *slab;
void *addr;
+ const bool random_right_allocate = prandom_u32_max(2);
+ const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
+ !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS);
/* Try to obtain a free object. */
raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
@@ -404,7 +407,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
* is that the out-of-bounds accesses detected are deterministic for
* such allocations.
*/
- if (prandom_u32_max(2)) {
+ if (random_right_allocate) {
/* Allocate on the "right" side, re-calculate address. */
meta->addr += PAGE_SIZE - size;
meta->addr = ALIGN_DOWN(meta->addr, cache->align);
@@ -444,7 +447,7 @@ static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t g
if (cache->ctor)
cache->ctor(addr);
- if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
+ if (random_fault)
kfence_protect(meta->addr); /* Random "faults" by protecting the object. */
atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
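
Finally, the kfence change hoists both prandom_u32_max() decisions out of the raw-spinlock-protected allocation path: the RNG may take ordinary spinlock_t locks internally, which on PREEMPT_RT are sleeping locks and must not nest inside a raw_spinlock_t section. The general shape of the fix, with illustrative names only, is:

	unsigned long flags;
	/* Draw randomness first, while no raw spinlock is held. */
	const bool coin = prandom_u32_max(2);

	raw_spin_lock_irqsave(&some_raw_lock, flags);	/* hypothetical lock */
	if (coin)					/* consume the precomputed result only */
		do_right_side_allocation();		/* hypothetical */
	raw_spin_unlock_irqrestore(&some_raw_lock, flags);
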