@@ -931,7 +931,7 @@ unsigned long get_wchan(struct task_stru
unsigned long start, bottom, top, sp, fp, ip, ret = 0;
int count = 0;
- if (p == current || p->state == TASK_RUNNING)
+ if (p == current || task_is_running(p))
return 0;
if (!try_get_task_stack(p))
@@ -975,7 +975,7 @@ unsigned long get_wchan(struct task_stru
goto out;
}
fp = READ_ONCE_NOCHECK(*(unsigned long *)fp);
- } while (count++ < 16 && p->state != TASK_RUNNING);
+ } while (count++ < 16 && !task_is_running(p));
out:
put_task_stack(p);
@@ -3926,7 +3926,7 @@ int blk_poll(struct request_queue *q, bl
if (signal_pending_state(state, current))
__set_current_state(TASK_RUNNING);
- if (current->state == TASK_RUNNING)
+ if (task_is_running(current))
return 1;
if (ret < 0 || !spin)
break;
@@ -113,6 +113,8 @@ struct task_group;
__TASK_TRACED | EXIT_DEAD | EXIT_ZOMBIE | \
TASK_PARKED)
+#define task_is_running(task) (READ_ONCE((task)->state) == TASK_RUNNING)
+
#define task_is_traced(task) ((task->state & __TASK_TRACED) != 0)
#define task_is_stopped(task) ((task->state & __TASK_STOPPED) != 0)
@@ -760,7 +760,7 @@ static void lockdep_print_held_locks(str
* It's not reliable to print a task's held locks if it's not sleeping
* and it's not the current task.
*/
- if (p->state == TASK_RUNNING && p != current)
+ if (p != current && task_is_running(p))
return;
for (i = 0; i < depth; i++) {
printk(" #%d: ", i);
@@ -2768,7 +2768,7 @@ EXPORT_SYMBOL_GPL(rcu_bind_current_to_no
#ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
{
- return tsp && tsp->state == TASK_RUNNING && !tsp->on_cpu ? "!" : "";
+ return tsp && task_is_running(tsp) && !tsp->on_cpu ? "!" : "";
}
#else // #ifdef CONFIG_SMP
static char *show_rcu_should_be_on_cpu(struct task_struct *tsp)
@@ -5989,7 +5989,7 @@ static inline void sched_submit_work(str
{
unsigned int task_flags;
- if (!tsk->state)
+ if (task_is_running(tsk))
return;
task_flags = tsk->flags;
@@ -7964,7 +7964,7 @@ int __sched yield_to(struct task_struct
if (curr->sched_class != p->sched_class)
goto out_unlock;
- if (task_running(p_rq, p) || p->state)
+ if (task_running(p_rq, p) || !task_is_running(p))
goto out_unlock;
yielded = curr->sched_class->yield_to_task(rq, p);
@@ -8167,7 +8167,7 @@ void sched_show_task(struct task_struct
pr_info("task:%-15.15s state:%c", p->comm, task_state_to_char(p));
- if (p->state == TASK_RUNNING)
+ if (task_is_running(p))
pr_cont(" running task ");
#ifdef CONFIG_DEBUG_STACK_USAGE
free = stack_not_used(p);
@@ -217,7 +217,7 @@ static inline void sched_info_depart(str
rq_sched_info_depart(rq, delta);
- if (t->state == TASK_RUNNING)
+ if (task_is_running(t))
sched_info_enqueue(rq, t);
}
@@ -4719,7 +4719,7 @@ void kdb_send_sig(struct task_struct *t,
}
new_t = kdb_prev_t != t;
kdb_prev_t = t;
- if (t->state != TASK_RUNNING && new_t) {
+ if (!task_is_running(t) && new_t) {
spin_unlock(&t->sighand->siglock);
kdb_printf("Process is not RUNNING, sending a signal from "
"kdb risks deadlock\n"
@@ -92,8 +92,7 @@ static bool ksoftirqd_running(unsigned l
if (pending & SOFTIRQ_NOW_MASK)
return false;
- return tsk && (tsk->state == TASK_RUNNING) &&
- !__kthread_should_park(tsk);
+ return tsk && task_is_running(tsk) && !__kthread_should_park(tsk);
}
#ifdef CONFIG_TRACE_IRQFLAGS
@@ -1955,7 +1955,7 @@ static inline bool is_via_compact_memory
static bool kswapd_is_running(pg_data_t *pgdat)
{
- return pgdat->kswapd && (pgdat->kswapd->state == TASK_RUNNING);
+ return pgdat->kswapd && task_is_running(pgdat->kswapd);
}
/*
Replace a bunch of 'p->state == TASK_RUNNING' with a new helper:
task_is_running(p).

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/kernel/process.c | 4 ++--
 block/blk-mq.c            | 2 +-
 include/linux/sched.h     | 2 ++
 kernel/locking/lockdep.c  | 2 +-
 kernel/rcu/tree_plugin.h  | 2 +-
 kernel/sched/core.c       | 6 +++---
 kernel/sched/stats.h      | 2 +-
 kernel/signal.c           | 2 +-
 kernel/softirq.c          | 3 +--
 mm/compaction.c           | 2 +-
 10 files changed, 14 insertions(+), 13 deletions(-)
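Side note, not part of the patch: the sketch below is a standalone userspace
mock that shows how the new helper reads at a call site compared to the
open-coded '->state == TASK_RUNNING' checks it replaces. The mock
task_struct, the describe() function, and the userspace READ_ONCE()
approximation are illustrative only; in the kernel TASK_RUNNING is 0 and
READ_ONCE() comes from <linux/compiler.h>.

/*
 * Standalone userspace sketch (NOT kernel code): mocks just enough of
 * task_struct to show the helper introduced by this patch.  READ_ONCE()
 * is approximated with a volatile cast, which is how the real macro
 * behaves for a plain long-sized load.
 */
#include <stdio.h>

#define TASK_RUNNING 0x0000

#define READ_ONCE(x) (*(const volatile typeof(x) *)&(x))

struct task_struct {
	long state;	/* 0 runnable, <0 unrunnable, >0 stopped */
};

/* The helper added to include/linux/sched.h, applied to the mock struct. */
#define task_is_running(task) (READ_ONCE((task)->state) == TASK_RUNNING)

static const char *describe(struct task_struct *p)
{
	/* Before the patch this would read: p->state == TASK_RUNNING */
	return task_is_running(p) ? "running" : "not running";
}

int main(void)
{
	struct task_struct a = { .state = TASK_RUNNING };
	struct task_struct b = { .state = 1 };	/* e.g. a sleeping state */

	printf("a: %s\n", describe(&a));
	printf("b: %s\n", describe(&b));
	return 0;
}

Besides being shorter, the helper makes the concurrency explicit: ->state can
change under the caller, and the READ_ONCE() in the macro forces a single,
untorn load where the old open-coded comparisons were plain reads.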