Message ID | 20210309084241.988908275@linutronix.de
State      | New
Series     | tasklets: Replace the spin wait loops and make it RT safe
On 2021-03-09 09:42:10 [+0100], Thomas Gleixner wrote:
> tasklet_unlock_spin_wait() spin waits for the TASKLET_STATE_SCHED bit in
> the tasklet state to be cleared. This works on !RT nicely because the
…

Could you please fold this:

diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 07c7329d21aa7..1c14ccd351091 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -663,15 +663,6 @@ static inline int tasklet_trylock(struct tasklet_struct *t)
 void tasklet_unlock(struct tasklet_struct *t);
 void tasklet_unlock_wait(struct tasklet_struct *t);
 
-/*
- * Do not use in new code. Waiting for tasklets from atomic contexts is
- * error prone and should be avoided.
- */
-static inline void tasklet_unlock_spin_wait(struct tasklet_struct *t)
-{
-	while (test_bit(TASKLET_STATE_RUN, &t->state))
-		cpu_relax();
-}
 #else
 static inline int tasklet_trylock(struct tasklet_struct *t) { return 1; }
 static inline void tasklet_unlock(struct tasklet_struct *t) { }
diff --git a/kernel/softirq.c b/kernel/softirq.c
index f0074f1344402..c9adc5c462485 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -830,8 +830,8 @@ EXPORT_SYMBOL(tasklet_init);
 
 #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 /*
- * Do not use in new code. There is no real reason to invoke this from
- * atomic contexts.
+ * Do not use in new code. Waiting for tasklets from atomic contexts is
+ * error prone and should be avoided.
  */
 void tasklet_unlock_spin_wait(struct tasklet_struct *t)
 {
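For reference, with this fixup folded into the patch below, the spin-wait helper lives only in kernel/softirq.c and reads roughly as follows. This is a sketch assembled from the two diffs in this thread, not a build-tested tree; it assumes the header keeps a plain declaration of tasklet_unlock_spin_wait() for the SMP/PREEMPT_RT case.

    /* kernel/softirq.c, net result once the fixup above is folded in */
    #if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
    /*
     * Do not use in new code. Waiting for tasklets from atomic contexts is
     * error prone and should be avoided.
     */
    void tasklet_unlock_spin_wait(struct tasklet_struct *t)
    {
    	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
    		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
    			/*
    			 * On RT softirq processing is preemptible: briefly
    			 * disabling and re-enabling BH lets a preempted
    			 * softirq or ksoftirqd run, avoiding a live lock.
    			 */
    			local_bh_disable();
    			local_bh_enable();
    		} else {
    			/* !RT: the tasklet runs on another CPU, just spin. */
    			cpu_relax();
    		}
    	}
    }
    EXPORT_SYMBOL(tasklet_unlock_spin_wait);
    #endif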
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -658,7 +658,7 @@ enum
 	TASKLET_STATE_RUN	/* Tasklet is running (SMP only) */
 };
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 static inline int tasklet_trylock(struct tasklet_struct *t)
 {
 	return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -616,6 +616,32 @@ void tasklet_init(struct tasklet_struct
 }
 EXPORT_SYMBOL(tasklet_init);
 
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
+/*
+ * Do not use in new code. There is no real reason to invoke this from
+ * atomic contexts.
+ */
+void tasklet_unlock_spin_wait(struct tasklet_struct *t)
+{
+	while (test_bit(TASKLET_STATE_RUN, &(t)->state)) {
+		if (IS_ENABLED(CONFIG_PREEMPT_RT)) {
+			/*
+			 * Prevent a live lock when current preempted soft
+			 * interrupt processing or prevents ksoftirqd from
+			 * running. If the tasklet runs on a different CPU
+			 * then this has no effect other than doing the BH
+			 * disable/enable dance for nothing.
+			 */
+			local_bh_disable();
+			local_bh_enable();
+		} else {
+			cpu_relax();
+		}
+	}
+}
+EXPORT_SYMBOL(tasklet_unlock_spin_wait);
+#endif
+
 void tasklet_kill(struct tasklet_struct *t)
 {
 	if (in_interrupt())
@@ -629,7 +655,7 @@ void tasklet_kill(struct tasklet_struct
 }
 EXPORT_SYMBOL(tasklet_kill);
 
-#ifdef CONFIG_SMP
+#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RT)
 void tasklet_unlock(struct tasklet_struct *t)
 {
 	smp_mb__before_atomic();
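For context, the spin-waiting variant exists for callers that must wait for a running tasklet callback from atomic (non-sleepable) context, where the sleeping tasklet_unlock_wait() cannot be used. Elsewhere in this series a disable helper for such callers is introduced; the sketch below shows how such a caller would combine the pieces. The helper name tasklet_disable_in_atomic() and the exact barrier placement are assumptions here, not quoted from this mail.

    /* Sketch of an atomic-context disable path (assumed helper name). */
    static inline void tasklet_disable_in_atomic(struct tasklet_struct *t)
    {
    	tasklet_disable_nosync(t);	/* count++ prevents further invocations */
    	tasklet_unlock_spin_wait(t);	/* RT-safe spin until a running callback finishes */
    	smp_mb();
    }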