diff mbox series

kthread: Fix PF_KTHREAD vs to_kthread() race

Message ID YSy7lOd+qB7LXq1n@zn.tnic
State New
Headers show
Series kthread: Fix PF_KTHREAD vs to_kthread() race | expand

Commit Message

Borislav Petkov Aug. 30, 2021, 11:05 a.m. UTC
Hi stable folks,

please queue for 5.10-stable.

See https://bugzilla.kernel.org/show_bug.cgi?id=214159 for more info.

---
Commit 3a7956e25e1d7b3c148569e78895e1f3178122a9 upstream.

The kthread_is_per_cpu() construct relies on only being called on
PF_KTHREAD tasks (per the WARN in to_kthread). This gives rise to the
following usage pattern:

	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))

However, as reported by syzkaller, this is broken. The scenario is:

	CPU0				CPU1 (running p)

	(p->flags & PF_KTHREAD) // true

					begin_new_exec()
					  me->flags &= ~(PF_KTHREAD|...);
	kthread_is_per_cpu(p)
	  to_kthread(p)
	    WARN(!(p->flags & PF_KTHREAD) <-- *SPLAT*

Introduce __to_kthread() that omits the WARN and is sure to check both
values.

Use this to remove the problematic pattern for kthread_is_per_cpu()
and fix a number of other kthread_*() functions that have similar
issues but are currently not used in ways that would expose the
problem.

Notably kthread_func() is only ever called on 'current', while
kthread_probe_data() is only used for PF_WQ_WORKER, which implies the
task is from kthread_create*().

Fixes: ac687e6e8c26 ("kthread: Extract KTHREAD_IS_PER_CPU")
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Valentin Schneider <Valentin.Schneider@arm.com>
Link: https://lkml.kernel.org/r/YH6WJc825C4P0FCK@hirez.programming.kicks-ass.net
[ Drop the balance_push() hunk as it is not needed. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
---
 kernel/kthread.c    | 33 +++++++++++++++++++++++++++------
 kernel/sched/fair.c |  2 +-
 2 files changed, 28 insertions(+), 7 deletions(-)

Comments

Patrick Schaaf Aug. 31, 2021, 6:39 a.m. UTC | #1
On Mon, Aug 30, 2021 at 1:06 PM Borislav Petkov <bp@alien8.de> wrote:
>

> Hi stable folks,

>

> please queue for 5.10-stable.

>

> See https://bugzilla.kernel.org/show_bug.cgi?id=214159 for more info.

>

> ---

> Commit 3a7956e25e1d7b3c148569e78895e1f3178122a9 upstream.

...

Aha. Seconded, and please also consider for 5.4. This looks like it
could fix the WARNING I reported, seen with 5.4.135, which Igor also
saw on 5.10.

best regards
  Patrick
Greg KH Sept. 1, 2021, 9:44 a.m. UTC | #2
On Mon, Aug 30, 2021 at 01:05:56PM +0200, Borislav Petkov wrote:
> Hi stable folks,

> 

> please queue for 5.10-stable.

> 

> See https://bugzilla.kernel.org/show_bug.cgi?id=214159 for more info.


Now queued up, thanks.

greg k-h
Patrick Schaaf Sept. 1, 2021, 11:45 a.m. UTC | #3
On Wed, Sep 1, 2021 at 11:44 AM Greg KH <greg@kroah.com> wrote:
>

> On Mon, Aug 30, 2021 at 01:05:56PM +0200, Borislav Petkov wrote:

> >

> > please queue for 5.10-stable.

> >

> > See https://bugzilla.kernel.org/show_bug.cgi?id=214159 for more info.

>

> Now queued up, thanks.


I just booted another of my prod servers with that applied to
5.10.61; the patch needed a little mangling, attached below. Once more
(as with the 5.4 one), I will report about stability in a few days;
initially it looks fine.

best regards
  Patrick

Tested-By: Patrick Schaaf <bof@bof.de>


diff -purN linux-5.10.61-orig/kernel/kthread.c linux-5.10.61-p/kernel/kthread.c
--- linux-5.10.61-orig/kernel/kthread.c 2021-08-26 14:51:21.000000000 +0200
+++ linux-5.10.61-p/kernel/kthread.c    2021-09-01 09:52:31.056738951 +0200
@@ -84,6 +84,25 @@ static inline struct kthread *to_kthread
        return (__force void *)k->set_child_tid;
 }

+/*
+ * Variant of to_kthread() that doesn't assume @p is a kthread.
+ *
+ * Per construction; when:
+ *
+ *   (p->flags & PF_KTHREAD) && p->set_child_tid
+ *
+ * the task is both a kthread and struct kthread is persistent. However
+ * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
+ * begin_new_exec()).
+ */
+static inline struct kthread *__to_kthread(struct task_struct *p)
+{
+       void *kthread = (__force void *)p->set_child_tid;
+       if (kthread && !(p->flags & PF_KTHREAD))
+               kthread = NULL;
+       return kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
        struct kthread *kthread;
@@ -168,8 +187,9 @@ EXPORT_SYMBOL_GPL(kthread_freezable_shou
  */
 void *kthread_func(struct task_struct *task)
 {
-       if (task->flags & PF_KTHREAD)
-               return to_kthread(task)->threadfn;
+       struct kthread *kthread = __to_kthread(task);
+       if (kthread)
+               return kthread->threadfn;
        return NULL;
 }
 EXPORT_SYMBOL_GPL(kthread_func);
@@ -199,10 +219,11 @@ EXPORT_SYMBOL_GPL(kthread_data);
  */
 void *kthread_probe_data(struct task_struct *task)
 {
-       struct kthread *kthread = to_kthread(task);
+       struct kthread *kthread = __to_kthread(task);
        void *data = NULL;

-       copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+       if (kthread)
+               copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
        return data;
 }

@@ -514,9 +535,9 @@ void kthread_set_per_cpu(struct task_str
        set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 }

-bool kthread_is_per_cpu(struct task_struct *k)
+bool kthread_is_per_cpu(struct task_struct *p)
 {
-       struct kthread *kthread = to_kthread(k);
+       struct kthread *kthread = __to_kthread(p);
        if (!kthread)
                return false;

diff -purN linux-5.10.61-orig/kernel/sched/fair.c
linux-5.10.61-p/kernel/sched/fair.c
--- linux-5.10.61-orig/kernel/sched/fair.c      2021-08-26
14:51:21.000000000 +0200
+++ linux-5.10.61-p/kernel/sched/fair.c 2021-09-01 09:48:06.860333848 +0200
@@ -7569,7 +7569,7 @@ int can_migrate_task(struct task_struct
                return 0;

        /* Disregard pcpu kthreads; they are where they need to be. */
-       if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
+       if (kthread_is_per_cpu(p))
                return 0;

        if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {
diff mbox series

Patch

diff --git a/kernel/kthread.c b/kernel/kthread.c
index 9825cf89c614..508fe5278285 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -84,6 +84,25 @@  static inline struct kthread *to_kthread(struct task_struct *k)
 	return (__force void *)k->set_child_tid;
 }
 
+/*
+ * Variant of to_kthread() that doesn't assume @p is a kthread.
+ *
+ * Per construction; when:
+ *
+ *   (p->flags & PF_KTHREAD) && p->set_child_tid
+ *
+ * the task is both a kthread and struct kthread is persistent. However
+ * PF_KTHREAD on it's own is not, kernel_thread() can exec() (See umh.c and
+ * begin_new_exec()).
+ */
+static inline struct kthread *__to_kthread(struct task_struct *p)
+{
+	void *kthread = (__force void *)p->set_child_tid;
+	if (kthread && !(p->flags & PF_KTHREAD))
+		kthread = NULL;
+	return kthread;
+}
+
 void free_kthread_struct(struct task_struct *k)
 {
 	struct kthread *kthread;
@@ -168,8 +187,9 @@  EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
  */
 void *kthread_func(struct task_struct *task)
 {
-	if (task->flags & PF_KTHREAD)
-		return to_kthread(task)->threadfn;
+	struct kthread *kthread = __to_kthread(task);
+	if (kthread)
+		return kthread->threadfn;
 	return NULL;
 }
 EXPORT_SYMBOL_GPL(kthread_func);
@@ -199,10 +219,11 @@  EXPORT_SYMBOL_GPL(kthread_data);
  */
 void *kthread_probe_data(struct task_struct *task)
 {
-	struct kthread *kthread = to_kthread(task);
+	struct kthread *kthread = __to_kthread(task);
 	void *data = NULL;
 
-	copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
+	if (kthread)
+		copy_from_kernel_nofault(&data, &kthread->data, sizeof(data));
 	return data;
 }
 
@@ -514,9 +535,9 @@  void kthread_set_per_cpu(struct task_struct *k, int cpu)
 	set_bit(KTHREAD_IS_PER_CPU, &kthread->flags);
 }
 
-bool kthread_is_per_cpu(struct task_struct *k)
+bool kthread_is_per_cpu(struct task_struct *p)
 {
-	struct kthread *kthread = to_kthread(k);
+	struct kthread *kthread = __to_kthread(p);
 	if (!kthread)
 		return false;
 
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 262b02d75007..bad97d35684d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7569,7 +7569,7 @@  int can_migrate_task(struct task_struct *p, struct lb_env *env)
 		return 0;
 
 	/* Disregard pcpu kthreads; they are where they need to be. */
-	if ((p->flags & PF_KTHREAD) && kthread_is_per_cpu(p))
+	if (kthread_is_per_cpu(p))
 		return 0;
 
 	if (!cpumask_test_cpu(env->dst_cpu, p->cpus_ptr)) {