
crypto: cryptd - Protect per-CPU resource by disabling BH.

Message ID: YnKWuLQZdPwSdRTh@linutronix.de
State: Accepted
Commit: 91e8bcd7b4da182e09ea19a2c73167345fe14c98
Series: crypto: cryptd - Protect per-CPU resource by disabling BH.

Commit Message

Sebastian Andrzej Siewior May 4, 2022, 3:07 p.m. UTC
The access to cryptd_queue::cpu_queue is synchronized by disabling
preemption in cryptd_enqueue_request() and disabling BH in
cryptd_queue_worker(). This implies that access is allowed from BH.

If cryptd_enqueue_request() is invoked from preemptible context _and_
from soft interrupt, this can lead to list corruption, since the
preemptible-context path is not protected against concurrent access
from soft interrupt.

Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
to ensure BH is always disabled.
Remove preempt_disable() from cryptd_queue_worker() since it is not
needed because local_bh_disable() ensures synchronisation.

Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 crypto/cryptd.c | 23 +++++++++++------------
 1 file changed, 11 insertions(+), 12 deletions(-)
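
[Note: the sketch below is an editorial illustration, not part of the patch.
The demo_* names are invented; local_bh_disable()/local_bh_enable(),
this_cpu_ptr(), DEFINE_PER_CPU(), queue_work_on(), smp_processor_id() and
system_wq are the real kernel APIs involved. It shows the pattern the patch
switches to: both the enqueue path (which may run in process or softirq
context) and the kworker that drains the queue enter a BH-disabled section,
so they cannot interleave on the same CPU. Per-CPU initialisation is omitted
for brevity.]

#include <linux/bottom_half.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/workqueue.h>

struct demo_cpu_queue {
	struct list_head	list;	/* only touched with BH disabled */
	struct work_struct	work;
};

static DEFINE_PER_CPU(struct demo_cpu_queue, demo_queue);

/* Producer: may be called from process context or from soft interrupt. */
static void demo_enqueue(struct list_head *item)
{
	struct demo_cpu_queue *q;

	local_bh_disable();	/* serialize against local softirqs, stay on this CPU */
	q = this_cpu_ptr(&demo_queue);
	list_add_tail(item, &q->list);
	/* BH is still disabled, so smp_processor_id() is stable here. */
	queue_work_on(smp_processor_id(), system_wq, &q->work);
	local_bh_enable();
}

/* Consumer: runs in a kworker, like cryptd_queue_worker(). */
static void demo_worker(struct work_struct *work)
{
	struct demo_cpu_queue *q = container_of(work, struct demo_cpu_queue, work);
	struct list_head *item = NULL;

	local_bh_disable();	/* same BH-off section as the producer */
	if (!list_empty(&q->list)) {
		item = q->list.next;
		list_del(item);
	}
	local_bh_enable();

	if (!item)
		return;
	/* process the dequeued item ... */
}

The old code used get_cpu()/put_cpu() in the producer, which only disables
preemption; a soft interrupt arriving on the same CPU could re-enter the
enqueue path in the middle of the list update. Disabling BH instead closes
that window, on both mainline and PREEMPT_RT.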

Comments

Herbert Xu May 13, 2022, 9:35 a.m. UTC | #1
On Wed, May 04, 2022 at 05:07:36PM +0200, Sebastian Andrzej Siewior wrote:
> The access to cryptd_queue::cpu_queue is synchronized by disabling
> preemption in cryptd_enqueue_request() and disabling BH in
> cryptd_queue_worker(). This implies that access is allowed from BH.
> 
> If cryptd_enqueue_request() is invoked from preemptible context _and_
> from soft interrupt, this can lead to list corruption, since the
> preemptible-context path is not protected against concurrent access
> from soft interrupt.
> 
> Replace get_cpu() in cryptd_enqueue_request() with local_bh_disable()
> to ensure BH is always disabled.
> Remove preempt_disable() from cryptd_queue_worker() since it is not
> needed because local_bh_disable() ensures synchronisation.
> 
> Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
> ---
>  crypto/cryptd.c | 23 +++++++++++------------
>  1 file changed, 11 insertions(+), 12 deletions(-)

Good catch! This bug has been around for a while.  Did you detect
this in the field or was it through code-review?

Patch applied.  Thanks.
Sebastian Andrzej Siewior May 13, 2022, 10:06 a.m. UTC | #2
On 2022-05-13 17:35:31 [+0800], Herbert Xu wrote:
> 
> Good catch! This bug has been around for a while.  Did you detect
> this in the field or was it through code-review?

It caused warnings in RT and we had an RT-specific workaround for quite
some time. Now that we are trying to get RT upstream, I've been looking
at how to solve this differently and came up with this.

> Patch applied.  Thanks.

Thank you.

Sebastian

Patch

diff --git a/crypto/cryptd.c b/crypto/cryptd.c
index a1bea0f4baa88..668095eca0faf 100644
--- a/crypto/cryptd.c
+++ b/crypto/cryptd.c
@@ -39,6 +39,10 @@  struct cryptd_cpu_queue {
 };
 
 struct cryptd_queue {
+	/*
+	 * Protected by disabling BH to allow enqueueing from softinterrupt and
+	 * dequeuing from kworker (cryptd_queue_worker()).
+	 */
 	struct cryptd_cpu_queue __percpu *cpu_queue;
 };
 
@@ -125,28 +129,28 @@  static void cryptd_fini_queue(struct cryptd_queue *queue)
 static int cryptd_enqueue_request(struct cryptd_queue *queue,
 				  struct crypto_async_request *request)
 {
-	int cpu, err;
+	int err;
 	struct cryptd_cpu_queue *cpu_queue;
 	refcount_t *refcnt;
 
-	cpu = get_cpu();
+	local_bh_disable();
 	cpu_queue = this_cpu_ptr(queue->cpu_queue);
 	err = crypto_enqueue_request(&cpu_queue->queue, request);
 
 	refcnt = crypto_tfm_ctx(request->tfm);
 
 	if (err == -ENOSPC)
-		goto out_put_cpu;
+		goto out;
 
-	queue_work_on(cpu, cryptd_wq, &cpu_queue->work);
+	queue_work_on(smp_processor_id(), cryptd_wq, &cpu_queue->work);
 
 	if (!refcount_read(refcnt))
-		goto out_put_cpu;
+		goto out;
 
 	refcount_inc(refcnt);
 
-out_put_cpu:
-	put_cpu();
+out:
+	local_bh_enable();
 
 	return err;
 }
@@ -162,15 +166,10 @@  static void cryptd_queue_worker(struct work_struct *work)
 	cpu_queue = container_of(work, struct cryptd_cpu_queue, work);
 	/*
 	 * Only handle one request at a time to avoid hogging crypto workqueue.
-	 * preempt_disable/enable is used to prevent being preempted by
-	 * cryptd_enqueue_request(). local_bh_disable/enable is used to prevent
-	 * cryptd_enqueue_request() being accessed from software interrupts.
 	 */
 	local_bh_disable();
-	preempt_disable();
 	backlog = crypto_get_backlog(&cpu_queue->queue);
 	req = crypto_dequeue_request(&cpu_queue->queue);
-	preempt_enable();
 	local_bh_enable();
 
 	if (!req)