[RFC,v2,04/11] spi: __spi_pump_messages: Consolidate spin_unlocks to goto target

Message ID 20220615124634.3302867-5-david@protonic.nl
State Accepted
Commit 8711a2ab51dd47b2bcb3880403add25dd7fc7c13
Series Optimize spi_sync path

Commit Message

David Jander June 15, 2022, 12:46 p.m. UTC
__spi_pump_messages() unlocked ctlr->queue_lock inline on each early
bail-out before jumping to the out_unlock label. Move the
spin_unlock_irqrestore() call into the goto target itself so it appears
only once.

Signed-off-by: David Jander <david@protonic.nl>
---
 drivers/spi/spi.c | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)
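
The change is the classic kernel goto-cleanup idiom: every early bail-out
that used to call spin_unlock_irqrestore() inline now falls through to a
single unlock site at the out_unlock label. A minimal sketch of the
resulting control flow (simplified stand-in names, not the actual
drivers/spi/spi.c code):

#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/list.h>

struct example_ctlr {
	struct mutex io_mutex;
	spinlock_t queue_lock;
	struct list_head queue;
	void *cur_msg;
};

static void pump_example(struct example_ctlr *ctlr)
{
	unsigned long flags;

	mutex_lock(&ctlr->io_mutex);
	spin_lock_irqsave(&ctlr->queue_lock, flags);

	/*
	 * Early exits jump to one shared unlock site instead of each
	 * duplicating spin_unlock_irqrestore() before the goto.
	 */
	if (ctlr->cur_msg)
		goto out_unlock;

	if (list_empty(&ctlr->queue))
		goto out_unlock;

	/* ... dequeue and start the next message here ... */

out_unlock:
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
	mutex_unlock(&ctlr->io_mutex);
}

Note that in the real function the successful path drops queue_lock on its
own and returns before reaching out_unlock (visible in the last hunk
below), so only the bail-out paths funnel through the shared exit.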

Patch

diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index cfff2ff96fa0..fa2d091d2854 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1650,10 +1650,8 @@  static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 
 	/* Make sure we are not already running a message */
-	if (ctlr->cur_msg) {
-		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+	if (ctlr->cur_msg)
 		goto out_unlock;
-	}
 
 	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
@@ -1664,10 +1662,8 @@  static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 
 	/* Check if the queue is idle */
 	if (list_empty(&ctlr->queue) || !ctlr->running) {
-		if (!ctlr->busy) {
-			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+		if (!ctlr->busy)
 			goto out_unlock;
-		}
 
 		/* Defer any non-atomic teardown to the thread */
 		if (!in_kthread) {
@@ -1681,7 +1677,6 @@  static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 				kthread_queue_work(ctlr->kworker,
 						   &ctlr->pump_messages);
 			}
-			spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 			goto out_unlock;
 		}
 
@@ -1703,7 +1698,6 @@  static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		spin_lock_irqsave(&ctlr->queue_lock, flags);
 		ctlr->idling = false;
 		ctlr->queue_empty = true;
-		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 		goto out_unlock;
 	}
 
@@ -1727,6 +1721,7 @@  static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 	return;
 
 out_unlock:
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	mutex_unlock(&ctlr->io_mutex);
 }