--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1368,7 +1368,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
/* If another context is idling the device then defer */
if (ctlr->idling) {
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
}
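The bulk of the patch is this one-token change repeated at each call site: `kthread_queue_work()` has always taken a `struct kthread_worker *`, so the embedded worker had to be passed as `&ctlr->kworker`, while the dynamically created worker is already a pointer. A minimal sketch of the before/after pattern, using a hypothetical `my_dev` structure rather than the SPI core's:

```c
#include <linux/kthread.h>

struct my_dev {
	struct kthread_worker *worker;	/* was: struct kthread_worker worker; */
	struct kthread_work work;
};

/* Work function, executed in the worker's kthread context. */
static void my_work_fn(struct kthread_work *work)
{
}

static void my_dev_kick(struct my_dev *md)
{
	/* before: kthread_queue_work(&md->worker, &md->work); */
	kthread_queue_work(md->worker, &md->work);
}
```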
@@ -1382,7 +1382,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
/* Only do teardown in the thread */
if (!in_kthread) {
- kthread_queue_work(&ctlr->kworker,
+ kthread_queue_work(ctlr->kworker,
&ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return;
@@ -1618,7 +1618,7 @@ static void spi_set_thread_rt(struct spi_controller *ctlr)
dev_info(&ctlr->dev,
"will run message pump with realtime priority\n");
- sched_setscheduler(ctlr->kworker_task, SCHED_FIFO, &param);
+ sched_setscheduler(ctlr->kworker->task, SCHED_FIFO, &param);
}
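With `kthread_create_worker()` the backing task is reachable as `ctlr->kworker->task` (`struct kthread_worker` carries a `task` member), so the separately tracked `kworker_task` pointer becomes redundant. A sketch of the realtime-priority pattern on a created worker, with a hypothetical helper name; `sched_setscheduler()` was still available to kernel code at the time of this patch:

```c
#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>	/* struct sched_param */

/* Hypothetical helper mirroring spi_set_thread_rt(). */
static void my_worker_set_rt(struct kthread_worker *worker)
{
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	/* The worker struct owns its task pointer; no shadow copy needed. */
	sched_setscheduler(worker->task, SCHED_FIFO, &param);
}
```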
static int spi_init_queue(struct spi_controller *ctlr)
@@ -1626,13 +1626,12 @@ static int spi_init_queue(struct spi_controller *ctlr)
ctlr->running = false;
ctlr->busy = false;
- kthread_init_worker(&ctlr->kworker);
- ctlr->kworker_task = kthread_run(kthread_worker_fn, &ctlr->kworker,
- "%s", dev_name(&ctlr->dev));
- if (IS_ERR(ctlr->kworker_task)) {
- dev_err(&ctlr->dev, "failed to create message pump task\n");
- return PTR_ERR(ctlr->kworker_task);
+ ctlr->kworker = kthread_create_worker(0, dev_name(&ctlr->dev));
+ if (IS_ERR(ctlr->kworker)) {
+ dev_err(&ctlr->dev, "failed to create message pump kworker\n");
+ return PTR_ERR(ctlr->kworker);
}
+
kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
/*
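`kthread_create_worker()` folds the old three-step sequence (`kthread_init_worker()`, `kthread_run()`, `IS_ERR()` check on the task) into a single call that allocates the worker, spawns its kthread, and returns an `ERR_PTR()` on failure. A sketch of the resulting init path, continuing the hypothetical `my_dev` example; note that the second argument is a printf-style format, so the sketch passes an explicit "%s" (the patch itself passes `dev_name()` directly):

```c
#include <linux/err.h>
#include <linux/kthread.h>

static int my_dev_init_worker(struct my_dev *md, const char *name)
{
	/* Allocates the worker and starts its kthread in one step. */
	md->worker = kthread_create_worker(0, "%s", name);
	if (IS_ERR(md->worker))
		return PTR_ERR(md->worker);

	kthread_init_work(&md->work, my_work_fn);
	return 0;
}
```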
@@ -1716,7 +1715,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
ctlr->cur_msg = NULL;
ctlr->cur_msg_prepared = false;
ctlr->fallback = false;
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
trace_spi_message_done(mesg);
@@ -1742,7 +1741,7 @@ static int spi_start_queue(struct spi_controller *ctlr)
ctlr->cur_msg = NULL;
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
return 0;
}
@@ -1798,8 +1797,7 @@ static int spi_destroy_queue(struct spi_controller *ctlr)
return ret;
}
- kthread_flush_worker(&ctlr->kworker);
- kthread_stop(ctlr->kworker_task);
+ kthread_destroy_worker(ctlr->kworker);
return 0;
}
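`kthread_destroy_worker()` performs a final flush of any queued work, stops the kthread, and frees the worker allocated by `kthread_create_worker()`, so the explicit `kthread_flush_worker()` + `kthread_stop()` pair collapses into one call. The matching teardown for the sketch above:

```c
static void my_dev_destroy_worker(struct my_dev *md)
{
	/*
	 * Replaces the old pair:
	 *	kthread_flush_worker(&md->worker);
	 *	kthread_stop(md->worker_task);
	 * kthread_destroy_worker() does the final flush, stops the
	 * kthread and frees the worker itself.
	 */
	kthread_destroy_worker(md->worker);
	md->worker = NULL;	/* the worker memory is freed above */
}
```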
@@ -1822,7 +1820,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
list_add_tail(&msg->queue, &ctlr->queue);
if (!ctlr->busy && need_pump)
- kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
+ kthread_queue_work(ctlr->kworker, &ctlr->pump_messages);
spin_unlock_irqrestore(&ctlr->queue_lock, flags);
return 0;
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -358,8 +358,7 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @cleanup: frees controller-specific state
* @can_dma: determine whether this controller supports DMA
* @queued: whether this controller is providing an internal message queue
- * @kworker: thread struct for message pump
- * @kworker_task: pointer to task for message pump kworker thread
+ * @kworker: pointer to thread struct for message pump
* @pump_messages: work struct for scheduling work to the message pump
* @queue_lock: spinlock to synchronise access to message queue
* @queue: message queue
@@ -593,8 +592,7 @@ struct spi_controller {
* Over time we expect SPI drivers to be phased over to this API.
*/
bool queued;
- struct kthread_worker kworker;
- struct task_struct *kworker_task;
+ struct kthread_worker *kworker;
struct kthread_work pump_messages;
spinlock_t queue_lock;
struct list_head queue;