diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -43,6 +43,7 @@
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
+#include <linux/workqueue.h>
#define PDC_SUCCESS 0
@@ -293,8 +294,8 @@ struct pdc_state {
unsigned int pdc_irq;
- /* tasklet for deferred processing after DMA rx interrupt */
- struct tasklet_struct rx_tasklet;
+ /* work for deferred processing after DMA rx interrupt */
+ struct work_struct rx_work;
/* Number of bytes of receive status prior to each rx frame */
u32 rx_status_len;
@@ -952,18 +953,18 @@ static irqreturn_t pdc_irq_handler(int irq, void *data)
iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET);
/* Wakeup IRQ thread */
- tasklet_schedule(&pdcs->rx_tasklet);
+ queue_work(system_bh_wq, &pdcs->rx_work);
return IRQ_HANDLED;
}
/**
- * pdc_tasklet_cb() - Tasklet callback that runs the deferred processing after
+ * pdc_work_cb() - Work callback that runs the deferred processing after
* a DMA receive interrupt. Reenables the receive interrupt.
* @t: Pointer to the Altera sSGDMA channel structure
*/
-static void pdc_tasklet_cb(struct tasklet_struct *t)
+static void pdc_work_cb(struct work_struct *t)
{
- struct pdc_state *pdcs = from_tasklet(pdcs, t, rx_tasklet);
+ struct pdc_state *pdcs = from_work(pdcs, t, rx_work);
pdc_receive(pdcs);
@@ -1577,8 +1578,8 @@ static int pdc_probe(struct platform_device *pdev)
pdc_hw_init(pdcs);
- /* Init tasklet for deferred DMA rx processing */
- tasklet_setup(&pdcs->rx_tasklet, pdc_tasklet_cb);
+ /* Init work for deferred DMA rx processing */
+ INIT_WORK(&pdcs->rx_work, pdc_work_cb);
err = pdc_interrupts_init(pdcs);
if (err)
@@ -1595,7 +1596,7 @@ static int pdc_probe(struct platform_device *pdev)
return PDC_SUCCESS;
cleanup_buf_pool:
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
dma_pool_destroy(pdcs->rx_buf_pool);
cleanup_ring_pool:
@@ -1611,7 +1612,7 @@ static void pdc_remove(struct platform_device *pdev)
pdc_free_debugfs();
- tasklet_kill(&pdcs->rx_tasklet);
+ cancel_work_sync(&pdcs->rx_work);
pdc_hw_disable(pdcs);
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
--- a/drivers/mailbox/imx-mailbox.c
+++ b/drivers/mailbox/imx-mailbox.c
@@ -21,6 +21,7 @@
#include <linux/pm_runtime.h>
#include <linux/suspend.h>
#include <linux/slab.h>
+#include <linux/workqueue.h>
#include "mailbox.h"
@@ -80,7 +81,7 @@ struct imx_mu_con_priv {
char irq_desc[IMX_MU_CHAN_NAME_SIZE];
enum imx_mu_chan_type type;
struct mbox_chan *chan;
- struct tasklet_struct txdb_tasklet;
+ struct work_struct txdb_work;
};
struct imx_mu_priv {
@@ -232,7 +233,7 @@ static int imx_mu_generic_tx(struct imx_mu_priv *priv,
break;
case IMX_MU_TYPE_TXDB:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
break;
case IMX_MU_TYPE_TXDB_V2:
imx_mu_xcr_rmw(priv, IMX_MU_GCR, IMX_MU_xCR_GIRn(priv->dcfg->type, cp->idx), 0);
@@ -420,7 +421,7 @@ static int imx_mu_seco_tx(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp,
}
/* Simulate hack for mbox framework */
- tasklet_schedule(&cp->txdb_tasklet);
+ queue_work(system_bh_wq, &cp->txdb_work);
break;
default:
@@ -484,9 +485,9 @@ static int imx_mu_seco_rxdb(struct imx_mu_priv *priv, struct imx_mu_con_priv *cp
return err;
}
-static void imx_mu_txdb_tasklet(unsigned long data)
+static void imx_mu_txdb_work(struct work_struct *t)
{
- struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
+ struct imx_mu_con_priv *cp = from_work(cp, t, txdb_work);
mbox_chan_txdone(cp->chan, 0);
}
@@ -570,8 +571,7 @@ static int imx_mu_startup(struct mbox_chan *chan)
if (cp->type == IMX_MU_TYPE_TXDB) {
/* Tx doorbell don't have ACK support */
- tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
- (unsigned long)cp);
+ INIT_WORK(&cp->txdb_work, imx_mu_txdb_work);
return 0;
}
@@ -615,7 +615,7 @@ static void imx_mu_shutdown(struct mbox_chan *chan)
}
if (cp->type == IMX_MU_TYPE_TXDB) {
- tasklet_kill(&cp->txdb_tasklet);
+ cancel_work_sync(&cp->txdb_work);
pm_runtime_put_sync(priv->dev);
return;
}
The only generic interface for deferring work to the BH (bottom-half) context is the tasklet; however, tasklets are marked deprecated and have some design flaws. To replace them, BH workqueue support was recently added. A BH workqueue behaves like a regular workqueue except that its work items are executed in the BH context.

This patch converts drivers/mailbox/* from tasklet to BH workqueue.

Based on the work done by Tejun Heo <tj@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10

Signed-off-by: Allen Pais <allen.lkml@gmail.com>
---
 drivers/mailbox/bcm-pdc-mailbox.c | 21 +++++++++++----------
 drivers/mailbox/imx-mailbox.c     | 16 ++++++++--------
 2 files changed, 19 insertions(+), 18 deletions(-)
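
For reference, a minimal sketch of the conversion pattern both hunks follow. The struct foo_dev, the foo_* functions and foo_receive() are hypothetical placeholders, not taken from either driver; only work_struct, INIT_WORK(), queue_work(), system_bh_wq, from_work() and cancel_work_sync() are the actual kernel APIs this patch relies on.

    #include <linux/interrupt.h>
    #include <linux/workqueue.h>

    /* Hypothetical driver state; only the fields needed for the pattern. */
    struct foo_dev {
    	void __iomem *regs;
    	struct work_struct rx_work;	/* was: struct tasklet_struct rx_tasklet */
    };

    /* Runs in BH context, just like the tasklet handler it replaces. */
    static void foo_rx_work_cb(struct work_struct *t)
    {
    	struct foo_dev *fd = from_work(fd, t, rx_work);	/* container_of() helper */

    	/* deferred rx processing, e.g. foo_receive(fd); */
    }

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
    	struct foo_dev *fd = data;

    	queue_work(system_bh_wq, &fd->rx_work);	/* was: tasklet_schedule() */
    	return IRQ_HANDLED;
    }

    static void foo_setup(struct foo_dev *fd)
    {
    	INIT_WORK(&fd->rx_work, foo_rx_work_cb);	/* was: tasklet_setup() */
    }

    static void foo_teardown(struct foo_dev *fd)
    {
    	cancel_work_sync(&fd->rx_work);	/* was: tasklet_kill() */
    }

Work queued on system_bh_wq executes in softirq (BH) context, so the callback keeps the same no-sleeping rules as the tasklet it replaces; cancel_work_sync() takes over tasklet_kill()'s role on the teardown paths and, like it, must be called from process context.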