From patchwork Tue Sep 12 10:14:07 2023
X-Patchwork-Submitter: Herve Codina <herve.codina@bootlin.com>
X-Patchwork-Id: 722345
From: Herve Codina <herve.codina@bootlin.com>
To: Herve Codina, "David S. Miller", Eric Dumazet, Jakub Kicinski,
    Paolo Abeni, Andrew Lunn, Rob Herring, Krzysztof Kozlowski,
    Conor Dooley, Lee Jones, Linus Walleij, Qiang Zhao, Li Yang,
    Liam Girdwood, Mark Brown, Jaroslav Kysela, Takashi Iwai,
    Shengjiu Wang, Xiubo Li, Fabio Estevam, Nicolin Chen,
    Christophe Leroy, Randy Dunlap
Cc: netdev@vger.kernel.org, linuxppc-dev@lists.ozlabs.org,
    devicetree@vger.kernel.org, linux-kernel@vger.kernel.org,
    linux-gpio@vger.kernel.org, linux-arm-kernel@lists.infradead.org,
    alsa-devel@alsa-project.org, Simon Horman, Christophe JAILLET,
    Thomas Petazzoni
Subject: [PATCH v5 20/31] soc: fsl: cpm1: qmc: Handle timeslot entries at channel start() and stop()
Date: Tue, 12 Sep 2023 12:14:07 +0200
Message-ID: <20230912101407.225677-1-herve.codina@bootlin.com>
X-Mailer: git-send-email 2.41.0
In-Reply-To: <20230912081527.208499-1-herve.codina@bootlin.com>
References: <20230912081527.208499-1-herve.codina@bootlin.com>
X-Mailing-List: linux-gpio@vger.kernel.org

In order to support runtime timeslot route changes, enable the channel
timeslot entries at channel start() and disable them at channel stop().
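
[Editor's illustration, not part of the submission: a minimal consumer-side
sketch of the sequence this patch enables. It assumes only the existing
qmc_chan_stop()/qmc_chan_start() API and the QMC_CHAN_READ/QMC_CHAN_WRITE
direction flags from <soc/fsl/qe/qmc.h>; the routing-update step itself is
outside the scope of this patch and is left as a comment.]

#include <soc/fsl/qe/qmc.h>

/*
 * Illustration only: re-route a running channel. With this patch,
 * qmc_chan_stop() disables the channel's TSA timeslot entries and
 * qmc_chan_start() sets them up again, so the routing can be changed
 * in between.
 */
static int example_change_route(struct qmc_chan *chan)
{
	int ret;

	/* Stop both directions; timeslot entries get disabled in stop() */
	ret = qmc_chan_stop(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
	if (ret)
		return ret;

	/* ... update the channel timeslot routing here ... */

	/* Start both directions; timeslot entries get re-enabled in start() */
	return qmc_chan_start(chan, QMC_CHAN_READ | QMC_CHAN_WRITE);
}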
Signed-off-by: Herve Codina <herve.codina@bootlin.com>
Reviewed-by: Christophe Leroy
Signed-off-by: Christophe Leroy
---
 drivers/soc/fsl/qe/qmc.c | 175 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 163 insertions(+), 12 deletions(-)

diff --git a/drivers/soc/fsl/qe/qmc.c b/drivers/soc/fsl/qe/qmc.c
index 19acfcded9bc..185e6e87a83f 100644
--- a/drivers/soc/fsl/qe/qmc.c
+++ b/drivers/soc/fsl/qe/qmc.c
@@ -177,6 +177,7 @@ struct qmc_chan {
 	struct qmc *qmc;
 	void __iomem *s_param;
 	enum qmc_mode mode;
+	spinlock_t	ts_lock; /* Protect timeslots */
 	u64	tx_ts_mask_avail;
 	u64	tx_ts_mask;
 	u64	rx_ts_mask_avail;
@@ -265,6 +266,7 @@ static void qmc_setbits32(void __iomem *addr, u32 set)
 int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
 {
 	struct tsa_serial_info tsa_info;
+	unsigned long flags;
 	int ret;
 
 	/* Retrieve info from the TSA related serial */
@@ -272,6 +274,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
 	if (ret)
 		return ret;
 
+	spin_lock_irqsave(&chan->ts_lock, flags);
+
 	info->mode = chan->mode;
 	info->rx_fs_rate = tsa_info.rx_fs_rate;
 	info->rx_bit_rate = tsa_info.rx_bit_rate;
@@ -280,6 +284,8 @@ int qmc_chan_get_info(struct qmc_chan *chan, struct qmc_chan_info *info)
 	info->tx_bit_rate = tsa_info.tx_bit_rate;
 	info->nb_rx_ts = hweight64(chan->rx_ts_mask);
 
+	spin_unlock_irqrestore(&chan->ts_lock, flags);
+
 	return 0;
 }
 EXPORT_SYMBOL(qmc_chan_get_info);
@@ -683,6 +689,40 @@ static int qmc_chan_setup_tsa_32tx(struct qmc_chan *chan, const struct tsa_seria
 	return 0;
 }
 
+static int qmc_chan_setup_tsa_tx(struct qmc_chan *chan, bool enable)
+{
+	struct tsa_serial_info info;
+	int ret;
+
+	/* Retrieve info from the TSA related serial */
+	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+	if (ret)
+		return ret;
+
+	/* Setup entries */
+	if (chan->qmc->is_tsa_64rxtx)
+		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+	return qmc_chan_setup_tsa_32tx(chan, &info, enable);
+}
+
+static int qmc_chan_setup_tsa_rx(struct qmc_chan *chan, bool enable)
+{
+	struct tsa_serial_info info;
+	int ret;
+
+	/* Retrieve info from the TSA related serial */
+	ret = tsa_serial_get_info(chan->qmc->tsa_serial, &info);
+	if (ret)
+		return ret;
+
+	/* Setup entries */
+	if (chan->qmc->is_tsa_64rxtx)
+		return qmc_chan_setup_tsa_64rxtx(chan, &info, enable);
+
+	return qmc_chan_setup_tsa_32rx(chan, &info, enable);
+}
+
 static int qmc_chan_setup_tsa(struct qmc_chan *chan, bool enable)
 {
 	struct tsa_serial_info info;
@@ -719,6 +759,12 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
 
 	spin_lock_irqsave(&chan->rx_lock, flags);
 
+	if (chan->is_rx_stopped) {
+		/* The channel is already stopped -> simply return ok */
+		ret = 0;
+		goto end;
+	}
+
 	/* Send STOP RECEIVE command */
 	ret = qmc_chan_command(chan, 0x0);
 	if (ret) {
@@ -729,6 +775,15 @@ static int qmc_chan_stop_rx(struct qmc_chan *chan)
 
 	chan->is_rx_stopped = true;
 
+	if (!chan->qmc->is_tsa_64rxtx || chan->is_tx_stopped) {
+		ret = qmc_chan_setup_tsa_rx(chan, false);
+		if (ret) {
+			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+				chan->id, ret);
+			goto end;
+		}
+	}
+
 end:
 	spin_unlock_irqrestore(&chan->rx_lock, flags);
 	return ret;
@@ -741,6 +796,12 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
 
 	spin_lock_irqsave(&chan->tx_lock, flags);
 
+	if (chan->is_tx_stopped) {
+		/* The channel is already stopped -> simply return ok */
+		ret = 0;
+		goto end;
+	}
+
 	/* Send STOP TRANSMIT command */
 	ret = qmc_chan_command(chan, 0x1);
 	if (ret) {
@@ -751,37 +812,82 @@ static int qmc_chan_stop_tx(struct qmc_chan *chan)
 
 	chan->is_tx_stopped = true;
 
+	if (!chan->qmc->is_tsa_64rxtx || chan->is_rx_stopped) {
+		ret = qmc_chan_setup_tsa_tx(chan, false);
+		if (ret) {
+			dev_err(chan->qmc->dev, "chan %u: Disable tsa entries failed (%d)\n",
+				chan->id, ret);
+			goto end;
+		}
+	}
+
 end:
 	spin_unlock_irqrestore(&chan->tx_lock, flags);
 	return ret;
 }
 
+static int qmc_chan_start_rx(struct qmc_chan *chan);
+
 int qmc_chan_stop(struct qmc_chan *chan, int direction)
 {
-	int ret;
+	bool is_rx_rollback_needed = false;
+	unsigned long flags;
+	int ret = 0;
+
+	spin_lock_irqsave(&chan->ts_lock, flags);
 
 	if (direction & QMC_CHAN_READ) {
+		is_rx_rollback_needed = !chan->is_rx_stopped;
 		ret = qmc_chan_stop_rx(chan);
 		if (ret)
-			return ret;
+			goto end;
 	}
 
 	if (direction & QMC_CHAN_WRITE) {
 		ret = qmc_chan_stop_tx(chan);
-		if (ret)
-			return ret;
+		if (ret) {
+			/* Restart rx if needed */
+			if (is_rx_rollback_needed)
+				qmc_chan_start_rx(chan);
+			goto end;
+		}
 	}
 
-	return 0;
+end:
+	spin_unlock_irqrestore(&chan->ts_lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL(qmc_chan_stop);
 
-static void qmc_chan_start_rx(struct qmc_chan *chan)
+static int qmc_setup_chan_trnsync(struct qmc *qmc, struct qmc_chan *chan);
+
+static int qmc_chan_start_rx(struct qmc_chan *chan)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&chan->rx_lock, flags);
 
+	if (!chan->is_rx_stopped) {
+		/* The channel is already started -> simply return ok */
+		ret = 0;
+		goto end;
+	}
+
+	ret = qmc_chan_setup_tsa_rx(chan, true);
+	if (ret) {
+		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+			chan->id, ret);
+		goto end;
+	}
+
+	ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+	if (ret) {
+		dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+			chan->id, ret);
+		goto end;
+	}
+
 	/* Restart the receiver */
 	if (chan->mode == QMC_TRANSPARENT)
 		qmc_write32(chan->s_param + QMC_SPE_ZDSTATE, 0x18000080);
@@ -792,15 +898,38 @@ static void qmc_chan_start_rx(struct qmc_chan *chan)
 
 	chan->is_rx_stopped = false;
 
+end:
 	spin_unlock_irqrestore(&chan->rx_lock, flags);
+	return ret;
 }
 
-static void qmc_chan_start_tx(struct qmc_chan *chan)
+static int qmc_chan_start_tx(struct qmc_chan *chan)
 {
 	unsigned long flags;
+	int ret;
 
 	spin_lock_irqsave(&chan->tx_lock, flags);
 
+	if (!chan->is_tx_stopped) {
+		/* The channel is already started -> simply return ok */
+		ret = 0;
+		goto end;
+	}
+
+	ret = qmc_chan_setup_tsa_tx(chan, true);
+	if (ret) {
+		dev_err(chan->qmc->dev, "chan %u: Enable tsa entries failed (%d)\n",
+			chan->id, ret);
+		goto end;
+	}
+
+	ret = qmc_setup_chan_trnsync(chan->qmc, chan);
+	if (ret) {
+		dev_err(chan->qmc->dev, "chan %u: setup TRNSYNC failed (%d)\n",
+			chan->id, ret);
+		goto end;
+	}
+
 	/*
 	 * Enable channel transmitter as it could be disabled if
 	 * qmc_chan_reset() was called.
@@ -812,18 +941,39 @@ static void qmc_chan_start_tx(struct qmc_chan *chan)
 
 	chan->is_tx_stopped = false;
 
+end:
 	spin_unlock_irqrestore(&chan->tx_lock, flags);
+	return ret;
 }
 
 int qmc_chan_start(struct qmc_chan *chan, int direction)
 {
-	if (direction & QMC_CHAN_READ)
-		qmc_chan_start_rx(chan);
+	bool is_rx_rollback_needed = false;
+	unsigned long flags;
+	int ret = 0;
 
-	if (direction & QMC_CHAN_WRITE)
-		qmc_chan_start_tx(chan);
+	spin_lock_irqsave(&chan->ts_lock, flags);
 
-	return 0;
+	if (direction & QMC_CHAN_READ) {
+		is_rx_rollback_needed = chan->is_rx_stopped;
+		ret = qmc_chan_start_rx(chan);
+		if (ret)
+			goto end;
+	}
+
+	if (direction & QMC_CHAN_WRITE) {
+		ret = qmc_chan_start_tx(chan);
+		if (ret) {
+			/* Restop rx if needed */
+			if (is_rx_rollback_needed)
+				qmc_chan_stop_rx(chan);
+			goto end;
+		}
+	}
+
+end:
+	spin_unlock_irqrestore(&chan->ts_lock, flags);
+	return ret;
 }
 EXPORT_SYMBOL(qmc_chan_start);
 
@@ -992,6 +1142,7 @@ static int qmc_of_parse_chans(struct qmc *qmc, struct device_node *np)
 		}
 
 		chan->id = chan_id;
+		spin_lock_init(&chan->ts_lock);
 		spin_lock_init(&chan->rx_lock);
 		spin_lock_init(&chan->tx_lock);
 
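
[Editor's note, not part of the submission: the condition guarding the
disable paths in qmc_chan_stop_rx()/qmc_chan_stop_tx() above is the subtle
part of this change. The sketch below only restates that rule; the helper
name is made up for readability, and it assumes the is_tsa_64rxtx flag
introduced earlier in the series means RX and TX share one 64-entry TSA
table.]

#include <linux/types.h>

/*
 * Illustration only: with split 32-entry RX and TX tables, a direction
 * can clear its own TSA entries as soon as it is stopped; with a shared
 * 64-entry RX/TX table (is_tsa_64rxtx) the entries serve both directions
 * and must only be cleared once the other direction is stopped as well.
 * The driver open-codes this test in its stop paths.
 */
static bool qmc_example_can_disable_ts_entries(bool shared_64rxtx_table,
					       bool other_dir_stopped)
{
	return !shared_64rxtx_table || other_dir_stopped;
}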