[V3] spi: spi-geni-qcom: Add support for SE DMA mode

Message ID 1669713814-28876-1-git-send-email-quic_vnivarth@quicinc.com
State Superseded
Series [V3] spi: spi-geni-qcom: Add support for SE DMA mode

Commit Message

Vijaya Krishna Nivarthi Nov. 29, 2022, 9:23 a.m. UTC
SE DMA mode can be used for larger transfers and FIFO mode
for smaller transfers.

Signed-off-by: Vijaya Krishna Nivarthi <quic_vnivarth@quicinc.com>
---
v2 -> v3:
- indentation change
- readability change

v1 -> v2:
- replace writel_relaxed with writel
- corrected order of performing FSM reset and dma_unprep
- added more comments in geni_can_dma
- removed redundant change in spi_geni_init(DMA as default xfer_mode)
- removed redundant initialisations in setup_se_xfer
- removed redundant null checks in setup_se_xfer
- added dma_tx_prep failure handling(rx un_map) in setup_se_xfer
- move handling of setup_se_xfer return to transfer_one
- apply irq error handling for DMA mode too in geni_spi_isr
---
 drivers/spi/spi-geni-qcom.c | 211 ++++++++++++++++++++++++++++++++++----------
 1 file changed, 165 insertions(+), 46 deletions(-)

Comments

Doug Anderson Dec. 1, 2022, 10:40 p.m. UTC | #1
Hi,

On Tue, Nov 29, 2022 at 1:23 AM Vijaya Krishna Nivarthi
<quic_vnivarth@quicinc.com> wrote:
>
> @@ -95,6 +97,7 @@ struct spi_geni_master {
>         struct dma_chan *tx;
>         struct dma_chan *rx;
>         int cur_xfer_mode;
> +       u32 cur_m_cmd;

In v1, I said: "I don't think you need to store "cur_m_cmd" ..."
...you responded: "Please note that cur_xfer can be NULL. Added further
to comments."

I don't see any comments about this.

In any case, I'm still unclear about why this is needed. I guess
you're looking at the code in handle_se_timeout(). I'll comment there.


> @@ -162,6 +169,45 @@ static void handle_fifo_timeout(struct spi_master *spi,
>                  */
>                 mas->abort_failed = true;
>         }
> +
> +unmap_if_dma:
> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
> +               if (mas->cur_m_cmd & SPI_TX_ONLY) {
> +                       spin_lock_irq(&mas->lock);
> +                       reinit_completion(&mas->tx_reset_done);
> +                       writel(1, se->base + SE_DMA_TX_FSM_RST);
> +                       spin_unlock_irq(&mas->lock);
> +                       time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
> +                       if (!time_left)
> +                               dev_err(mas->dev, "DMA TX RESET failed\n");
> +               }
> +               if (mas->cur_m_cmd & SPI_RX_ONLY) {
> +                       spin_lock_irq(&mas->lock);
> +                       reinit_completion(&mas->rx_reset_done);
> +                       writel(1, se->base + SE_DMA_RX_FSM_RST);
> +                       spin_unlock_irq(&mas->lock);
> +                       time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
> +                       if (!time_left)
> +                               dev_err(mas->dev, "DMA RX RESET failed\n");
> +               }
> +
> +               if (xfer) {
> +                       if (xfer->tx_buf && xfer->tx_dma)
> +                               geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
> +                       if (xfer->rx_buf && xfer->rx_dma)
> +                               geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
> +               } else {
> +                       /*
> +                        * This can happen if a timeout happened and we had to wait
> +                        * for lock in this function because isr was holding the lock
> +                        * and handling transfer completion at that time.
> +                        * isr will set cur_xfer to NULL when done.
> +                        * Unnecessary error but cannot be helped.
> +                        * Only do reset, dma_unprep is already done by isr.
> +                        */
> +                       dev_err(mas->dev, "Cancel/Abort on completed SPI transfer\n");
> +               }

For the above block of code, if "xfer" is NULL then do we actually
need to issue the DMA TX Reset and the DMA RX Reset? As per your
comments, the only case "xfer" can be NULL is if the ISR was holding
the lock and handling the transfer completion at that time. If the ISR
handled the transfer completion then we're not actually in a bad
state, right? Thus, couldn't you do:

if (xfer) {
  if (xfer->tx_buf && xfer->tx_dma) {
    // Do the FSM reset
    // Unprepare the DMA
  }
  if (xfer->rx_buf && xfer->rx_dma) {
    // Do the FSM reset
    // Unprepare the DMA
  }
} else {
  dev_err(...);
}

That should be fine, right? ...and then we can get rid of the need for
"cur_m_cmd" as per my previous comment, right?

I'll also ask if we can downgrade the "dev_err" to a "dev_warn". I
usually reserve dev_err for things that are fatal. Here we think we'll
probably recover, right?
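
For concreteness, here is a sketch of that restructuring, reusing the
reset/unprep code already in the patch (illustrative only, not tested):

unmap_if_dma:
	if (mas->cur_xfer_mode == GENI_SE_DMA) {
		if (xfer) {
			if (xfer->tx_buf && xfer->tx_dma) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->tx_reset_done);
				writel(1, se->base + SE_DMA_TX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
				if (!time_left)
					dev_warn(mas->dev, "DMA TX RESET failed\n");
				geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
			}
			if (xfer->rx_buf && xfer->rx_dma) {
				spin_lock_irq(&mas->lock);
				reinit_completion(&mas->rx_reset_done);
				writel(1, se->base + SE_DMA_RX_FSM_RST);
				spin_unlock_irq(&mas->lock);
				time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
				if (!time_left)
					dev_warn(mas->dev, "DMA RX RESET failed\n");
				geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
			}
		} else {
			/* ISR finished the transfer and already did the unprep */
			dev_warn(mas->dev, "Cancel/Abort on completed SPI transfer\n");
		}
	}

Since the resets now key off xfer's buffers and DMA handles, cur_m_cmd
can go away entirely.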


> @@ -778,11 +836,39 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
>          */
>         spin_lock_irq(&mas->lock);
>         geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
> -       if (m_cmd & SPI_TX_ONLY) {
> +
> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
> +               if (m_cmd & SPI_RX_ONLY) {
> +                       ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
> +                               xfer->len, &xfer->rx_dma);

In response to v1 I asked if it's really OK to use "xfer->rx_dma" for
your purposes since it's supposed to be managed by the SPI framework.

It still makes me nervous to use it, even though it seems to work.
Since we're using it in an undocumented way, I'd be nervous that the
SPI framework might change what it's doing and break us in the future.

We can only have one TX and one RX transfer at a time anyway. Why
don't we just have our own "rx_dma" and "tx_dma" in "struct
spi_geni_master". It's only 16 extra bytes of data and it would make
me feel less nervous.

It still would be nice to eventually use the SPI framework to manage
the mapping, but I agree that can be a future task.
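
For illustration, the driver-owned handles could be as simple as this
(field names here are made up; the two dma_addr_t members are the "16
extra bytes" on a 64-bit kernel):

struct spi_geni_master {
	/* ... existing fields ... */
	int cur_xfer_mode;
	dma_addr_t tx_se_dma;
	dma_addr_t rx_se_dma;
};

setup_se_xfer() would then pass &mas->tx_se_dma / &mas->rx_se_dma to
geni_se_tx_dma_prep() / geni_se_rx_dma_prep() instead of touching
xfer->tx_dma and xfer->rx_dma, and the ISR and timeout paths would
unprep from those same fields.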


> +                       if (ret) {
> +                               dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
> +                               xfer->rx_dma = 0;
> +                               goto unlock_and_return;
> +                       }
> +               }
> +               if (m_cmd & SPI_TX_ONLY) {
> +                       ret =  geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
> +                               xfer->len, &xfer->tx_dma);

In v1 I asked about the above "void *" cast. You pointed out that it
was to cast away constness. So I agree that you can keep it here for
now, but could you also post a patch to change geni_se_tx_dma_prep()
to take a "const void *"? You'll need a cast in _that_ function to
remove the constness (since dma_map_single() is generic for both TX
and RX), but it seems like a better place for it. Then a later patch
could remove the cast here.
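
A rough sketch of that follow-up change, with the register programming
elided (the only interesting parts are the const in the signature and
the single cast at the dma_map_single() call):

int geni_se_tx_dma_prep(struct geni_se *se, const void *buf, size_t len,
			dma_addr_t *iova)
{
	struct geni_wrapper *wrapper = se->wrapper;

	if (!wrapper)
		return -EINVAL;

	/*
	 * dma_map_single() is shared by the TX and RX paths, so the
	 * cast that drops const lives here, in one place, instead of
	 * in every TX caller.
	 */
	*iova = dma_map_single(wrapper->dev, (void *)buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(wrapper->dev, *iova))
		return -EIO;

	/* ... program the SE_DMA_TX_* registers as before ... */
	return 0;
}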


> +                       if (ret) {
> +                               dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
> +                               xfer->tx_dma = 0;
> +                               if (m_cmd & SPI_RX_ONLY && xfer->rx_dma) {

Don't need "&& xfer->rx_dma". You _just_ mapped it above and if it had
failed it would have returned an error. You don't need to
double-check. You can trust that the framework knows what it's doing
and won't return NULL to you. If it did return NULL to you because of
a bug, it's not necessarily better to just silently skip unpreparing
anyway.


> @@ -823,39 +913,66 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
>
>         spin_lock(&mas->lock);
>
> -       if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
> -               geni_spi_handle_rx(mas);
> -
> -       if (m_irq & M_TX_FIFO_WATERMARK_EN)
> -               geni_spi_handle_tx(mas);
> -
> -       if (m_irq & M_CMD_DONE_EN) {
> -               if (mas->cur_xfer) {
> +       if (mas->cur_xfer_mode == GENI_SE_FIFO) {
> +               if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
> +                       geni_spi_handle_rx(mas);
> +
> +               if (m_irq & M_TX_FIFO_WATERMARK_EN)
> +                       geni_spi_handle_tx(mas);
> +
> +               if (m_irq & M_CMD_DONE_EN) {
> +                       if (mas->cur_xfer) {
> +                               spi_finalize_current_transfer(spi);
> +                               mas->cur_xfer = NULL;
> +                               /*
> +                                * If this happens, then a CMD_DONE came before all the
> +                                * Tx buffer bytes were sent out. This is unusual, log
> +                                * this condition and disable the WM interrupt to
> +                                * prevent the system from stalling due an interrupt
> +                                * storm.
> +                                *
> +                                * If this happens when all Rx bytes haven't been
> +                                * received, log the condition. The only known time
> +                                * this can happen is if bits_per_word != 8 and some
> +                                * registers that expect xfer lengths in num spi_words
> +                                * weren't written correctly.
> +                                */
> +                               if (mas->tx_rem_bytes) {
> +                                       writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
> +                                       dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
> +                                               mas->tx_rem_bytes, mas->cur_bits_per_word);
> +                               }
> +                               if (mas->rx_rem_bytes)
> +                                       dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
> +                                               mas->rx_rem_bytes, mas->cur_bits_per_word);
> +                       } else {
> +                               complete(&mas->cs_done);

Question: did you try actually using the chip select with your new
GENI_SE_DMA? Does it work? I ask because I don't see anything that
completes the "cs_done" in the DMA case of the ISR and I don't see
anything in spi_geni_set_cs() that forces it to FIFO mode. Note: if
you're only testing on trogdor/herobrine boards, you'd have to change
them to not use a GPIO for chip select.


-Doug
Mark Brown Dec. 1, 2022, 10:57 p.m. UTC | #2
On Thu, Dec 01, 2022 at 02:40:32PM -0800, Doug Anderson wrote:
> On Tue, Nov 29, 2022 at 1:23 AM Vijaya Krishna Nivarthi
> >
> > +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
> > +               if (m_cmd & SPI_RX_ONLY) {
> > +                       ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
> > +                               xfer->len, &xfer->rx_dma);

> In response to v1 I asked if it's really OK to use "xfer->rx_dma" for
> your purposes since it's supposed to be managed by the SPI framework.

> It still makes me nervous to use it, even though it seems to work.
> Since we're using it in an undocumented way, I'd be nervous that the
> SPI framework might change what it's doing and break us in the future.

I'm a bit nervous too - why exactly are we doing the open coding here?
Vijaya Krishna Nivarthi Dec. 2, 2022, 5:36 p.m. UTC | #3
Hi,


On 12/2/2022 4:10 AM, Doug Anderson wrote:
> Hi,
>
> On Tue, Nov 29, 2022 at 1:23 AM Vijaya Krishna Nivarthi
> <quic_vnivarth@quicinc.com> wrote:
>> @@ -95,6 +97,7 @@ struct spi_geni_master {
>>          struct dma_chan *tx;
>>          struct dma_chan *rx;
>>          int cur_xfer_mode;
>> +       u32 cur_m_cmd;
> In v1, I said: "I don't think you need to store "cur_m_cmd" ..."
> ...you responded: "Please note that cur_xfer can be NULL. Added further
> to comments."
>
> I don't see any comments about this.
>
> In any case, I'm still unclear about why this is needed. I guess
> you're looking at the code in handle_se_timeout(). I'll comment there.
>
>
>> @@ -162,6 +169,45 @@ static void handle_fifo_timeout(struct spi_master *spi,
>>                   */
>>                  mas->abort_failed = true;
>>          }
>> +
>> +unmap_if_dma:
>> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
>> +               if (mas->cur_m_cmd & SPI_TX_ONLY) {
>> +                       spin_lock_irq(&mas->lock);
>> +                       reinit_completion(&mas->tx_reset_done);
>> +                       writel(1, se->base + SE_DMA_TX_FSM_RST);
>> +                       spin_unlock_irq(&mas->lock);
>> +                       time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
>> +                       if (!time_left)
>> +                               dev_err(mas->dev, "DMA TX RESET failed\n");
>> +               }
>> +               if (mas->cur_m_cmd & SPI_RX_ONLY) {
>> +                       spin_lock_irq(&mas->lock);
>> +                       reinit_completion(&mas->rx_reset_done);
>> +                       writel(1, se->base + SE_DMA_RX_FSM_RST);
>> +                       spin_unlock_irq(&mas->lock);
>> +                       time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
>> +                       if (!time_left)
>> +                               dev_err(mas->dev, "DMA RX RESET failed\n");
>> +               }
>> +
>> +               if (xfer) {
>> +                       if (xfer->tx_buf && xfer->tx_dma)
>> +                               geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
>> +                       if (xfer->rx_buf && xfer->rx_dma)
>> +                               geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
>> +               } else {
>> +                       /*
>> +                        * This can happen if a timeout happened and we had to wait
>> +                        * for lock in this function because isr was holding the lock
>> +                        * and handling transfer completion at that time.
>> +                        * isr will set cur_xfer to NULL when done.
>> +                        * Unnecessary error but cannot be helped.
>> +                        * Only do reset, dma_unprep is already done by isr.
>> +                        */
>> +                       dev_err(mas->dev, "Cancel/Abort on completed SPI transfer\n");
>> +               }
> For the above block of code, if "xfer" is NULL then do we actually
> need to issue the DMA TX Reset and the DMA RX Reset? As per your
> comments, the only case "xfer" can be NULL is if the ISR was holding
> the lock and handling the transfer completion at that time. If the ISR
> handled the transfer completion then we're not actually in a bad
> state, right? Thus, couldn't you do:
>
> if (xfer) {
>    if (xfer->tx_buf && xfer->tx_dma) {
>      // Do the FSM reset
>      // Unprepare the DMA
>    }
>    if (xfer->rx_buf && xfer->rx_dma) {
>      // Do the FSM reset
>      // Unprepare the DMA
>    }
> } else {
>    dev_err(...);
> }
>
> That should be fine, right? ...and then we can get rid of the need for
> "cur_m_cmd" as per my previous comment, right?
>
> I'll also ask if we can downgrade the "dev_err" to a "dev_warn". I
> usually reserve dev_err for things that are fatal. Here we think we'll
> probably recover, right?

Agree. Will test this change and apply for next version.

>> @@ -778,11 +836,39 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
>>           */
>>          spin_lock_irq(&mas->lock);
>>          geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
>> -       if (m_cmd & SPI_TX_ONLY) {
>> +
>> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
>> +               if (m_cmd & SPI_RX_ONLY) {
>> +                       ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
>> +                               xfer->len, &xfer->rx_dma);
> In response to v1 I asked if it's really OK to use "xfer->rx_dma" for
> your purposes since it's supposed to be managed by the SPI framework.
>
> It still makes me nervous to use it, even though it seems to work.
> Since we're using it in an undocumented way, I'd be nervous that the
> SPI framework might change what it's doing and break us in the future.
>
> We can only have one TX and one RX transfer at a time anyway. Why
> don't we just have our own "rx_dma" and "tx_dma" in "struct
> spi_geni_master". It's only 16 extra bytes of data and it would make
> me feel less nervous.
>
> It still would be nice to eventually use the SPI framework to manage
> the mapping, but I agree that can be a future task.
>
Agree. Will add xx_dma fields to spi_geni_master and use those instead
of the dma handles in xfer.

Next step would be to move the mapping to the framework and remove the
xx_dma fields from spi_geni_master.

>> +                       if (ret) {
>> +                               dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
>> +                               xfer->rx_dma = 0;
>> +                               goto unlock_and_return;
>> +                       }
>> +               }
>> +               if (m_cmd & SPI_TX_ONLY) {
>> +                       ret =  geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
>> +                               xfer->len, &xfer->tx_dma);
> In v1 I asked about the above "void *" cast. You pointed out that it
> was to cast away constness. So I agree that you can keep it here for
> now, but could you also post a patch to change geni_se_tx_dma_prep()
> to take a "const void *"? You'll need a cast in _that_ function to
> remove the constness (since dma_map_single() is generic for both TX
> and RX), but it seems like a better place for it. Then a later patch
> could remove the cast here.
>
Agree.

Will post the next patches as suggested; more likely, will raise a bug
to track the remaining feedback for this patch.

>> +                       if (ret) {
>> +                               dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
>> +                               xfer->tx_dma = 0;
>> +                               if (m_cmd & SPI_RX_ONLY && xfer->rx_dma) {
> Don't need "&& xfer->rx_dma". You _just_ mapped it above and if it had
> failed it would have returned an error. You don't need to
> double-check. You can trust that the framework knows what it's doing
> and won't return NULL to you. If it did return NULL to you because of
> a bug, it's not necessarily better to just silently skip unpreparing
> anyway.
Agree, will remove.
>> @@ -823,39 +913,66 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
>>
>>          spin_lock(&mas->lock);
>>
>> -       if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
>> -               geni_spi_handle_rx(mas);
>> -
>> -       if (m_irq & M_TX_FIFO_WATERMARK_EN)
>> -               geni_spi_handle_tx(mas);
>> -
>> -       if (m_irq & M_CMD_DONE_EN) {
>> -               if (mas->cur_xfer) {
>> +       if (mas->cur_xfer_mode == GENI_SE_FIFO) {
>> +               if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
>> +                       geni_spi_handle_rx(mas);
>> +
>> +               if (m_irq & M_TX_FIFO_WATERMARK_EN)
>> +                       geni_spi_handle_tx(mas);
>> +
>> +               if (m_irq & M_CMD_DONE_EN) {
>> +                       if (mas->cur_xfer) {
>> +                               spi_finalize_current_transfer(spi);
>> +                               mas->cur_xfer = NULL;
>> +                               /*
>> +                                * If this happens, then a CMD_DONE came before all the
>> +                                * Tx buffer bytes were sent out. This is unusual, log
>> +                                * this condition and disable the WM interrupt to
>> +                                * prevent the system from stalling due an interrupt
>> +                                * storm.
>> +                                *
>> +                                * If this happens when all Rx bytes haven't been
>> +                                * received, log the condition. The only known time
>> +                                * this can happen is if bits_per_word != 8 and some
>> +                                * registers that expect xfer lengths in num spi_words
>> +                                * weren't written correctly.
>> +                                */
>> +                               if (mas->tx_rem_bytes) {
>> +                                       writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
>> +                                       dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
>> +                                               mas->tx_rem_bytes, mas->cur_bits_per_word);
>> +                               }
>> +                               if (mas->rx_rem_bytes)
>> +                                       dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
>> +                                               mas->rx_rem_bytes, mas->cur_bits_per_word);
>> +                       } else {
>> +                               complete(&mas->cs_done);
> Question: did you try actually using the chip select with your new
> GENI_SE_DMA? Does it work? I ask because I don't see anything that
> completes the "cs_done" in the DMA case of the ISR and I don't see
> anything in spi_geni_set_cs() that forces it to FIFO mode. Note: if
> you're only testing on trogdor/herobrine boards, you'd have to change
> them to not use a GPIO for chip select.
>
No, I did not test it with chip select as I was using herobrine.

Agreed that it would be broken on a board that doesn't use a GPIO for CS.

Will apply cs_done for SE_DMA mode as well, test it with a change to not
use a GPIO for CS, and upload the next version.

Thank you very much.

-Vijay/


> -Doug
Vijaya Krishna Nivarthi Dec. 8, 2022, 2:54 p.m. UTC | #4
Hi,

Uploaded V4.


On 12/2/2022 4:10 AM, Doug Anderson wrote:
> Hi,
>
> On Tue, Nov 29, 2022 at 1:23 AM Vijaya Krishna Nivarthi
> <quic_vnivarth@quicinc.com> wrote:
>> @@ -95,6 +97,7 @@ struct spi_geni_master {
>>          struct dma_chan *tx;
>>          struct dma_chan *rx;
>>          int cur_xfer_mode;
>> +       u32 cur_m_cmd;
> In v1, I said: "I don't think you need to store "cur_m_cmd" ..."
> ...you responded: "Please note that cur_xfer can be NULL. Added further
> to comments."
>
> I don't see any comments about this.
>
> In any case, I'm still unclear about why this is needed. I guess
> you're looking at the code in handle_se_timeout(). I'll comment there.
>
Removed cur_m_cmd


>> @@ -162,6 +169,45 @@ static void handle_fifo_timeout(struct spi_master *spi,
>>                   */
>>                  mas->abort_failed = true;
>>          }
>> +
>> +unmap_if_dma:
>> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
>> +               if (mas->cur_m_cmd & SPI_TX_ONLY) {
>> +                       spin_lock_irq(&mas->lock);
>> +                       reinit_completion(&mas->tx_reset_done);
>> +                       writel(1, se->base + SE_DMA_TX_FSM_RST);
>> +                       spin_unlock_irq(&mas->lock);
>> +                       time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
>> +                       if (!time_left)
>> +                               dev_err(mas->dev, "DMA TX RESET failed\n");
>> +               }
>> +               if (mas->cur_m_cmd & SPI_RX_ONLY) {
>> +                       spin_lock_irq(&mas->lock);
>> +                       reinit_completion(&mas->rx_reset_done);
>> +                       writel(1, se->base + SE_DMA_RX_FSM_RST);
>> +                       spin_unlock_irq(&mas->lock);
>> +                       time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
>> +                       if (!time_left)
>> +                               dev_err(mas->dev, "DMA RX RESET failed\n");
>> +               }
>> +
>> +               if (xfer) {
>> +                       if (xfer->tx_buf && xfer->tx_dma)
>> +                               geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
>> +                       if (xfer->rx_buf && xfer->rx_dma)
>> +                               geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
>> +               } else {
>> +                       /*
>> +                        * This can happen if a timeout happened and we had to wait
>> +                        * for lock in this function because isr was holding the lock
>> +                        * and handling transfer completion at that time.
>> +                        * isr will set cur_xfer to NULL when done.
>> +                        * Unnecessary error but cannot be helped.
>> +                        * Only do reset, dma_unprep is already done by isr.
>> +                        */
>> +                       dev_err(mas->dev, "Cancel/Abort on completed SPI transfer\n");
>> +               }
> For the above block of code, if "xfer" is NULL then do we actually
> need to issue the DMA TX Reset and the DMA RX Reset? As per your
> comments, the only case "xfer" can be NULL is if the ISR was holding
> the lock and handling the transfer completion at that time. If the ISR
> handled the transfer completion then we're not actually in a bad
> state, right? Thus, couldn't you do:
>
> if (xfer) {
>    if (xfer->tx_buf && xfer->tx_dma) {
>      // Do the FSM reset
>      // Unprepare the DMA
>    }
>    if (xfer->rx_buf && xfer->rx_dma) {
>      // Do the FSM reset
>      // Unprepare the DMA
>    }
> } else {
>    dev_err(...);
> }
>
> That should be fine, right? ...and then we can get rid of the need for
> "cur_m_cmd" as per my previous comment, right?
>
> I'll also ask if we can downgrade the "dev_err" to a "dev_warn". I
> usually reserve dev_err for things that are fatal. Here we think we'll
> probably recover, right?
>

Done.


>> @@ -778,11 +836,39 @@ static void setup_fifo_xfer(struct spi_transfer *xfer,
>>           */
>>          spin_lock_irq(&mas->lock);
>>          geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
>> -       if (m_cmd & SPI_TX_ONLY) {
>> +
>> +       if (mas->cur_xfer_mode == GENI_SE_DMA) {
>> +               if (m_cmd & SPI_RX_ONLY) {
>> +                       ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
>> +                               xfer->len, &xfer->rx_dma);
> In response to v1 I asked if it's really OK to use "xfer->rx_dma" for
> your purposes since it's supposed to be managed by the SPI framework.
>
> It still makes me nervous to use it, even though it seems to work.
> Since we're using it in an undocumented way, I'd be nervous that the
> SPI framework might change what it's doing and break us in the future.
>
> We can only have one TX and one RX transfer at a time anyway. Why
> don't we just have our own "rx_dma" and "tx_dma" in "struct
> spi_geni_master". It's only 16 extra bytes of data and it would make
> me feel less nervous.


Done.


>
> It still would be nice to eventually use the SPI framework to manage
> the mapping, but I agree that can be a future task.
>

Tracking this with a bug.

>> +                       if (ret) {
>> +                               dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
>> +                               xfer->rx_dma = 0;
>> +                               goto unlock_and_return;
>> +                       }
>> +               }
>> +               if (m_cmd & SPI_TX_ONLY) {
>> +                       ret =  geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
>> +                               xfer->len, &xfer->tx_dma);
> In v1 I asked about the above "void *" cast. You pointed out that it
> was to cast away constness. So I agree that you can keep it here for
> now, but could you also post a patch to change geni_se_tx_dma_prep()
> to take a "const void *"? You'll need a cast in _that_ function to
> remove the constness (since dma_map_single() is generic for both TX
> and RX), but it seems like a better place for it. Then a later patch
> could remove the cast here.


Tracking this with a bug.

>
>> +                       if (ret) {
>> +                               dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
>> +                               xfer->tx_dma = 0;
>> +                               if (m_cmd & SPI_RX_ONLY && xfer->rx_dma) {
> Don't need "&& xfer->rx_dma". You _just_ mapped it above and if it had
> failed it would have returned an error. You don't need to
> double-check. You can trust that the framework knows what it's doing
> and won't return NULL to you. If it did return NULL to you because of
> a bug, it's not necessarily better to just silently skip unpreparing
> anyway.


Done.


>
>> @@ -823,39 +913,66 @@ static irqreturn_t geni_spi_isr(int irq, void *data)
>>
>>          spin_lock(&mas->lock);
>>
>> -       if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
>> -               geni_spi_handle_rx(mas);
>> -
>> -       if (m_irq & M_TX_FIFO_WATERMARK_EN)
>> -               geni_spi_handle_tx(mas);
>> -
>> -       if (m_irq & M_CMD_DONE_EN) {
>> -               if (mas->cur_xfer) {
>> +       if (mas->cur_xfer_mode == GENI_SE_FIFO) {
>> +               if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
>> +                       geni_spi_handle_rx(mas);
>> +
>> +               if (m_irq & M_TX_FIFO_WATERMARK_EN)
>> +                       geni_spi_handle_tx(mas);
>> +
>> +               if (m_irq & M_CMD_DONE_EN) {
>> +                       if (mas->cur_xfer) {
>> +                               spi_finalize_current_transfer(spi);
>> +                               mas->cur_xfer = NULL;
>> +                               /*
>> +                                * If this happens, then a CMD_DONE came before all the
>> +                                * Tx buffer bytes were sent out. This is unusual, log
>> +                                * this condition and disable the WM interrupt to
>> +                                * prevent the system from stalling due an interrupt
>> +                                * storm.
>> +                                *
>> +                                * If this happens when all Rx bytes haven't been
>> +                                * received, log the condition. The only known time
>> +                                * this can happen is if bits_per_word != 8 and some
>> +                                * registers that expect xfer lengths in num spi_words
>> +                                * weren't written correctly.
>> +                                */
>> +                               if (mas->tx_rem_bytes) {
>> +                                       writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
>> +                                       dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
>> +                                               mas->tx_rem_bytes, mas->cur_bits_per_word);
>> +                               }
>> +                               if (mas->rx_rem_bytes)
>> +                                       dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
>> +                                               mas->rx_rem_bytes, mas->cur_bits_per_word);
>> +                       } else {
>> +                               complete(&mas->cs_done);
> Question: did you try actually using the chip select with your new
> GENI_SE_DMA? Does it work? I ask because I don't see anything that
> completes the "cs_done" in the DMA case of the ISR and I don't see
> anything in spi_geni_set_cs() that forces it to FIFO mode. Note: if
> you're only testing on trogdor/herobrine boards, you'd have to change
> them to not use a GPIO for chip select.


Forced FIFO in spi_geni_set_cs
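
Roughly, that change amounts to selecting FIFO mode before issuing the
CS command (a sketch based on the existing spi_geni_set_cs() locking,
not the exact v4 code):

	spin_lock_irq(&mas->lock);
	reinit_completion(&mas->cs_done);
	/*
	 * CS toggling is done with an M command serviced over the FIFO
	 * path, so force FIFO mode in case the previous transfer left
	 * the controller in SE DMA mode.
	 */
	mas->cur_xfer_mode = GENI_SE_FIFO;
	geni_se_select_mode(se, mas->cur_xfer_mode);
	if (set_flag)
		geni_se_setup_m_cmd(se, SPI_CS_ASSERT, 0);
	else
		geni_se_setup_m_cmd(se, SPI_CS_DEASSERT, 0);
	spin_unlock_irq(&mas->lock);

With cur_xfer_mode forced to GENI_SE_FIFO, the ISR takes the FIFO branch
and completes cs_done as before.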

Thank you,

Vijay/


>
> -Doug

Patch

diff --git a/drivers/spi/spi-geni-qcom.c b/drivers/spi/spi-geni-qcom.c
index 4e83cc5..170c0cb 100644
--- a/drivers/spi/spi-geni-qcom.c
+++ b/drivers/spi/spi-geni-qcom.c
@@ -87,6 +87,8 @@  struct spi_geni_master {
 	struct completion cs_done;
 	struct completion cancel_done;
 	struct completion abort_done;
+	struct completion tx_reset_done;
+	struct completion rx_reset_done;
 	unsigned int oversampling;
 	spinlock_t lock;
 	int irq;
@@ -95,6 +97,7 @@  struct spi_geni_master {
 	struct dma_chan *tx;
 	struct dma_chan *rx;
 	int cur_xfer_mode;
+	u32 cur_m_cmd;
 };
 
 static int get_spi_clk_cfg(unsigned int speed_hz,
@@ -129,23 +132,27 @@  static int get_spi_clk_cfg(unsigned int speed_hz,
 	return ret;
 }
 
-static void handle_fifo_timeout(struct spi_master *spi,
+static void handle_se_timeout(struct spi_master *spi,
 				struct spi_message *msg)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
 	unsigned long time_left;
 	struct geni_se *se = &mas->se;
+	const struct spi_transfer *xfer;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->cancel_done);
-	writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+	if (mas->cur_xfer_mode == GENI_SE_FIFO)
+		writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+
+	xfer = mas->cur_xfer;
 	mas->cur_xfer = NULL;
 	geni_se_cancel_m_cmd(se);
 	spin_unlock_irq(&mas->lock);
 
 	time_left = wait_for_completion_timeout(&mas->cancel_done, HZ);
 	if (time_left)
-		return;
+		goto unmap_if_dma;
 
 	spin_lock_irq(&mas->lock);
 	reinit_completion(&mas->abort_done);
@@ -162,6 +169,45 @@  static void handle_fifo_timeout(struct spi_master *spi,
 		 */
 		mas->abort_failed = true;
 	}
+
+unmap_if_dma:
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (mas->cur_m_cmd & SPI_TX_ONLY) {
+			spin_lock_irq(&mas->lock);
+			reinit_completion(&mas->tx_reset_done);
+			writel(1, se->base + SE_DMA_TX_FSM_RST);
+			spin_unlock_irq(&mas->lock);
+			time_left = wait_for_completion_timeout(&mas->tx_reset_done, HZ);
+			if (!time_left)
+				dev_err(mas->dev, "DMA TX RESET failed\n");
+		}
+		if (mas->cur_m_cmd & SPI_RX_ONLY) {
+			spin_lock_irq(&mas->lock);
+			reinit_completion(&mas->rx_reset_done);
+			writel(1, se->base + SE_DMA_RX_FSM_RST);
+			spin_unlock_irq(&mas->lock);
+			time_left = wait_for_completion_timeout(&mas->rx_reset_done, HZ);
+			if (!time_left)
+				dev_err(mas->dev, "DMA RX RESET failed\n");
+		}
+
+		if (xfer) {
+			if (xfer->tx_buf && xfer->tx_dma)
+				geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf && xfer->rx_dma)
+				geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
+		} else {
+			/*
+			 * This can happen if a timeout happened and we had to wait
+			 * for lock in this function because isr was holding the lock
+			 * and handling transfer completion at that time.
+			 * isr will set cur_xfer to NULL when done.
+			 * Unnecessary error but cannot be helped.
+			 * Only do reset, dma_unprep is already done by isr.
+			 */
+			dev_err(mas->dev, "Cancel/Abort on completed SPI transfer\n");
+		}
+	}
 }
 
 static void handle_gpi_timeout(struct spi_master *spi, struct spi_message *msg)
@@ -178,7 +224,8 @@  static void spi_geni_handle_err(struct spi_master *spi, struct spi_message *msg)
 
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
-		handle_fifo_timeout(spi, msg);
+	case GENI_SE_DMA:
+		handle_se_timeout(spi, msg);
 		break;
 	case GENI_GPI_DMA:
 		handle_gpi_timeout(spi, msg);
@@ -260,7 +307,7 @@  static void spi_geni_set_cs(struct spi_device *slv, bool set_flag)
 	time_left = wait_for_completion_timeout(&mas->cs_done, HZ);
 	if (!time_left) {
 		dev_warn(mas->dev, "Timeout setting chip select\n");
-		handle_fifo_timeout(spi, NULL);
+		handle_se_timeout(spi, NULL);
 	}
 
 exit:
@@ -482,8 +529,12 @@  static bool geni_can_dma(struct spi_controller *ctlr,
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(slv->master);
 
-	/* check if dma is supported */
-	return mas->cur_xfer_mode != GENI_SE_FIFO;
+	/*
+	 * Return true if transfer needs to be mapped prior to
+	 * calling transfer_one which is the case only for GPI_DMA.
+	 * For SE_DMA mode, map/unmap is done in geni_se_*x_dma_prep.
+	 */
+	return mas->cur_xfer_mode == GENI_GPI_DMA;
 }
 
 static int spi_geni_prepare_message(struct spi_master *spi,
@@ -494,6 +545,7 @@  static int spi_geni_prepare_message(struct spi_master *spi,
 
 	switch (mas->cur_xfer_mode) {
 	case GENI_SE_FIFO:
+	case GENI_SE_DMA:
 		if (spi_geni_is_abort_still_pending(mas))
 			return -EBUSY;
 		ret = setup_fifo_params(spi_msg->spi, spi);
@@ -597,7 +649,7 @@  static int spi_geni_init(struct spi_geni_master *mas)
 			break;
 		}
 		/*
-		 * in case of failure to get dma channel, we can still do the
+		 * in case of failure to get gpi dma channel, we can still do the
 		 * FIFO mode, so fallthrough
 		 */
 		dev_warn(mas->dev, "FIFO mode disabled, but couldn't get DMA, fall back to FIFO mode\n");
@@ -716,12 +768,12 @@  static void geni_spi_handle_rx(struct spi_geni_master *mas)
 	mas->rx_rem_bytes -= rx_bytes;
 }
 
-static void setup_fifo_xfer(struct spi_transfer *xfer,
+static int setup_se_xfer(struct spi_transfer *xfer,
 				struct spi_geni_master *mas,
 				u16 mode, struct spi_master *spi)
 {
 	u32 m_cmd = 0;
-	u32 len;
+	u32 len, fifo_size;
 	struct geni_se *se = &mas->se;
 	int ret;
 
@@ -748,7 +800,7 @@  static void setup_fifo_xfer(struct spi_transfer *xfer,
 	/* Speed and bits per word can be overridden per transfer */
 	ret = geni_spi_set_clock_and_bw(mas, xfer->speed_hz);
 	if (ret)
-		return;
+		return ret;
 
 	mas->tx_rem_bytes = 0;
 	mas->rx_rem_bytes = 0;
@@ -771,6 +823,12 @@  static void setup_fifo_xfer(struct spi_transfer *xfer,
 		writel(len, se->base + SE_SPI_RX_TRANS_LEN);
 		mas->rx_rem_bytes = xfer->len;
 	}
+	mas->cur_m_cmd = m_cmd;
+
+	/* Select transfer mode based on transfer length */
+	fifo_size = mas->tx_fifo_depth * mas->fifo_width_bits / mas->cur_bits_per_word;
+	mas->cur_xfer_mode = (len <= fifo_size) ? GENI_SE_FIFO : GENI_SE_DMA;
+	geni_se_select_mode(se, mas->cur_xfer_mode);
 
 	/*
 	 * Lock around right before we start the transfer since our
@@ -778,11 +836,39 @@  static void setup_fifo_xfer(struct spi_transfer *xfer,
 	 */
 	spin_lock_irq(&mas->lock);
 	geni_se_setup_m_cmd(se, m_cmd, FRAGMENTATION);
-	if (m_cmd & SPI_TX_ONLY) {
+
+	if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		if (m_cmd & SPI_RX_ONLY) {
+			ret =  geni_se_rx_dma_prep(se, xfer->rx_buf,
+				xfer->len, &xfer->rx_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Rx dma %d\n", ret);
+				xfer->rx_dma = 0;
+				goto unlock_and_return;
+			}
+		}
+		if (m_cmd & SPI_TX_ONLY) {
+			ret =  geni_se_tx_dma_prep(se, (void *)xfer->tx_buf,
+				xfer->len, &xfer->tx_dma);
+			if (ret) {
+				dev_err(mas->dev, "Failed to setup Tx dma %d\n", ret);
+				xfer->tx_dma = 0;
+				if (m_cmd & SPI_RX_ONLY && xfer->rx_dma) {
+					/* Unmap rx buffer if duplex transfer */
+					geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
+					xfer->rx_dma = 0;
+				}
+				goto unlock_and_return;
+			}
+		}
+	} else if (m_cmd & SPI_TX_ONLY) {
 		if (geni_spi_handle_tx(mas))
 			writel(mas->tx_wm, se->base + SE_GENI_TX_WATERMARK_REG);
 	}
+
+unlock_and_return:
 	spin_unlock_irq(&mas->lock);
+	return ret;
 }
 
 static int spi_geni_transfer_one(struct spi_master *spi,
@@ -790,6 +876,7 @@  static int spi_geni_transfer_one(struct spi_master *spi,
 				struct spi_transfer *xfer)
 {
 	struct spi_geni_master *mas = spi_master_get_devdata(spi);
+	int ret;
 
 	if (spi_geni_is_abort_still_pending(mas))
 		return -EBUSY;
@@ -798,9 +885,12 @@  static int spi_geni_transfer_one(struct spi_master *spi,
 	if (!xfer->len)
 		return 0;
 
-	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
-		setup_fifo_xfer(xfer, mas, slv->mode, spi);
-		return 1;
+	if (mas->cur_xfer_mode == GENI_SE_FIFO || mas->cur_xfer_mode == GENI_SE_DMA) {
+		ret = setup_se_xfer(xfer, mas, slv->mode, spi);
+		/* SPI framework expects +ve ret code to wait for transfer complete */
+		if (!ret)
+			ret = 1;
+		return ret;
 	}
 	return setup_gsi_xfer(xfer, mas, slv, spi);
 }
@@ -823,39 +913,66 @@  static irqreturn_t geni_spi_isr(int irq, void *data)
 
 	spin_lock(&mas->lock);
 
-	if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
-		geni_spi_handle_rx(mas);
-
-	if (m_irq & M_TX_FIFO_WATERMARK_EN)
-		geni_spi_handle_tx(mas);
-
-	if (m_irq & M_CMD_DONE_EN) {
-		if (mas->cur_xfer) {
+	if (mas->cur_xfer_mode == GENI_SE_FIFO) {
+		if ((m_irq & M_RX_FIFO_WATERMARK_EN) || (m_irq & M_RX_FIFO_LAST_EN))
+			geni_spi_handle_rx(mas);
+
+		if (m_irq & M_TX_FIFO_WATERMARK_EN)
+			geni_spi_handle_tx(mas);
+
+		if (m_irq & M_CMD_DONE_EN) {
+			if (mas->cur_xfer) {
+				spi_finalize_current_transfer(spi);
+				mas->cur_xfer = NULL;
+				/*
+				 * If this happens, then a CMD_DONE came before all the
+				 * Tx buffer bytes were sent out. This is unusual, log
+				 * this condition and disable the WM interrupt to
+				 * prevent the system from stalling due an interrupt
+				 * storm.
+				 *
+				 * If this happens when all Rx bytes haven't been
+				 * received, log the condition. The only known time
+				 * this can happen is if bits_per_word != 8 and some
+				 * registers that expect xfer lengths in num spi_words
+				 * weren't written correctly.
+				 */
+				if (mas->tx_rem_bytes) {
+					writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
+					dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
+						mas->tx_rem_bytes, mas->cur_bits_per_word);
+				}
+				if (mas->rx_rem_bytes)
+					dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
+						mas->rx_rem_bytes, mas->cur_bits_per_word);
+			} else {
+				complete(&mas->cs_done);
+			}
+		}
+	} else if (mas->cur_xfer_mode == GENI_SE_DMA) {
+		const struct spi_transfer *xfer = mas->cur_xfer;
+		u32 dma_tx_status = readl_relaxed(se->base + SE_DMA_TX_IRQ_STAT);
+		u32 dma_rx_status = readl_relaxed(se->base + SE_DMA_RX_IRQ_STAT);
+
+		if (dma_tx_status)
+			writel(dma_tx_status, se->base + SE_DMA_TX_IRQ_CLR);
+		if (dma_rx_status)
+			writel(dma_rx_status, se->base + SE_DMA_RX_IRQ_CLR);
+		if (dma_tx_status & TX_DMA_DONE)
+			mas->tx_rem_bytes = 0;
+		if (dma_rx_status & RX_DMA_DONE)
+			mas->rx_rem_bytes = 0;
+		if (dma_tx_status & TX_RESET_DONE)
+			complete(&mas->tx_reset_done);
+		if (dma_rx_status & RX_RESET_DONE)
+			complete(&mas->rx_reset_done);
+		if (!mas->tx_rem_bytes && !mas->rx_rem_bytes && xfer) {
+			if (xfer->tx_buf && xfer->tx_dma)
+				geni_se_tx_dma_unprep(se, xfer->tx_dma, xfer->len);
+			if (xfer->rx_buf && xfer->rx_dma)
+				geni_se_rx_dma_unprep(se, xfer->rx_dma, xfer->len);
 			spi_finalize_current_transfer(spi);
 			mas->cur_xfer = NULL;
-			/*
-			 * If this happens, then a CMD_DONE came before all the
-			 * Tx buffer bytes were sent out. This is unusual, log
-			 * this condition and disable the WM interrupt to
-			 * prevent the system from stalling due an interrupt
-			 * storm.
-			 *
-			 * If this happens when all Rx bytes haven't been
-			 * received, log the condition. The only known time
-			 * this can happen is if bits_per_word != 8 and some
-			 * registers that expect xfer lengths in num spi_words
-			 * weren't written correctly.
-			 */
-			if (mas->tx_rem_bytes) {
-				writel(0, se->base + SE_GENI_TX_WATERMARK_REG);
-				dev_err(mas->dev, "Premature done. tx_rem = %d bpw%d\n",
-					mas->tx_rem_bytes, mas->cur_bits_per_word);
-			}
-			if (mas->rx_rem_bytes)
-				dev_err(mas->dev, "Premature done. rx_rem = %d bpw%d\n",
-					mas->rx_rem_bytes, mas->cur_bits_per_word);
-		} else {
-			complete(&mas->cs_done);
 		}
 	}
 
@@ -949,6 +1066,8 @@  static int spi_geni_probe(struct platform_device *pdev)
 	init_completion(&mas->cs_done);
 	init_completion(&mas->cancel_done);
 	init_completion(&mas->abort_done);
+	init_completion(&mas->tx_reset_done);
+	init_completion(&mas->rx_reset_done);
 	spin_lock_init(&mas->lock);
 	pm_runtime_use_autosuspend(&pdev->dev);
 	pm_runtime_set_autosuspend_delay(&pdev->dev, 250);