Glue layer users should use the device of the DMA for DMA mapping and
allocations, as it is the DMA which accesses the descriptors and buffers,
not the clients.

Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
---
 drivers/dma/ti/k3-udma-glue.c    | 14 ++++++++++++++
 drivers/dma/ti/k3-udma-private.c |  6 ++++++
 drivers/dma/ti/k3-udma.h         |  1 +
 include/linux/dma/k3-udma-glue.h |  4 ++++
 4 files changed, 25 insertions(+)

diff --git a/drivers/dma/ti/k3-udma-glue.c b/drivers/dma/ti/k3-udma-glue.c
--- a/drivers/dma/ti/k3-udma-glue.c
+++ b/drivers/dma/ti/k3-udma-glue.c
@@ -493,6 +493,13 @@ int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn)
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_irq);
 
+struct device *
+	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn)
+{
+	return xudma_get_device(tx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_tx_get_dma_device);
+
 static int k3_udma_glue_cfg_rx_chn(struct k3_udma_glue_rx_channel *rx_chn)
 {
 	const struct udma_tisci_rm *tisci_rm = rx_chn->common.tisci_rm;
@@ -1201,3 +1208,10 @@ int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
 	return flow->virq;
 }
 EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_irq);
+
+struct device *
+	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn)
+{
+	return xudma_get_device(rx_chn->common.udmax);
+}
+EXPORT_SYMBOL_GPL(k3_udma_glue_rx_get_dma_device);
diff --git a/drivers/dma/ti/k3-udma-private.c b/drivers/dma/ti/k3-udma-private.c
--- a/drivers/dma/ti/k3-udma-private.c
+++ b/drivers/dma/ti/k3-udma-private.c
@@ -50,6 +50,12 @@ struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property)
 }
 EXPORT_SYMBOL(of_xudma_dev_get);
 
+struct device *xudma_get_device(struct udma_dev *ud)
+{
+	return ud->dev;
+}
+EXPORT_SYMBOL(xudma_get_device);
+
 u32 xudma_dev_get_psil_base(struct udma_dev *ud)
 {
 	return ud->psil_base;
diff --git a/drivers/dma/ti/k3-udma.h b/drivers/dma/ti/k3-udma.h
--- a/drivers/dma/ti/k3-udma.h
+++ b/drivers/dma/ti/k3-udma.h
@@ -112,6 +112,7 @@ int xudma_navss_psil_unpair(struct udma_dev *ud, u32 src_thread,
 			    u32 dst_thread);
 
 struct udma_dev *of_xudma_dev_get(struct device_node *np, const char *property);
+struct device *xudma_get_device(struct udma_dev *ud);
 void xudma_dev_put(struct udma_dev *ud);
 u32 xudma_dev_get_psil_base(struct udma_dev *ud);
 struct udma_tisci_rm *xudma_dev_get_tisci_rm(struct udma_dev *ud);
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
--- a/include/linux/dma/k3-udma-glue.h
+++ b/include/linux/dma/k3-udma-glue.h
@@ -41,6 +41,8 @@ void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
 u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
 u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
 int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+struct device *
+	k3_udma_glue_tx_get_dma_device(struct k3_udma_glue_tx_channel *tx_chn);
 
 enum {
 	K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
@@ -130,5 +132,7 @@ int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
 				u32 flow_idx);
 int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
 				 u32 flow_idx);
+struct device *
+	k3_udma_glue_rx_get_dma_device(struct k3_udma_glue_rx_channel *rx_chn);
 
 #endif /* K3_UDMA_GLUE_H_ */
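
For reviewers, a rough sketch of how a client driver would use the new TX
getter with the DMA API; the my_drv_tx_map() helper and its error handling
are made up for illustration and are not part of this patch:

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/dma/k3-udma-glue.h>

/* Hypothetical client helper: map a TX buffer against the DMA's device */
static int my_drv_tx_map(struct k3_udma_glue_tx_channel *tx_chn,
			 void *buf, size_t len, dma_addr_t *paddr)
{
	/* the UDMA, not the client, is the device accessing the buffer */
	struct device *dma_dev = k3_udma_glue_tx_get_dma_device(tx_chn);

	*paddr = dma_map_single(dma_dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dma_dev, *paddr))
		return -ENOMEM;

	return 0;
}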
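
Likewise on the RX side, descriptor/ring memory would be allocated with the
device returned by the new getter; my_drv_rx_alloc_ring() and its size
parameter are again hypothetical:

/* Hypothetical client helper: coherent ring allocation on the DMA device */
static void *my_drv_rx_alloc_ring(struct k3_udma_glue_rx_channel *rx_chn,
				  size_t ring_size, dma_addr_t *ring_dma)
{
	struct device *dma_dev = k3_udma_glue_rx_get_dma_device(rx_chn);

	return dma_alloc_coherent(dma_dev, ring_size, ring_dma, GFP_KERNEL);
}

This way dma_map_single()/dma_alloc_coherent() operate on the device which
actually masters the bus transactions, which is what the commit message
above refers to.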
-- 
Peter

Texas Instruments Finland Oy, Porkkalankatu 22, 00180 Helsinki.
Y-tunnus/Business ID: 0615521-4. Kotipaikka/Domicile: Helsinki