Message ID | 20241015182637.955753-18-quic_rajkbhag@quicinc.com |
---|---|
State | Superseded |
Headers | show |
Series | wifi: ath12k: add Ath12k AHB driver support for IPQ5332 | expand |
On 15.10.2024 8:26 PM, Raj Kumar Bhagat wrote: > From: Balamurugan S <quic_bselvara@quicinc.com> > > Add Initial Ath12k AHB driver support for IPQ5332. IPQ5332 is AHB > based IEEE802.11be 2 GHz 2x2 WiFi device. > > Tested-on: IPQ5332 hw1.0 AHB WLAN.WBE.1.3.1-00130-QCAHKSWPL_SILICONZ-1 > Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00210-QCAHKSWPL_SILICONZ-1 > > Signed-off-by: Balamurugan S <quic_bselvara@quicinc.com> > Co-developed-by: P Praneesh <quic_ppranees@quicinc.com> > Signed-off-by: P Praneesh <quic_ppranees@quicinc.com> > Co-developed-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> > Signed-off-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> > --- [...] > +enum ext_irq_num { > + host2wbm_desc_feed = 16, > + host2reo_re_injection, Why? > +static u32 ath12k_ahb_cmem_read32(struct ath12k_base *ab, u32 offset) > +{ > + offset = offset - HAL_IPQ5332_CMEM_BASE; > + return ioread32(ab->mem_cmem + offset); return ioread32(ab->mem_cmem + offset - HAL_IPQ5332_CMEM_BASE)? Or maybe the mem_cmem base should be moved? > +static int ath12k_ahb_start(struct ath12k_base *ab) > +{ > + ath12k_ahb_ce_irqs_enable(ab); > + ath12k_ce_rx_post_buf(ab); > + > + return 0; > +} Neither this nor ath12k_pci returns anything useful - perhaps make this void? > +static void ath12k_ahb_free_ext_irq(struct ath12k_base *ab) Any reason we're not using devm APIs? 
> +static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab) > +{ > + struct ath12k_ext_irq_grp *irq_grp; > + const struct hal_ops *hal_ops; > + int i, j, irq, irq_idx, ret; > + u32 num_irq; > + > + hal_ops = ab->hw_params->hal_ops; > + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { > + irq_grp = &ab->ext_irq_grp[i]; > + num_irq = 0; > + > + irq_grp->ab = ab; > + irq_grp->grp_id = i; > + > + irq_grp->napi_ndev = alloc_netdev_dummy(0); > + if (!irq_grp->napi_ndev) > + return -ENOMEM; > + > + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, > + ath12k_ahb_ext_grp_napi_poll); > + > + for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) { > + if (ab->hw_params->ring_mask->tx[i] && > + j <= ATH12K_MAX_TCL_RING_NUM && > + (ab->hw_params->ring_mask->tx[i] & > + BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num))) { > + irq_grp->irqs[num_irq++] = > + wbm2host_tx_completions_ring1 - j; > + } This is unreadable > + > + if (ab->hw_params->ring_mask->rx[i] & BIT(j)) { Consider taking a pointer to ring_mask so that the lines are shorter > + irq_grp->irqs[num_irq++] = > + reo2host_destination_ring1 - j; > + } > + > + if (ab->hw_params->ring_mask->rx_err[i] & BIT(j)) > + irq_grp->irqs[num_irq++] = reo2host_exception; > + > + if (ab->hw_params->ring_mask->rx_wbm_rel[i] & BIT(j)) > + irq_grp->irqs[num_irq++] = wbm2host_rx_release; > + > + if (ab->hw_params->ring_mask->reo_status[i] & BIT(j)) > + irq_grp->irqs[num_irq++] = reo2host_status; > + > + if (ab->hw_params->ring_mask->rx_mon_dest[i] & BIT(j)) > + irq_grp->irqs[num_irq++] = > + rxdma2host_monitor_destination_mac1; > + } > + > + irq_grp->num_irq = num_irq; > + > + for (j = 0; j < irq_grp->num_irq; j++) { > + irq_idx = irq_grp->irqs[j]; > + > + irq = platform_get_irq_byname(ab->pdev, > + irq_name[irq_idx]); > + ab->irq_num[irq_idx] = irq; > + irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); > + ret = request_irq(irq, ath12k_ahb_ext_interrupt_handler, > + IRQF_TRIGGER_RISING, > + irq_name[irq_idx], irq_grp); > + if 
(ret) { > + ath12k_err(ab, "failed request_irq for %d\n", > + irq); > + } > + } Instead of doing all this magic, can we request the IRQs manually, as we have interrupt-names in dt? > + } > + > + return 0; > +} > + > +static int ath12k_ahb_config_irq(struct ath12k_base *ab) > +{ > + int irq, irq_idx, i; > + int ret; > + > + /* Configure CE irqs */ > + for (i = 0; i < ab->hw_params->ce_count; i++) { > + struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; > + > + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) > + continue; > + > + irq_idx = ATH12K_IRQ_CE0_OFFSET + i; > + > + INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue); > + irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); > + ret = request_irq(irq, ath12k_ahb_ce_interrupt_handler, > + IRQF_TRIGGER_RISING, irq_name[irq_idx], > + ce_pipe); > + if (ret) > + return ret; > + > + ab->irq_num[irq_idx] = irq; > + } > + > + /* Configure external interrupts */ > + ret = ath12k_ahb_config_ext_irq(ab); > + > + return ret; > +} > + > +static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, > + u8 *ul_pipe, u8 *dl_pipe) > +{ > + const struct service_to_pipe *entry; > + bool ul_set = false, dl_set = false; > + int i; > + > + for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { > + entry = &ab->hw_params->svc_to_ce_map[i]; > + > + if (__le32_to_cpu(entry->service_id) != service_id) > + continue; > + > + switch (__le32_to_cpu(entry->pipedir)) { > + case PIPEDIR_NONE: > + break; > + case PIPEDIR_IN: > + WARN_ON(dl_set); > + *dl_pipe = __le32_to_cpu(entry->pipenum); > + dl_set = true; > + break; > + case PIPEDIR_OUT: > + WARN_ON(ul_set); > + *ul_pipe = __le32_to_cpu(entry->pipenum); > + ul_set = true; > + break; > + case PIPEDIR_INOUT: > + WARN_ON(dl_set); > + WARN_ON(ul_set); > + *dl_pipe = __le32_to_cpu(entry->pipenum); > + *ul_pipe = __le32_to_cpu(entry->pipenum); > + dl_set = true; > + ul_set = true; > + break; > + } if pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT if 
pipedir == PIPEDIR_OUT || pipedir == PIPE_INOUT ? > + } > + > + if (WARN_ON(!ul_set || !dl_set)) > + return -ENOENT; > + > + return 0; > +} > + > +static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = { > + .start = ath12k_ahb_start, > + .stop = ath12k_ahb_stop, > + .read32 = ath12k_ahb_read32, > + .write32 = ath12k_ahb_write32, > + .cmem_read32 = ath12k_ahb_cmem_read32, > + .cmem_write32 = ath12k_ahb_cmem_write32, > + .irq_enable = ath12k_ahb_ext_irq_enable, > + .irq_disable = ath12k_ahb_ext_irq_disable, > + .map_service_to_pipe = ath12k_ahb_map_service_to_pipe, > +}; > + > +static int ath12k_ahb_clock_init(struct ath12k_base *ab) > +{ > + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); > + int ret; > + > + ab_ahb->xo_clk = devm_clk_get(ab->dev, "gcc_xo_clk"); > + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { > + ath12k_err(ab, "failed to get gcc_xo_clk: %d\n", > + PTR_ERR_OR_ZERO(ab_ahb->xo_clk)); > + ret = ab_ahb->xo_clk ? PTR_ERR(ab_ahb->xo_clk) : -ENODEV; > + ab_ahb->xo_clk = NULL; > + return ret; > + } Won't clk core print errors in both of these cases? 
> + > + return 0; > +} > + > +static void ath12k_ahb_clock_deinit(struct ath12k_base *ab) > +{ > + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); > + > + devm_clk_put(ab->dev, ab_ahb->xo_clk); > + ab_ahb->xo_clk = NULL; > +} > + > +static int ath12k_ahb_clock_enable(struct ath12k_base *ab) > +{ > + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); > + int ret; > + > + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { > + ath12k_err(ab, "clock is not initialized\n"); > + return -EIO; > + } > + > + ret = clk_prepare_enable(ab_ahb->xo_clk); > + if (ret) { > + ath12k_err(ab, "failed to enable gcc_xo_clk: %d\n", ret); > + return ret; > + } > + > + return 0; > +} > + > +static void ath12k_ahb_clock_disable(struct ath12k_base *ab) > +{ > + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); > + > + clk_disable_unprepare(ab_ahb->xo_clk); > +} > + > +static int ath12k_ahb_resource_init(struct ath12k_base *ab) > +{ > + struct platform_device *pdev = ab->pdev; > + struct resource *mem_res; > + void __iomem *mem; > + int ret; > + > + mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); > + if (IS_ERR(mem)) { > + dev_err(&pdev->dev, "ioremap error\n"); > + ret = PTR_ERR(mem); > + goto out; If you assign ab->mem directly, you can get rid of the line below and return the error here > + } > + > + ab->mem = mem; > + ab->mem_len = resource_size(mem_res); > + > + if (ab->hw_params->ce_remap) { > + const struct ce_remap *ce_remap = ab->hw_params->ce_remap; > + /* ce register space is moved out of wcss and the space is not > + * contiguous, hence remapping the CE registers to a new space > + * for accessing them. 
> + */ Please capitalize words consistently > + ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); > + if (IS_ERR(ab->mem_ce)) { > + dev_err(&pdev->dev, "ce ioremap error\n"); > + ret = -ENOMEM; > + goto err_mem_unmap; > + } > + ab->ce_remap = true; > + ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE; > + } > + > + if (ab->hw_params->cmem_remap) { > + const struct cmem_remap *cmem_remap = ab->hw_params->cmem_remap; > + /* For device like IPQ5332 CMEM region is outside WCSS block. IPQ5332 is not a 'device' > + * Allocate separate I/O remap to access CMEM address. > + */ > + ab->mem_cmem = ioremap(cmem_remap->base, cmem_remap->size); > + if (IS_ERR(ab->mem_cmem)) { > + dev_err(&pdev->dev, "cmem ioremap error\n"); > + ret = -ENOMEM; > + goto err_mem_ce_unmap; > + } > + } [...] > + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); > + if (ret) { > + dev_err(&pdev->dev, "Failed to set 32-bit consistent dma\n"); s/consistent/coherent You're setting the mask, not the DMA itself [...] > + /* Set fixed_mem_region to true for platforms that support fixed memory > + * reservation from DT. If memory is reserved from DT for FW, ath12k driver > + * need not to allocate memory. > + */ > + if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr)) { > + set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags); > + mem_node = of_find_node_by_name(NULL, "mlo_global_mem_0"); This is not mentioned or documented anywhere. Konrad
On 10/19/2024 1:59 AM, Konrad Dybcio wrote: > On 15.10.2024 8:26 PM, Raj Kumar Bhagat wrote: >> From: Balamurugan S <quic_bselvara@quicinc.com> >> >> Add Initial Ath12k AHB driver support for IPQ5332. IPQ5332 is AHB >> based IEEE802.11be 2 GHz 2x2 WiFi device. >> >> Tested-on: IPQ5332 hw1.0 AHB WLAN.WBE.1.3.1-00130-QCAHKSWPL_SILICONZ-1 >> Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00210-QCAHKSWPL_SILICONZ-1 >> >> Signed-off-by: Balamurugan S <quic_bselvara@quicinc.com> >> Co-developed-by: P Praneesh <quic_ppranees@quicinc.com> >> Signed-off-by: P Praneesh <quic_ppranees@quicinc.com> >> Co-developed-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> >> Signed-off-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> >> --- > > [...] > >> +enum ext_irq_num { >> + host2wbm_desc_feed = 16, >> + host2reo_re_injection, > > Why? > This enum is used as an IRQ number for Ath12k AHB. Based on this enum we can get the IRQ name from irq_name[]. This helps to request the original IRQ number from the DT. It is starting from 16 because, in irq_name[], the name for ext IRQ starts from index 16. > >> +static u32 ath12k_ahb_cmem_read32(struct ath12k_base *ab, u32 offset) >> +{ >> + offset = offset - HAL_IPQ5332_CMEM_BASE; >> + return ioread32(ab->mem_cmem + offset); > > return ioread32(ab->mem_cmem + offset - HAL_IPQ5332_CMEM_BASE)? > > Or maybe the mem_cmem base should be moved? > Sure, will update in next version. >> +static int ath12k_ahb_start(struct ath12k_base *ab) >> +{ >> + ath12k_ahb_ce_irqs_enable(ab); >> + ath12k_ce_rx_post_buf(ab); >> + >> + return 0; >> +} > > Neither this nor ath12k_pci returns anything useful - perhaps make this void? > >> +static void ath12k_ahb_free_ext_irq(struct ath12k_base *ab) > > Any reason we're not using devm APIs? > Thanks, we will move to devm APIs in next version. 
>> +static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab) >> +{ >> + struct ath12k_ext_irq_grp *irq_grp; >> + const struct hal_ops *hal_ops; >> + int i, j, irq, irq_idx, ret; >> + u32 num_irq; >> + >> + hal_ops = ab->hw_params->hal_ops; >> + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { >> + irq_grp = &ab->ext_irq_grp[i]; >> + num_irq = 0; >> + >> + irq_grp->ab = ab; >> + irq_grp->grp_id = i; >> + >> + irq_grp->napi_ndev = alloc_netdev_dummy(0); >> + if (!irq_grp->napi_ndev) >> + return -ENOMEM; >> + >> + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, >> + ath12k_ahb_ext_grp_napi_poll); >> + >> + for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) { >> + if (ab->hw_params->ring_mask->tx[i] && >> + j <= ATH12K_MAX_TCL_RING_NUM && >> + (ab->hw_params->ring_mask->tx[i] & >> + BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num))) { >> + irq_grp->irqs[num_irq++] = >> + wbm2host_tx_completions_ring1 - j; >> + } > > This is unreadable > In next version we will use a ring_mask pointer and also add comments to make it more readable. >> + >> + if (ab->hw_params->ring_mask->rx[i] & BIT(j)) { > > Consider taking a pointer to ring_mask so that the lines are shorter > Thanks for the suggestion, will update in next version. 
>> + irq_grp->irqs[num_irq++] = >> + reo2host_destination_ring1 - j; >> + } >> + >> + if (ab->hw_params->ring_mask->rx_err[i] & BIT(j)) >> + irq_grp->irqs[num_irq++] = reo2host_exception; >> + >> + if (ab->hw_params->ring_mask->rx_wbm_rel[i] & BIT(j)) >> + irq_grp->irqs[num_irq++] = wbm2host_rx_release; >> + >> + if (ab->hw_params->ring_mask->reo_status[i] & BIT(j)) >> + irq_grp->irqs[num_irq++] = reo2host_status; >> + >> + if (ab->hw_params->ring_mask->rx_mon_dest[i] & BIT(j)) >> + irq_grp->irqs[num_irq++] = >> + rxdma2host_monitor_destination_mac1; >> + } >> + >> + irq_grp->num_irq = num_irq; >> + >> + for (j = 0; j < irq_grp->num_irq; j++) { >> + irq_idx = irq_grp->irqs[j]; >> + >> + irq = platform_get_irq_byname(ab->pdev, >> + irq_name[irq_idx]); >> + ab->irq_num[irq_idx] = irq; >> + irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); >> + ret = request_irq(irq, ath12k_ahb_ext_interrupt_handler, >> + IRQF_TRIGGER_RISING, >> + irq_name[irq_idx], irq_grp); >> + if (ret) { >> + ath12k_err(ab, "failed request_irq for %d\n", >> + irq); >> + } >> + } > > Instead of doing all this magic, can we request the IRQs manually, as we > have interrupt-names in dt? > I'm not sure if I fully understood this comment. If we manually request IRQs using their names from the DT, we won't be able to group the IRQs. Grouping the IRQs is one of our main objectives here. Additionally, we are not using all the IRQ names defined in the DT, so the logic in this function is crucial for grouping and requesting the IRQs according to the ring-mask. 
>> + } >> + >> + return 0; >> +} >> + >> +static int ath12k_ahb_config_irq(struct ath12k_base *ab) >> +{ >> + int irq, irq_idx, i; >> + int ret; >> + >> + /* Configure CE irqs */ >> + for (i = 0; i < ab->hw_params->ce_count; i++) { >> + struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; >> + >> + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) >> + continue; >> + >> + irq_idx = ATH12K_IRQ_CE0_OFFSET + i; >> + >> + INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue); >> + irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); >> + ret = request_irq(irq, ath12k_ahb_ce_interrupt_handler, >> + IRQF_TRIGGER_RISING, irq_name[irq_idx], >> + ce_pipe); >> + if (ret) >> + return ret; >> + >> + ab->irq_num[irq_idx] = irq; >> + } >> + >> + /* Configure external interrupts */ >> + ret = ath12k_ahb_config_ext_irq(ab); >> + >> + return ret; >> +} >> + >> +static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, >> + u8 *ul_pipe, u8 *dl_pipe) >> +{ >> + const struct service_to_pipe *entry; >> + bool ul_set = false, dl_set = false; >> + int i; >> + >> + for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { >> + entry = &ab->hw_params->svc_to_ce_map[i]; >> + >> + if (__le32_to_cpu(entry->service_id) != service_id) >> + continue; >> + >> + switch (__le32_to_cpu(entry->pipedir)) { >> + case PIPEDIR_NONE: >> + break; >> + case PIPEDIR_IN: >> + WARN_ON(dl_set); >> + *dl_pipe = __le32_to_cpu(entry->pipenum); >> + dl_set = true; >> + break; >> + case PIPEDIR_OUT: >> + WARN_ON(ul_set); >> + *ul_pipe = __le32_to_cpu(entry->pipenum); >> + ul_set = true; >> + break; >> + case PIPEDIR_INOUT: >> + WARN_ON(dl_set); >> + WARN_ON(ul_set); >> + *dl_pipe = __le32_to_cpu(entry->pipenum); >> + *ul_pipe = __le32_to_cpu(entry->pipenum); >> + dl_set = true; >> + ul_set = true; >> + break; >> + } > > if pipedir == PIPEDIR_IN || pipedir == PIPEDIR_INOUT > if pipedir == PIPEDIR_OUT || pipedir == PIPE_INOUT > > ? > Thanks for this logic simplification. 
Will use this in next version. >> } >> + >> + if (WARN_ON(!ul_set || !dl_set)) >> + return -ENOENT; >> + >> + return 0; >> +} >> + >> +static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = { >> + .start = ath12k_ahb_start, >> + .stop = ath12k_ahb_stop, >> + .read32 = ath12k_ahb_read32, >> + .write32 = ath12k_ahb_write32, >> + .cmem_read32 = ath12k_ahb_cmem_read32, >> + .cmem_write32 = ath12k_ahb_cmem_write32, >> + .irq_enable = ath12k_ahb_ext_irq_enable, >> + .irq_disable = ath12k_ahb_ext_irq_disable, >> + .map_service_to_pipe = ath12k_ahb_map_service_to_pipe, >> +}; >> + >> +static int ath12k_ahb_clock_init(struct ath12k_base *ab) >> +{ >> + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); >> + int ret; >> + >> + ab_ahb->xo_clk = devm_clk_get(ab->dev, "gcc_xo_clk"); >> + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { >> + ath12k_err(ab, "failed to get gcc_xo_clk: %d\n", >> + PTR_ERR_OR_ZERO(ab_ahb->xo_clk)); >> + ret = ab_ahb->xo_clk ? PTR_ERR(ab_ahb->xo_clk) : -ENODEV; >> + ab_ahb->xo_clk = NULL; >> + return ret; >> + } > > Won't clk core print errors in both of these cases? > Did not see any print error from clk core during validation. Hence added the error print for ath12k. 
>> + >> + return 0; >> +} >> + >> +static void ath12k_ahb_clock_deinit(struct ath12k_base *ab) >> +{ >> + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); >> + >> + devm_clk_put(ab->dev, ab_ahb->xo_clk); >> + ab_ahb->xo_clk = NULL; >> +} >> + >> +static int ath12k_ahb_clock_enable(struct ath12k_base *ab) >> +{ >> + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); >> + int ret; >> + >> + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { >> + ath12k_err(ab, "clock is not initialized\n"); >> + return -EIO; >> + } >> + >> + ret = clk_prepare_enable(ab_ahb->xo_clk); >> + if (ret) { >> + ath12k_err(ab, "failed to enable gcc_xo_clk: %d\n", ret); >> + return ret; >> + } >> + >> + return 0; >> +} >> + >> +static void ath12k_ahb_clock_disable(struct ath12k_base *ab) >> +{ >> + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); >> + >> + clk_disable_unprepare(ab_ahb->xo_clk); >> +} >> + >> +static int ath12k_ahb_resource_init(struct ath12k_base *ab) >> +{ >> + struct platform_device *pdev = ab->pdev; >> + struct resource *mem_res; >> + void __iomem *mem; >> + int ret; >> + >> + mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); >> + if (IS_ERR(mem)) { >> + dev_err(&pdev->dev, "ioremap error\n"); >> + ret = PTR_ERR(mem); >> + goto out; > > If you assign ab->mem directly, you can get rid of the line below > and return the error here > Thanks, will update in next version. >> + } >> + >> + ab->mem = mem; >> + ab->mem_len = resource_size(mem_res); >> + >> + if (ab->hw_params->ce_remap) { >> + const struct ce_remap *ce_remap = ab->hw_params->ce_remap; >> + /* ce register space is moved out of wcss and the space is not >> + * contiguous, hence remapping the CE registers to a new space >> + * for accessing them. >> + */ > > Please capitalize words consistently > sure, will update in next version. 
>> + ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); >> + if (IS_ERR(ab->mem_ce)) { >> + dev_err(&pdev->dev, "ce ioremap error\n"); >> + ret = -ENOMEM; >> + goto err_mem_unmap; >> + } >> + ab->ce_remap = true; >> + ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE; >> + } >> + >> + if (ab->hw_params->cmem_remap) { >> + const struct cmem_remap *cmem_remap = ab->hw_params->cmem_remap; >> + /* For device like IPQ5332 CMEM region is outside WCSS block. > > IPQ5332 is not a 'device' > >> + * Allocate separate I/O remap to access CMEM address. >> + */ >> + ab->mem_cmem = ioremap(cmem_remap->base, cmem_remap->size); >> + if (IS_ERR(ab->mem_cmem)) { >> + dev_err(&pdev->dev, "cmem ioremap error\n"); >> + ret = -ENOMEM; >> + goto err_mem_ce_unmap; >> + } >> + } > > [...] > >> + ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); >> + if (ret) { >> + dev_err(&pdev->dev, "Failed to set 32-bit consistent dma\n"); > > s/consistent/coherent > > You're setting the mask, not the DMA itself > Thanks will update in next version. > [...] > >> + /* Set fixed_mem_region to true for platforms that support fixed memory >> + * reservation from DT. If memory is reserved from DT for FW, ath12k driver >> + * need not to allocate memory. >> + */ >> + if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr)) { >> + set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags); >> + mem_node = of_find_node_by_name(NULL, "mlo_global_mem_0"); > > This is not mentioned or documented anywhere. > In next version, will document the below info: "If the platform supports fixed memory, then it should define/reserve MLO global memory in DT to support Multi Link Operation. If MLO global memory is not reserved in fixed memory mode, then MLO cannot be supported."
On 10/19/2024 1:59 AM, Konrad Dybcio wrote: >> +static int ath12k_ahb_start(struct ath12k_base *ab) >> +{ >> + ath12k_ahb_ce_irqs_enable(ab); >> + ath12k_ce_rx_post_buf(ab); >> + >> + return 0; >> +} > Neither this nor ath12k_pci returns anything useful - perhaps make this void? Thanks for the comment, this requires changes to be done also in ath12k pci and hif_ops, hence, will take it up separately.
On 6.12.2024 10:56 AM, Raj Kumar Bhagat wrote: > On 10/19/2024 1:59 AM, Konrad Dybcio wrote: >> On 15.10.2024 8:26 PM, Raj Kumar Bhagat wrote: >>> From: Balamurugan S <quic_bselvara@quicinc.com> >>> >>> Add Initial Ath12k AHB driver support for IPQ5332. IPQ5332 is AHB >>> based IEEE802.11be 2 GHz 2x2 WiFi device. >>> >>> Tested-on: IPQ5332 hw1.0 AHB WLAN.WBE.1.3.1-00130-QCAHKSWPL_SILICONZ-1 >>> Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.1.1-00210-QCAHKSWPL_SILICONZ-1 >>> >>> Signed-off-by: Balamurugan S <quic_bselvara@quicinc.com> >>> Co-developed-by: P Praneesh <quic_ppranees@quicinc.com> >>> Signed-off-by: P Praneesh <quic_ppranees@quicinc.com> >>> Co-developed-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> >>> Signed-off-by: Raj Kumar Bhagat <quic_rajkbhag@quicinc.com> >>> --- >> >> [...] >> >>> +enum ext_irq_num { >>> + host2wbm_desc_feed = 16, >>> + host2reo_re_injection, >> >> Why? >> > > This enum is used as a IRQ number for Ath12k AHB. Based on this enum > we can get the IRQ name from irq_name[]. This helps to request the original > IRQ number from the DT. > It is starting from 16 becasue, in irq_name[], the name for ext IRQ starts > from 16 index. [...] 
> >>> + irq_grp->irqs[num_irq++] = >>> + reo2host_destination_ring1 - j; >>> + } >>> + >>> + if (ab->hw_params->ring_mask->rx_err[i] & BIT(j)) >>> + irq_grp->irqs[num_irq++] = reo2host_exception; >>> + >>> + if (ab->hw_params->ring_mask->rx_wbm_rel[i] & BIT(j)) >>> + irq_grp->irqs[num_irq++] = wbm2host_rx_release; >>> + >>> + if (ab->hw_params->ring_mask->reo_status[i] & BIT(j)) >>> + irq_grp->irqs[num_irq++] = reo2host_status; >>> + >>> + if (ab->hw_params->ring_mask->rx_mon_dest[i] & BIT(j)) >>> + irq_grp->irqs[num_irq++] = >>> + rxdma2host_monitor_destination_mac1; >>> + } >>> + >>> + irq_grp->num_irq = num_irq; >>> + >>> + for (j = 0; j < irq_grp->num_irq; j++) { >>> + irq_idx = irq_grp->irqs[j]; >>> + >>> + irq = platform_get_irq_byname(ab->pdev, >>> + irq_name[irq_idx]); >>> + ab->irq_num[irq_idx] = irq; >>> + irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); >>> + ret = request_irq(irq, ath12k_ahb_ext_interrupt_handler, >>> + IRQF_TRIGGER_RISING, >>> + irq_name[irq_idx], irq_grp); >>> + if (ret) { >>> + ath12k_err(ab, "failed request_irq for %d\n", >>> + irq); >>> + } >>> + } >> >> Instead of doing all this magic, can we request the IRQs manually, as we >> have interrupt-names in dt? >> > > I'm not sure if I fully understood this comment. > If we manually request IRQs using their names from the DT, we won't be able to > group the IRQs. Grouping the IRQs is one of our main objectives here. Additionally, > we are not using all the IRQ names defined in the DT, so the logic in this function > is crucial for grouping and requesting the IRQs according to the ring-mask. Surely you can name these "foo_bar_ring%d" in DT and use the OF APIs [...] >> >>> + /* Set fixed_mem_region to true for platforms that support fixed memory >>> + * reservation from DT. If memory is reserved from DT for FW, ath12k driver >>> + * need not to allocate memory. 
>>> + */ >>> + if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr)) { >>> + set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags); >>> + mem_node = of_find_node_by_name(NULL, "mlo_global_mem_0"); >> >> This is not mentioned or documented anywhere. >> > > In next version, will document the below info: > > "If the platform supports fixed memory, then it should define/reserve > MLO global memory in DT to support Multi Link Operation. > If MLO global memory is not reserved in fixed memory mode, then > MLO cannot be supported." You should also explain what Multi Link Operation means Konrad
diff --git a/drivers/net/wireless/ath/ath12k/ahb.c b/drivers/net/wireless/ath/ath12k/ahb.c new file mode 100644 index 000000000000..7049efdae38c --- /dev/null +++ b/drivers/net/wireless/ath/ath12k/ahb.c @@ -0,0 +1,950 @@ +// SPDX-License-Identifier: BSD-3-Clause-Clear +/* + * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. + * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. + */ + +#include <linux/dma-mapping.h> +#include <linux/iommu.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/remoteproc.h> +#include <linux/soc/qcom/smem.h> +#include <linux/soc/qcom/smem_state.h> +#include "ahb.h" +#include "debug.h" +#include "hif.h" + +static const struct of_device_id ath12k_ahb_of_match[] = { + { .compatible = "qcom,ipq5332-wifi", + .data = (void *)ATH12K_HW_IPQ5332_HW10, + }, + { } +}; + +MODULE_DEVICE_TABLE(of, ath12k_ahb_of_match); + +#define ATH12K_IRQ_CE0_OFFSET 4 + +static const char *irq_name[ATH12K_IRQ_NUM_MAX] = { + "misc-pulse1", + "misc-latch", + "sw-exception", + "watchdog", + "ce0", + "ce1", + "ce2", + "ce3", + "ce4", + "ce5", + "ce6", + "ce7", + "ce8", + "ce9", + "ce10", + "ce11", + "host2wbm-desc-feed", + "host2reo-re-injection", + "host2reo-command", + "host2rxdma-monitor-ring3", + "host2rxdma-monitor-ring2", + "host2rxdma-monitor-ring1", + "reo2ost-exception", + "wbm2host-rx-release", + "reo2host-status", + "reo2host-destination-ring4", + "reo2host-destination-ring3", + "reo2host-destination-ring2", + "reo2host-destination-ring1", + "rxdma2host-monitor-destination-mac3", + "rxdma2host-monitor-destination-mac2", + "rxdma2host-monitor-destination-mac1", + "ppdu-end-interrupts-mac3", + "ppdu-end-interrupts-mac2", + "ppdu-end-interrupts-mac1", + "rxdma2host-monitor-status-ring-mac3", + "rxdma2host-monitor-status-ring-mac2", + "rxdma2host-monitor-status-ring-mac1", + 
"host2rxdma-host-buf-ring-mac3", + "host2rxdma-host-buf-ring-mac2", + "host2rxdma-host-buf-ring-mac1", + "rxdma2host-destination-ring-mac3", + "rxdma2host-destination-ring-mac2", + "rxdma2host-destination-ring-mac1", + "host2tcl-input-ring4", + "host2tcl-input-ring3", + "host2tcl-input-ring2", + "host2tcl-input-ring1", + "wbm2host-tx-completions-ring4", + "wbm2host-tx-completions-ring3", + "wbm2host-tx-completions-ring2", + "wbm2host-tx-completions-ring1", + "tcl2host-status-ring", +}; + +enum ext_irq_num { + host2wbm_desc_feed = 16, + host2reo_re_injection, + host2reo_command, + host2rxdma_monitor_ring3, + host2rxdma_monitor_ring2, + host2rxdma_monitor_ring1, + reo2host_exception, + wbm2host_rx_release, + reo2host_status, + reo2host_destination_ring4, + reo2host_destination_ring3, + reo2host_destination_ring2, + reo2host_destination_ring1, + rxdma2host_monitor_destination_mac3, + rxdma2host_monitor_destination_mac2, + rxdma2host_monitor_destination_mac1, + ppdu_end_interrupts_mac3, + ppdu_end_interrupts_mac2, + ppdu_end_interrupts_mac1, + rxdma2host_monitor_status_ring_mac3, + rxdma2host_monitor_status_ring_mac2, + rxdma2host_monitor_status_ring_mac1, + host2rxdma_host_buf_ring_mac3, + host2rxdma_host_buf_ring_mac2, + host2rxdma_host_buf_ring_mac1, + rxdma2host_destination_ring_mac3, + rxdma2host_destination_ring_mac2, + rxdma2host_destination_ring_mac1, + host2tcl_input_ring4, + host2tcl_input_ring3, + host2tcl_input_ring2, + host2tcl_input_ring1, + wbm2host_tx_completions_ring4, + wbm2host_tx_completions_ring3, + wbm2host_tx_completions_ring2, + wbm2host_tx_completions_ring1, + tcl2host_status_ring, +}; + +static u32 ath12k_ahb_cmem_read32(struct ath12k_base *ab, u32 offset) +{ + offset = offset - HAL_IPQ5332_CMEM_BASE; + return ioread32(ab->mem_cmem + offset); +} + +static void ath12k_ahb_cmem_write32(struct ath12k_base *ab, u32 offset, + u32 value) +{ + offset = offset - HAL_IPQ5332_CMEM_BASE; + iowrite32(value, ab->mem_cmem + offset); +} + +static u32 
ath12k_ahb_read32(struct ath12k_base *ab, u32 offset) +{ + if (ab->ce_remap && offset < HAL_SEQ_WCSS_UMAC_OFFSET) { + offset = offset - HAL_CE_REMAP_REG_BASE; + return ioread32(ab->mem_ce + offset); + } + return ioread32(ab->mem + offset); +} + +static void ath12k_ahb_write32(struct ath12k_base *ab, u32 offset, + u32 value) +{ + if (ab->ce_remap && offset < HAL_SEQ_WCSS_UMAC_OFFSET) { + offset = offset - HAL_CE_REMAP_REG_BASE; + iowrite32(value, ab->mem_ce + offset); + } else { + iowrite32(value, ab->mem + offset); + } +} + +static void ath12k_ahb_cancel_workqueue(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + cancel_work_sync(&ce_pipe->intr_wq); + } +} + +static void ath12k_ahb_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void __ath12k_ahb_ext_irq_disable(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + ath12k_ahb_ext_grp_disable(irq_grp); + if (irq_grp->napi_enabled) { + napi_synchronize(&irq_grp->napi); + napi_disable(&irq_grp->napi); + irq_grp->napi_enabled = false; + } + } +} + +static void ath12k_ahb_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp) +{ + int i; + + for (i = 0; i < irq_grp->num_irq; i++) + enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); +} + +static void ath12k_ahb_setbit32(struct ath12k_base *ab, u8 bit, u32 offset) +{ + u32 val; + + val = ath12k_ahb_read32(ab, offset); + ath12k_ahb_write32(ab, offset, val | BIT(bit)); +} + +static void ath12k_ahb_clearbit32(struct ath12k_base *ab, u8 bit, u32 offset) +{ + u32 val; + + val = ath12k_ahb_read32(ab, offset); + ath12k_ahb_write32(ab, offset, val & ~BIT(bit)); +} + +static void 
ath12k_ahb_ce_irq_enable(struct ath12k_base *ab, u16 ce_id) +{ + const struct ce_attr *ce_attr; + const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr; + u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; + + ie1_reg_addr = ce_ie_addr->ie1_reg_addr; + ie2_reg_addr = ce_ie_addr->ie2_reg_addr; + ie3_reg_addr = ce_ie_addr->ie3_reg_addr; + + ce_attr = &ab->hw_params->host_ce_config[ce_id]; + if (ce_attr->src_nentries) + ath12k_ahb_setbit32(ab, ce_id, ie1_reg_addr); + + if (ce_attr->dest_nentries) { + ath12k_ahb_setbit32(ab, ce_id, ie2_reg_addr); + ath12k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, + ie3_reg_addr); + } +} + +static void ath12k_ahb_ce_irq_disable(struct ath12k_base *ab, u16 ce_id) +{ + const struct ce_attr *ce_attr; + const struct ce_ie_addr *ce_ie_addr = ab->hw_params->ce_ie_addr; + u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; + + ie1_reg_addr = ce_ie_addr->ie1_reg_addr; + ie2_reg_addr = ce_ie_addr->ie2_reg_addr; + ie3_reg_addr = ce_ie_addr->ie3_reg_addr; + + ce_attr = &ab->hw_params->host_ce_config[ce_id]; + if (ce_attr->src_nentries) + ath12k_ahb_clearbit32(ab, ce_id, ie1_reg_addr); + + if (ce_attr->dest_nentries) { + ath12k_ahb_clearbit32(ab, ce_id, ie2_reg_addr); + ath12k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, + ie3_reg_addr); + } +} + +static void ath12k_ahb_sync_ce_irqs(struct ath12k_base *ab) +{ + int i; + int irq_idx; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + irq_idx = ATH12K_IRQ_CE0_OFFSET + i; + synchronize_irq(ab->irq_num[irq_idx]); + } +} + +static void ath12k_ahb_sync_ext_irqs(struct ath12k_base *ab) +{ + int i, j; + int irq_idx; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + synchronize_irq(ab->irq_num[irq_idx]); + } + } +} + +static void ath12k_ahb_ce_irqs_enable(struct ath12k_base *ab) +{ + int i; 
+ + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath12k_ahb_ce_irq_enable(ab, i); + } +} + +static void ath12k_ahb_ce_irqs_disable(struct ath12k_base *ab) +{ + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + ath12k_ahb_ce_irq_disable(ab, i); + } +} + +static int ath12k_ahb_start(struct ath12k_base *ab) +{ + ath12k_ahb_ce_irqs_enable(ab); + ath12k_ce_rx_post_buf(ab); + + return 0; +} + +static void ath12k_ahb_ext_irq_enable(struct ath12k_base *ab) +{ + struct ath12k_ext_irq_grp *irq_grp; + int i; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + irq_grp = &ab->ext_irq_grp[i]; + if (!irq_grp->napi_enabled) { + napi_enable(&irq_grp->napi); + irq_grp->napi_enabled = true; + } + ath12k_ahb_ext_grp_enable(irq_grp); + } +} + +static void ath12k_ahb_ext_irq_disable(struct ath12k_base *ab) +{ + __ath12k_ahb_ext_irq_disable(ab); + ath12k_ahb_sync_ext_irqs(ab); +} + +static void ath12k_ahb_stop(struct ath12k_base *ab) +{ + if (!test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags)) + ath12k_ahb_ce_irqs_disable(ab); + ath12k_ahb_sync_ce_irqs(ab); + ath12k_ahb_cancel_workqueue(ab); + del_timer_sync(&ab->rx_replenish_retry); + ath12k_ce_cleanup_pipes(ab); +} + +static void ath12k_ahb_init_qmi_ce_config(struct ath12k_base *ab) +{ + struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; + + cfg->tgt_ce_len = ab->hw_params->target_ce_count; + cfg->tgt_ce = ab->hw_params->target_ce_config; + cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; + cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; + ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; +} + +static void ath12k_ahb_free_ext_irq(struct ath12k_base *ab) +{ + int i, j; + + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; + + for (j = 0; j < irq_grp->num_irq; j++) + 
free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); + + netif_napi_del(&irq_grp->napi); + free_netdev(irq_grp->napi_ndev); + } +} + +static void ath12k_ahb_free_irq(struct ath12k_base *ab) +{ + int irq_idx; + int i; + + for (i = 0; i < ab->hw_params->ce_count; i++) { + if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + irq_idx = ATH12K_IRQ_CE0_OFFSET + i; + free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); + } + + ath12k_ahb_free_ext_irq(ab); +} + +static void ath12k_ahb_ce_workqueue(struct work_struct *work) +{ + struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq); + + ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); + + ath12k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); +} + +static irqreturn_t ath12k_ahb_ce_interrupt_handler(int irq, void *arg) +{ + struct ath12k_ce_pipe *ce_pipe = arg; + + /* last interrupt received for this CE */ + ce_pipe->timestamp = jiffies; + + ath12k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); + + queue_work(system_bh_wq, &ce_pipe->intr_wq); + + return IRQ_HANDLED; +} + +static int ath12k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget) +{ + struct ath12k_ext_irq_grp *irq_grp = container_of(napi, + struct ath12k_ext_irq_grp, + napi); + struct ath12k_base *ab = irq_grp->ab; + int work_done; + + work_done = ath12k_dp_service_srng(ab, irq_grp, budget); + if (work_done < budget) { + napi_complete_done(napi, work_done); + ath12k_ahb_ext_grp_enable(irq_grp); + } + + if (work_done > budget) + work_done = budget; + + return work_done; +} + +static irqreturn_t ath12k_ahb_ext_interrupt_handler(int irq, void *arg) +{ + struct ath12k_ext_irq_grp *irq_grp = arg; + + /* last interrupt received for this group */ + irq_grp->timestamp = jiffies; + + ath12k_ahb_ext_grp_disable(irq_grp); + + napi_schedule(&irq_grp->napi); + + return IRQ_HANDLED; +} + +static int ath12k_ahb_config_ext_irq(struct ath12k_base *ab) +{ + struct ath12k_ext_irq_grp *irq_grp; + const struct hal_ops *hal_ops; + 
int i, j, irq, irq_idx, ret; + u32 num_irq; + + hal_ops = ab->hw_params->hal_ops; + for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { + irq_grp = &ab->ext_irq_grp[i]; + num_irq = 0; + + irq_grp->ab = ab; + irq_grp->grp_id = i; + + irq_grp->napi_ndev = alloc_netdev_dummy(0); + if (!irq_grp->napi_ndev) + return -ENOMEM; + + netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, + ath12k_ahb_ext_grp_napi_poll); + + for (j = 0; j < ATH12K_EXT_IRQ_NUM_MAX; j++) { + if (ab->hw_params->ring_mask->tx[i] && + j <= ATH12K_MAX_TCL_RING_NUM && + (ab->hw_params->ring_mask->tx[i] & + BIT(hal_ops->tcl_to_wbm_rbm_map[j].wbm_ring_num))) { + irq_grp->irqs[num_irq++] = + wbm2host_tx_completions_ring1 - j; + } + + if (ab->hw_params->ring_mask->rx[i] & BIT(j)) { + irq_grp->irqs[num_irq++] = + reo2host_destination_ring1 - j; + } + + if (ab->hw_params->ring_mask->rx_err[i] & BIT(j)) + irq_grp->irqs[num_irq++] = reo2host_exception; + + if (ab->hw_params->ring_mask->rx_wbm_rel[i] & BIT(j)) + irq_grp->irqs[num_irq++] = wbm2host_rx_release; + + if (ab->hw_params->ring_mask->reo_status[i] & BIT(j)) + irq_grp->irqs[num_irq++] = reo2host_status; + + if (ab->hw_params->ring_mask->rx_mon_dest[i] & BIT(j)) + irq_grp->irqs[num_irq++] = + rxdma2host_monitor_destination_mac1; + } + + irq_grp->num_irq = num_irq; + + for (j = 0; j < irq_grp->num_irq; j++) { + irq_idx = irq_grp->irqs[j]; + + irq = platform_get_irq_byname(ab->pdev, + irq_name[irq_idx]); + ab->irq_num[irq_idx] = irq; + irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); + ret = request_irq(irq, ath12k_ahb_ext_interrupt_handler, + IRQF_TRIGGER_RISING, + irq_name[irq_idx], irq_grp); + if (ret) { + ath12k_err(ab, "failed request_irq for %d\n", + irq); + } + } + } + + return 0; +} + +static int ath12k_ahb_config_irq(struct ath12k_base *ab) +{ + int irq, irq_idx, i; + int ret; + + /* Configure CE irqs */ + for (i = 0; i < ab->hw_params->ce_count; i++) { + struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; + + if 
(ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) + continue; + + irq_idx = ATH12K_IRQ_CE0_OFFSET + i; + + INIT_WORK(&ce_pipe->intr_wq, ath12k_ahb_ce_workqueue); + irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); + ret = request_irq(irq, ath12k_ahb_ce_interrupt_handler, + IRQF_TRIGGER_RISING, irq_name[irq_idx], + ce_pipe); + if (ret) + return ret; + + ab->irq_num[irq_idx] = irq; + } + + /* Configure external interrupts */ + ret = ath12k_ahb_config_ext_irq(ab); + + return ret; +} + +static int ath12k_ahb_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, + u8 *ul_pipe, u8 *dl_pipe) +{ + const struct service_to_pipe *entry; + bool ul_set = false, dl_set = false; + int i; + + for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { + entry = &ab->hw_params->svc_to_ce_map[i]; + + if (__le32_to_cpu(entry->service_id) != service_id) + continue; + + switch (__le32_to_cpu(entry->pipedir)) { + case PIPEDIR_NONE: + break; + case PIPEDIR_IN: + WARN_ON(dl_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + break; + case PIPEDIR_OUT: + WARN_ON(ul_set); + *ul_pipe = __le32_to_cpu(entry->pipenum); + ul_set = true; + break; + case PIPEDIR_INOUT: + WARN_ON(dl_set); + WARN_ON(ul_set); + *dl_pipe = __le32_to_cpu(entry->pipenum); + *ul_pipe = __le32_to_cpu(entry->pipenum); + dl_set = true; + ul_set = true; + break; + } + } + + if (WARN_ON(!ul_set || !dl_set)) + return -ENOENT; + + return 0; +} + +static const struct ath12k_hif_ops ath12k_ahb_hif_ops_ipq5332 = { + .start = ath12k_ahb_start, + .stop = ath12k_ahb_stop, + .read32 = ath12k_ahb_read32, + .write32 = ath12k_ahb_write32, + .cmem_read32 = ath12k_ahb_cmem_read32, + .cmem_write32 = ath12k_ahb_cmem_write32, + .irq_enable = ath12k_ahb_ext_irq_enable, + .irq_disable = ath12k_ahb_ext_irq_disable, + .map_service_to_pipe = ath12k_ahb_map_service_to_pipe, +}; + +static int ath12k_ahb_clock_init(struct ath12k_base *ab) +{ + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); + int ret; + + 
ab_ahb->xo_clk = devm_clk_get(ab->dev, "gcc_xo_clk"); + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { + ath12k_err(ab, "failed to get gcc_xo_clk: %d\n", + PTR_ERR_OR_ZERO(ab_ahb->xo_clk)); + ret = ab_ahb->xo_clk ? PTR_ERR(ab_ahb->xo_clk) : -ENODEV; + ab_ahb->xo_clk = NULL; + return ret; + } + + return 0; +} + +static void ath12k_ahb_clock_deinit(struct ath12k_base *ab) +{ + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); + + devm_clk_put(ab->dev, ab_ahb->xo_clk); + ab_ahb->xo_clk = NULL; +} + +static int ath12k_ahb_clock_enable(struct ath12k_base *ab) +{ + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); + int ret; + + if (IS_ERR_OR_NULL(ab_ahb->xo_clk)) { + ath12k_err(ab, "clock is not initialized\n"); + return -EIO; + } + + ret = clk_prepare_enable(ab_ahb->xo_clk); + if (ret) { + ath12k_err(ab, "failed to enable gcc_xo_clk: %d\n", ret); + return ret; + } + + return 0; +} + +static void ath12k_ahb_clock_disable(struct ath12k_base *ab) +{ + struct ath12k_ahb *ab_ahb = ath12k_ab_to_ahb(ab); + + clk_disable_unprepare(ab_ahb->xo_clk); +} + +static int ath12k_ahb_resource_init(struct ath12k_base *ab) +{ + struct platform_device *pdev = ab->pdev; + struct resource *mem_res; + void __iomem *mem; + int ret; + + mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); + if (IS_ERR(mem)) { + dev_err(&pdev->dev, "ioremap error\n"); + ret = PTR_ERR(mem); + goto out; + } + + ab->mem = mem; + ab->mem_len = resource_size(mem_res); + + if (ab->hw_params->ce_remap) { + const struct ce_remap *ce_remap = ab->hw_params->ce_remap; + /* ce register space is moved out of wcss and the space is not + * contiguous, hence remapping the CE registers to a new space + * for accessing them. 
+ */ + ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); + if (IS_ERR(ab->mem_ce)) { + dev_err(&pdev->dev, "ce ioremap error\n"); + ret = -ENOMEM; + goto err_mem_unmap; + } + ab->ce_remap = true; + ab->ce_remap_base_addr = HAL_IPQ5332_CE_WFSS_REG_BASE; + } + + if (ab->hw_params->cmem_remap) { + const struct cmem_remap *cmem_remap = ab->hw_params->cmem_remap; + /* For device like IPQ5332 CMEM region is outside WCSS block. + * Allocate separate I/O remap to access CMEM address. + */ + ab->mem_cmem = ioremap(cmem_remap->base, cmem_remap->size); + if (IS_ERR(ab->mem_cmem)) { + dev_err(&pdev->dev, "cmem ioremap error\n"); + ret = -ENOMEM; + goto err_mem_ce_unmap; + } + } + + ret = ath12k_ahb_clock_init(ab); + if (ret) + goto err_mem_cmem_unmap; + + ret = ath12k_ahb_clock_enable(ab); + if (ret) + goto err_clock_deinit; + + return 0; + +err_clock_deinit: + ath12k_ahb_clock_deinit(ab); + +err_mem_cmem_unmap: + if (ab->hw_params->cmem_remap) + iounmap(ab->mem_cmem); + +err_mem_ce_unmap: + ab->mem_cmem = NULL; + if (ab->hw_params->ce_remap) + iounmap(ab->mem_ce); + +err_mem_unmap: + ab->mem_ce = NULL; + devm_iounmap(ab->dev, ab->mem); + +out: + ab->mem = NULL; + return ret; +} + +static void ath12k_ahb_resource_deinit(struct ath12k_base *ab) +{ + if (ab->mem) + devm_iounmap(ab->dev, ab->mem); + + if (ab->mem_ce) + iounmap(ab->mem_ce); + + if (ab->mem_cmem) + iounmap(ab->mem_cmem); + + ab->mem = NULL; + ab->mem_ce = NULL; + ab->mem_cmem = NULL; + + ath12k_ahb_clock_disable(ab); + ath12k_ahb_clock_deinit(ab); +} + +static enum ath12k_hw_rev ath12k_ahb_get_hw_rev(struct platform_device *pdev) +{ + const struct of_device_id *of_id; + + of_id = of_match_device(ath12k_ahb_of_match, &pdev->dev); + if (!of_id) { + dev_err(&pdev->dev, "Failed to find matching device tree id\n"); + return -EINVAL; + } + + return (enum ath12k_hw_rev)of_id->data; +} + +static int ath12k_ahb_probe(struct platform_device *pdev) +{ + struct ath12k_base *ab; + const struct ath12k_hif_ops *hif_ops; + 
	struct device_node *mem_node;
	enum ath12k_hw_rev hw_rev;
	u32 addr;
	int ret;

	/* Select the hif ops for the DT-matched hardware revision */
	hw_rev = ath12k_ahb_get_hw_rev(pdev);
	switch (hw_rev) {
	case ATH12K_HW_IPQ5332_HW10:
		hif_ops = &ath12k_ahb_hif_ops_ipq5332;
		break;
	default:
		dev_err(&pdev->dev, "Unsupported device type %d\n", hw_rev);
		return -EOPNOTSUPP;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (ret) {
		dev_err(&pdev->dev, "Failed to set 32-bit consistent dma\n");
		return ret;
	}

	ab = ath12k_core_alloc(&pdev->dev, sizeof(struct ath12k_ahb),
			       ATH12K_BUS_AHB);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
		return -ENOMEM;
	}

	ab->hif.ops = hif_ops;
	ab->pdev = pdev;
	ab->hw_rev = hw_rev;
	platform_set_drvdata(pdev, ab);

	/* Set fixed_mem_region to true for platforms that support fixed memory
	 * reservation from DT. If memory is reserved from DT for FW, ath12k driver
	 * need not to allocate memory.
	 */
	if (!of_property_read_u32(ab->dev->of_node, "memory-region", &addr)) {
		set_bit(ATH12K_FLAG_FIXED_MEM_REGION, &ab->dev_flags);
		/* without an MLO global memory node, MLO capability is cleared */
		mem_node = of_find_node_by_name(NULL, "mlo_global_mem_0");
		if (!mem_node)
			ab->mlo_capable_flags = 0;
	}

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_ahb_resource_init(ab);
	if (ret)
		goto err_core_free;

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_resource_deinit;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_ahb_init_qmi_ce_config(ab);

	ret = ath12k_ahb_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to configure irq: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		/* NOTE(review): irqs requested by ath12k_ahb_config_irq()
		 * are not freed on this error path - TODO confirm whether a
		 * free_irq step is needed here.
		 */
		goto err_ce_free;
	}

	return 0;

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_resource_deinit:
ath12k_ahb_resource_deinit(ab); + +err_core_free: + ath12k_core_free(ab); + platform_set_drvdata(pdev, NULL); + + return ret; +} + +static void ath12k_ahb_remove_prepare(struct ath12k_base *ab) +{ + unsigned long left; + + if (test_bit(ATH12K_FLAG_RECOVERY, &ab->dev_flags)) { + left = wait_for_completion_timeout(&ab->driver_recovery, + ATH12K_AHB_RECOVERY_TIMEOUT); + if (!left) + ath12k_warn(ab, "failed to receive recovery response completion\n"); + } + + set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); + cancel_work_sync(&ab->restart_work); + cancel_work_sync(&ab->qmi.event_work); +} + +static void ath12k_ahb_free_resources(struct ath12k_base *ab) +{ + struct platform_device *pdev = ab->pdev; + + ath12k_ahb_free_irq(ab); + ath12k_hal_srng_deinit(ab); + ath12k_ce_free_pipes(ab); + ath12k_ahb_resource_deinit(ab); + ath12k_core_free(ab); + platform_set_drvdata(pdev, NULL); +} + +static void ath12k_ahb_remove(struct platform_device *pdev) +{ + struct ath12k_base *ab = platform_get_drvdata(pdev); + + if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { + ath12k_qmi_deinit_service(ab); + goto qmi_fail; + } + + ath12k_ahb_remove_prepare(ab); + ath12k_core_deinit(ab); + +qmi_fail: + ath12k_ahb_free_resources(ab); +} + +static void ath12k_ahb_shutdown(struct platform_device *pdev) +{ + struct ath12k_base *ab = platform_get_drvdata(pdev); + + /* platform shutdown() & remove() are mutually exclusive. + * remove() is invoked during rmmod & shutdown() during + * system reboot/shutdown. 
	 */
	ath12k_ahb_remove_prepare(ab);

	/* nothing to deinit if the core never registered */
	if (!(test_bit(ATH12K_FLAG_REGISTERED, &ab->dev_flags)))
		goto free_resources;

	ath12k_core_deinit(ab);

free_resources:
	ath12k_ahb_free_resources(ab);
}

static struct platform_driver ath12k_ahb_driver = {
	.driver = {
		.name = "ath12k_ahb",
		.of_match_table = ath12k_ahb_of_match,
	},
	.probe = ath12k_ahb_probe,
	.remove = ath12k_ahb_remove,
	.shutdown = ath12k_ahb_shutdown,
};

int ath12k_ahb_init(void)
{
	return platform_driver_register(&ath12k_ahb_driver);
}

void ath12k_ahb_exit(void)
{
	platform_driver_unregister(&ath12k_ahb_driver);
}
diff --git a/drivers/net/wireless/ath/ath12k/ahb.h b/drivers/net/wireless/ath/ath12k/ahb.h
new file mode 100644
index 000000000000..bd0366a79587
--- /dev/null
+++ b/drivers/net/wireless/ath/ath12k/ahb.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: BSD-3-Clause-Clear */
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022-2024, Qualcomm Innovation Center, Inc. All rights reserved.
+ */
+#ifndef ATH12K_AHB_H
+#define ATH12K_AHB_H
+
+#include <linux/clk.h>
+#include "core.h"
+
+#define ATH12K_AHB_RECOVERY_TIMEOUT (3 * HZ)
+
+#define ATH12K_AHB_SMP2P_SMEM_MSG GENMASK(15, 0)
+#define ATH12K_AHB_SMP2P_SMEM_SEQ_NO GENMASK(31, 16)
+#define ATH12K_AHB_SMP2P_SMEM_VALUE_MASK 0xFFFFFFFF
+/* NOTE(review): the ATH12K_PCI_* names below appear copied from the PCI
+ * driver into this AHB header - TODO confirm intent or rename.
+ */
+#define ATH12K_PCI_CE_WAKE_IRQ 2
+#define ATH12K_PCI_IRQ_CE0_OFFSET 3
+
+enum ath12k_ahb_smp2p_msg_id {
+	ATH12K_AHB_POWER_SAVE_ENTER = 1,
+	ATH12K_AHB_POWER_SAVE_EXIT,
+};
+
+struct ath12k_base;
+
+/* AHB-bus private data, stored in ath12k_base::drv_priv */
+struct ath12k_ahb {
+	struct rproc *tgt_rproc;
+	struct clk *xo_clk;
+};
+
+static inline struct ath12k_ahb *ath12k_ab_to_ahb(struct ath12k_base *ab)
+{
+	return (struct ath12k_ahb *)ab->drv_priv;
+}
+
+#endif
diff --git a/drivers/net/wireless/ath/ath12k/core.h b/drivers/net/wireless/ath/ath12k/core.h
index 75bc863ca381..a815a23a7274 100644
--- a/drivers/net/wireless/ath/ath12k/core.h
+++ b/drivers/net/wireless/ath/ath12k/core.h
@@ -142,6 +142,7 @@ enum ath12k_firmware_mode {
 
 #define ATH12K_IRQ_NUM_MAX 57
 #define ATH12K_EXT_IRQ_NUM_MAX 16
+#define ATH12K_MAX_TCL_RING_NUM 3
 
 struct ath12k_ext_irq_grp {
@@ -149,6 +150,7 @@ struct ath12k_ext_irq_grp {
 	u32 num_irq;
 	u32 grp_id;
 	u64 timestamp;
+	bool napi_enabled;
 	struct napi_struct napi;
 	struct net_device *napi_ndev;
 };
@@ -1063,6 +1065,8 @@ static inline const char *ath12k_bus_str(enum ath12k_bus bus)
 	switch (bus) {
 	case ATH12K_BUS_PCI:
 		return "pci";
+	case ATH12K_BUS_AHB:
+		return "ahb";
 	}
 
 	return "unknown";
diff --git a/drivers/net/wireless/ath/ath12k/hw.h b/drivers/net/wireless/ath/ath12k/hw.h
index 038fe1b30d11..b3e2a5d7e0e4 100644
--- a/drivers/net/wireless/ath/ath12k/hw.h
+++ b/drivers/net/wireless/ath/ath12k/hw.h
@@ -121,6 +121,7 @@ enum ath12k_hw_rate_ofdm {
 
 enum ath12k_bus {
 	ATH12K_BUS_PCI,
+	ATH12K_BUS_AHB,
 };
 
 #define ATH12K_EXT_IRQ_GRP_NUM_MAX 11