@@ -504,6 +504,10 @@ for details.
driver level. Any packet received will be reflected back by the
driver on the same port. e.g. ``fslmc:dpni.1,drv_loopback=1``
+* Use the dev arg option ``drv_no_prefetch=1`` to disable prefetching
+ of the packet pull command, which is otherwise issued in the
+ previous cycle. e.g. ``fslmc:dpni.1,drv_no_prefetch=1``. See the
+ example below.
+
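+ A sketch of a testpmd invocation selecting this mode is shown
+ below; the EAL core/memory options and the use of ``-w`` to pass
+ the fslmc devargs string are illustrative assumptions, not
+ requirements stated by this guide:
+
+ .. code-block:: console
+
+    ./testpmd -c 0xff -n 1 -w fslmc:dpni.1,drv_no_prefetch=1 \
+              -- -i --portmask=0x3
+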
Enabling logs
-------------
@@ -29,6 +29,7 @@
#include <fsl_qbman_debug.h>
#define DRIVER_LOOPBACK_MODE "drv_loopback"
+#define DRIVER_NO_PREFETCH_MODE "drv_no_prefetch"
/* Supported Rx offloads */
static uint64_t dev_rx_offloads_sup =
@@ -750,6 +751,7 @@ dpaa2_supported_ptypes_get(struct rte_eth_dev *dev)
};
if (dev->rx_pkt_burst == dpaa2_dev_prefetch_rx ||
+ dev->rx_pkt_burst == dpaa2_dev_rx ||
dev->rx_pkt_burst == dpaa2_dev_loopback_rx)
return ptypes;
return NULL;
@@ -2118,6 +2120,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
eth_dev->dev_ops = &dpaa2_ethdev_ops;
if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE))
eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
+ else if (dpaa2_get_devargs(dev->devargs,
+ DRIVER_NO_PREFETCH_MODE))
+ eth_dev->rx_pkt_burst = dpaa2_dev_rx;
else
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
eth_dev->tx_pkt_burst = dpaa2_dev_tx;
@@ -2236,6 +2241,9 @@ dpaa2_dev_init(struct rte_eth_dev *eth_dev)
if (dpaa2_get_devargs(dev->devargs, DRIVER_LOOPBACK_MODE)) {
eth_dev->rx_pkt_burst = dpaa2_dev_loopback_rx;
DPAA2_PMD_INFO("Loopback mode");
+ } else if (dpaa2_get_devargs(dev->devargs, DRIVER_NO_PREFETCH_MODE)) {
+ eth_dev->rx_pkt_burst = dpaa2_dev_rx;
+ DPAA2_PMD_INFO("No Prefetch mode");
} else {
eth_dev->rx_pkt_burst = dpaa2_dev_prefetch_rx;
}
@@ -2395,7 +2403,8 @@ static struct rte_dpaa2_driver rte_dpaa2_pmd = {
RTE_PMD_REGISTER_DPAA2(net_dpaa2, rte_dpaa2_pmd);
RTE_PMD_REGISTER_PARAM_STRING(net_dpaa2,
- DRIVER_LOOPBACK_MODE "=<int>");
+ DRIVER_LOOPBACK_MODE "=<int> "
+ DRIVER_NO_PREFETCH_MODE "=<int>");
RTE_INIT(dpaa2_pmd_init_log)
{
dpaa2_logtype_pmd = rte_log_register("pmd.net.dpaa2");
@@ -149,6 +149,8 @@ int dpaa2_eth_eventq_attach(const struct rte_eth_dev *dev,
int dpaa2_eth_eventq_detach(const struct rte_eth_dev *dev,
int eth_rx_queue_id);
+uint16_t dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts);
+
uint16_t dpaa2_dev_loopback_rx(void *queue, struct rte_mbuf **bufs,
uint16_t nb_pkts);
@@ -727,6 +727,111 @@ dpaa2_dev_process_ordered_event(struct qbman_swp *swp,
qbman_swp_dqrr_consume(swp, dq);
}
+uint16_t
+dpaa2_dev_rx(void *queue, struct rte_mbuf **bufs, uint16_t nb_pkts)
+{
+ /* Function to receive frames for a given device and VQ */
+ struct dpaa2_queue *dpaa2_q = (struct dpaa2_queue *)queue;
+ struct qbman_result *dq_storage;
+ uint32_t fqid = dpaa2_q->fqid;
+ int ret, num_rx = 0, next_pull = nb_pkts, num_pulled;
+ uint8_t pending, status;
+ struct qbman_swp *swp;
+ const struct qbman_fd *fd, *next_fd;
+ struct qbman_pull_desc pulldesc;
+ struct rte_eth_dev_data *eth_data = dpaa2_q->eth_data;
+
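+ /* Make sure this lcore has an affine QBMAN software portal */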
+ if (unlikely(!DPAA2_PER_LCORE_DPIO)) {
+ ret = dpaa2_affine_qbman_swp();
+ if (ret) {
+ DPAA2_PMD_ERR("Failure in affining portal");
+ return 0;
+ }
+ }
+ swp = DPAA2_PER_LCORE_PORTAL;
+
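+ /* Pull frames in chunks of at most dpaa2_dqrr_size until the
+ * requested nb_pkts have been received or the queue returns
+ * fewer frames than asked for
+ */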
+ do {
+ dq_storage = dpaa2_q->q_storage->dq_storage[0];
+ qbman_pull_desc_clear(&pulldesc);
+ qbman_pull_desc_set_fq(&pulldesc, fqid);
+ qbman_pull_desc_set_storage(&pulldesc, dq_storage,
+ (size_t)(DPAA2_VADDR_TO_IOVA(dq_storage)), 1);
+
+ if (next_pull > dpaa2_dqrr_size) {
+ qbman_pull_desc_set_numframes(&pulldesc,
+ dpaa2_dqrr_size);
+ next_pull -= dpaa2_dqrr_size;
+ } else {
+ qbman_pull_desc_set_numframes(&pulldesc, next_pull);
+ next_pull = 0;
+ }
+
+ /* Issue the volatile dequeue (pull) command, retrying while
+ * the portal is busy
+ */
+ while (qbman_swp_pull(swp, &pulldesc)) {
+ DPAA2_PMD_DP_DEBUG(
+ "VDQ command is not issued. QBMAN is busy\n");
+ }
+
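+ /* Prefetch the next response entry while waiting for the
+ * just-issued command to complete
+ */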
+ rte_prefetch0((void *)((size_t)(dq_storage + 1)));
+ /* Check if the previous issued command is completed. */
+ while (!qbman_check_command_complete(dq_storage))
+ ;
+
+ num_pulled = 0;
+ pending = 1;
+ do {
+ /* Loop until dq_storage is updated with a new token
+ * by QBMAN
+ */
+ while (!qbman_check_new_result(dq_storage))
+ ;
+ rte_prefetch0((void *)((size_t)(dq_storage + 2)));
+ /* Check whether the last pull command has expired and,
+ * if so, set the condition for loop termination
+ */
+ if (qbman_result_DQ_is_pull_complete(dq_storage)) {
+ pending = 0;
+ /* Check for valid frame. */
+ status = qbman_result_DQ_flags(dq_storage);
+ if (unlikely((status &
+ QBMAN_DQ_STAT_VALIDFRAME) == 0))
+ continue;
+ }
+ fd = qbman_result_DQ_fd(dq_storage);
+
+ next_fd = qbman_result_DQ_fd(dq_storage + 1);
+ /* Prefetch Annotation address for the parse results */
+ rte_prefetch0(
+ (void *)(size_t)(DPAA2_GET_FD_ADDR(next_fd)
+ + DPAA2_FD_PTA_SIZE + 16));
+
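+ /* Translate the frame descriptor into an mbuf; scatter-gather
+ * frames need per-segment handling
+ */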
+ if (unlikely(DPAA2_FD_GET_FORMAT(fd) == qbman_fd_sg))
+ bufs[num_rx] = eth_sg_fd_to_mbuf(fd);
+ else
+ bufs[num_rx] = eth_fd_to_mbuf(fd);
+ bufs[num_rx]->port = eth_data->port_id;
+
+ if (eth_data->dev_conf.rxmode.offloads &
+ DEV_RX_OFFLOAD_VLAN_STRIP) {
+ rte_vlan_strip(bufs[num_rx]);
+ }
+
+ dq_storage++;
+ num_rx++;
+ num_pulled++;
+ } while (pending);
+ /* Last VDQ provided all packets and more packets are requested */
+ } while (next_pull && num_pulled == dpaa2_dqrr_size);
+
+ dpaa2_q->rx_pkts += num_rx;
+
+ return num_rx;
+}
+
/*
* Callback to handle sending packets through WRIOP based interface
*/