@@ -763,6 +763,85 @@ void efx_remove_channels(struct efx_nic *efx)
kfree(efx->xdp_tx_queues);
}
+static int efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
+ struct efx_tx_queue *tx_queue)
+{
+ if (xdp_queue_number >= efx->xdp_tx_queue_count)
+ return -EINVAL;
+
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is XDP %u, HW %u\n",
+ tx_queue->channel->channel, tx_queue->label,
+ xdp_queue_number, tx_queue->queue);
+ efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
+ return 0;
+}
+
+static void efx_set_xdp_channels(struct efx_nic *efx)
+{
+ struct efx_tx_queue *tx_queue;
+ struct efx_channel *channel;
+ unsigned int next_queue = 0;
+ int xdp_queue_number = 0;
+ int rc;
+
+ /* We need to mark which channels really have TX queues, and
+ * adjust the TX queue numbers if we have separate RX-only and
+ * TX-only channels.
+ */
+ efx_for_each_channel(channel, efx) {
+ if (channel->channel < efx->tx_channel_offset)
+ continue;
+
+ if (efx_channel_is_xdp_tx(channel)) {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ } else {
+ efx_for_each_channel_tx_queue(tx_queue, channel) {
+ tx_queue->queue = next_queue++;
+ netif_dbg(efx, drv, efx->net_dev,
+ "Channel %u TXQ %u is HW %u\n",
+ channel->channel, tx_queue->label,
+ tx_queue->queue);
+ }
+
+ /* If XDP is borrowing queues from the net stack, it must
+ * use the queue with no csum offload, which is the first
+ * queue of the channel
+ * (note: tx_queue_by_type is not initialized yet)
+ */
+ if (efx->xdp_txq_queues_mode ==
+ EFX_XDP_TX_QUEUES_BORROWED) {
+ tx_queue = &channel->tx_queue[0];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number,
+ tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+ }
+ }
+ WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number != efx->xdp_tx_queue_count);
+ WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
+ xdp_queue_number > efx->xdp_tx_queue_count);
+
+ /* If we have more CPUs than assigned XDP TX queues, assign the
+ * already existing queues to the remaining CPUs
+ */
+ next_queue = 0;
+ while (xdp_queue_number < efx->xdp_tx_queue_count) {
+ tx_queue = efx->xdp_tx_queues[next_queue++];
+ rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
+ if (rc == 0)
+ xdp_queue_number++;
+ }
+}
+
int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
{
struct efx_channel *other_channel[EFX_MAX_CHANNELS], *channel;
@@ -837,6 +916,7 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
efx_init_napi_channel(efx->channel[i]);
}
+ efx_set_xdp_channels(efx);
out:
/* Destroy unused channel structures */
for (i = 0; i < efx->n_channels; i++) {
@@ -872,26 +952,9 @@ int efx_realloc_channels(struct efx_nic *efx, u32 rxq_entries, u32 txq_entries)
goto out;
}
-static inline int
-efx_set_xdp_tx_queue(struct efx_nic *efx, int xdp_queue_number,
- struct efx_tx_queue *tx_queue)
-{
- if (xdp_queue_number >= efx->xdp_tx_queue_count)
- return -EINVAL;
-
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is XDP %u, HW %u\n",
- tx_queue->channel->channel, tx_queue->label,
- xdp_queue_number, tx_queue->queue);
- efx->xdp_tx_queues[xdp_queue_number] = tx_queue;
- return 0;
-}
-
int efx_set_channels(struct efx_nic *efx)
{
- struct efx_tx_queue *tx_queue;
struct efx_channel *channel;
- unsigned int next_queue = 0;
- int xdp_queue_number;
int rc;
efx->tx_channel_offset =
@@ -909,61 +972,14 @@ int efx_set_channels(struct efx_nic *efx)
return -ENOMEM;
}
- /* We need to mark which channels really have RX and TX
- * queues, and adjust the TX queue numbers if we have separate
- * RX-only and TX-only channels.
- */
- xdp_queue_number = 0;
efx_for_each_channel(channel, efx) {
if (channel->channel < efx->n_rx_channels)
channel->rx_queue.core_index = channel->channel;
else
channel->rx_queue.core_index = -1;
-
- if (channel->channel >= efx->tx_channel_offset) {
- if (efx_channel_is_xdp_tx(channel)) {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- } else {
- efx_for_each_channel_tx_queue(tx_queue, channel) {
- tx_queue->queue = next_queue++;
- netif_dbg(efx, drv, efx->net_dev, "Channel %u TXQ %u is HW %u\n",
- channel->channel, tx_queue->label,
- tx_queue->queue);
- }
-
- /* If XDP is borrowing queues from net stack, it must use the queue
- * with no csum offload, which is the first one of the channel
- * (note: channel->tx_queue_by_type is not initialized yet)
- */
- if (efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_BORROWED) {
- tx_queue = &channel->tx_queue[0];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
- }
- }
}
- WARN_ON(efx->xdp_txq_queues_mode == EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number != efx->xdp_tx_queue_count);
- WARN_ON(efx->xdp_txq_queues_mode != EFX_XDP_TX_QUEUES_DEDICATED &&
- xdp_queue_number > efx->xdp_tx_queue_count);
- /* If we have more CPUs than assigned XDP TX queues, assign the already
- * existing queues to the exceeding CPUs
- */
- next_queue = 0;
- while (xdp_queue_number < efx->xdp_tx_queue_count) {
- tx_queue = efx->xdp_tx_queues[next_queue++];
- rc = efx_set_xdp_tx_queue(efx, xdp_queue_number, tx_queue);
- if (rc == 0)
- xdp_queue_number++;
- }
+ efx_set_xdp_channels(efx);
rc = netif_set_real_num_tx_queues(efx->net_dev, efx->n_tx_channels);
if (rc)