@@ -1229,6 +1229,7 @@ struct qman_fq {
int q_fd;
u16 ch_id;
+ int8_t vsp_id;
u8 cgr_groupid;
u8 is_static:4;
u8 qp_initialized:4;
@@ -722,6 +722,56 @@ static int dpaa_eth_multicast_disable(struct rte_eth_dev *dev)
return 0;
}
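+/* Program the FMan port for this interface: internal context (IC)
+ * parameters, the frame descriptor offset, and the buffer pool
+ * geometry taken from the mempool attached to the interface.
+ */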
+static void dpaa_fman_if_pool_setup(struct rte_eth_dev *dev)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct fman_if_ic_params icp;
+ uint32_t fd_offset;
+ uint32_t bp_size;
+
+ memset(&icp, 0, sizeof(icp));
+ /* Set ICEOF to the default value, which is 0 */
+ icp.iciof = DEFAULT_ICIOF;
+ icp.iceof = DEFAULT_RX_ICEOF;
+ icp.icsz = DEFAULT_ICSZ;
+ fman_if_set_ic_params(dev->process_private, &icp);
+
+ fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
+ fman_if_set_fdoff(dev->process_private, fd_offset);
+
+ /* Buffer pool size should be equal to the dataroom size */
+ bp_size = rte_pktmbuf_data_room_size(dpaa_intf->bp_info->mp);
+
+ fman_if_set_bp(dev->process_private,
+ dpaa_intf->bp_info->mp->size,
+ dpaa_intf->bp_info->bpid, bp_size);
+}
+
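+/* All Rx queues that share a VSP (or the base profile when none is
+ * set) must use the same buffer pool; reject a conflicting mempool.
+ */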
+static inline int dpaa_eth_rx_queue_bp_check(
+ struct rte_eth_dev *dev, int8_t vsp_id, uint32_t bpid)
+{
+ struct dpaa_if *dpaa_intf = dev->data->dev_private;
+ struct fman_if *fif = dev->process_private;
+
+ if (fif->num_profiles) {
+ if (vsp_id < 0)
+ vsp_id = fif->base_profile_id;
+ } else {
+ if (vsp_id < 0)
+ vsp_id = 0;
+ }
+
+ if (dpaa_intf->vsp_bpid[vsp_id] &&
+ bpid != dpaa_intf->vsp_bpid[vsp_id]) {
+ DPAA_PMD_ERR(
+ "Different mempools are assigned to Rx queues with the same VSP");
+
+ return -1;
+ }
+
+ return 0;
+}
+
static
int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
uint16_t nb_desc,
@@ -757,6 +807,20 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
DPAA_PMD_INFO("Rx queue setup for queue index: %d fq_id (0x%x)",
queue_idx, rxq->fqid);
+ if (!fif->num_profiles) {
+ if (dpaa_intf->bp_info && dpaa_intf->bp_info->bp &&
+ dpaa_intf->bp_info->mp != mp) {
+ DPAA_PMD_WARN(
+ "Multiple mempools on the same interface are not supported");
+ return -EINVAL;
+ }
+ } else {
+ if (dpaa_eth_rx_queue_bp_check(dev, rxq->vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid)) {
+ return -EINVAL;
+ }
+ }
+
/* Max packet can fit in single buffer */
if (dev->data->dev_conf.rxmode.max_rx_pkt_len <= buffsz) {
;
@@ -779,36 +843,41 @@ int dpaa_eth_rx_queue_setup(struct rte_eth_dev *dev, uint16_t queue_idx,
buffsz - RTE_PKTMBUF_HEADROOM);
}
- if (!dpaa_intf->bp_info || dpaa_intf->bp_info->mp != mp) {
- struct fman_if_ic_params icp;
- uint32_t fd_offset;
- uint32_t bp_size;
+ dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
- if (!mp->pool_data) {
- DPAA_PMD_ERR("Not an offloaded buffer pool!");
- return -1;
+ /* For a shared interface, pool setup is done by the kernel; skip it. */
+ if (!fif->is_shared_mac)
+ dpaa_fman_if_pool_setup(dev);
+
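+ /* Bind this queue's mempool to its VSP, or record it against the
+ * base/default profile when no explicit VSP is configured.
+ */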
+ if (fif->num_profiles) {
+ int8_t vsp_id = rxq->vsp_id;
+
+ if (vsp_id >= 0) {
+ ret = dpaa_port_vsp_update(dpaa_intf, fmc_q, vsp_id,
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid,
+ fif);
+ if (ret) {
+ DPAA_PMD_ERR("dpaa_port_vsp_update failed");
+ return ret;
+ }
+ } else {
+ DPAA_PMD_INFO("Base profile is associated with"
+ " RXQ fqid:%d", rxq->fqid);
+ if (fif->is_shared_mac) {
+ DPAA_PMD_ERR(
+ "Fatal: Base profile is associated with"
+ " a shared interface on DPDK.");
+ return -EINVAL;
+ }
+ dpaa_intf->vsp_bpid[fif->base_profile_id] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
}
- dpaa_intf->bp_info = DPAA_MEMPOOL_TO_POOL_INFO(mp);
-
- memset(&icp, 0, sizeof(icp));
- /* set ICEOF for to the default value , which is 0*/
- icp.iciof = DEFAULT_ICIOF;
- icp.iceof = DEFAULT_RX_ICEOF;
- icp.icsz = DEFAULT_ICSZ;
- fman_if_set_ic_params(fif, &icp);
-
- fd_offset = RTE_PKTMBUF_HEADROOM + DPAA_HW_BUF_RESERVE;
- fman_if_set_fdoff(fif, fd_offset);
-
- /* Buffer pool size should be equal to Dataroom Size*/
- bp_size = rte_pktmbuf_data_room_size(mp);
- fman_if_set_bp(fif, mp->size,
- dpaa_intf->bp_info->bpid, bp_size);
- dpaa_intf->valid = 1;
- DPAA_PMD_DEBUG("if:%s fd_offset = %d offset = %d",
- dpaa_intf->name, fd_offset,
- fman_if_get_fdoff(fif));
+ } else {
+ dpaa_intf->vsp_bpid[0] =
+ DPAA_MEMPOOL_TO_POOL_INFO(mp)->bpid;
}
+
+ dpaa_intf->valid = 1;
DPAA_PMD_DEBUG("if:%s sg_on = %d, max_frm =%d", dpaa_intf->name,
fman_if_get_sg_enable(fif),
dev->data->dev_conf.rxmode.max_rx_pkt_len);
@@ -1605,6 +1674,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
uint32_t cgrid[DPAA_MAX_NUM_PCD_QUEUES];
uint32_t cgrid_tx[MAX_DPAA_CORES];
uint32_t dev_rx_fqids[DPAA_MAX_NUM_PCD_QUEUES];
+ int8_t dev_vspids[DPAA_MAX_NUM_PCD_QUEUES];
+ int8_t vsp_id = -1;
PMD_INIT_FUNC_TRACE();
@@ -1624,6 +1695,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
memset((char *)dev_rx_fqids, 0,
sizeof(uint32_t) * DPAA_MAX_NUM_PCD_QUEUES);
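+ /* -1 marks an Rx queue that has no VSP assigned */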
+ memset(dev_vspids, -1, DPAA_MAX_NUM_PCD_QUEUES);
+
/* Initialize Rx FQ's */
if (default_q) {
num_rx_fqs = DPAA_DEFAULT_NUM_PCD_QUEUES;
@@ -1703,6 +1776,8 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
else
fqid = dev_rx_fqids[loop];
+ vsp_id = dev_vspids[loop];
+
if (dpaa_intf->cgr_rx)
dpaa_intf->cgr_rx[loop].cgrid = cgrid[loop];
@@ -1711,6 +1786,7 @@ dpaa_dev_init(struct rte_eth_dev *eth_dev)
fqid);
if (ret)
goto free_rx;
+ dpaa_intf->rx_queues[loop].vsp_id = vsp_id;
dpaa_intf->rx_queues[loop].dpaa_intf = dpaa_intf;
}
dpaa_intf->nb_rx_queues = num_rx_fqs;
@@ -2051,6 +2127,11 @@ static void __attribute__((destructor(102))) dpaa_finish(void)
if (dpaa_fm_deconfig(dpaa_intf, fif))
DPAA_PMD_WARN("DPAA FM "
"deconfig failed\n");
+ if (fif->num_profiles) {
+ if (dpaa_port_vsp_cleanup(dpaa_intf,
+ fif))
+ DPAA_PMD_WARN("DPAA FM VSP cleanup failed\n");
+ }
}
}
if (is_global_init)
@@ -103,6 +103,10 @@
#define DPAA_FD_CMD_CFQ 0x00ffffff
/**< Confirmation Frame Queue */
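+/* Maximum number of VSPs (virtual storage profiles) per interface */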
+#define DPAA_VSP_PROFILE_MAX_NUM 8
+
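+/* Default VSP id for DPDK Rx queues when the base profile id is 0 */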
+#define DPAA_DEFAULT_RXQ_VSP_ID 1
+
/* Each network interface is represented by one of these */
struct dpaa_if {
int valid;
@@ -122,6 +126,9 @@ struct dpaa_if {
void *netenv_handle;
void *scheme_handle[2];
uint32_t scheme_count;
+
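+ /* Per-VSP FMan handle and the bpid of the mempool bound to it */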
+ void *vsp_handle[DPAA_VSP_PROFILE_MAX_NUM];
+ uint32_t vsp_bpid[DPAA_VSP_PROFILE_MAX_NUM];
};
struct dpaa_if_stats {
@@ -12,6 +12,7 @@
#include <dpaa_flow.h>
#include <rte_dpaa_logs.h>
#include <fmlib/fm_port_ext.h>
+#include <fmlib/fm_vsp_ext.h>
#define DPAA_MAX_NUM_ETH_DEV 8
@@ -47,6 +48,17 @@ static struct dpaa_fm_info fm_info;
static struct dpaa_fm_model fm_model;
static const char *fm_log = "/tmp/fmdpdk.bin";
+static inline uint8_t fm_default_vsp_id(struct fman_if *fif)
+{
+ /* Avoid being the same as the base profile, which could be used
+ * for the kernel interface of a shared MAC.
+ */
+ if (fif->base_profile_id)
+ return 0;
+ else
+ return DPAA_DEFAULT_RXQ_VSP_ID;
+}
+
static void fm_prev_cleanup(void)
{
uint32_t fman_id = 0, i = 0, devid;
@@ -300,11 +312,18 @@ set_hashParams_sctp(ioc_fm_pcd_kg_scheme_params_t *scheme_params, int hdr_idx)
static int set_scheme_params(ioc_fm_pcd_kg_scheme_params_t *scheme_params,
ioc_fm_pcd_net_env_params_t *dist_units,
struct dpaa_if *dpaa_intf,
- struct fman_if *fif __rte_unused)
+ struct fman_if *fif)
{
int dist_idx, hdr_idx = 0;
PMD_INIT_FUNC_TRACE();
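+ /* With VSPs enabled, direct the scheme to the default Rx VSP
+ * instead of the port's base profile.
+ */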
+ if (fif->num_profiles) {
+ scheme_params->param.override_storage_profile = true;
+ scheme_params->param.storage_profile.direct = true;
+ scheme_params->param.storage_profile.profile_select
+ .direct_relative_profileId = fm_default_vsp_id(fif);
+ }
+
scheme_params->param.use_hash = 1;
scheme_params->param.modify = false;
scheme_params->param.always_direct = false;
@@ -787,6 +806,14 @@ int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set)
return -1;
}
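+ /* Default all Rx queues to the default VSP when profiles are enabled */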
+ if (fif->num_profiles) {
+ for (i = 0; i < dpaa_intf->nb_rx_queues; i++)
+ dpaa_intf->rx_queues[i].vsp_id =
+ fm_default_vsp_id(fif);
+
+ i = 0;
+ }
+
/* Set PCD netenv and scheme */
if (req_dist_set) {
ret = set_pcd_netenv_scheme(dpaa_intf, req_dist_set, fif);
@@ -912,3 +939,141 @@ int dpaa_fm_term(void)
}
return 0;
}
+
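+/* Configure and initialize a VSP on the FMan Rx port: map the profile
+ * to the interface's MAC, attach the external buffer pool recorded for
+ * this VSP and set the buffer prefix layout.
+ */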
+static int dpaa_port_vsp_configure(struct dpaa_if *dpaa_intf,
+ uint8_t vsp_id, t_Handle fman_handle,
+ struct fman_if *fif)
+{
+ t_FmVspParams vsp_params;
+ t_FmBufferPrefixContent buf_prefix_cont;
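+ /* Translate the 1-based fif->mac_idx into the relative FMan port id */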
+ uint8_t mac_idx[] = {-1, 0, 1, 2, 3, 4, 5, 6, 7, 0, 1};
+ uint8_t idx = mac_idx[fif->mac_idx];
+ int ret;
+
+ if (vsp_id == fif->base_profile_id && fif->is_shared_mac) {
+ /* For a shared interface, the VSP of the base profile is the
+ * default pool located in the kernel.
+ */
+ dpaa_intf->vsp_bpid[vsp_id] = 0;
+ return 0;
+ }
+
+ if (vsp_id >= DPAA_VSP_PROFILE_MAX_NUM) {
+ DPAA_PMD_ERR("VSP ID %d exceeds MAX number %d",
+ vsp_id, DPAA_VSP_PROFILE_MAX_NUM);
+ return -1;
+ }
+
+ memset(&vsp_params, 0, sizeof(vsp_params));
+ vsp_params.h_Fm = fman_handle;
+ vsp_params.relativeProfileId = vsp_id;
+ vsp_params.portParams.portId = idx;
+ if (fif->mac_type == fman_mac_1g) {
+ vsp_params.portParams.portType = e_FM_PORT_TYPE_RX;
+ } else if (fif->mac_type == fman_mac_2_5g) {
+ vsp_params.portParams.portType = e_FM_PORT_TYPE_RX_2_5G;
+ } else if (fif->mac_type == fman_mac_10g) {
+ vsp_params.portParams.portType = e_FM_PORT_TYPE_RX_10G;
+ } else {
+ DPAA_PMD_ERR("Mac type %d error", fif->mac_type);
+ return -1;
+ }
+ vsp_params.extBufPools.numOfPoolsUsed = 1;
+ vsp_params.extBufPools.extBufPool[0].id =
+ dpaa_intf->vsp_bpid[vsp_id];
+ vsp_params.extBufPools.extBufPool[0].size =
+ RTE_MBUF_DEFAULT_BUF_SIZE;
+
+ dpaa_intf->vsp_handle[vsp_id] = FM_VSP_Config(&vsp_params);
+ if (!dpaa_intf->vsp_handle[vsp_id]) {
+ DPAA_PMD_ERR("FM_VSP_Config error for profile %d", vsp_id);
+ return -EINVAL;
+ }
+
+ /* Configure the application buffer (structure, size and
+ * content).
+ */
+ memset(&buf_prefix_cont, 0, sizeof(buf_prefix_cont));
+
+ buf_prefix_cont.privDataSize = 16;
+ buf_prefix_cont.dataAlign = 64;
+ buf_prefix_cont.passPrsResult = true;
+ buf_prefix_cont.passTimeStamp = true;
+ buf_prefix_cont.passHashResult = false;
+ buf_prefix_cont.passAllOtherPCDInfo = false;
+ ret = FM_VSP_ConfigBufferPrefixContent(dpaa_intf->vsp_handle[vsp_id],
+ &buf_prefix_cont);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_VSP_ConfigBufferPrefixContent error for profile %d err: %d",
+ vsp_id, ret);
+ return ret;
+ }
+
+ /* initialize the FM VSP module */
+ ret = FM_VSP_Init(dpaa_intf->vsp_handle[vsp_id]);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR("FM_VSP_Init error for profile %d err:%d",
+ vsp_id, ret);
+ return ret;
+ }
+
+ return 0;
+}
+
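+/* (Re)bind a VSP to a buffer pool: free any existing handle for the
+ * profile, record the new bpid and configure the VSP again.
+ */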
+int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
+ bool fmc_mode, uint8_t vsp_id, uint32_t bpid,
+ struct fman_if *fif)
+{
+ int ret = 0;
+ t_Handle fman_handle;
+
+ if (!fif->num_profiles)
+ return 0;
+
+ if (vsp_id >= fif->num_profiles)
+ return 0;
+
+ if (dpaa_intf->vsp_bpid[vsp_id] == bpid)
+ return 0;
+
+ if (dpaa_intf->vsp_handle[vsp_id]) {
+ ret = FM_VSP_Free(dpaa_intf->vsp_handle[vsp_id]);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR(
+ "Error FM_VSP_Free: err %d vsp_handle[%d]",
+ ret, vsp_id);
+ return ret;
+ }
+ dpaa_intf->vsp_handle[vsp_id] = 0;
+ }
+
+ if (fmc_mode)
+ fman_handle = FM_Open(0);
+ else
+ fman_handle = fm_info.fman_handle;
+
+ dpaa_intf->vsp_bpid[vsp_id] = bpid;
+
+ return dpaa_port_vsp_configure(dpaa_intf, vsp_id, fman_handle, fif);
+}
+
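+/* Release every VSP handle configured on this interface */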
+int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif)
+{
+ int idx, ret;
+
+ for (idx = 0; idx < (uint8_t)fif->num_profiles; idx++) {
+ if (dpaa_intf->vsp_handle[idx]) {
+ ret = FM_VSP_Free(dpaa_intf->vsp_handle[idx]);
+ if (ret != E_OK) {
+ DPAA_PMD_ERR(
+ "Error FM_VSP_Free: err %d vsp_handle[%d]",
+ ret, idx);
+ return ret;
+ }
+ }
+ }
+
+ return E_OK;
+}
@@ -10,5 +10,10 @@ int dpaa_fm_term(void);
int dpaa_fm_config(struct rte_eth_dev *dev, uint64_t req_dist_set);
int dpaa_fm_deconfig(struct dpaa_if *dpaa_intf, struct fman_if *fif);
void dpaa_write_fm_config_to_file(void);
+int dpaa_port_vsp_update(struct dpaa_if *dpaa_intf,
+ bool fmc_mode, uint8_t vsp_id, uint32_t bpid, struct fman_if *fif);
+int dpaa_port_vsp_cleanup(struct dpaa_if *dpaa_intf, struct fman_if *fif);
+int dpaa_port_fmc_init(struct fman_if *fif,
+ uint32_t *fqids, int8_t *vspids, int max_nb_rxq);
#endif