@@ -99,6 +99,7 @@ dpaa_mbuf_create_pool(struct rte_mempool *mp)
rte_dpaa_bpid_info[bpid].meta_data_size =
sizeof(struct rte_mbuf) + rte_pktmbuf_priv_size(mp);
rte_dpaa_bpid_info[bpid].dpaa_ops_index = mp->ops_index;
+ rte_dpaa_bpid_info[bpid].ptov_off = 0;
bp_info = rte_malloc(NULL,
sizeof(struct dpaa_bp_info),
@@ -171,9 +172,20 @@ dpaa_mbuf_free_bulk(struct rte_mempool *pool,
}
while (i < n) {
+ uint64_t phy = rte_mempool_virt2iova(obj_table[i]);
+
+ if (unlikely(!bp_info->ptov_off)) {
+ /* buffers are not from multiple memzones */
+ if (!(bp_info->mp->flags & MEMPOOL_F_MULTI_MEMZONE)) {
+ bp_info->ptov_off
+ = (uint64_t)obj_table[i] - phy;
+ rte_dpaa_bpid_info[bp_info->bpid].ptov_off
+ = bp_info->ptov_off;
+ }
+ }
+
dpaa_buf_free(bp_info,
- (uint64_t)rte_mempool_virt2iova(obj_table[i]) +
- bp_info->meta_data_size);
+ (uint64_t)phy + bp_info->meta_data_size);
i = i + 1;
}
@@ -241,7 +253,7 @@ dpaa_mbuf_alloc_bulk(struct rte_mempool *pool,
* i.e. first buffer is valid, remaining 6 buffers
* may be null.
*/
- bufaddr = (void *)rte_dpaa_mem_ptov(bufs[i].addr);
+ bufaddr = DPAA_MEMPOOL_PTOV(bp_info, bufs[i].addr);
m[n] = (struct rte_mbuf *)((char *)bufaddr
- bp_info->meta_data_size);
DPAA_MEMPOOL_DPDEBUG("Paddr (%p), FD (%p) from BMAN",
@@ -61,8 +61,17 @@ struct dpaa_bp_info {
uint32_t size;
uint32_t meta_data_size;
int32_t dpaa_ops_index;
+ int64_t ptov_off;
};
+static inline void *
+DPAA_MEMPOOL_PTOV(struct dpaa_bp_info *bp_info, uint64_t addr)
+{
+ if (bp_info->ptov_off)
+ return ((void *)(addr + bp_info->ptov_off));
+ return rte_dpaa_mem_ptov(addr);
+}
+
#define DPAA_MEMPOOL_TO_POOL_INFO(__mp) \
((struct dpaa_bp_info *)__mp->pool_data)
If the pool's buffers are allocated from a single memzone, optimize
the phys-to-virt address conversions by caching the constant offset
between the two.

Signed-off-by: Hemant Agrawal <hemant.agrawal@nxp.com>
---
 drivers/mempool/dpaa/dpaa_mempool.c | 18 +++++++++++++++---
 drivers/mempool/dpaa/dpaa_mempool.h |  9 +++++++++
 2 files changed, 24 insertions(+), 3 deletions(-)

--
2.7.4
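
For context, below is a minimal standalone sketch of the fast path this
patch introduces. The names (pool_ctx, generic_ptov, fast_ptov,
learn_offset) are illustrative stand-ins rather than the driver's real
symbols: when every buffer of a pool comes from one contiguous memzone,
virtual and physical addresses differ by a single constant, so that
delta can be captured on the first buffer free and each later
phys-to-virt conversion becomes one addition instead of a full
translation lookup.

#include <stdint.h>
#include <stdio.h>

struct pool_ctx {
	int64_t ptov_off;	/* 0 until the delta has been learned */
};

static void *
generic_ptov(uint64_t phys)
{
	/* Stand-in for a generic translation such as rte_dpaa_mem_ptov();
	 * here it just simulates one fixed virt = phys + 0x1000 mapping.
	 */
	return (void *)(uintptr_t)(phys + 0x1000);
}

static inline void *
fast_ptov(struct pool_ctx *ctx, uint64_t phys)
{
	if (ctx->ptov_off)	/* fast path: a single addition */
		return (void *)(uintptr_t)(phys + ctx->ptov_off);
	return generic_ptov(phys);	/* fallback until offset is known */
}

static inline void
learn_offset(struct pool_ctx *ctx, void *virt, uint64_t phys,
	     int single_memzone)
{
	/* On the first buffer free: if the pool is backed by a single
	 * memzone, the virt-phys delta is the same for every buffer,
	 * so remember it for all later conversions.
	 */
	if (ctx->ptov_off == 0 && single_memzone)
		ctx->ptov_off = (int64_t)((uintptr_t)virt - phys);
}

int main(void)
{
	struct pool_ctx ctx = { .ptov_off = 0 };
	uint64_t phys = 0x2000;
	void *virt = generic_ptov(phys);

	learn_offset(&ctx, virt, phys, 1);	/* capture the delta once */
	printf("fast path matches generic: %d\n",
	       fast_ptov(&ctx, phys) == virt);
	return 0;
}

In the actual patch, the offset capture sits in dpaa_mbuf_free_bulk()
(guarded by the MEMPOOL_F_MULTI_MEMZONE flag) and the fast conversion in
DPAA_MEMPOOL_PTOV(), which falls back to rte_dpaa_mem_ptov() until the
offset has been recorded.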