Message ID | 1431079069-9702-4-git-send-email-maxim.uvarov@linaro.org |
---|---|
State | New |
On Fri, May 8, 2015 at 12:57 PM, Maxim Uvarov <maxim.uvarov@linaro.org> wrote: > Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> > --- > helper/include/odp/helper/ring.h | 7 +- > helper/ring.c | 3 +- > platform/linux-generic/Makefile.am | 3 + > .../linux-generic/include/odp_buffer_internal.h | 3 + > .../linux-generic/include/odp_packet_io_internal.h | 16 + > .../include/odp_packet_io_ipc_internal.h | 48 ++ > platform/linux-generic/include/odp_shm_internal.h | 22 + > platform/linux-generic/odp_packet_io.c | 19 +- > platform/linux-generic/odp_packet_io_ipc.c | 603 +++++++++++++++++++++ > platform/linux-generic/odp_pool.c | 23 +- > platform/linux-generic/odp_shared_memory.c | 10 +- > 11 files changed, 745 insertions(+), 12 deletions(-) > create mode 100644 platform/linux-generic/include/odp_packet_io_ipc_internal.h > create mode 100644 platform/linux-generic/include/odp_shm_internal.h > create mode 100644 platform/linux-generic/odp_packet_io_ipc.c > > diff --git a/helper/include/odp/helper/ring.h b/helper/include/odp/helper/ring.h > index 5e640a7..c3c2f6a 100644 > --- a/helper/include/odp/helper/ring.h > +++ b/helper/include/odp/helper/ring.h > @@ -156,10 +156,11 @@ typedef struct odph_ring { > } odph_ring_t; > > > -#define ODPH_RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer".*/ > -#define ODPH_RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer".*/ > -#define ODPH_RING_SHM_PROC 0x0004 /* If set - ring is visible from different > +#define ODPH_RING_F_SP_ENQ (1 << 0) /* The default enqueue is "single-producer".*/ > +#define ODPH_RING_F_SC_DEQ (1 << 1) /* The default dequeue is "single-consumer".*/ > +#define ODPH_RING_SHM_PROC (1 << 2) /* If set - ring is visible from different > processes. Default is thread visible. */ > +#define ODPH_RING_NO_LIST (1 << 3) /* Do not link ring to linked list. 
*/ > #define ODPH_RING_QUOT_EXCEED (1 << 31) /* Quota exceed for burst ops */ > #define ODPH_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask */ > > diff --git a/helper/ring.c b/helper/ring.c > index 0927a6c..54c40cf 100644 > --- a/helper/ring.c > +++ b/helper/ring.c > @@ -200,7 +200,8 @@ odph_ring_create(const char *name, unsigned count, unsigned flags) > r->prod.tail = 0; > r->cons.tail = 0; > > - TAILQ_INSERT_TAIL(&odp_ring_list, r, next); > + if (!(flags & ODPH_RING_NO_LIST)) > + TAILQ_INSERT_TAIL(&odp_ring_list, r, next); > } else { > ODP_ERR("Cannot reserve memory\n"); > } > diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am > index 66f0474..4843386 100644 > --- a/platform/linux-generic/Makefile.am > +++ b/platform/linux-generic/Makefile.am > @@ -120,11 +120,13 @@ noinst_HEADERS = \ > ${top_srcdir}/platform/linux-generic/include/odp_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_packet_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_packet_io_internal.h \ > + ${top_srcdir}/platform/linux-generic/include/odp_packet_io_ipc_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_packet_io_queue.h \ > ${top_srcdir}/platform/linux-generic/include/odp_packet_socket.h \ > ${top_srcdir}/platform/linux-generic/include/odp_pool_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_queue_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_schedule_internal.h \ > + ${top_srcdir}/platform/linux-generic/include/odp_shm_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_spin_internal.h \ > ${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \ > ${top_srcdir}/platform/linux-generic/Makefile.inc > @@ -155,6 +157,7 @@ __LIB__libodp_la_SOURCES = \ > odp_packet.c \ > odp_packet_flags.c \ > odp_packet_io.c \ > + odp_packet_io_ipc.c \ > odp_packet_socket.c \ > odp_pool.c \ > odp_queue.c \ > diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h > index 3a3d2a2..4ea7c62 100644 > --- a/platform/linux-generic/include/odp_buffer_internal.h > +++ b/platform/linux-generic/include/odp_buffer_internal.h > @@ -129,6 +129,9 @@ typedef struct odp_buffer_hdr_t { > size_t udata_size; /* size of user metadata */ > uint32_t segcount; /* segment count */ > uint32_t segsize; /* segment size */ > + /* ipc mapped process can not walk over pointers, > + * offset has to be used */ > + uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG]; > void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ > } odp_buffer_hdr_t; > > diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h > index 18b59ef..d12a1fa 100644 > --- a/platform/linux-generic/include/odp_packet_io_internal.h > +++ b/platform/linux-generic/include/odp_packet_io_internal.h > @@ -23,6 +23,7 @@ extern "C" { > #include <odp_classification_datamodel.h> > #include <odp_align_internal.h> > #include <odp_debug_internal.h> > +#include <odp/helper/ring.h> > > #include <odp/config.h> > #include <odp/hints.h> > @@ -36,6 +37,8 @@ typedef enum { > ODP_PKTIO_TYPE_SOCKET_MMSG, > ODP_PKTIO_TYPE_SOCKET_MMAP, > ODP_PKTIO_TYPE_LOOPBACK, > + ODP_PKTIO_TYPE_IPC, > + ODP_PKTIO_TYPE_IPC_SLAVE, > } odp_pktio_type_t; > > struct pktio_entry { > @@ -53,6 +56,19 @@ struct pktio_entry { > char name[IFNAMSIZ]; /**< name of pktio provided to > pktio_open() */ > odp_bool_t promisc; /**< promiscuous mode state */ > + odph_ring_t *ipc_r; /**< ODP ring 
for IPC msg packets > + indexes transmitted to shared memory */ > + odph_ring_t *ipc_p; /**< ODP ring for IPC msg packets > + indexes already processed by remote process */ > + void *ipc_pool_base; /**< IPC Remote pool base addr */ > + void *ipc_pool_mdata_base; /**< IPC Remote pool mdata base addr */ > + uint64_t ipc_pkt_size; /**< IPC: packet size in remote pool */ > + > + odph_ring_t *ipc_r_slave; > + odph_ring_t *ipc_p_slave; Could you add a brief description to these too? And what do _r and _p stand for? > + > + odp_pool_t ipc_pool; /**< IPC: Pool of main process */ > + odp_shm_t ipc_pool_shm; /**< IPC: Shm memory for remote pool */ > }; > > typedef union { > diff --git a/platform/linux-generic/include/odp_packet_io_ipc_internal.h b/platform/linux-generic/include/odp_packet_io_ipc_internal.h > new file mode 100644 > index 0000000..e11ec35 > --- /dev/null > +++ b/platform/linux-generic/include/odp_packet_io_ipc_internal.h > @@ -0,0 +1,48 @@ > +/* Copyright (c) 2015, Linaro Limited > + * All rights reserved. > + * > + * SPDX-License-Identifier: BSD-3-Clause > + */ > + > +#include <odp/packet_io.h> > +#include <odp_packet_io_internal.h> > +#include <odp/packet.h> > +#include <odp_packet_internal.h> > +#include <odp_internal.h> > +#include <odp/shared_memory.h> > + > +#include <string.h> > +#include <unistd.h> > +#include <stdlib.h> > + > +/* IPC packet I/O over odph_ring */ > +#include <odp/helper/ring.h> > + > +#define PKTIO_IPC_ENTRIES 4096 /**< number of odp buffers in > + odp ring queue */ > + > +/* that struct is exported to shared memory, so that 2 processes can find > + * each other. > + */ > +struct pktio_info { > + char remote_pool_name[ODP_POOL_NAME_LEN]; > + int shm_pool_num; > + size_t shm_pkt_pool_size; > + uint32_t shm_pkt_size; > + odp_shm_t shm; /*< current structure stored in this shm */ > + size_t mdata_offset; /*< offset from shared memory block start > + *to pool_mdata_addr */ > + struct { > + size_t mdata_offset; The naming is confusing for mdata_offset especially as the second one is not commented. > + char pool_name[ODP_POOL_NAME_LEN]; > + } slave; > +} __packed; > + > +int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev, > + odp_pool_t pool); > + > +int ipc_pktio_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], > + unsigned len); > + > +int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], > + unsigned len); > diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h > new file mode 100644 > index 0000000..e6b37fa > --- /dev/null > +++ b/platform/linux-generic/include/odp_shm_internal.h > @@ -0,0 +1,22 @@ > +/* Copyright (c) 2013, Linaro Limited > + * All rights reserved. 
> + * > + * SPDX-License-Identifier: BSD-3-Clause > + */ > + > + > + > +#ifndef ODP_SHM_INTERNAL_H_ > +#define ODP_SHM_INTERNAL_H_ > + > +#ifdef __cplusplus > +extern "C" { > +#endif > + > +#define _ODP_SHM_PROC_NOCREAT 0x4 /**< Do not create shm if not exist */ > + > +#ifdef __cplusplus > +} > +#endif > + > +#endif > diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c > index cfe5b71..0be4cce 100644 > --- a/platform/linux-generic/odp_packet_io.c > +++ b/platform/linux-generic/odp_packet_io.c > @@ -18,6 +18,7 @@ > #include <odp_schedule_internal.h> > #include <odp_classification_internal.h> > #include <odp_debug_internal.h> > +#include <odp_packet_io_ipc_internal.h> > > #include <string.h> > #include <sys/ioctl.h> > @@ -25,6 +26,9 @@ > #include <ifaddrs.h> > #include <errno.h> > > +#include <sys/types.h> > +#include <unistd.h> > + > /* MTU to be reported for the "loop" interface */ > #define PKTIO_LOOP_MTU 1500 > /* MAC address for the "loop" interface */ > @@ -263,7 +267,12 @@ static odp_pktio_t setup_pktio_entry(const char *dev, odp_pool_t pool) > > if (strcmp(dev, "loop") == 0) > ret = init_loop(pktio_entry, id); > - else > + else if (!strncmp(dev, "ipc", 3)) { > + ret = ipc_pktio_init(pktio_entry, dev, pool); > + if (ret != 0) > + ODP_ABORT("unable to init ipc for %s, pool %" PRIu64 "\n", > + dev, pool); > + } else > ret = init_socket(pktio_entry, dev, pool); > > if (ret != 0) { > @@ -408,6 +417,10 @@ int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len) > case ODP_PKTIO_TYPE_LOOPBACK: > pkts = deq_loopback(pktio_entry, pkt_table, len); > break; > + case ODP_PKTIO_TYPE_IPC_SLAVE: > + case ODP_PKTIO_TYPE_IPC: > + pkts = ipc_pktio_recv(pktio_entry, pkt_table, len); > + break; > default: > pkts = -1; > break; > @@ -462,6 +475,10 @@ int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], int len) > case ODP_PKTIO_TYPE_LOOPBACK: > pkts = enq_loopback(pktio_entry, pkt_table, len); > break; > + case ODP_PKTIO_TYPE_IPC: > + case ODP_PKTIO_TYPE_IPC_SLAVE: > + pkts = ipc_pktio_send(pktio_entry, pkt_table, len); > + break; > default: > pkts = -1; > } > diff --git a/platform/linux-generic/odp_packet_io_ipc.c b/platform/linux-generic/odp_packet_io_ipc.c > new file mode 100644 > index 0000000..f9ae87f > --- /dev/null > +++ b/platform/linux-generic/odp_packet_io_ipc.c > @@ -0,0 +1,603 @@ > +/* Copyright (c) 2015, Linaro Limited > + * All rights reserved. > + * > + * SPDX-License-Identifier: BSD-3-Clause > + */ > + > +#include <odp_packet_io_ipc_internal.h> > +#include <odp_debug_internal.h> > +#include <odp_packet_io_internal.h> > +#include <odp_spin_internal.h> > +#include <odp/system_info.h> > +#include <odp_shm_internal.h> > + > +#include <sys/mman.h> > +#include <sys/stat.h> > +#include <fcntl.h> > + > +static void *_ipc_map_remote_pool(const char *name, size_t size); > + > +static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl) > +{ > + pool_entry_t *pool; > + uint32_t pool_id; > + odp_shm_t shm; > + odp_shm_info_t info; > + > + pool_id = pool_handle_to_index(pool_hdl); > + pool = get_pool_entry(pool_id); > + shm = pool->s.pool_shm; > + > + odp_shm_info(shm, &info); > + > + return info.name; > +} > + > +/** > +* Look up for shared memory object. 
> +* > +* @param name name of shm object > +* > +* @return 0 on success, otherwise non-zero > +*/ > +static int _odp_shm_lookup_ipc(const char *name) > +{ > + int shm; > + > + shm = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR); > + if (shm == -1) { > + if (errno == ENOENT) > + return -1; > + else > + ODP_ABORT("shm_open for %s err %s\n", > + name, strerror(errno)); > + } > + close(shm); > + return 0; > +} > + > +static struct pktio_info *_ipc_map_pool_info(pktio_entry_t *pktio_entry, > + const char *pool_name, > + int flag) > +{ > + struct pktio_info *pinfo; > + char name[ODP_POOL_NAME_LEN + sizeof("_info")]; > + > + /* Create info about remote pktio */ > + snprintf(name, sizeof(name), "%s_info", pool_name); > + odp_shm_t shm = odp_shm_reserve(name, sizeof(struct pktio_info), > + ODP_CACHE_LINE_SIZE, > + flag); > + if (ODP_SHM_INVALID == shm) > + ODP_ABORT("unable to reserve memory for shm info"); > + pinfo = odp_shm_addr(shm); > + if (flag != _ODP_SHM_PROC_NOCREAT) > + pinfo->remote_pool_name[0] = 0; > + > + pktio_entry->s.ipc_pool_shm = shm; > + return pinfo; > +} > + > +static int _ipc_pktio_init_master(pktio_entry_t *pktio_entry, const char *dev, > + odp_pool_t pool) > +{ > + char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")]; > + pool_entry_t *pool_entry; > + uint32_t pool_id; > + void *ipc_pool_base; > + struct pktio_info *pinfo; > + const char *pool_name; > + odp_shm_t shm; > + > + pool_id = pool_handle_to_index(pool); > + pool_entry = get_pool_entry(pool_id); > + > + if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE) > + ODP_ABORT(""); > + > + if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) { > + ODP_DBG("too big ipc name\n"); > + return -1; > + } > + > + /* generate name in shm like ipc_pktio_r for > + * to be processed packets ring. 
> + */ > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); > + pktio_entry->s.ipc_r = odph_ring_create(ipc_shm_name, > + PKTIO_IPC_ENTRIES, > + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); > + if (!pktio_entry->s.ipc_r) { > + ODP_DBG("pid %d unable to create ipc ring %s name\n", > + getpid(), ipc_shm_name); > + return -1; > + } > + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r), > + odph_ring_free_count(pktio_entry->s.ipc_r)); > + > + /* generate name in shm like ipc_pktio_p for > + * already processed packets > + */ > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); > + pktio_entry->s.ipc_p = odph_ring_create(ipc_shm_name, > + PKTIO_IPC_ENTRIES, > + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); > + if (!pktio_entry->s.ipc_p) { > + ODP_DBG("pid %d unable to create ipc ring %s name\n", > + getpid(), ipc_shm_name); > + goto free_r; > + } > + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p), > + odph_ring_free_count(pktio_entry->s.ipc_p)); > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); > + pktio_entry->s.ipc_r_slave = odph_ring_create(ipc_shm_name, > + PKTIO_IPC_ENTRIES, > + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); > + if (!pktio_entry->s.ipc_r_slave) { > + ODP_DBG("pid %d unable to create ipc ring %s name\n", > + getpid(), ipc_shm_name); > + goto free_p; > + } > + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave), > + odph_ring_free_count(pktio_entry->s.ipc_r_slave)); > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_p", dev); > + pktio_entry->s.ipc_p_slave = odph_ring_create(ipc_shm_name, > + PKTIO_IPC_ENTRIES, > + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); > + if (!pktio_entry->s.ipc_p_slave) { > + ODP_DBG("pid %d unable to create ipc ring %s name\n", > + getpid(), ipc_shm_name); > + goto free_slave_r; > + } > + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave), > + odph_ring_free_count(pktio_entry->s.ipc_p_slave)); > + > + /* Memory to store information about exported pool */ > + pinfo = _ipc_map_pool_info(pktio_entry, dev, ODP_SHM_PROC); > + > + /* Set up pool name for remote info */ > + pool_name = _ipc_odp_buffer_pool_shm_name(pool); > + memcpy(pinfo->remote_pool_name, pool_name, strlen(pool_name)); > + pinfo->shm_pkt_pool_size = pool_entry->s.pool_size; > + pinfo->shm_pool_num = pool_entry->s.buf_num; > + pinfo->shm_pkt_size = pool_entry->s.seg_size; > + pinfo->mdata_offset = pool_entry->s.pool_mdata_addr - > + pool_entry->s.pool_base_addr; > + pinfo->slave.mdata_offset = 0; > + ODP_DBG("Master waiting for slave to be connected now..\n"); > + > + /* Wait for remote process to export his pool. */ > + ODP_DBG("Wait for second process set mdata_offset...\n"); > + while (pinfo->slave.mdata_offset == 0) > + odp_spin(); > + > + ODP_DBG("Wait for second process set mdata_offset... 
DONE.\n"); > + > + while (1) { > + int ret = _odp_shm_lookup_ipc(pinfo->slave.pool_name); > + if (!ret) > + break; > + ODP_DBG("Master looking for %s\n", pinfo->slave.pool_name); > + sleep(1); > + } > + > + ipc_pool_base = _ipc_map_remote_pool(pinfo->slave.pool_name, > + pinfo->shm_pkt_pool_size); > + pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + > + pinfo->slave.mdata_offset; > + pktio_entry->s.ipc_pool = pool; > + > + return 0; > + > +free_slave_r: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > +free_p: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > +free_r: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > + return -1; > +} > + > +static odp_pool_t _ipc_odp_alloc_and_create_pool_slave(struct pktio_info *pinfo) > +{ > + odp_pool_t pool; > + char *pool_name; > + odp_pool_param_t params; > + int num = pinfo->shm_pool_num; > + uint32_t buf_size = pinfo->shm_pkt_size; > + pool_entry_t *pool_entry; > + > + pool_name = calloc(1, strlen(pinfo->remote_pool_name) + > + sizeof("ipc_pool_slave_")); > + sprintf(pool_name, "ipc_pool_slave_%s", pinfo->remote_pool_name); > + > + ODP_DBG("slave uses pool %s\n", pool_name); > + > + memset(¶ms, 0, sizeof(params)); > + params.pkt.num = num; > + params.pkt.len = buf_size; > + params.pkt.seg_len = buf_size; > + params.type = ODP_POOL_PACKET; > + > + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); > + if (pool == ODP_POOL_INVALID) > + ODP_ABORT("Error: packet pool create failed.\n" > + "num %d, len %d, seg_len %d\n", > + params.pkt.num, params.pkt.len, params.pkt.seg_len); > + > + /* Export info so that master can connect to that pool*/ > + snprintf(pinfo->slave.pool_name, 30, "%s", pool_name); > + pool_entry = odp_pool_to_entry(pool); > + pinfo->slave.mdata_offset = pool_entry->s.pool_mdata_addr - > + pool_entry->s.pool_base_addr; > + free(pool_name); > + > + return pool; > +} > + > +static void *_ipc_map_remote_pool(const char *name, size_t size) > +{ > + odp_shm_t shm; > + > + ODP_DBG("Mapping remote pool %s, size %ld\n", name, size); > + shm = odp_shm_reserve(name, > + size, > + ODP_CACHE_LINE_SIZE, > + _ODP_SHM_PROC_NOCREAT); > + if (shm == ODP_SHM_INVALID) > + ODP_ABORT("unable map %s\n", name); > + return odp_shm_addr(shm); > +} > + > +static void *_ipc_shm_map(char *name, size_t size, int timeout) > +{ > + odp_shm_t shm; > + int ret; > + > + while (1) { > + ret = _odp_shm_lookup_ipc(name); > + if (!ret) > + break; > + ODP_DBG("Waiting for %s\n", name); > + if (timeout <= 0) > + return NULL; > + timeout--; > + sleep(1); > + } > + > + shm = odp_shm_reserve(name, size, > + ODP_CACHE_LINE_SIZE, > + _ODP_SHM_PROC_NOCREAT); > + if (ODP_SHM_INVALID == shm) > + ODP_ABORT("unable to map: %s\n", name); > + > + return odp_shm_addr(shm); > +} > + > +static int _ipc_pktio_init_slave(const char *dev, pktio_entry_t *pktio_entry) > +{ > + char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")]; > + size_t ring_size = PKTIO_IPC_ENTRIES * sizeof(void *) + > + sizeof(odph_ring_t); > + struct pktio_info *pinfo; > + void *ipc_pool_base; > + odp_shm_t shm; > + > + if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE) > + ODP_ABORT(""); > + > + if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) { > + ODP_DBG("too big ipc name\n"); > + return -1; > + } > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), 
"%s_r", dev); > + pktio_entry->s.ipc_r = _ipc_shm_map(ipc_shm_name, ring_size, 10); > + if (!pktio_entry->s.ipc_r) { > + ODP_DBG("pid %d unable to find ipc ring %s name\n", > + getpid(), dev); > + return -1; > + } > + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r), > + odph_ring_free_count(pktio_entry->s.ipc_r)); > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); > + pktio_entry->s.ipc_p = _ipc_shm_map(ipc_shm_name, ring_size, 10); > + if (!pktio_entry->s.ipc_p) { > + ODP_DBG("pid %d unable to find ipc ring %s name\n", > + getpid(), dev); > + goto free_r; > + } > + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p), > + odph_ring_free_count(pktio_entry->s.ipc_p)); > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); > + pktio_entry->s.ipc_r_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10); > + if (!pktio_entry->s.ipc_r_slave) { > + ODP_DBG("pid %d unable to find ipc ring %s name\n", > + getpid(), dev); > + goto free_p; > + } > + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave), > + odph_ring_free_count(pktio_entry->s.ipc_r_slave)); > + > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_p", dev); > + pktio_entry->s.ipc_p_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10); > + if (!pktio_entry->s.ipc_p_slave) { > + ODP_DBG("pid %d unable to find ipc ring %s name\n", > + getpid(), dev); > + goto free_slave_r; > + } > + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", > + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave), > + odph_ring_free_count(pktio_entry->s.ipc_p_slave)); > + > + > + /* Get info about remote pool */ > + pinfo = _ipc_map_pool_info(pktio_entry, dev, _ODP_SHM_PROC_NOCREAT); > + > + ipc_pool_base = _ipc_map_remote_pool(pinfo->remote_pool_name, > + pinfo->shm_pkt_pool_size); > + pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + > + pinfo->mdata_offset; > + pktio_entry->s.ipc_pkt_size = pinfo->shm_pkt_size; > + > + /* @todo: to simplify in linux-generic implementation we create pool for > + * packets from IPC queue. On receive implementation copies packets to > + * that pool. Later we can try to reuse original pool without packets > + * copying. > + */ > + pktio_entry->s.ipc_pool = _ipc_odp_alloc_and_create_pool_slave(pinfo); > + ODP_DBG("%s OK.\n", __func__); > + return 0; > + > +free_slave_r: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > +free_p: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > +free_r: > + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); > + shm = odp_shm_lookup(ipc_shm_name); > + odp_shm_free(shm); > + return -1; > +} > + > +int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev, > + odp_pool_t pool) > +{ > + int ret; > + > + /* if pool is zero we assume that it's slave process connects > + * to shared memory already created by main process. 
> + */ > + if (ODP_POOL_INVALID != pool) { > + pktio_entry->s.type = ODP_PKTIO_TYPE_IPC; > + ret = _ipc_pktio_init_master(pktio_entry, dev, pool); > + } else { > + pktio_entry->s.type = ODP_PKTIO_TYPE_IPC_SLAVE; > + ret = _ipc_pktio_init_slave(dev, pktio_entry); > + } > + > + return ret; > +} > + > + > +static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf, > + uint32_t offset, > + uint32_t *seglen, > + uint32_t limit) > +{ > + int seg_index = offset / buf->segsize; > + int seg_offset = offset % buf->segsize; > + void *addr = (char *)buf - buf->ipc_addr_offset[seg_index]; > + > + if (seglen != NULL) { > + uint32_t buf_left = limit - offset; > + *seglen = seg_offset + buf_left <= buf->segsize ? > + buf_left : buf->segsize - seg_offset; > + } > + > + return (void *)(seg_offset + (uint8_t *)addr); > +} > + > + > +static inline void *_ipc_packet_map(odp_packet_hdr_t *pkt_hdr, > + uint32_t offset, uint32_t *seglen) > +{ > + if (offset > pkt_hdr->frame_len) > + return NULL; > + > + return _ipc_buffer_map(&pkt_hdr->buf_hdr, > + pkt_hdr->headroom + offset, seglen, > + pkt_hdr->headroom + pkt_hdr->frame_len); > +} > + > +int ipc_pktio_recv(pktio_entry_t *pktio_entry, > + odp_packet_t pkt_table[], unsigned len) > +{ > + int pkts = 0; > + int i; > + odph_ring_t *r; > + odph_ring_t *r_p; > + odp_packet_t remote_pkts[PKTIO_IPC_ENTRIES]; > + void **ipcbufs_p = (void *)&remote_pkts; > + > + if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) { > + r = pktio_entry->s.ipc_r_slave; > + r_p = pktio_entry->s.ipc_p_slave; > + } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) { > + r = pktio_entry->s.ipc_r; > + r_p = pktio_entry->s.ipc_p; > + } else { > + ODP_ABORT("wrong type: %d\n", pktio_entry->s.type); > + } > + > + pkts = odph_ring_mc_dequeue_burst(r, ipcbufs_p, len); > + if (odp_unlikely(pkts < 0)) > + ODP_ABORT("error to dequeue no packets\n"); > + > + /* fast path */ > + if (odp_likely(0 == pkts)) > + return 0; > + > + for (i = 0; i < pkts; i++) { > + odp_pool_t pool; > + odp_packet_t pkt; > + odp_packet_hdr_t *phdr; > + odp_buffer_bits_t handle; > + int idx; /* Remote packet has coded pool and index. > + * We need only index.*/ > + void *pkt_data; > + void *remote_pkt_data; > + > + handle.handle = _odp_packet_to_buffer(remote_pkts[i]); > + idx = handle.index; > + > + /* Link to packed data. To this line we have Zero-Copy between > + * processes, to simplify use packet copy in that version which > + * can be removed later with more advance buffer management > + * (ref counters). > + */ > + /* reverse odp_buf_to_hdr() */ > + phdr = (odp_packet_hdr_t *)( > + (char *)pktio_entry->s.ipc_pool_mdata_base + > + (idx * ODP_CACHE_LINE_SIZE)); > + > + /* Allocate new packet. Select*/ > + pool = pktio_entry->s.ipc_pool; > + if (odp_unlikely(pool == ODP_POOL_INVALID)) > + ODP_ABORT("invalid pool"); > + > + pkt = odp_packet_alloc(pool, phdr->frame_len); > + if (odp_unlikely(pkt == ODP_PACKET_INVALID)) { > + /* Original pool might be smaller then > + * PKTIO_IPC_ENTRIES. If packet can not be > + * allocated from pool at this time, > + * simple get in on next recv() call. > + */ > + pkts = i - 1; > + break; > + } > + > + /* Copy packet data. 
*/ > + pkt_data = odp_packet_data(pkt); > + if (odp_unlikely(pkt_data == NULL)) > + ODP_ABORT("unable to map pkt_data ipc_slave %d\n", > + (ODP_PKTIO_TYPE_IPC_SLAVE == > + pktio_entry->s.type)); > + > + remote_pkt_data = _ipc_packet_map(phdr, 0, NULL); > + if (odp_unlikely(remote_pkt_data == NULL)) > + ODP_ABORT("unable to map remote_pkt_data, ipc_slave %d\n", > + (ODP_PKTIO_TYPE_IPC_SLAVE == > + pktio_entry->s.type)); > + > + /* @todo fix copy packet!!! */ > + memcpy(pkt_data, remote_pkt_data, phdr->frame_len); > + > + /* Copy packets L2, L3 parsed offsets and size */ > + odp_packet_hdr(pkt)->l2_offset = phdr->l2_offset; > + odp_packet_hdr(pkt)->l3_offset = phdr->l3_offset; > + odp_packet_hdr(pkt)->l4_offset = phdr->l4_offset; > + odp_packet_hdr(pkt)->payload_offset = phdr->payload_offset; > + > + odp_packet_hdr(pkt)->vlan_s_tag = phdr->vlan_s_tag; > + odp_packet_hdr(pkt)->vlan_c_tag = phdr->vlan_c_tag; > + odp_packet_hdr(pkt)->l3_protocol = phdr->l3_protocol; > + odp_packet_hdr(pkt)->l3_len = phdr->l3_len; > + > + odp_packet_hdr(pkt)->frame_len = phdr->frame_len; > + odp_packet_hdr(pkt)->headroom = phdr->headroom; > + odp_packet_hdr(pkt)->tailroom = phdr->tailroom; This block of code should be somewhere in odp_packet_internal.h, you might even find something helpful there after you rebase to latest ODP. > + pkt_table[i] = pkt; > + } > + > + /* Now tell other process that we no longer need that buffers.*/ > + pkts = odph_ring_mp_enqueue_burst(r_p, ipcbufs_p, pkts); > + if (odp_unlikely(pkts < 0)) > + ODP_ABORT("ipc: odp_ring_mp_enqueue_bulk r_p fail\n"); > + > + return pkts; > +} > + > +int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], > + unsigned len) > +{ > + odph_ring_t *r; > + odph_ring_t *r_p; > + void **rbuf_p; > + int ret; > + unsigned i; > + > + if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) { > + r = pktio_entry->s.ipc_r_slave; > + r_p = pktio_entry->s.ipc_p_slave; > + } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) { > + r = pktio_entry->s.ipc_r; > + r_p = pktio_entry->s.ipc_p; > + } else { > + ODP_ABORT("wrong type: %d\n", pktio_entry->s.type); > + } > + > + /* Free already processed packets, if any */ > + while (1) { > + odp_packet_t r_p_pkts[PKTIO_IPC_ENTRIES]; > + rbuf_p = (void *)&r_p_pkts; > + > + ret = odph_ring_mc_dequeue_burst(r_p, rbuf_p, > + PKTIO_IPC_ENTRIES); > + if (0 == ret) > + break; > + for (i = 0; i < (unsigned)ret; i++) > + odp_packet_free(r_p_pkts[i]); > + } > + > + /* Prepare packets: calculate offset from address. */ > + for (i = 0; i < len; i++) { > + int j; > + odp_packet_t pkt = pkt_table[i]; > + rbuf_p = (void *)&pkt; > + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); > + > + /* buf_hdr.addr can not be used directly in remote process, > + * convert it to offset > + */ > + for (j = 0; j < ODP_BUFFER_MAX_SEG; j++) > + pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr - > + (char *)pkt_hdr->buf_hdr.addr[j]; > + } > + > + /* Put packets to ring to be processed in other process. 
*/ > + rbuf_p = (void *)&pkt_table[0]; > + ret = odph_ring_mp_enqueue_burst(r, rbuf_p, len); > + if (odp_unlikely(ret < 0)) { > + ODP_ERR("pid %d odp_ring_mp_enqueue_bulk fail, ipc_slave %d, ret %d\n", > + getpid(), > + (ODP_PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.type), > + ret); > + ODP_ERR("odp_ring_full: %d, odp_ring_count %d, odph_ring_free_count %d\n", > + odph_ring_full(r), odph_ring_count(r), > + odph_ring_free_count(r)); > + } > + > + return ret; > +} > diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c > index a3d80b5..78153d1 100644 > --- a/platform/linux-generic/odp_pool.c > +++ b/platform/linux-generic/odp_pool.c > @@ -231,8 +231,11 @@ odp_pool_t odp_pool_create(const char *name, > ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len); > > /* Reject create if pkt.len needs too many segments */ > - if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) > + if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) { > + ODP_ERR("ODP_BUFFER_MAX_SEG exceed %d(%d)\n", > + blk_size / seg_len, ODP_BUFFER_MAX_SEG); > return ODP_POOL_INVALID; > + } > > buf_stride = sizeof(odp_packet_hdr_stride); > break; > @@ -249,8 +252,12 @@ odp_pool_t odp_pool_create(const char *name, > > /* Validate requested number of buffers against addressable limits */ > if (buf_num > > - (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) > + (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) { > + ODP_ERR("buf_num %d > then expected %d\n", > + buf_num, ODP_BUFFER_MAX_BUFFERS / > + (buf_stride / ODP_CACHE_LINE_SIZE)); > return ODP_POOL_INVALID; > + } > > /* Find an unused buffer pool slot and iniitalize it as requested */ > for (i = 0; i < ODP_CONFIG_POOLS; i++) { > @@ -302,7 +309,8 @@ odp_pool_t odp_pool_create(const char *name, > if (shm == ODP_SHM_NULL) { > shm = odp_shm_reserve(pool->s.name, > pool->s.pool_size, > - ODP_PAGE_SIZE, 0); > + ODP_PAGE_SIZE, > + ODP_SHM_PROC); > if (shm == ODP_SHM_INVALID) { > POOL_UNLOCK(&pool->s.lock); > return ODP_POOL_INVALID; > @@ -310,8 +318,12 @@ odp_pool_t odp_pool_create(const char *name, > pool->s.pool_base_addr = odp_shm_addr(shm); > } else { > odp_shm_info_t info; > - if (odp_shm_info(shm, &info) != 0 || > - info.size < pool->s.pool_size) { > + int ret; > + > + ret = odp_shm_info(shm, &info); > + if (ret != 0 || info.size < pool->s.pool_size) { > + ODP_ERR("shm info %d, info size %ld, pool size %ld\n", > + ret, info.size, pool->s.pool_size); > POOL_UNLOCK(&pool->s.lock); > return ODP_POOL_INVALID; > } > @@ -324,6 +336,7 @@ odp_pool_t odp_pool_create(const char *name, > ((size_t)page_addr - > (size_t)pool->s.pool_base_addr)) { > POOL_UNLOCK(&pool->s.lock); > + ODP_ERR("small shm size\n"); > return ODP_POOL_INVALID; > } > pool->s.pool_base_addr = page_addr; > diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c > index ab48dda..5de48d3 100644 > --- a/platform/linux-generic/odp_shared_memory.c > +++ b/platform/linux-generic/odp_shared_memory.c > @@ -15,6 +15,7 @@ > #include <odp/debug.h> > #include <odp_debug_internal.h> > #include <odp_align_internal.h> > +#include <odp_shm_internal.h> > #include <odp/config.h> > > #include <unistd.h> > @@ -189,7 +190,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, > int fd = -1; > int map_flag = MAP_SHARED; > /* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */ > - int oflag = O_RDWR | O_CREAT | O_TRUNC; > + int oflag = O_RDWR; > uint64_t alloc_size; > uint64_t page_sz, huge_sz; > #ifdef MAP_HUGETLB > @@ -207,7 +208,12 @@ 
odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align, > alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz); > #endif > > - if (flags & ODP_SHM_PROC) { > + if (flags & ODP_SHM_PROC) > + oflag |= O_CREAT | O_TRUNC; > + > + if (flags & (ODP_SHM_PROC | _ODP_SHM_PROC_NOCREAT)) { > + need_huge_page = 0; > + > /* Creates a file to /dev/shm */ > fd = shm_open(name, oflag, > S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH); > -- > 1.9.1 > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > https://lists.linaro.org/mailman/listinfo/lng-odp
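For readers following the thread, a minimal usage sketch of the interface this patch wires up: device names starting with "ipc" are routed to ipc_pktio_init(); the process that passes a valid pool handle takes the master path, and the process that passes ODP_POOL_INVALID attaches as the slave. Per the patch's own comments, the "_r" rings carry packet indexes waiting to be processed, the "_p" rings return indexes the peer has finished with, and the "_slave_*" pair serves the reverse direction. The application-level calls below (odp_pktio_open() in particular, plus the pool parameters) are assumptions based on the ODP API of this period and are not part of the patch itself; error handling is omitted.

```c
/* Hedged sketch, not from the patch: two cooperating processes using the
 * "ipc" pktio. Assumes odp_pktio_open(dev, pool) from the contemporary
 * ODP API; pool sizes are illustrative only. */
#include <odp.h>
#include <string.h>

#define BURST 32

/* Master: owns the packet pool; ipc_pktio_init() takes the master path
 * because a valid pool handle is passed. */
static void master_side(void)
{
	odp_pool_param_t params;
	odp_pool_t pool;
	odp_pktio_t pktio;
	odp_packet_t tx[BURST];
	int i;

	memset(&params, 0, sizeof(params));
	params.type        = ODP_POOL_PACKET;
	params.pkt.num     = 1024;
	params.pkt.len     = 1856;
	params.pkt.seg_len = 1856;

	pool  = odp_pool_create("pkt_pool", ODP_SHM_NULL, &params);
	pktio = odp_pktio_open("ipc_pktio", pool);   /* master path */

	for (i = 0; i < BURST; i++)
		tx[i] = odp_packet_alloc(pool, 64);

	/* Packet indexes are enqueued on the "ipc_pktio_r" ring. */
	odp_pktio_send(pktio, tx, BURST);
}

/* Slave: passes ODP_POOL_INVALID, so ipc_pktio_init() takes the slave path
 * and attaches to the rings and pool info the master exported to /dev/shm. */
static void slave_side(void)
{
	odp_pktio_t pktio;
	odp_packet_t rx[BURST];
	int got;

	pktio = odp_pktio_open("ipc_pktio", ODP_POOL_INVALID);

	/* Copies packets into the locally created slave pool, then returns
	 * the consumed indexes to the master on the "ipc_pktio_p" ring. */
	got = odp_pktio_recv(pktio, rx, BURST);
	(void)got;
}
```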
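The ipc_addr_offset[] field added to odp_buffer_hdr_t is what makes the cross-process hand-off possible: the sender stores the distance between the buffer header and each segment instead of the segment's virtual address, and the receiver, which maps the same pool at a different base address, rebuilds the address relative to where it sees the header (see _ipc_buffer_map() and the offset loop in ipc_pktio_send()). The following standalone illustration of that idea is not taken from the patch; the struct and function names are hypothetical.

```c
/* Illustration only: why header-relative offsets survive remapping.
 * A header and its data segment live in one shared region; each process may
 * map that region at a different base, but their distance stays constant. */
#include <stdint.h>
#include <stdio.h>

struct seg_hdr {
	uint64_t data_offset;   /* header address minus data address,
	                         * analogous to ipc_addr_offset[] */
};

/* Producer side: record the offset instead of the raw pointer. */
static void record_offset(struct seg_hdr *hdr, void *data)
{
	hdr->data_offset = (uint64_t)((char *)hdr - (char *)data);
}

/* Consumer side (possibly another process with a different mapping):
 * recover the data address relative to where *it* sees the header. */
static void *resolve_data(struct seg_hdr *hdr)
{
	return (char *)hdr - hdr->data_offset;
}

int main(void)
{
	char pool[256];                     /* stand-in for the shared pool */
	struct seg_hdr *hdr = (struct seg_hdr *)&pool[128];
	void *data = &pool[0];

	record_offset(hdr, data);
	printf("resolved == original: %d\n", resolve_data(hdr) == data);
	return 0;
}
```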
diff --git a/helper/include/odp/helper/ring.h b/helper/include/odp/helper/ring.h index 5e640a7..c3c2f6a 100644 --- a/helper/include/odp/helper/ring.h +++ b/helper/include/odp/helper/ring.h @@ -156,10 +156,11 @@ typedef struct odph_ring { } odph_ring_t; -#define ODPH_RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer".*/ -#define ODPH_RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer".*/ -#define ODPH_RING_SHM_PROC 0x0004 /* If set - ring is visible from different +#define ODPH_RING_F_SP_ENQ (1 << 0) /* The default enqueue is "single-producer".*/ +#define ODPH_RING_F_SC_DEQ (1 << 1) /* The default dequeue is "single-consumer".*/ +#define ODPH_RING_SHM_PROC (1 << 2) /* If set - ring is visible from different processes. Default is thread visible. */ +#define ODPH_RING_NO_LIST (1 << 3) /* Do not link ring to linked list. */ #define ODPH_RING_QUOT_EXCEED (1 << 31) /* Quota exceed for burst ops */ #define ODPH_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask */ diff --git a/helper/ring.c b/helper/ring.c index 0927a6c..54c40cf 100644 --- a/helper/ring.c +++ b/helper/ring.c @@ -200,7 +200,8 @@ odph_ring_create(const char *name, unsigned count, unsigned flags) r->prod.tail = 0; r->cons.tail = 0; - TAILQ_INSERT_TAIL(&odp_ring_list, r, next); + if (!(flags & ODPH_RING_NO_LIST)) + TAILQ_INSERT_TAIL(&odp_ring_list, r, next); } else { ODP_ERR("Cannot reserve memory\n"); } diff --git a/platform/linux-generic/Makefile.am b/platform/linux-generic/Makefile.am index 66f0474..4843386 100644 --- a/platform/linux-generic/Makefile.am +++ b/platform/linux-generic/Makefile.am @@ -120,11 +120,13 @@ noinst_HEADERS = \ ${top_srcdir}/platform/linux-generic/include/odp_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_packet_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_packet_io_internal.h \ + ${top_srcdir}/platform/linux-generic/include/odp_packet_io_ipc_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_packet_io_queue.h \ ${top_srcdir}/platform/linux-generic/include/odp_packet_socket.h \ ${top_srcdir}/platform/linux-generic/include/odp_pool_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_queue_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_schedule_internal.h \ + ${top_srcdir}/platform/linux-generic/include/odp_shm_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_spin_internal.h \ ${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \ ${top_srcdir}/platform/linux-generic/Makefile.inc @@ -155,6 +157,7 @@ __LIB__libodp_la_SOURCES = \ odp_packet.c \ odp_packet_flags.c \ odp_packet_io.c \ + odp_packet_io_ipc.c \ odp_packet_socket.c \ odp_pool.c \ odp_queue.c \ diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 3a3d2a2..4ea7c62 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -129,6 +129,9 @@ typedef struct odp_buffer_hdr_t { size_t udata_size; /* size of user metadata */ uint32_t segcount; /* segment count */ uint32_t segsize; /* segment size */ + /* ipc mapped process can not walk over pointers, + * offset has to be used */ + uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG]; void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ } odp_buffer_hdr_t; diff --git a/platform/linux-generic/include/odp_packet_io_internal.h b/platform/linux-generic/include/odp_packet_io_internal.h index 18b59ef..d12a1fa 100644 --- 
a/platform/linux-generic/include/odp_packet_io_internal.h +++ b/platform/linux-generic/include/odp_packet_io_internal.h @@ -23,6 +23,7 @@ extern "C" { #include <odp_classification_datamodel.h> #include <odp_align_internal.h> #include <odp_debug_internal.h> +#include <odp/helper/ring.h> #include <odp/config.h> #include <odp/hints.h> @@ -36,6 +37,8 @@ typedef enum { ODP_PKTIO_TYPE_SOCKET_MMSG, ODP_PKTIO_TYPE_SOCKET_MMAP, ODP_PKTIO_TYPE_LOOPBACK, + ODP_PKTIO_TYPE_IPC, + ODP_PKTIO_TYPE_IPC_SLAVE, } odp_pktio_type_t; struct pktio_entry { @@ -53,6 +56,19 @@ struct pktio_entry { char name[IFNAMSIZ]; /**< name of pktio provided to pktio_open() */ odp_bool_t promisc; /**< promiscuous mode state */ + odph_ring_t *ipc_r; /**< ODP ring for IPC msg packets + indexes transmitted to shared memory */ + odph_ring_t *ipc_p; /**< ODP ring for IPC msg packets + indexes already processed by remote process */ + void *ipc_pool_base; /**< IPC Remote pool base addr */ + void *ipc_pool_mdata_base; /**< IPC Remote pool mdata base addr */ + uint64_t ipc_pkt_size; /**< IPC: packet size in remote pool */ + + odph_ring_t *ipc_r_slave; + odph_ring_t *ipc_p_slave; + + odp_pool_t ipc_pool; /**< IPC: Pool of main process */ + odp_shm_t ipc_pool_shm; /**< IPC: Shm memory for remote pool */ }; typedef union { diff --git a/platform/linux-generic/include/odp_packet_io_ipc_internal.h b/platform/linux-generic/include/odp_packet_io_ipc_internal.h new file mode 100644 index 0000000..e11ec35 --- /dev/null +++ b/platform/linux-generic/include/odp_packet_io_ipc_internal.h @@ -0,0 +1,48 @@ +/* Copyright (c) 2015, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <odp/packet_io.h> +#include <odp_packet_io_internal.h> +#include <odp/packet.h> +#include <odp_packet_internal.h> +#include <odp_internal.h> +#include <odp/shared_memory.h> + +#include <string.h> +#include <unistd.h> +#include <stdlib.h> + +/* IPC packet I/O over odph_ring */ +#include <odp/helper/ring.h> + +#define PKTIO_IPC_ENTRIES 4096 /**< number of odp buffers in + odp ring queue */ + +/* that struct is exported to shared memory, so that 2 processes can find + * each other. + */ +struct pktio_info { + char remote_pool_name[ODP_POOL_NAME_LEN]; + int shm_pool_num; + size_t shm_pkt_pool_size; + uint32_t shm_pkt_size; + odp_shm_t shm; /*< current structure stored in this shm */ + size_t mdata_offset; /*< offset from shared memory block start + *to pool_mdata_addr */ + struct { + size_t mdata_offset; + char pool_name[ODP_POOL_NAME_LEN]; + } slave; +} __packed; + +int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev, + odp_pool_t pool); + +int ipc_pktio_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], + unsigned len); + +int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], + unsigned len); diff --git a/platform/linux-generic/include/odp_shm_internal.h b/platform/linux-generic/include/odp_shm_internal.h new file mode 100644 index 0000000..e6b37fa --- /dev/null +++ b/platform/linux-generic/include/odp_shm_internal.h @@ -0,0 +1,22 @@ +/* Copyright (c) 2013, Linaro Limited + * All rights reserved. 
+ * + * SPDX-License-Identifier: BSD-3-Clause + */ + + + +#ifndef ODP_SHM_INTERNAL_H_ +#define ODP_SHM_INTERNAL_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +#define _ODP_SHM_PROC_NOCREAT 0x4 /**< Do not create shm if not exist */ + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/platform/linux-generic/odp_packet_io.c b/platform/linux-generic/odp_packet_io.c index cfe5b71..0be4cce 100644 --- a/platform/linux-generic/odp_packet_io.c +++ b/platform/linux-generic/odp_packet_io.c @@ -18,6 +18,7 @@ #include <odp_schedule_internal.h> #include <odp_classification_internal.h> #include <odp_debug_internal.h> +#include <odp_packet_io_ipc_internal.h> #include <string.h> #include <sys/ioctl.h> @@ -25,6 +26,9 @@ #include <ifaddrs.h> #include <errno.h> +#include <sys/types.h> +#include <unistd.h> + /* MTU to be reported for the "loop" interface */ #define PKTIO_LOOP_MTU 1500 /* MAC address for the "loop" interface */ @@ -263,7 +267,12 @@ static odp_pktio_t setup_pktio_entry(const char *dev, odp_pool_t pool) if (strcmp(dev, "loop") == 0) ret = init_loop(pktio_entry, id); - else + else if (!strncmp(dev, "ipc", 3)) { + ret = ipc_pktio_init(pktio_entry, dev, pool); + if (ret != 0) + ODP_ABORT("unable to init ipc for %s, pool %" PRIu64 "\n", + dev, pool); + } else ret = init_socket(pktio_entry, dev, pool); if (ret != 0) { @@ -408,6 +417,10 @@ int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len) case ODP_PKTIO_TYPE_LOOPBACK: pkts = deq_loopback(pktio_entry, pkt_table, len); break; + case ODP_PKTIO_TYPE_IPC_SLAVE: + case ODP_PKTIO_TYPE_IPC: + pkts = ipc_pktio_recv(pktio_entry, pkt_table, len); + break; default: pkts = -1; break; @@ -462,6 +475,10 @@ int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], int len) case ODP_PKTIO_TYPE_LOOPBACK: pkts = enq_loopback(pktio_entry, pkt_table, len); break; + case ODP_PKTIO_TYPE_IPC: + case ODP_PKTIO_TYPE_IPC_SLAVE: + pkts = ipc_pktio_send(pktio_entry, pkt_table, len); + break; default: pkts = -1; } diff --git a/platform/linux-generic/odp_packet_io_ipc.c b/platform/linux-generic/odp_packet_io_ipc.c new file mode 100644 index 0000000..f9ae87f --- /dev/null +++ b/platform/linux-generic/odp_packet_io_ipc.c @@ -0,0 +1,603 @@ +/* Copyright (c) 2015, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +#include <odp_packet_io_ipc_internal.h> +#include <odp_debug_internal.h> +#include <odp_packet_io_internal.h> +#include <odp_spin_internal.h> +#include <odp/system_info.h> +#include <odp_shm_internal.h> + +#include <sys/mman.h> +#include <sys/stat.h> +#include <fcntl.h> + +static void *_ipc_map_remote_pool(const char *name, size_t size); + +static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl) +{ + pool_entry_t *pool; + uint32_t pool_id; + odp_shm_t shm; + odp_shm_info_t info; + + pool_id = pool_handle_to_index(pool_hdl); + pool = get_pool_entry(pool_id); + shm = pool->s.pool_shm; + + odp_shm_info(shm, &info); + + return info.name; +} + +/** +* Look up for shared memory object. 
+* +* @param name name of shm object +* +* @return 0 on success, otherwise non-zero +*/ +static int _odp_shm_lookup_ipc(const char *name) +{ + int shm; + + shm = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR); + if (shm == -1) { + if (errno == ENOENT) + return -1; + else + ODP_ABORT("shm_open for %s err %s\n", + name, strerror(errno)); + } + close(shm); + return 0; +} + +static struct pktio_info *_ipc_map_pool_info(pktio_entry_t *pktio_entry, + const char *pool_name, + int flag) +{ + struct pktio_info *pinfo; + char name[ODP_POOL_NAME_LEN + sizeof("_info")]; + + /* Create info about remote pktio */ + snprintf(name, sizeof(name), "%s_info", pool_name); + odp_shm_t shm = odp_shm_reserve(name, sizeof(struct pktio_info), + ODP_CACHE_LINE_SIZE, + flag); + if (ODP_SHM_INVALID == shm) + ODP_ABORT("unable to reserve memory for shm info"); + pinfo = odp_shm_addr(shm); + if (flag != _ODP_SHM_PROC_NOCREAT) + pinfo->remote_pool_name[0] = 0; + + pktio_entry->s.ipc_pool_shm = shm; + return pinfo; +} + +static int _ipc_pktio_init_master(pktio_entry_t *pktio_entry, const char *dev, + odp_pool_t pool) +{ + char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")]; + pool_entry_t *pool_entry; + uint32_t pool_id; + void *ipc_pool_base; + struct pktio_info *pinfo; + const char *pool_name; + odp_shm_t shm; + + pool_id = pool_handle_to_index(pool); + pool_entry = get_pool_entry(pool_id); + + if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE) + ODP_ABORT(""); + + if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) { + ODP_DBG("too big ipc name\n"); + return -1; + } + + /* generate name in shm like ipc_pktio_r for + * to be processed packets ring. + */ + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); + pktio_entry->s.ipc_r = odph_ring_create(ipc_shm_name, + PKTIO_IPC_ENTRIES, + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); + if (!pktio_entry->s.ipc_r) { + ODP_DBG("pid %d unable to create ipc ring %s name\n", + getpid(), ipc_shm_name); + return -1; + } + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r), + odph_ring_free_count(pktio_entry->s.ipc_r)); + + /* generate name in shm like ipc_pktio_p for + * already processed packets + */ + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); + pktio_entry->s.ipc_p = odph_ring_create(ipc_shm_name, + PKTIO_IPC_ENTRIES, + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); + if (!pktio_entry->s.ipc_p) { + ODP_DBG("pid %d unable to create ipc ring %s name\n", + getpid(), ipc_shm_name); + goto free_r; + } + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p), + odph_ring_free_count(pktio_entry->s.ipc_p)); + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); + pktio_entry->s.ipc_r_slave = odph_ring_create(ipc_shm_name, + PKTIO_IPC_ENTRIES, + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); + if (!pktio_entry->s.ipc_r_slave) { + ODP_DBG("pid %d unable to create ipc ring %s name\n", + getpid(), ipc_shm_name); + goto free_p; + } + ODP_DBG("Created IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave), + odph_ring_free_count(pktio_entry->s.ipc_r_slave)); + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_p", dev); + pktio_entry->s.ipc_p_slave = odph_ring_create(ipc_shm_name, + PKTIO_IPC_ENTRIES, + ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST); + if (!pktio_entry->s.ipc_p_slave) { + ODP_DBG("pid %d unable to create ipc ring %s name\n", + getpid(), ipc_shm_name); + goto free_slave_r; + } + ODP_DBG("Created IPC ring: %s, 
count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave), + odph_ring_free_count(pktio_entry->s.ipc_p_slave)); + + /* Memory to store information about exported pool */ + pinfo = _ipc_map_pool_info(pktio_entry, dev, ODP_SHM_PROC); + + /* Set up pool name for remote info */ + pool_name = _ipc_odp_buffer_pool_shm_name(pool); + memcpy(pinfo->remote_pool_name, pool_name, strlen(pool_name)); + pinfo->shm_pkt_pool_size = pool_entry->s.pool_size; + pinfo->shm_pool_num = pool_entry->s.buf_num; + pinfo->shm_pkt_size = pool_entry->s.seg_size; + pinfo->mdata_offset = pool_entry->s.pool_mdata_addr - + pool_entry->s.pool_base_addr; + pinfo->slave.mdata_offset = 0; + ODP_DBG("Master waiting for slave to be connected now..\n"); + + /* Wait for remote process to export his pool. */ + ODP_DBG("Wait for second process set mdata_offset...\n"); + while (pinfo->slave.mdata_offset == 0) + odp_spin(); + + ODP_DBG("Wait for second process set mdata_offset... DONE.\n"); + + while (1) { + int ret = _odp_shm_lookup_ipc(pinfo->slave.pool_name); + if (!ret) + break; + ODP_DBG("Master looking for %s\n", pinfo->slave.pool_name); + sleep(1); + } + + ipc_pool_base = _ipc_map_remote_pool(pinfo->slave.pool_name, + pinfo->shm_pkt_pool_size); + pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + + pinfo->slave.mdata_offset; + pktio_entry->s.ipc_pool = pool; + + return 0; + +free_slave_r: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); +free_p: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); +free_r: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); + return -1; +} + +static odp_pool_t _ipc_odp_alloc_and_create_pool_slave(struct pktio_info *pinfo) +{ + odp_pool_t pool; + char *pool_name; + odp_pool_param_t params; + int num = pinfo->shm_pool_num; + uint32_t buf_size = pinfo->shm_pkt_size; + pool_entry_t *pool_entry; + + pool_name = calloc(1, strlen(pinfo->remote_pool_name) + + sizeof("ipc_pool_slave_")); + sprintf(pool_name, "ipc_pool_slave_%s", pinfo->remote_pool_name); + + ODP_DBG("slave uses pool %s\n", pool_name); + + memset(¶ms, 0, sizeof(params)); + params.pkt.num = num; + params.pkt.len = buf_size; + params.pkt.seg_len = buf_size; + params.type = ODP_POOL_PACKET; + + pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms); + if (pool == ODP_POOL_INVALID) + ODP_ABORT("Error: packet pool create failed.\n" + "num %d, len %d, seg_len %d\n", + params.pkt.num, params.pkt.len, params.pkt.seg_len); + + /* Export info so that master can connect to that pool*/ + snprintf(pinfo->slave.pool_name, 30, "%s", pool_name); + pool_entry = odp_pool_to_entry(pool); + pinfo->slave.mdata_offset = pool_entry->s.pool_mdata_addr - + pool_entry->s.pool_base_addr; + free(pool_name); + + return pool; +} + +static void *_ipc_map_remote_pool(const char *name, size_t size) +{ + odp_shm_t shm; + + ODP_DBG("Mapping remote pool %s, size %ld\n", name, size); + shm = odp_shm_reserve(name, + size, + ODP_CACHE_LINE_SIZE, + _ODP_SHM_PROC_NOCREAT); + if (shm == ODP_SHM_INVALID) + ODP_ABORT("unable map %s\n", name); + return odp_shm_addr(shm); +} + +static void *_ipc_shm_map(char *name, size_t size, int timeout) +{ + odp_shm_t shm; + int ret; + + while (1) { + ret = _odp_shm_lookup_ipc(name); + if (!ret) + break; + ODP_DBG("Waiting for %s\n", name); + if (timeout <= 0) + return NULL; + timeout--; + sleep(1); + } + 
+ shm = odp_shm_reserve(name, size, + ODP_CACHE_LINE_SIZE, + _ODP_SHM_PROC_NOCREAT); + if (ODP_SHM_INVALID == shm) + ODP_ABORT("unable to map: %s\n", name); + + return odp_shm_addr(shm); +} + +static int _ipc_pktio_init_slave(const char *dev, pktio_entry_t *pktio_entry) +{ + char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")]; + size_t ring_size = PKTIO_IPC_ENTRIES * sizeof(void *) + + sizeof(odph_ring_t); + struct pktio_info *pinfo; + void *ipc_pool_base; + odp_shm_t shm; + + if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE) + ODP_ABORT(""); + + if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) { + ODP_DBG("too big ipc name\n"); + return -1; + } + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); + pktio_entry->s.ipc_r = _ipc_shm_map(ipc_shm_name, ring_size, 10); + if (!pktio_entry->s.ipc_r) { + ODP_DBG("pid %d unable to find ipc ring %s name\n", + getpid(), dev); + return -1; + } + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r), + odph_ring_free_count(pktio_entry->s.ipc_r)); + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); + pktio_entry->s.ipc_p = _ipc_shm_map(ipc_shm_name, ring_size, 10); + if (!pktio_entry->s.ipc_p) { + ODP_DBG("pid %d unable to find ipc ring %s name\n", + getpid(), dev); + goto free_r; + } + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p), + odph_ring_free_count(pktio_entry->s.ipc_p)); + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); + pktio_entry->s.ipc_r_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10); + if (!pktio_entry->s.ipc_r_slave) { + ODP_DBG("pid %d unable to find ipc ring %s name\n", + getpid(), dev); + goto free_p; + } + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave), + odph_ring_free_count(pktio_entry->s.ipc_r_slave)); + + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_p", dev); + pktio_entry->s.ipc_p_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10); + if (!pktio_entry->s.ipc_p_slave) { + ODP_DBG("pid %d unable to find ipc ring %s name\n", + getpid(), dev); + goto free_slave_r; + } + ODP_DBG("Connected IPC ring: %s, count %d, free %d\n", + ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave), + odph_ring_free_count(pktio_entry->s.ipc_p_slave)); + + + /* Get info about remote pool */ + pinfo = _ipc_map_pool_info(pktio_entry, dev, _ODP_SHM_PROC_NOCREAT); + + ipc_pool_base = _ipc_map_remote_pool(pinfo->remote_pool_name, + pinfo->shm_pkt_pool_size); + pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + + pinfo->mdata_offset; + pktio_entry->s.ipc_pkt_size = pinfo->shm_pkt_size; + + /* @todo: to simplify in linux-generic implementation we create pool for + * packets from IPC queue. On receive implementation copies packets to + * that pool. Later we can try to reuse original pool without packets + * copying. 
+ */ + pktio_entry->s.ipc_pool = _ipc_odp_alloc_and_create_pool_slave(pinfo); + ODP_DBG("%s OK.\n", __func__); + return 0; + +free_slave_r: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_slave_r", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); +free_p: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_p", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); +free_r: + snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_r", dev); + shm = odp_shm_lookup(ipc_shm_name); + odp_shm_free(shm); + return -1; +} + +int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev, + odp_pool_t pool) +{ + int ret; + + /* if pool is zero we assume that it's slave process connects + * to shared memory already created by main process. + */ + if (ODP_POOL_INVALID != pool) { + pktio_entry->s.type = ODP_PKTIO_TYPE_IPC; + ret = _ipc_pktio_init_master(pktio_entry, dev, pool); + } else { + pktio_entry->s.type = ODP_PKTIO_TYPE_IPC_SLAVE; + ret = _ipc_pktio_init_slave(dev, pktio_entry); + } + + return ret; +} + + +static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf, + uint32_t offset, + uint32_t *seglen, + uint32_t limit) +{ + int seg_index = offset / buf->segsize; + int seg_offset = offset % buf->segsize; + void *addr = (char *)buf - buf->ipc_addr_offset[seg_index]; + + if (seglen != NULL) { + uint32_t buf_left = limit - offset; + *seglen = seg_offset + buf_left <= buf->segsize ? + buf_left : buf->segsize - seg_offset; + } + + return (void *)(seg_offset + (uint8_t *)addr); +} + + +static inline void *_ipc_packet_map(odp_packet_hdr_t *pkt_hdr, + uint32_t offset, uint32_t *seglen) +{ + if (offset > pkt_hdr->frame_len) + return NULL; + + return _ipc_buffer_map(&pkt_hdr->buf_hdr, + pkt_hdr->headroom + offset, seglen, + pkt_hdr->headroom + pkt_hdr->frame_len); +} + +int ipc_pktio_recv(pktio_entry_t *pktio_entry, + odp_packet_t pkt_table[], unsigned len) +{ + int pkts = 0; + int i; + odph_ring_t *r; + odph_ring_t *r_p; + odp_packet_t remote_pkts[PKTIO_IPC_ENTRIES]; + void **ipcbufs_p = (void *)&remote_pkts; + + if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) { + r = pktio_entry->s.ipc_r_slave; + r_p = pktio_entry->s.ipc_p_slave; + } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) { + r = pktio_entry->s.ipc_r; + r_p = pktio_entry->s.ipc_p; + } else { + ODP_ABORT("wrong type: %d\n", pktio_entry->s.type); + } + + pkts = odph_ring_mc_dequeue_burst(r, ipcbufs_p, len); + if (odp_unlikely(pkts < 0)) + ODP_ABORT("error to dequeue no packets\n"); + + /* fast path */ + if (odp_likely(0 == pkts)) + return 0; + + for (i = 0; i < pkts; i++) { + odp_pool_t pool; + odp_packet_t pkt; + odp_packet_hdr_t *phdr; + odp_buffer_bits_t handle; + int idx; /* Remote packet has coded pool and index. + * We need only index.*/ + void *pkt_data; + void *remote_pkt_data; + + handle.handle = _odp_packet_to_buffer(remote_pkts[i]); + idx = handle.index; + + /* Link to packed data. To this line we have Zero-Copy between + * processes, to simplify use packet copy in that version which + * can be removed later with more advance buffer management + * (ref counters). + */ + /* reverse odp_buf_to_hdr() */ + phdr = (odp_packet_hdr_t *)( + (char *)pktio_entry->s.ipc_pool_mdata_base + + (idx * ODP_CACHE_LINE_SIZE)); + + /* Allocate new packet. 
Select*/ + pool = pktio_entry->s.ipc_pool; + if (odp_unlikely(pool == ODP_POOL_INVALID)) + ODP_ABORT("invalid pool"); + + pkt = odp_packet_alloc(pool, phdr->frame_len); + if (odp_unlikely(pkt == ODP_PACKET_INVALID)) { + /* Original pool might be smaller then + * PKTIO_IPC_ENTRIES. If packet can not be + * allocated from pool at this time, + * simple get in on next recv() call. + */ + pkts = i - 1; + break; + } + + /* Copy packet data. */ + pkt_data = odp_packet_data(pkt); + if (odp_unlikely(pkt_data == NULL)) + ODP_ABORT("unable to map pkt_data ipc_slave %d\n", + (ODP_PKTIO_TYPE_IPC_SLAVE == + pktio_entry->s.type)); + + remote_pkt_data = _ipc_packet_map(phdr, 0, NULL); + if (odp_unlikely(remote_pkt_data == NULL)) + ODP_ABORT("unable to map remote_pkt_data, ipc_slave %d\n", + (ODP_PKTIO_TYPE_IPC_SLAVE == + pktio_entry->s.type)); + + /* @todo fix copy packet!!! */ + memcpy(pkt_data, remote_pkt_data, phdr->frame_len); + + /* Copy packets L2, L3 parsed offsets and size */ + odp_packet_hdr(pkt)->l2_offset = phdr->l2_offset; + odp_packet_hdr(pkt)->l3_offset = phdr->l3_offset; + odp_packet_hdr(pkt)->l4_offset = phdr->l4_offset; + odp_packet_hdr(pkt)->payload_offset = phdr->payload_offset; + + odp_packet_hdr(pkt)->vlan_s_tag = phdr->vlan_s_tag; + odp_packet_hdr(pkt)->vlan_c_tag = phdr->vlan_c_tag; + odp_packet_hdr(pkt)->l3_protocol = phdr->l3_protocol; + odp_packet_hdr(pkt)->l3_len = phdr->l3_len; + + odp_packet_hdr(pkt)->frame_len = phdr->frame_len; + odp_packet_hdr(pkt)->headroom = phdr->headroom; + odp_packet_hdr(pkt)->tailroom = phdr->tailroom; + pkt_table[i] = pkt; + } + + /* Now tell other process that we no longer need that buffers.*/ + pkts = odph_ring_mp_enqueue_burst(r_p, ipcbufs_p, pkts); + if (odp_unlikely(pkts < 0)) + ODP_ABORT("ipc: odp_ring_mp_enqueue_bulk r_p fail\n"); + + return pkts; +} + +int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[], + unsigned len) +{ + odph_ring_t *r; + odph_ring_t *r_p; + void **rbuf_p; + int ret; + unsigned i; + + if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) { + r = pktio_entry->s.ipc_r_slave; + r_p = pktio_entry->s.ipc_p_slave; + } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) { + r = pktio_entry->s.ipc_r; + r_p = pktio_entry->s.ipc_p; + } else { + ODP_ABORT("wrong type: %d\n", pktio_entry->s.type); + } + + /* Free already processed packets, if any */ + while (1) { + odp_packet_t r_p_pkts[PKTIO_IPC_ENTRIES]; + rbuf_p = (void *)&r_p_pkts; + + ret = odph_ring_mc_dequeue_burst(r_p, rbuf_p, + PKTIO_IPC_ENTRIES); + if (0 == ret) + break; + for (i = 0; i < (unsigned)ret; i++) + odp_packet_free(r_p_pkts[i]); + } + + /* Prepare packets: calculate offset from address. */ + for (i = 0; i < len; i++) { + int j; + odp_packet_t pkt = pkt_table[i]; + rbuf_p = (void *)&pkt; + odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt); + + /* buf_hdr.addr can not be used directly in remote process, + * convert it to offset + */ + for (j = 0; j < ODP_BUFFER_MAX_SEG; j++) + pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr - + (char *)pkt_hdr->buf_hdr.addr[j]; + } + + /* Put packets to ring to be processed in other process. 
diff --git a/platform/linux-generic/odp_pool.c b/platform/linux-generic/odp_pool.c
index a3d80b5..78153d1 100644
--- a/platform/linux-generic/odp_pool.c
+++ b/platform/linux-generic/odp_pool.c
@@ -231,8 +231,11 @@ odp_pool_t odp_pool_create(const char *name,
 			ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);
 
 		/* Reject create if pkt.len needs too many segments */
-		if (blk_size / seg_len > ODP_BUFFER_MAX_SEG)
+		if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) {
+			ODP_ERR("ODP_BUFFER_MAX_SEG exceeded: %d (max %d)\n",
+				blk_size / seg_len, ODP_BUFFER_MAX_SEG);
 			return ODP_POOL_INVALID;
+		}
 
 		buf_stride = sizeof(odp_packet_hdr_stride);
 		break;
@@ -249,8 +252,12 @@ odp_pool_t odp_pool_create(const char *name,
 	/* Validate requested number of buffers against addressable limits */
 	if (buf_num >
-	    (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
+	    (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) {
+		ODP_ERR("buf_num %d exceeds limit %d\n",
+			buf_num, ODP_BUFFER_MAX_BUFFERS /
+			(buf_stride / ODP_CACHE_LINE_SIZE));
 		return ODP_POOL_INVALID;
+	}
 
 	/* Find an unused buffer pool slot and iniitalize it as requested */
 	for (i = 0; i < ODP_CONFIG_POOLS; i++) {
@@ -302,7 +309,8 @@ odp_pool_t odp_pool_create(const char *name,
 		if (shm == ODP_SHM_NULL) {
 			shm = odp_shm_reserve(pool->s.name,
 					      pool->s.pool_size,
-					      ODP_PAGE_SIZE, 0);
+					      ODP_PAGE_SIZE,
+					      ODP_SHM_PROC);
 			if (shm == ODP_SHM_INVALID) {
 				POOL_UNLOCK(&pool->s.lock);
 				return ODP_POOL_INVALID;
@@ -310,8 +318,12 @@ odp_pool_t odp_pool_create(const char *name,
 			pool->s.pool_base_addr = odp_shm_addr(shm);
 		} else {
 			odp_shm_info_t info;
-			if (odp_shm_info(shm, &info) != 0 ||
-			    info.size < pool->s.pool_size) {
+			int ret;
+
+			ret = odp_shm_info(shm, &info);
+			if (ret != 0 || info.size < pool->s.pool_size) {
+				ODP_ERR("shm info %d, info size %ld, pool size %ld\n",
+					ret, info.size, pool->s.pool_size);
 				POOL_UNLOCK(&pool->s.lock);
 				return ODP_POOL_INVALID;
 			}
@@ -324,6 +336,7 @@ odp_pool_t odp_pool_create(const char *name,
 			    ((size_t)page_addr -
 			     (size_t)pool->s.pool_base_addr)) {
 				POOL_UNLOCK(&pool->s.lock);
+				ODP_ERR("small shm size\n");
 				return ODP_POOL_INVALID;
 			}
 			pool->s.pool_base_addr = page_addr;
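Both new error paths report a violation of simple pool-geometry arithmetic: a packet may need at most ODP_BUFFER_MAX_SEG segments of seg_len bytes, and the buffer count is bounded by how many buf_stride headers fit into the addressable index space. A standalone illustration of the same checks with made-up constants (the real limits come from the platform configuration headers):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-ins for the platform configuration constants. */
#define MAX_SEG		7		/* cf. ODP_BUFFER_MAX_SEG */
#define MAX_BUFFERS	(1 << 20)	/* cf. ODP_BUFFER_MAX_BUFFERS */
#define CACHE_LINE	64		/* cf. ODP_CACHE_LINE_SIZE */

static int pool_params_ok(uint32_t blk_size, uint32_t seg_len,
			  uint32_t buf_num, uint32_t buf_stride)
{
	/* Too many segments needed to cover one packet? */
	if (blk_size / seg_len > MAX_SEG) {
		printf("need %u segments, max %u\n",
		       blk_size / seg_len, MAX_SEG);
		return 0;
	}

	/* More buffers requested than the index space can address? */
	if (buf_num > MAX_BUFFERS / (buf_stride / CACHE_LINE)) {
		printf("buf_num %u exceeds limit %u\n",
		       buf_num, MAX_BUFFERS / (buf_stride / CACHE_LINE));
		return 0;
	}

	return 1;
}

int main(void)
{
	/* 9000-byte frame, 2048-byte segments -> 5 segments: accepted. */
	printf("%s\n", pool_params_ok(10240, 2048, 1024, 256) ? "ok" : "rejected");
	/* Same frame, 1024-byte segments -> 9 segments: rejected. */
	printf("%s\n", pool_params_ok(9216, 1024, 1024, 256) ? "ok" : "rejected");
	return 0;
}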
diff --git a/platform/linux-generic/odp_shared_memory.c b/platform/linux-generic/odp_shared_memory.c
index ab48dda..5de48d3 100644
--- a/platform/linux-generic/odp_shared_memory.c
+++ b/platform/linux-generic/odp_shared_memory.c
@@ -15,6 +15,7 @@
 #include <odp/debug.h>
 #include <odp_debug_internal.h>
 #include <odp_align_internal.h>
+#include <odp_shm_internal.h>
 #include <odp/config.h>
 
 #include <unistd.h>
@@ -189,7 +190,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
 	int fd = -1;
 	int map_flag = MAP_SHARED;
 	/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
-	int oflag = O_RDWR | O_CREAT | O_TRUNC;
+	int oflag = O_RDWR;
 	uint64_t alloc_size;
 	uint64_t page_sz, huge_sz;
 #ifdef MAP_HUGETLB
@@ -207,7 +208,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
 	alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
 #endif
 
-	if (flags & ODP_SHM_PROC) {
+	if (flags & ODP_SHM_PROC)
+		oflag |= O_CREAT | O_TRUNC;
+
+	if (flags & (ODP_SHM_PROC | _ODP_SHM_PROC_NOCREAT)) {
+		need_huge_page = 0;
+
 		/* Creates a file to /dev/shm */
 		fd = shm_open(name, oflag,
 			      S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
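With this change only ODP_SHM_PROC creates and truncates the named object, while the internal _ODP_SHM_PROC_NOCREAT flag attaches to an object that must already exist, so a slave process can map memory reserved by the master. The same two roles expressed directly in POSIX, without ODP (region name and size are arbitrary for the example; link with -lrt on older glibc):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

#define SHM_NAME "/example_ipc_region"	/* hypothetical name */
#define SHM_SIZE 4096

/* Master role: create (and truncate) the named object, like ODP_SHM_PROC. */
static void *master_map(void)
{
	int fd = shm_open(SHM_NAME, O_RDWR | O_CREAT | O_TRUNC,
			  S_IRUSR | S_IWUSR);

	if (fd < 0 || ftruncate(fd, SHM_SIZE) != 0)
		return MAP_FAILED;
	return mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

/* Slave role: attach to what the master created, like _ODP_SHM_PROC_NOCREAT. */
static void *slave_map(void)
{
	int fd = shm_open(SHM_NAME, O_RDWR, S_IRUSR | S_IWUSR);

	if (fd < 0)
		return MAP_FAILED;
	return mmap(NULL, SHM_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
}

int main(void)
{
	char *m = master_map();
	char *s = slave_map();	/* normally done in the other process */

	if (m == MAP_FAILED || s == MAP_FAILED)
		return 1;
	strcpy(m, "written through the master mapping");
	printf("slave sees: %s\n", s);	/* same physical pages */
	shm_unlink(SHM_NAME);
	return 0;
}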