@@ -156,10 +156,11 @@ typedef struct odph_ring {
} odph_ring_t;
-#define ODPH_RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer".*/
-#define ODPH_RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer".*/
-#define ODPH_RING_SHM_PROC 0x0004 /* If set - ring is visible from different
+#define ODPH_RING_F_SP_ENQ (1 << 0) /* The default enqueue is "single-producer".*/
+#define ODPH_RING_F_SC_DEQ (1 << 1) /* The default dequeue is "single-consumer".*/
+#define ODPH_RING_SHM_PROC (1 << 2) /* If set - ring is visible from different
processes. Default is thread visible. */
+#define ODPH_RING_NO_LIST (1 << 3) /* Do not link ring to linked list. */
#define ODPH_RING_QUOT_EXCEED (1 << 31) /* Quota exceed for burst ops */
#define ODPH_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask */
@@ -199,7 +199,8 @@ odph_ring_create(const char *name, unsigned count, unsigned flags)
r->prod.tail = 0;
r->cons.tail = 0;
- TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
+ if (!(flags & ODPH_RING_NO_LIST))
+ TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
} else {
ODP_ERR("Cannot reserve memory\n");
}
@@ -120,11 +120,13 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_io_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_packet_io_ipc_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_io_queue.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_socket.h \
${top_srcdir}/platform/linux-generic/include/odp_pool_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_queue_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_schedule_internal.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_shm_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_spin_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_timer_internal.h \
${top_srcdir}/platform/linux-generic/Makefile.inc
@@ -155,6 +157,7 @@ __LIB__libodp_la_SOURCES = \
odp_packet.c \
odp_packet_flags.c \
odp_packet_io.c \
+ odp_packet_io_ipc.c \
odp_packet_socket.c \
odp_pool.c \
odp_queue.c \
@@ -129,6 +129,9 @@ typedef struct odp_buffer_hdr_t {
uint32_t uarea_size; /* size of user area */
uint32_t segcount; /* segment count */
uint32_t segsize; /* segment size */
+	/* An IPC-mapped peer process cannot dereference our pointers,
+	 * so per-segment offsets have to be used instead */
+ uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG];
void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */
} odp_buffer_hdr_t;
@@ -23,6 +23,7 @@ extern "C" {
#include <odp_classification_datamodel.h>
#include <odp_align_internal.h>
#include <odp_debug_internal.h>
+#include <odp/helper/ring.h>
#include <odp/config.h>
#include <odp/hints.h>
@@ -36,8 +37,36 @@ typedef enum {
ODP_PKTIO_TYPE_SOCKET_MMSG,
ODP_PKTIO_TYPE_SOCKET_MMAP,
ODP_PKTIO_TYPE_LOOPBACK,
+ ODP_PKTIO_TYPE_IPC,
+ ODP_PKTIO_TYPE_IPC_SLAVE,
} odp_pktio_type_t;
+typedef struct {
+	/* TX */
+	struct {
+		odph_ring_t *prod; /**< ODP ring for IPC msg packets
+					indexes transmitted to shared
+					memory */
+		odph_ring_t *cons; /**< ODP ring for IPC msg packets
+					indexes already processed by remote
+					process */
+	} m; /* master */
+	/* RX */
+	struct {
+		odph_ring_t *prod; /**< ODP ring for IPC msg packets
+					indexes received from shared
+					memory (from remote process) */
+		odph_ring_t *cons; /**< ODP ring for IPC msg packets
+					indexes already processed by
+					current process */
+	} s; /* slave */
+	void *pool_base;		/**< Remote pool base addr */
+	void *pool_mdata_base;	/**< Remote pool mdata base addr */
+	uint64_t pkt_size;	/**< Packet size in remote pool */
+	odp_pool_t pool;	/**< Pool of main process */
+	odp_shm_t pool_shm;	/**< Shm memory for remote pool */
+} _ipc_pktio_t;
+
struct pktio_entry {
odp_spinlock_t lock; /**< entry spinlock */
int taken; /**< is entry taken(1) or free(0) */
@@ -53,6 +82,7 @@ struct pktio_entry {
char name[IFNAMSIZ]; /**< name of pktio provided to
pktio_open() */
odp_bool_t promisc; /**< promiscuous mode state */
+ _ipc_pktio_t ipc; /**< IPC pktio data */
};
typedef union {
new file mode 100644
@@ -0,0 +1,51 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/packet_io.h>
+#include <odp_packet_io_internal.h>
+#include <odp/packet.h>
+#include <odp_packet_internal.h>
+#include <odp_internal.h>
+#include <odp/shared_memory.h>
+
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+/* IPC packet I/O over odph_ring */
+#include <odp/helper/ring.h>
+
+#define PKTIO_IPC_ENTRIES 4096 /**< number of odp buffers in
+ odp ring queue */
+
+/* That struct is exported to shared memory, so that two processes can find
+ * each other.
+ */
+struct pktio_info {
+	char remote_pool_name[ODP_POOL_NAME_LEN];
+	int shm_pool_bufs_num; /**< number of buffers in remote pool */
+	size_t shm_pkt_pool_size; /**< size of remote pool */
+	uint32_t shm_pkt_size; /**< size of packet/segment in remote pool */
+	odp_shm_t shm; /**< current structure stored in this shm */
+	size_t mdata_offset; /**< offset from shared memory block start
+			      * to pool_mdata_addr
+			      * (linux-generic pool specific) */
+	struct {
+		size_t mdata_offset; /**< offset from shared memory block start
+				      * to pool_mdata_addr in remote process.
+				      * (linux-generic pool specific) */
+		char pool_name[ODP_POOL_NAME_LEN];
+	} slave;
+} __packed;
+
+int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev,
+ odp_pool_t pool);
+
+int ipc_pktio_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned len);
+
+int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned len);
new file mode 100644
@@ -0,0 +1,20 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef ODP_SHM_INTERNAL_H_
+#define ODP_SHM_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define _ODP_SHM_PROC_NOCREAT 0x4 /**< Do not create shm if not exist */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
@@ -18,6 +18,7 @@
#include <odp_schedule_internal.h>
#include <odp_classification_internal.h>
#include <odp_debug_internal.h>
+#include <odp_packet_io_ipc_internal.h>
#include <string.h>
#include <sys/ioctl.h>
@@ -25,6 +26,9 @@
#include <ifaddrs.h>
#include <errno.h>
+#include <sys/types.h>
+#include <unistd.h>
+
/* MTU to be reported for the "loop" interface */
#define PKTIO_LOOP_MTU 1500
/* MAC address for the "loop" interface */
@@ -265,7 +269,12 @@ static odp_pktio_t setup_pktio_entry(const char *dev, odp_pool_t pool)
if (strcmp(dev, "loop") == 0)
ret = init_loop(pktio_entry, id);
- else
+ else if (!strncmp(dev, "ipc", 3)) {
+ ret = ipc_pktio_init(pktio_entry, dev, pool);
+ if (ret != 0)
+ ODP_ABORT("unable to init ipc for %s, pool %" PRIu64 "\n",
+ dev, pool);
+ } else
ret = init_socket(pktio_entry, dev, pool);
if (ret != 0) {
@@ -410,6 +419,10 @@ int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
case ODP_PKTIO_TYPE_LOOPBACK:
pkts = deq_loopback(pktio_entry, pkt_table, len);
break;
+ case ODP_PKTIO_TYPE_IPC_SLAVE:
+ case ODP_PKTIO_TYPE_IPC:
+ pkts = ipc_pktio_recv(pktio_entry, pkt_table, len);
+ break;
default:
pkts = -1;
break;
@@ -464,6 +477,10 @@ int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], int len)
case ODP_PKTIO_TYPE_LOOPBACK:
pkts = enq_loopback(pktio_entry, pkt_table, len);
break;
+ case ODP_PKTIO_TYPE_IPC:
+ case ODP_PKTIO_TYPE_IPC_SLAVE:
+ pkts = ipc_pktio_send(pktio_entry, pkt_table, len);
+ break;
default:
pkts = -1;
}
new file mode 100644
@@ -0,0 +1,594 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_packet_io_ipc_internal.h>
+#include <odp_debug_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_spin_internal.h>
+#include <odp/system_info.h>
+#include <odp_shm_internal.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+static void *_ipc_map_remote_pool(const char *name, size_t size);
+
+static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl)
+{
+ pool_entry_t *pool;
+ uint32_t pool_id;
+ odp_shm_t shm;
+ odp_shm_info_t info;
+
+ pool_id = pool_handle_to_index(pool_hdl);
+ pool = get_pool_entry(pool_id);
+ shm = pool->s.pool_shm;
+
+ odp_shm_info(shm, &info);
+
+ return info.name;
+}
+
+/**
+* Look up for shared memory object.
+*
+* @param name name of shm object
+*
+* @return 0 on success, otherwise non-zero
+*/
+static int _odp_shm_lookup_ipc(const char *name)
+{
+ int shm;
+
+ shm = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR);
+ if (shm == -1) {
+ if (errno == ENOENT)
+ return -1;
+ ODP_ABORT("shm_open for %s err %s\n",
+ name, strerror(errno));
+ }
+ close(shm);
+ return 0;
+}
+
+static struct pktio_info *_ipc_map_pool_info(pktio_entry_t *pktio_entry,
+ const char *pool_name,
+ int flag)
+{
+ struct pktio_info *pinfo;
+ char name[ODP_POOL_NAME_LEN + sizeof("_info")];
+
+ /* Create info about remote pktio */
+ snprintf(name, sizeof(name), "%s_info", pool_name);
+ odp_shm_t shm = odp_shm_reserve(name, sizeof(struct pktio_info),
+ ODP_CACHE_LINE_SIZE,
+ flag);
+ if (ODP_SHM_INVALID == shm)
+ ODP_ABORT("unable to reserve memory for shm info");
+ pinfo = odp_shm_addr(shm);
+ if (flag != _ODP_SHM_PROC_NOCREAT)
+ pinfo->remote_pool_name[0] = 0;
+
+ pktio_entry->s.ipc.pool_shm = shm;
+ return pinfo;
+}
+
+static int _ipc_pktio_init_master(pktio_entry_t *pktio_entry, const char *dev,
+				  odp_pool_t pool)
+{
+	char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")];
+	pool_entry_t *pool_entry;
+	uint32_t pool_id;
+	void *ipc_pool_base;
+	struct pktio_info *pinfo;
+	const char *pool_name;
+	odp_shm_t shm;
+
+	pool_id = pool_handle_to_index(pool);
+	pool_entry = get_pool_entry(pool_id);
+
+	if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE)
+		ODP_ABORT("pool name size and ring name size mismatch\n");
+
+	if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) {
+		ODP_DBG("too big ipc name\n");
+		return -1;
+	}
+
+	/* generate name in shm like ipc_pktio_r for
+	 * to be processed packets ring.
+	 */
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
+	pktio_entry->s.ipc.m.prod = odph_ring_create(ipc_shm_name,
+			PKTIO_IPC_ENTRIES,
+			ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST);
+	if (!pktio_entry->s.ipc.m.prod) {
+		ODP_DBG("pid %d unable to create ipc ring %s name\n",
+			getpid(), ipc_shm_name);
+		return -1;
+	}
+	ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+		ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.m.prod),
+		odph_ring_free_count(pktio_entry->s.ipc.m.prod));
+
+	/* generate name in shm like ipc_pktio_p for
+	 * already processed packets
+	 */
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
+	pktio_entry->s.ipc.m.cons = odph_ring_create(ipc_shm_name,
+			PKTIO_IPC_ENTRIES,
+			ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST);
+	if (!pktio_entry->s.ipc.m.cons) {
+		ODP_DBG("pid %d unable to create ipc ring %s name\n",
+			getpid(), ipc_shm_name);
+		goto free_m_prod;
+	}
+	ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+		ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.m.cons),
+		odph_ring_free_count(pktio_entry->s.ipc.m.cons));
+
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
+	pktio_entry->s.ipc.s.prod = odph_ring_create(ipc_shm_name,
+			PKTIO_IPC_ENTRIES,
+			ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST);
+	if (!pktio_entry->s.ipc.s.prod) {
+		ODP_DBG("pid %d unable to create ipc ring %s name\n",
+			getpid(), ipc_shm_name);
+		goto free_m_cons;
+	}
+	ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+		ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.s.prod),
+		odph_ring_free_count(pktio_entry->s.ipc.s.prod));
+
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
+	pktio_entry->s.ipc.s.cons = odph_ring_create(ipc_shm_name,
+			PKTIO_IPC_ENTRIES,
+			ODPH_RING_SHM_PROC | ODPH_RING_NO_LIST);
+	if (!pktio_entry->s.ipc.s.cons) {
+		ODP_DBG("pid %d unable to create ipc ring %s name\n",
+			getpid(), ipc_shm_name);
+		goto free_s_prod;
+	}
+	ODP_DBG("Created IPC ring: %s, count %d, free %d\n",
+		ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.s.cons),
+		odph_ring_free_count(pktio_entry->s.ipc.s.cons));
+
+	/* Memory to store information about exported pool */
+	pinfo = _ipc_map_pool_info(pktio_entry, dev, ODP_SHM_PROC);
+
+	/* Set up pool name for remote info (copy NUL terminator too) */
+	pool_name = _ipc_odp_buffer_pool_shm_name(pool);
+	memcpy(pinfo->remote_pool_name, pool_name, strlen(pool_name) + 1);
+	pinfo->shm_pkt_pool_size = pool_entry->s.pool_size;
+	pinfo->shm_pool_bufs_num = pool_entry->s.buf_num;
+	pinfo->shm_pkt_size = pool_entry->s.seg_size;
+	pinfo->mdata_offset = pool_entry->s.pool_mdata_addr -
+			      pool_entry->s.pool_base_addr;
+	pinfo->slave.mdata_offset = 0;
+	ODP_DBG("Master waiting for slave to be connected now..\n");
+
+	/* Wait for remote process to export his pool. */
+	ODP_DBG("Wait for second process set mdata_offset...\n");
+	while (pinfo->slave.mdata_offset == 0)
+		odp_spin();
+
+	ODP_DBG("Wait for second process set mdata_offset... DONE.\n");
+
+	while (1) {
+		int ret;
+
+		ret = _odp_shm_lookup_ipc(pinfo->slave.pool_name);
+		if (!ret)
+			break;
+		ODP_DBG("Master looking for %s\n", pinfo->slave.pool_name);
+		sleep(1);
+	}
+
+	ipc_pool_base = _ipc_map_remote_pool(pinfo->slave.pool_name,
+					     pinfo->shm_pkt_pool_size);
+	pktio_entry->s.ipc.pool_mdata_base = (char *)ipc_pool_base +
+					     pinfo->slave.mdata_offset;
+	pktio_entry->s.ipc.pool = pool;
+
+	return 0;
+
+free_s_prod:
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
+	shm = odp_shm_lookup(ipc_shm_name);
+	odp_shm_free(shm);
+free_m_cons:
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
+	shm = odp_shm_lookup(ipc_shm_name);
+	odp_shm_free(shm);
+free_m_prod:
+	snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
+	shm = odp_shm_lookup(ipc_shm_name);
+	odp_shm_free(shm);
+	return -1;
+}
+
+static odp_pool_t _ipc_odp_alloc_and_create_pool_slave(struct pktio_info *pinfo)
+{
+	odp_pool_t pool;
+	char *pool_name;
+	odp_pool_param_t params;
+	int num = pinfo->shm_pool_bufs_num;
+	uint32_t buf_size = pinfo->shm_pkt_size;
+	pool_entry_t *pool_entry;
+
+	pool_name = calloc(1, strlen(pinfo->remote_pool_name) +
+			   sizeof("ipc_pool_slave_"));
+	sprintf(pool_name, "ipc_pool_slave_%s", pinfo->remote_pool_name);
+
+	ODP_DBG("slave uses pool %s\n", pool_name);
+
+	memset(&params, 0, sizeof(params));
+	params.pkt.num = num;
+	params.pkt.len = buf_size;
+	params.pkt.seg_len = buf_size;
+	params.type = ODP_POOL_PACKET;
+
+	pool = odp_pool_create(pool_name, ODP_SHM_NULL, &params);
+	if (pool == ODP_POOL_INVALID)
+		ODP_ABORT("Error: packet pool create failed.\n"
+			  "num %d, len %d, seg_len %d\n",
+			  params.pkt.num, params.pkt.len, params.pkt.seg_len);
+
+	/* Export info so that master can connect to that pool*/
+	snprintf(pinfo->slave.pool_name, ODP_POOL_NAME_LEN, "%s", pool_name);
+	pool_entry = odp_pool_to_entry(pool);
+	pinfo->slave.mdata_offset = pool_entry->s.pool_mdata_addr -
+				    pool_entry->s.pool_base_addr;
+	free(pool_name);
+
+	return pool;
+}
+
+static void *_ipc_map_remote_pool(const char *name, size_t size)
+{
+ odp_shm_t shm;
+
+ ODP_DBG("Mapping remote pool %s, size %ld\n", name, size);
+ shm = odp_shm_reserve(name,
+ size,
+ ODP_CACHE_LINE_SIZE,
+ _ODP_SHM_PROC_NOCREAT);
+ if (shm == ODP_SHM_INVALID)
+ ODP_ABORT("unable map %s\n", name);
+ return odp_shm_addr(shm);
+}
+
+static void *_ipc_shm_map(char *name, size_t size, int timeout)
+{
+ odp_shm_t shm;
+ int ret;
+
+ while (1) {
+ ret = _odp_shm_lookup_ipc(name);
+ if (!ret)
+ break;
+ ODP_DBG("Waiting for %s\n", name);
+ if (timeout <= 0)
+ return NULL;
+ timeout--;
+ sleep(1);
+ }
+
+ shm = odp_shm_reserve(name, size,
+ ODP_CACHE_LINE_SIZE,
+ _ODP_SHM_PROC_NOCREAT);
+ if (ODP_SHM_INVALID == shm)
+ ODP_ABORT("unable to map: %s\n", name);
+
+ return odp_shm_addr(shm);
+}
+
+static int _ipc_pktio_init_slave(const char *dev, pktio_entry_t *pktio_entry)
+{
+ char ipc_shm_name[ODP_POOL_NAME_LEN + sizeof("_slave_r")];
+ size_t ring_size = PKTIO_IPC_ENTRIES * sizeof(void *) +
+ sizeof(odph_ring_t);
+ struct pktio_info *pinfo;
+ void *ipc_pool_base;
+ odp_shm_t shm;
+
+ if (ODP_POOL_NAME_LEN != ODPH_RING_NAMESIZE)
+ ODP_ABORT("");
+
+ if (strlen(dev) > (ODP_POOL_NAME_LEN - sizeof("_slave_r"))) {
+ ODP_DBG("too big ipc name\n");
+ return -1;
+ }
+
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
+ pktio_entry->s.ipc.m.prod = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc.m.prod) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ return -1;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.m.prod),
+ odph_ring_free_count(pktio_entry->s.ipc.m.prod));
+
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
+ pktio_entry->s.ipc.m.cons = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc.m.cons) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto free_m_prod;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.m.cons),
+ odph_ring_free_count(pktio_entry->s.ipc.m.cons));
+
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
+ pktio_entry->s.ipc.s.prod = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc.s.prod) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto free_m_cons;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.s.prod),
+ odph_ring_free_count(pktio_entry->s.ipc.s.prod));
+
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_cons", dev);
+ pktio_entry->s.ipc.s.cons = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc.s.cons) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto free_s_prod;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc.s.cons),
+ odph_ring_free_count(pktio_entry->s.ipc.s.cons));
+
+ /* Get info about remote pool */
+ pinfo = _ipc_map_pool_info(pktio_entry, dev, _ODP_SHM_PROC_NOCREAT);
+
+ ipc_pool_base = _ipc_map_remote_pool(pinfo->remote_pool_name,
+ pinfo->shm_pkt_pool_size);
+ pktio_entry->s.ipc.pool_mdata_base = (char *)ipc_pool_base +
+ pinfo->mdata_offset;
+ pktio_entry->s.ipc.pkt_size = pinfo->shm_pkt_size;
+
+ /* @todo: to simplify in linux-generic implementation we create pool for
+ * packets from IPC queue. On receive implementation copies packets to
+ * that pool. Later we can try to reuse original pool without packets
+ * copying.
+ */
+ pktio_entry->s.ipc.pool = _ipc_odp_alloc_and_create_pool_slave(pinfo);
+ ODP_DBG("%s OK.\n", __func__);
+ return 0;
+
+free_s_prod:
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_s_prod", dev);
+ shm = odp_shm_lookup(ipc_shm_name);
+ odp_shm_free(shm);
+free_m_cons:
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_cons", dev);
+ shm = odp_shm_lookup(ipc_shm_name);
+ odp_shm_free(shm);
+free_m_prod:
+ snprintf(ipc_shm_name, sizeof(ipc_shm_name), "%s_m_prod", dev);
+ shm = odp_shm_lookup(ipc_shm_name);
+ odp_shm_free(shm);
+ return -1;
+}
+
+int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev,
+		   odp_pool_t pool)
+{
+	int ret;
+
+	/* If pool is ODP_POOL_INVALID we assume this is the slave process
+	 * connecting to shared memory already created by the master.
+	 */
+	if (ODP_POOL_INVALID != pool) {
+		pktio_entry->s.type = ODP_PKTIO_TYPE_IPC;
+		ret = _ipc_pktio_init_master(pktio_entry, dev, pool);
+	} else {
+		pktio_entry->s.type = ODP_PKTIO_TYPE_IPC_SLAVE;
+		ret = _ipc_pktio_init_slave(dev, pktio_entry);
+	}
+
+	return ret;
+}
+
+static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf,
+ uint32_t offset,
+ uint32_t *seglen,
+ uint32_t limit)
+{
+ int seg_index = offset / buf->segsize;
+ int seg_offset = offset % buf->segsize;
+ void *addr = (char *)buf - buf->ipc_addr_offset[seg_index];
+
+ if (seglen) {
+ uint32_t buf_left = limit - offset;
+ *seglen = seg_offset + buf_left <= buf->segsize ?
+ buf_left : buf->segsize - seg_offset;
+ }
+
+ return (void *)(seg_offset + (uint8_t *)addr);
+}
+
+static inline void *_ipc_packet_map(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset, uint32_t *seglen)
+{
+ if (offset > pkt_hdr->frame_len)
+ return NULL;
+
+ return _ipc_buffer_map(&pkt_hdr->buf_hdr,
+ pkt_hdr->headroom + offset, seglen,
+ pkt_hdr->headroom + pkt_hdr->frame_len);
+}
+
+int ipc_pktio_recv(pktio_entry_t *pktio_entry,
+		   odp_packet_t pkt_table[], unsigned len)
+{
+	int pkts = 0;
+	int i;
+	odph_ring_t *r;
+	odph_ring_t *r_p;
+	odp_packet_t remote_pkts[PKTIO_IPC_ENTRIES];
+	void **ipcbufs_p = (void *)&remote_pkts;
+
+	if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) {
+		r = pktio_entry->s.ipc.s.prod;
+		r_p = pktio_entry->s.ipc.s.cons;
+	} else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) {
+		r = pktio_entry->s.ipc.m.prod;
+		r_p = pktio_entry->s.ipc.m.cons;
+	} else {
+		ODP_ABORT("wrong type: %d\n", pktio_entry->s.type);
+	}
+
+	pkts = odph_ring_mc_dequeue_burst(r, ipcbufs_p, len);
+	if (odp_unlikely(pkts < 0))
+		ODP_ABORT("odph_ring_mc_dequeue_burst failed\n");
+
+	/* fast path */
+	if (odp_likely(0 == pkts))
+		return 0;
+
+	for (i = 0; i < pkts; i++) {
+		odp_pool_t pool;
+		odp_packet_t pkt;
+		odp_packet_hdr_t *phdr;
+		odp_buffer_bits_t handle;
+		int idx; /* Remote packet has coded pool and index.
+			  * We need only index.*/
+		void *pkt_data;
+		void *remote_pkt_data;
+
+		handle.handle = _odp_packet_to_buffer(remote_pkts[i]);
+		idx = handle.index;
+
+		/* Link to packed data. To this line we have Zero-Copy between
+		 * processes, to simplify use packet copy in that version which
+		 * can be removed later with more advance buffer management
+		 * (ref counters).
+		 */
+		/* reverse odp_buf_to_hdr() */
+		phdr = (odp_packet_hdr_t *)(
+			(char *)pktio_entry->s.ipc.pool_mdata_base +
+			(idx * ODP_CACHE_LINE_SIZE));
+
+		/* Allocate new packet. Select*/
+		pool = pktio_entry->s.ipc.pool;
+		if (odp_unlikely(pool == ODP_POOL_INVALID))
+			ODP_ABORT("invalid pool");
+
+		pkt = odp_packet_alloc(pool, phdr->frame_len);
+		if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
+			/* Original pool might be smaller than
+			 * PKTIO_IPC_ENTRIES. If a packet can not be
+			 * allocated now, return only the i packets
+			 * copied so far; remaining dequeued remote
+			 * buffers are dropped (@todo requeue them). */
+			pkts = i;
+			break;
+		}
+
+		/* Copy packet data. */
+		pkt_data = odp_packet_data(pkt);
+		if (odp_unlikely(!pkt_data))
+			ODP_ABORT("unable to map pkt_data ipc_slave %d\n",
+				  (ODP_PKTIO_TYPE_IPC_SLAVE ==
+					pktio_entry->s.type));
+
+		remote_pkt_data = _ipc_packet_map(phdr, 0, NULL);
+		if (odp_unlikely(!remote_pkt_data))
+			ODP_ABORT("unable to map remote_pkt_data, ipc_slave %d\n",
+				  (ODP_PKTIO_TYPE_IPC_SLAVE ==
+					pktio_entry->s.type));
+
+		/* @todo fix copy packet!!! */
+		memcpy(pkt_data, remote_pkt_data, phdr->frame_len);
+
+		/* Copy packets L2, L3 parsed offsets and size */
+		copy_packet_parser_metadata(phdr, odp_packet_hdr(pkt));
+
+		odp_packet_hdr(pkt)->frame_len = phdr->frame_len;
+		odp_packet_hdr(pkt)->headroom = phdr->headroom;
+		odp_packet_hdr(pkt)->tailroom = phdr->tailroom;
+		pkt_table[i] = pkt;
+	}
+
+	/* Now tell other process that we no longer need that buffers.*/
+	pkts = odph_ring_mp_enqueue_burst(r_p, ipcbufs_p, pkts);
+	if (odp_unlikely(pkts < 0))
+		ODP_ABORT("ipc: odph_ring_mp_enqueue_burst r_p fail\n");
+
+	return pkts;
+}
+
+int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+		   unsigned len)
+{
+	odph_ring_t *r;
+	odph_ring_t *r_p;
+	void **rbuf_p;
+	int ret;
+	unsigned i;
+
+	if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) {
+		r = pktio_entry->s.ipc.s.prod;
+		r_p = pktio_entry->s.ipc.s.cons;
+	} else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) {
+		r = pktio_entry->s.ipc.m.prod;
+		r_p = pktio_entry->s.ipc.m.cons;
+	} else {
+		ODP_ABORT("wrong type: %d\n", pktio_entry->s.type);
+	}
+
+	/* Free already processed packets, if any */
+	while (1) {
+		odp_packet_t r_p_pkts[PKTIO_IPC_ENTRIES];
+
+		rbuf_p = (void *)&r_p_pkts;
+		ret = odph_ring_mc_dequeue_burst(r_p, rbuf_p,
+						 PKTIO_IPC_ENTRIES);
+		if (0 == ret)
+			break;
+		for (i = 0; i < (unsigned)ret; i++)
+			odp_packet_free(r_p_pkts[i]);
+	}
+
+	/* Prepare packets: calculate offset from address. */
+	for (i = 0; i < len; i++) {
+		int j;
+		odp_packet_t pkt = pkt_table[i];
+		odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+		/* buf_hdr.addr can not be used directly in remote
+		 * process, so convert every segment address to an
+		 * offset relative to the packet header; the remote
+		 * side re-creates the addresses from these offsets.
+		 */
+
+		for (j = 0; j < ODP_BUFFER_MAX_SEG; j++)
+			pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr -
+				(char *)pkt_hdr->buf_hdr.addr[j];
+	}
+
+	/* Put packets to ring to be processed in other process. */
+	rbuf_p = (void *)&pkt_table[0];
+	ret = odph_ring_mp_enqueue_burst(r, rbuf_p, len);
+	if (odp_unlikely(ret < 0)) {
+		ODP_ERR("pid %d odph_ring_mp_enqueue_burst fail, ipc_slave %d, ret %d\n",
+			getpid(),
+			(ODP_PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.type),
+			ret);
+		ODP_ERR("odp_ring_full: %d, odp_ring_count %d, odph_ring_free_count %d\n",
+			odph_ring_full(r), odph_ring_count(r),
+			odph_ring_free_count(r));
+	}
+
+	return ret;
+}
@@ -219,8 +219,11 @@ odp_pool_t _pool_create(const char *name,
ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);
/* Reject create if pkt.len needs too many segments */
- if (blk_size / seg_len > ODP_BUFFER_MAX_SEG)
+ if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) {
+ ODP_ERR("ODP_BUFFER_MAX_SEG exceed %d(%d)\n",
+ blk_size / seg_len, ODP_BUFFER_MAX_SEG);
return ODP_POOL_INVALID;
+ }
p_udata_size = params->pkt.uarea_size;
udata_stride = ODP_ALIGN_ROUNDUP(p_udata_size,
@@ -241,8 +244,12 @@ odp_pool_t _pool_create(const char *name,
/* Validate requested number of buffers against addressable limits */
if (buf_num >
- (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
+ (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) {
+ ODP_ERR("buf_num %d > then expected %d\n",
+ buf_num, ODP_BUFFER_MAX_BUFFERS /
+ (buf_stride / ODP_CACHE_LINE_SIZE));
return ODP_POOL_INVALID;
+ }
/* Find an unused buffer pool slot and iniitalize it as requested */
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
@@ -301,8 +308,12 @@ odp_pool_t _pool_create(const char *name,
pool->s.pool_base_addr = odp_shm_addr(shm);
} else {
odp_shm_info_t info;
- if (odp_shm_info(shm, &info) != 0 ||
- info.size < pool->s.pool_size) {
+ int ret;
+
+ ret = odp_shm_info(shm, &info);
+ if (ret != 0 || info.size < pool->s.pool_size) {
+ ODP_ERR("shm info %d, info size %ld, pool size %ld\n",
+ ret, info.size, pool->s.pool_size);
POOL_UNLOCK(&pool->s.lock);
return ODP_POOL_INVALID;
}
@@ -315,6 +326,7 @@ odp_pool_t _pool_create(const char *name,
((size_t)page_addr -
(size_t)pool->s.pool_base_addr)) {
POOL_UNLOCK(&pool->s.lock);
+ ODP_ERR("small shm size\n");
return ODP_POOL_INVALID;
}
pool->s.pool_base_addr = page_addr;
@@ -15,6 +15,7 @@
#include <odp/debug.h>
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
+#include <odp_shm_internal.h>
#include <odp/config.h>
#include <unistd.h>
@@ -189,7 +190,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
int fd = -1;
int map_flag = MAP_SHARED;
/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
- int oflag = O_RDWR | O_CREAT | O_TRUNC;
+ int oflag = O_RDWR;
uint64_t alloc_size;
uint64_t page_sz, huge_sz;
#ifdef MAP_HUGETLB
@@ -207,7 +208,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
#endif
- if (flags & ODP_SHM_PROC) {
+ if (flags & ODP_SHM_PROC)
+ oflag |= O_CREAT | O_TRUNC;
+
+ if (flags & (ODP_SHM_PROC | _ODP_SHM_PROC_NOCREAT)) {
+ need_huge_page = 0;
+
/* Creates a file to /dev/shm */
fd = shm_open(name, oflag,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org> --- helper/include/odp/helper/ring.h | 7 +- helper/ring.c | 3 +- platform/linux-generic/Makefile.am | 3 + .../linux-generic/include/odp_buffer_internal.h | 3 + .../linux-generic/include/odp_packet_io_internal.h | 30 ++ .../include/odp_packet_io_ipc_internal.h | 51 ++ platform/linux-generic/include/odp_shm_internal.h | 20 + platform/linux-generic/odp_packet_io.c | 19 +- platform/linux-generic/odp_packet_io_ipc.c | 594 +++++++++++++++++++++ platform/linux-generic/odp_pool.c | 20 +- platform/linux-generic/odp_shared_memory.c | 10 +- 11 files changed, 749 insertions(+), 11 deletions(-) create mode 100644 platform/linux-generic/include/odp_packet_io_ipc_internal.h create mode 100644 platform/linux-generic/include/odp_shm_internal.h create mode 100644 platform/linux-generic/odp_packet_io_ipc.c