@@ -251,6 +251,7 @@ AC_CONFIG_FILES([Makefile
doc/Makefile
example/Makefile
example/generator/Makefile
+ example/ipc/Makefile
example/ipsec/Makefile
example/l2fwd/Makefile
example/packet/Makefile
@@ -1 +1 @@
-SUBDIRS = generator ipsec l2fwd packet timer
+SUBDIRS = generator ipc ipsec l2fwd packet timer
new file mode 100644
@@ -0,0 +1 @@
+odp_ipc
new file mode 100644
@@ -0,0 +1,7 @@
+include $(top_srcdir)/example/Makefile.inc
+
+bin_PROGRAMS = odp_ipc
+odp_ipc_LDFLAGS = $(AM_LDFLAGS) -static
+odp_ipc_CFLAGS = $(AM_CFLAGS) -I${top_srcdir}/example
+
+dist_odp_ipc_SOURCES = odp_ipc.c
new file mode 100644
@@ -0,0 +1,425 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+/**
+ * @file
+ *
+ * @example odp_ipc.c ODP IPC example application
+ */
+
+#include <stdlib.h>
+#include <string.h>
+#include <getopt.h>
+#include <unistd.h>
+
+#include <example_debug.h>
+
+#include <odp.h>
+#include <odp/helper/linux.h>
+#include <odp/helper/eth.h>
+#include <odp/helper/ip.h>
+
+/** @def SHM_PKT_POOL_SIZE
+ * @brief Size of the shared memory block
+ */
+#define SHM_PKT_POOL_SIZE (512*2048)
+
+/** @def SHM_PKT_POOL_BUF_SIZE
+ * @brief Buffer size of the packet pool buffer
+ */
+#define SHM_PKT_POOL_BUF_SIZE 1856
+
+/** @def MAX_PKT_BURST
+ * @brief Maximum number of packets in a burst
+ */
+#define MAX_PKT_BURST 16
+
+/** Get rid of path in filename - only for unix-type paths using '/' */
+#define NO_PATH(file_name) (strrchr((file_name), '/') ? \
+ strrchr((file_name), '/') + 1 : (file_name))
+
+/** Application argument */
+static char *pktio_name;
+
+/* helper funcs */
+static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len);
+static void parse_args(int argc, char *argv[]);
+static void print_info(char *progname);
+static void usage(char *progname);
+
+/**
+ * Create a pktio handle for the given device and also open the companion IPC pktio.
+ *
+ * @param dev Name of device to open
+ * @param pool Pool to associate with device for packet RX/TX
+ *
+ * @return The handle of the created pktio object.
+ * @retval ODP_PKTIO_INVALID if the create fails.
+ */
+static odp_pktio_t create_pktio(const char *dev, odp_pool_t pool)
+{
+ odp_pktio_t pktio;
+ char inq_name[ODP_QUEUE_NAME_LEN];
+ odp_pktio_t ipc_pktio;
+
+ /* Open a packet IO instance */
+ pktio = odp_pktio_open(dev, pool);
+ if (pktio == ODP_PKTIO_INVALID)
+ EXAMPLE_ABORT("Error: pktio create failed for %s\n", dev);
+
+ snprintf(inq_name, sizeof(inq_name), "%" PRIu64 "-pktio_inq_def",
+ odp_pktio_to_u64(pktio));
+ inq_name[ODP_QUEUE_NAME_LEN - 1] = '\0';
+
+ printf("pid: %d, create IPC pktio\n", getpid());
+ ipc_pktio = odp_pktio_open("ipc_pktio", pool);
+ if (ipc_pktio == ODP_PKTIO_INVALID) {
+ EXAMPLE_ABORT("Error: ipc pktio create failed.\n");
+ }
+
+ return pktio;
+}
+
+/**
+ * Packet IO loopback worker thread using bursts from/to IO resources
+ *
+ * @param arg thread arguments of type 'thread_args_t *'
+ */
+static void *pktio_ifburst_thread(void *arg ODP_UNUSED)
+{
+ int thr;
+ odp_pktio_t pktio;
+ int pkts, pkts_ok;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ odp_pktio_t ipc_pktio;
+
+ thr = odp_thread_id();
+
+ pktio = odp_pktio_lookup(pktio_name);
+ if (pktio == ODP_PKTIO_INVALID) {
+ EXAMPLE_ERR(" [%02i] Error: lookup of pktio %s failed\n",
+ thr, pktio_name);
+ return NULL;
+ }
+
+ printf(" [%02i] looked up pktio:%02" PRIu64 ", burst mode\n",
+ thr, odp_pktio_to_u64(pktio));
+
+ ipc_pktio = odp_pktio_lookup("ipc_pktio");
+ if (ipc_pktio == ODP_PKTIO_INVALID) {
+ EXAMPLE_ERR(" [%02i] Error: lookup of pktio %s failed\n",
+ thr, "ipc_pktio");
+ return NULL;
+ }
+ printf(" [%02i] looked up ipc_pktio:%02" PRIu64 ", burst mode\n",
+ thr, odp_pktio_to_u64(ipc_pktio));
+
+ /* Loop packets */
+ for (;;) {
+ int i;
+
+ pkts = odp_pktio_recv(pktio, pkt_tbl, MAX_PKT_BURST);
+ if (pkts > 0) {
+ /* Drop packets with errors */
+ pkts_ok = drop_err_pkts(pkt_tbl, pkts);
+ if (pkts_ok > 0) {
+ time_t tm = time(NULL);
+ char *tm_str = ctime(&tm);
+ for (i = 0; i < pkts_ok; i++) {
+ odp_packet_copydata_in(pkt_tbl[i], 0, strlen(tm_str), tm_str);
+ odp_packet_copydata_in(pkt_tbl[i], strlen(tm_str), 1, "\0");
+ }
+
+ odp_pktio_send(ipc_pktio, pkt_tbl, pkts_ok);
+ printf("---main pid %d: ipcsend %d pkts, size %d, data: %s\n", getpid(), pkts_ok, odp_packet_len(pkt_tbl[0]), tm_str);
+ }
+
+ }
+ pkts = odp_pktio_recv(ipc_pktio, pkt_tbl, MAX_PKT_BURST);
+ if (pkts > 0) {
+ for (i = 0; i < pkts; i++) {
+ char *b = malloc(odp_packet_len(pkt_tbl[i]));
+ odp_packet_copydata_out(pkt_tbl[i], 0, odp_packet_len(pkt_tbl[i]), b);
+ printf("---main pid %d: ipcsrecv: size %d, data: %s\n", getpid(), odp_packet_len(pkt_tbl[i]), b);
+ free(b);
+ odp_packet_free(pkt_tbl[i]);
+ }
+ }
+
+ }
+
+/* unreachable */
+ return NULL;
+}
+
+
+static int ipc_second_process(void)
+{
+ odp_pktio_t pktio;
+ odp_packet_t pkt_tbl[MAX_PKT_BURST];
+ int i;
+ int pkts;
+
+ /* Linux shared memory can already contain objects with names that the
+ * second process might try to connect to; they can even be left over
+ * from an interrupted run of this application. Later, magic numbers
+ * may be added to each IPC object in linux-generic. HW platforms
+ * should not have that problem. So just wait a little while the
+ * master process creates all IPC objects before connecting to them.
+ */
+ sleep(3);
+
+ /* Look up the packet I/O in IPC shared memory
+ * and link it to the local pool. */
+ while (1) {
+ pktio = odp_pktio_open("ipc_pktio", NULL);
+ if (pktio == ODP_PKTIO_INVALID) {
+ sleep(1);
+ printf("%s() pid %d: looking for ipc_pktio\n", __func__, getpid());
+ continue;
+ }
+ break;
+ }
+
+ for (;;) {
+ pkts = odp_pktio_recv(pktio, pkt_tbl, MAX_PKT_BURST);
+ if (pkts > 0) {
+ for (i = 0; i < pkts; i++) {
+ char *b = malloc(odp_packet_len(pkt_tbl[i]));
+
+ odp_packet_copydata_out(pkt_tbl[i], 0, odp_packet_len(pkt_tbl[i]), b);
+
+ printf("++++%s: pid %d, got packet %p, size %d, data: %s\n",
+ __func__, getpid(), (void*)pkt_tbl[i],
+ odp_packet_len(pkt_tbl[i]), b);
+ free(b);
+ }
+
+ /* Echo the whole burst back to the master process. */
+ odp_pktio_send(pktio, pkt_tbl, pkts);
+ } else {
+ /* No need to load cpu in example app.*/
+ sleep(1);
+ }
+ }
+
+ EXAMPLE_ERR("Unexpected close.");
+ return 0;
+}
+
+/**
+ * ODP packet example main function
+ */
+int main(int argc, char *argv[])
+{
+ odp_pool_t pool;
+ odp_pool_param_t params;
+ int f;
+
+ /* Parse and store the application arguments */
+ parse_args(argc, argv);
+
+ f = fork();
+ if (f) {
+ printf("Process one pid: %d\n", getpid());
+ /* Init ODP before calling anything else */
+ if (odp_init_global(NULL, NULL)) {
+ EXAMPLE_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local()) {
+ EXAMPLE_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ ipc_second_process();
+ } else {
+ printf("Process two pid: %d\n", getpid());
+ }
+
+
+ /* Init ODP before calling anything else */
+ if (odp_init_global(NULL, NULL)) {
+ EXAMPLE_ERR("Error: ODP global init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Init this thread */
+ if (odp_init_local()) {
+ EXAMPLE_ERR("Error: ODP local init failed.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ /* Print both system and application information */
+ print_info(NO_PATH(argv[0]));
+
+ /* Create packet pool */
+ memset(&params, 0, sizeof(params));
+ params.pkt.seg_len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.len = SHM_PKT_POOL_BUF_SIZE;
+ params.pkt.num = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE;
+ params.type = ODP_POOL_PACKET;
+ params.shm_flags = ODP_SHM_PROC;
+
+ EXAMPLE_DBG("Main proc: packet pool create: "
+ "num %d, len %d, seg_len %d\n",
+ params.pkt.num, params.pkt.len, params.pkt.seg_len);
+
+ pool = odp_pool_create("packet_pool", ODP_SHM_NULL, &params);
+
+ if (pool == ODP_POOL_INVALID) {
+ EXAMPLE_ERR("Error: packet pool create failed.\n");
+ exit(EXIT_FAILURE);
+ }
+ odp_pool_print(pool);
+
+ create_pktio(pktio_name, pool);
+
+ pktio_ifburst_thread(NULL);
+
+ return 0;
+}
+
+/**
+ * Drop packets which input parsing marked as containing errors.
+ *
+ * Frees packets with error and modifies pkt_tbl[] to only contain packets with
+ * no detected errors.
+ *
+ * @param pkt_tbl Array of packets
+ * @param len Length of pkt_tbl[]
+ *
+ * @return Number of packets with no detected error
+ */
+static int drop_err_pkts(odp_packet_t pkt_tbl[], unsigned len)
+{
+ odp_packet_t pkt;
+ unsigned pkt_cnt = len;
+ unsigned i, j;
+
+ for (i = 0, j = 0; i < len; ++i) {
+ pkt = pkt_tbl[i];
+
+ if (odp_unlikely(odp_packet_has_error(pkt))) {
+ odp_packet_free(pkt); /* Drop */
+ pkt_cnt--;
+ } else if (odp_unlikely(i != j++)) {
+ pkt_tbl[j-1] = pkt;
+ }
+ }
+
+ return pkt_cnt;
+}
+
+/**
+ * Parse and store the command line arguments
+ *
+ * @param argc argument count
+ * @param argv[] argument vector
+ */
+static void parse_args(int argc, char *argv[])
+{
+ int opt;
+ int long_index;
+ size_t len;
+ static struct option longopts[] = {
+ {"interface", required_argument, NULL, 'i'}, /* return 'i' */
+ {"help", no_argument, NULL, 'h'}, /* return 'h' */
+ {NULL, 0, NULL, 0}
+ };
+
+ while (1) {
+ opt = getopt_long(argc, argv, "i:h",
+ longopts, &long_index);
+
+ if (opt == -1)
+ break; /* No more options */
+
+ switch (opt) {
+ case 'i':
+ len = strlen(optarg);
+ if (len == 0) {
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ len += 1; /* add room for '\0' */
+
+ pktio_name = malloc(len);
+ if (pktio_name == NULL) {
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+ strcpy(pktio_name, optarg);
+
+ break;
+ case 'h':
+ usage(argv[0]);
+ exit(EXIT_SUCCESS);
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ if (pktio_name == NULL) {
+ usage(argv[0]);
+ exit(EXIT_FAILURE);
+ }
+
+ optind = 1; /* reset 'extern optind' from the getopt lib */
+}
+
+/**
+ * Print system and application info
+ */
+static void print_info(char *progname)
+{
+ printf("\n"
+ "ODP system info\n"
+ "---------------\n"
+ "ODP API version: %s\n"
+ "CPU model: %s\n"
+ "CPU freq (hz): %"PRIu64"\n"
+ "Cache line size: %i\n"
+ "CPU count: %i\n"
+ "\n",
+ odp_version_api_str(), odp_sys_cpu_model_str(), odp_sys_cpu_hz(),
+ odp_sys_cache_line_size(), odp_cpu_count());
+
+ printf("Running ODP appl: \"%s\"\n"
+ "-----------------\n"
+ "Using IF: %s\n",
+ progname, pktio_name);
+ printf("\n\n");
+ fflush(NULL);
+}
+
+/**
+ * Print usage information
+ */
+static void usage(char *progname)
+{
+ printf("\n"
+ "Usage: %s OPTIONS\n"
+ " E.g. %s -i eth0\n"
+ "\n"
+ "OpenDataPlane example application.\n"
+ "\n"
+ "Mandatory OPTIONS:\n"
+ " -i, --interface Eth interface\n"
+ "\n"
+ "Optional OPTIONS\n"
+ " -h, --help Display help and exit.\n"
+ " environment variables: ODP_PKTIO_DISABLE_SOCKET_MMAP\n"
+ " ODP_PKTIO_DISABLE_SOCKET_MMSG\n"
+ " ODP_PKTIO_DISABLE_SOCKET_BASIC\n"
+ " can be used to advanced pkt I/O selection for linux-generic\n"
+ "\n", NO_PATH(progname), NO_PATH(progname)
+ );
+}
@@ -158,6 +158,8 @@ typedef struct odph_ring {
#define ODPH_RING_F_SP_ENQ 0x0001 /* The default enqueue is "single-producer".*/
#define ODPH_RING_F_SC_DEQ 0x0002 /* The default dequeue is "single-consumer".*/
+#define ODPH_RING_SHM_PROC 0x0004 /* If set, the ring is visible to other
+ processes. Default is visible only to threads. */
#define ODPH_RING_QUOT_EXCEED (1 << 31) /* Quota exceed for burst ops */
#define ODPH_RING_SZ_MASK (unsigned)(0x0fffffff) /* Ring size mask */
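For context, a minimal sketch of how a process-shared ring could be created and
used with the new flag. The ring name and counts below are made up for
illustration; the calls follow the odph_ring_* signatures used elsewhere in
this patch.

    #include <odp/helper/ring.h>

    static int demo_proc_ring(void)
    {
            odph_ring_t *r;
            void *bufs[8] = { NULL };

            /* Place the ring in process-shared memory so that a second
             * process can attach to the same object; count must be a
             * power of two. */
            r = odph_ring_create("demo_ring", 1024, ODPH_RING_SHM_PROC);
            if (r == NULL)
                    return -1;

            /* Multi-producer enqueue / multi-consumer dequeue, the same
             * calls the IPC pktio below relies on. */
            if (odph_ring_mp_enqueue_bulk(r, bufs, 8) != 0)
                    return -1;

            return odph_ring_mc_dequeue_bulk(r, bufs, 8);
    }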
@@ -83,6 +83,7 @@ typedef struct odp_pool_param_t {
};
int type; /**< Pool type */
+ uint32_t shm_flags; /**< Flags to odp_shm_reserve() */
} odp_pool_param_t;
/** Packet pool*/
@@ -49,6 +49,7 @@ extern "C" {
/* Share level */
#define ODP_SHM_SW_ONLY 0x1 /**< Application SW only, no HW access */
#define ODP_SHM_PROC 0x2 /**< Share with external processes */
+#define ODP_SHM_PROC_NOCREAT 0x4 /**< Attach to existing block, do not create */
/**
* Shared memory block info
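Similarly, a hedged sketch of the intended ODP_SHM_PROC / ODP_SHM_PROC_NOCREAT
pairing; the block name and size are illustrative and mirror what
_ipc_map_remote_pool() does later in this patch.

    #include <odp.h>

    /* Process B side: attach to a block that process A reserved earlier
     * with ODP_SHM_PROC. ODP_SHM_PROC_NOCREAT opens the existing /dev/shm
     * object without creating or truncating it. */
    static void *demo_attach_remote_block(void)
    {
            odp_shm_t shm;

            shm = odp_shm_reserve("demo_block", 64 * 1024,
                                  ODP_CACHE_LINE_SIZE,
                                  ODP_SHM_PROC_NOCREAT);
            if (shm == ODP_SHM_INVALID)
                    return NULL;

            return odp_shm_addr(shm);
    }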
@@ -125,7 +126,6 @@ void *odp_shm_addr(odp_shm_t shm);
*/
int odp_shm_info(odp_shm_t shm, odp_shm_info_t *info);
-
/**
* Print all shared memory blocks
*/
@@ -148,6 +148,7 @@ __LIB__libodp_la_SOURCES = \
odp_errno.c \
odp_event.c \
odp_init.c \
+ odp_ipc.c \
odp_impl.c \
odp_linux.c \
odp_packet.c \
@@ -129,6 +129,8 @@ typedef struct odp_buffer_hdr_t {
size_t udata_size; /* size of user metadata */
uint32_t segcount; /* segment count */
uint32_t segsize; /* segment size */
+ /* An IPC-mapped process cannot follow pointers; offsets have to be used */
+ uint64_t ipc_addr_offset[ODP_BUFFER_MAX_SEG];
void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */
} odp_buffer_hdr_t;
new file mode 100644
@@ -0,0 +1,47 @@
+/* Copyright (c) 2015, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/packet_io.h>
+#include <odp_packet_io_internal.h>
+#include <odp/packet.h>
+#include <odp_packet_internal.h>
+#include <odp_internal.h>
+#include <odp/shared_memory.h>
+
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+
+/* IPC packet I/O over odph_ring */
+#include <odp/helper/ring.h>
+
+#define PKTIO_IPC_ENTRIES 4096 /**< number of ODP buffers in the
+ ODP ring queue */
+
+/* This struct is exported to shared memory so that two processes can find
+ * each other.
+ */
+struct pktio_info {
+ char remote_pool_name[30];
+ int shm_pool_num;
+ size_t shm_pkt_pool_size;
+ size_t shm_pkt_size;
+ size_t mdata_offset; /* offset from shared memory block start to pool_mdata_addr */
+ struct {
+ size_t mdata_offset;
+ char pool_name[30];
+ } slave;
+
+} __packed;
+
+int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev,
+ odp_pool_t pool);
+
+int ipc_pktio_recv(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned len);
+
+int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned len);
@@ -22,6 +22,7 @@ extern "C" {
#include <odp_packet_socket.h>
#include <odp_classification_datamodel.h>
#include <odp_align_internal.h>
+#include <odp/helper/ring.h>
#include <odp/config.h>
#include <odp/hints.h>
@@ -35,6 +36,8 @@ typedef enum {
ODP_PKTIO_TYPE_SOCKET_MMSG,
ODP_PKTIO_TYPE_SOCKET_MMAP,
ODP_PKTIO_TYPE_LOOPBACK,
+ ODP_PKTIO_TYPE_IPC,
+ ODP_PKTIO_TYPE_IPC_SLAVE,
} odp_pktio_type_t;
struct pktio_entry {
@@ -50,6 +53,18 @@ struct pktio_entry {
char name[IFNAMSIZ]; /**< name of pktio provided to
pktio_open() */
odp_bool_t promisc; /**< promiscuous mode state */
+ odph_ring_t *ipc_r; /**< ODP ring for IPC packet indexes
+ transmitted to shared memory */
+ odph_ring_t *ipc_p; /**< ODP ring for IPC packet indexes
+ already processed by the remote process */
+ void *ipc_pool_base; /**< IPC Remote pool base addr */
+ void *ipc_pool_mdata_base; /**< IPC Remote pool mdata base addr */
+ uint64_t ipc_pkt_size; /**< IPC: packet size in remote pool */
+
+ odph_ring_t *ipc_r_slave;
+ odph_ring_t *ipc_p_slave;
+
+ odp_pool_t ipc_pool; /**< IPC: Pool of main process to alloc packets */
};
typedef union {
@@ -22,6 +22,7 @@ extern "C" {
#include <odp/align.h>
#include <odp_align_internal.h>
#include <odp/pool.h>
+#include <odp_pool_internal.h>
#include <odp_buffer_internal.h>
#include <odp/hints.h>
#include <odp/config.h>
@@ -8,6 +8,7 @@
#include <odp_internal.h>
#include <odp/debug.h>
#include <odp_debug_internal.h>
+#include <odp/helper/ring.h>
struct odp_global_data_s odp_global_data;
@@ -71,6 +72,11 @@ int odp_init_global(odp_init_t *params,
return -1;
}
+ /* For linux-generic the IPC queue is implemented entirely in
+ * software using odp_ring.
+ */
+ odph_ring_tailq_init();
+
return 0;
}
new file mode 100644
@@ -0,0 +1,597 @@
+
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp_ipc.h>
+#include <odp_debug_internal.h>
+#include <odp_packet_io_internal.h>
+#include <odp_spin_internal.h>
+#include <odp/system_info.h>
+
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+static void *_ipc_map_remote_pool(const char *name, size_t size);
+
+static const char *_ipc_odp_buffer_pool_shm_name(odp_pool_t pool_hdl)
+{
+ pool_entry_t *pool;
+ uint32_t pool_id;
+ odp_shm_t shm;
+ odp_shm_info_t info;
+
+ pool_id = pool_handle_to_index(pool_hdl);
+ pool = get_pool_entry(pool_id);
+ shm = pool->s.pool_shm;
+
+ odp_shm_info(shm, &info);
+
+ return info.name;
+}
+
+#if 0
+static uint64_t odp_buffer_pool_get_pktsize(odp_pool_t pool_hdl)
+{
+ pool_entry_t *pool;
+ uint32_t pool_id;
+
+ pool_id = pool_handle_to_index(pool_hdl);
+ pool = get_pool_entry(pool_id);
+
+ return pool->s.blk_size;
+}
+
+static uint64_t odp_buffer_pool_get_size(odp_pool_t pool_hdl)
+{
+ pool_entry_t *pool;
+ uint32_t pool_id;
+
+ pool_id = pool_handle_to_index(pool_hdl);
+ pool = get_pool_entry(pool_id);
+
+ return pool->s.pool_size;
+}
+#endif
+
+
+/**
+* Look up a shared memory object.
+*
+* @param name name of shm object
+*
+* @return 0 on success, otherwise non-zero
+*/
+static int _odp_shm_lookup_ipc(const char *name)
+{
+ int shm;
+
+ shm = shm_open(name, O_RDWR, S_IRUSR | S_IWUSR);
+ if (shm == -1) {
+ ODP_DBG("IPC shm_open for %s not found\n", name);
+ return -1;
+ }
+ close(shm);
+ return 0;
+}
+
+static struct pktio_info *_ipc_map_pool_info(const char *pool_name, int flag)
+{
+ char *name;
+ struct pktio_info *pinfo;
+
+ /* Create info about remote pktio */
+ name = (char *)malloc(strlen(pool_name) + sizeof("_info"));
+ memcpy(name, pool_name, strlen(pool_name));
+ memcpy(name + strlen(pool_name), "_info", sizeof("_info"));
+ odp_shm_t shm = odp_shm_reserve(name, sizeof(struct pktio_info),
+ ODP_CACHE_LINE_SIZE,
+ flag);
+ if (ODP_SHM_INVALID == shm)
+ ODP_ABORT("unable to reserve memory for shm info");
+ free(name);
+ pinfo = odp_shm_addr(shm);
+ if (flag != ODP_SHM_PROC_NOCREAT)
+ memset(pinfo->remote_pool_name, 0, 30);
+ return pinfo;
+}
+
+static int _ipc_pktio_init_master(pktio_entry_t *pktio_entry, const char *dev,
+ odp_pool_t pool)
+{
+ char ipc_shm_name[ODPH_RING_NAMESIZE];
+ pool_entry_t *pool_entry;
+ uint32_t pool_id;
+ void *ipc_pool_base;
+ struct pktio_info *pinfo;
+ const char *pool_name;
+
+ pool_id = pool_handle_to_index(pool);
+ pool_entry = get_pool_entry(pool_id);
+
+ /* Generate a name in shm, e.g. ipc_pktio_r, for the
+ * to-be-processed packets ring.
+ */
+ memset(ipc_shm_name, 0, ODPH_RING_NAMESIZE);
+ memcpy(ipc_shm_name, dev, strlen(dev));
+ memcpy(ipc_shm_name + strlen(dev), "_r", 2);
+
+ pktio_entry->s.ipc_r = odph_ring_create(ipc_shm_name,
+ PKTIO_IPC_ENTRIES,
+ ODPH_RING_SHM_PROC);
+ if (!pktio_entry->s.ipc_r) {
+ ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ getpid(), ipc_shm_name);
+ return -1;
+ }
+ ODP_DBG("Created IPC ring: %s, count %d, free %d\n", ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r),
+ odph_ring_free_count(pktio_entry->s.ipc_r));
+
+ /* generate name in shm like ipc_pktio_p for
+ * already processed packets
+ */
+ memcpy(ipc_shm_name + strlen(dev), "_p", 2);
+
+ pktio_entry->s.ipc_p = odph_ring_create(ipc_shm_name,
+ PKTIO_IPC_ENTRIES,
+ ODPH_RING_SHM_PROC);
+ if (!pktio_entry->s.ipc_p) {
+ ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ getpid(), ipc_shm_name);
+ return -1;
+ }
+
+ ODP_DBG("Created IPC ring: %s, count %d, free %d\n", ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p),
+ odph_ring_free_count(pktio_entry->s.ipc_p));
+
+
+ memcpy(ipc_shm_name + strlen(dev), "_slave_r", 8);
+ pktio_entry->s.ipc_r_slave = odph_ring_create(ipc_shm_name,
+ PKTIO_IPC_ENTRIES,
+ ODPH_RING_SHM_PROC);
+ if (!pktio_entry->s.ipc_r_slave) {
+ ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ getpid(), ipc_shm_name);
+ return -1;
+ }
+ ODP_DBG("Created IPC ring: %s, count %d, free %d\n", ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave),
+ odph_ring_free_count(pktio_entry->s.ipc_r_slave));
+
+
+ memcpy(ipc_shm_name + strlen(dev), "_slave_p", 8);
+ pktio_entry->s.ipc_p_slave = odph_ring_create(ipc_shm_name,
+ PKTIO_IPC_ENTRIES,
+ ODPH_RING_SHM_PROC);
+ if (!pktio_entry->s.ipc_p_slave) {
+ ODP_DBG("pid %d unable to create ipc ring %s name\n",
+ getpid(), ipc_shm_name);
+ return -1;
+ }
+ ODP_DBG("Created IPC ring: %s, count %d, free %d\n", ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave),
+ odph_ring_free_count(pktio_entry->s.ipc_p_slave));
+
+ /* Memory to store information about exported pool */
+ pinfo = _ipc_map_pool_info(dev, ODP_SHM_PROC);
+
+ /* Set up pool name for remote info */
+ pool_name = _ipc_odp_buffer_pool_shm_name(pool);
+ memcpy(pinfo->remote_pool_name, pool_name, strlen(pool_name));
+ pinfo->shm_pkt_pool_size = pool_entry->s.pool_size;
+ pinfo->shm_pool_num = pool_entry->s.buf_num;
+ pinfo->shm_pkt_size = pool_entry->s.seg_size;
+ pinfo->mdata_offset = pool_entry->s.pool_mdata_addr - pool_entry->s.pool_base_addr;
+ pinfo->slave.mdata_offset = 0;
+ ODP_DBG("Master waiting for slave to be connected now..\n");
+
+ /* Wait for remote process to export his pool. */
+ ODP_DBG("Wait for second process set mdata_offset...\n");
+ while (pinfo->slave.mdata_offset == 0) {
+ odp_spin();
+ }
+ ODP_DBG("Wait for second process set mdata_offset... DONE.\n");
+
+
+ while (1) {
+ int ret = _odp_shm_lookup_ipc(pinfo->slave.pool_name);
+ if (!ret)
+ break;
+ ODP_DBG("Master looking for %s\n", pinfo->slave.pool_name);
+ sleep(1);
+ }
+
+ ipc_pool_base = _ipc_map_remote_pool(pinfo->slave.pool_name, pinfo->shm_pkt_pool_size);
+ pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + pinfo->slave.mdata_offset;
+
+ pktio_entry->s.ipc_pool = pool;
+
+ return 0;
+}
+
+static odp_pool_t _ipc_odp_alloc_and_create_pool_slave(struct pktio_info *pinfo)
+{
+ odp_pool_t pool;
+ char *pool_name;
+ odp_pool_param_t params;
+ int num = pinfo->shm_pool_num;
+ uint64_t buf_size = pinfo->shm_pkt_size;
+ pool_entry_t *pool_entry;
+
+ pool_name = calloc(1, strlen(pinfo->remote_pool_name) + sizeof("ipc_pool_slave_"));
+ sprintf(pool_name, "ipc_pool_slave_%s", pinfo->remote_pool_name);
+
+ ODP_DBG("slave uses pool %s\n", pool_name);
+
+ memset(¶ms, 0, sizeof(params));
+ params.pkt.num = num;
+ params.pkt.len = buf_size;
+ params.pkt.seg_len = buf_size;
+ params.type = ODP_POOL_PACKET;
+ params.shm_flags = ODP_SHM_PROC;
+
+ pool = odp_pool_create(pool_name, ODP_SHM_NULL, ¶ms);
+ if (pool == ODP_POOL_INVALID)
+ ODP_ABORT("Error: packet pool create failed.\n"
+ "num %d, len %d, seg_len %d\n",
+ params.pkt.num, params.pkt.len, params.pkt.seg_len);
+
+ /* Export info so that master can connect to that pool*/
+ snprintf(pinfo->slave.pool_name, 30, "%s", pool_name);
+ pool_entry = odp_pool_to_entry(pool);
+ pinfo->slave.mdata_offset = pool_entry->s.pool_mdata_addr - pool_entry->s.pool_base_addr;
+ free(pool_name);
+
+ return pool;
+}
+
+static void *_ipc_map_remote_pool(const char *name, size_t size)
+{
+ odp_shm_t shm;
+
+ ODP_DBG("Mapping remote pool %s, size %ld\n", name, size);
+ shm = odp_shm_reserve(name,
+ size,
+ ODP_CACHE_LINE_SIZE,
+ ODP_SHM_PROC_NOCREAT);
+ if (shm == ODP_SHM_INVALID)
+ ODP_ABORT("unable map %s\n", name);
+ return odp_shm_addr(shm);
+}
+
+static void *_ipc_shm_map(char *name, size_t size, int timeout)
+{
+ odp_shm_t shm;
+ int ret;
+
+ while (1) {
+ ret = _odp_shm_lookup_ipc(name);
+ if (!ret)
+ break;
+ ODP_DBG("Waiting for %s\n", name);
+ if (timeout <= 0)
+ return NULL;
+ timeout--;
+ sleep(1);
+ }
+
+ shm = odp_shm_reserve(name, size,
+ ODP_CACHE_LINE_SIZE,
+ ODP_SHM_PROC_NOCREAT);
+ if (ODP_SHM_INVALID == shm)
+ ODP_ABORT("unable to map: %s\n", name);
+
+ return odp_shm_addr(shm);
+}
+
+static int _ipc_pktio_init_slave(const char *dev, pktio_entry_t *pktio_entry)
+{
+ int ret = -1;
+ char ipc_shm_name[ODPH_RING_NAMESIZE];
+ size_t ring_size = PKTIO_IPC_ENTRIES * sizeof(void *) +
+ sizeof(odph_ring_t);
+ struct pktio_info *pinfo;
+ void *ipc_pool_base;
+
+ memset(ipc_shm_name, 0, ODPH_RING_NAMESIZE);
+ memcpy(ipc_shm_name, dev, strlen(dev));
+
+ memcpy(ipc_shm_name + strlen(dev), "_r", 2);
+ pktio_entry->s.ipc_r = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc_r) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto error;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r),
+ odph_ring_free_count(pktio_entry->s.ipc_r));
+
+
+ memcpy(ipc_shm_name + strlen(dev), "_p", 2);
+ pktio_entry->s.ipc_p = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc_p) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto error;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p),
+ odph_ring_free_count(pktio_entry->s.ipc_p));
+
+ memcpy(ipc_shm_name + strlen(dev), "_slave_r", 8);
+ pktio_entry->s.ipc_r_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc_r_slave) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto error;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_r_slave),
+ odph_ring_free_count(pktio_entry->s.ipc_r_slave));
+
+ memcpy(ipc_shm_name + strlen(dev), "_slave_p", 8);
+ pktio_entry->s.ipc_p_slave = _ipc_shm_map(ipc_shm_name, ring_size, 10);
+ if (!pktio_entry->s.ipc_p_slave) {
+ ODP_DBG("pid %d unable to find ipc ring %s name\n",
+ getpid(), dev);
+ goto error;
+ }
+ ODP_DBG("Connected IPC ring: %s, count %d, free %d\n",
+ ipc_shm_name, odph_ring_count(pktio_entry->s.ipc_p_slave),
+ odph_ring_free_count(pktio_entry->s.ipc_p_slave));
+
+
+ /* Get info about remote pool */
+ pinfo = _ipc_map_pool_info(dev, ODP_SHM_PROC_NOCREAT);
+
+ ipc_pool_base = _ipc_map_remote_pool(pinfo->remote_pool_name, pinfo->shm_pkt_pool_size);
+ pktio_entry->s.ipc_pool_mdata_base = (char *)ipc_pool_base + pinfo->mdata_offset;
+ pktio_entry->s.ipc_pkt_size = pinfo->shm_pkt_size;
+
+ /* @todo: To simplify the linux-generic implementation we create a
+ * pool for packets from the IPC queue. On receive the implementation
+ * copies packets to that pool. Later we can try to reuse the original
+ * pool without copying packets.
+ */
+ pktio_entry->s.ipc_pool = _ipc_odp_alloc_and_create_pool_slave(pinfo);
+
+ ret = 0;
+ ODP_DBG("%s OK.\n", __func__);
+error:
+ /* @todo free shm on error (API not implemented yet) */
+ return ret;
+}
+
+int ipc_pktio_init(pktio_entry_t *pktio_entry, const char *dev,
+ odp_pool_t pool)
+{
+ int ret;
+
+ /* If pool is zero we assume this is a slave process connecting
+ * to shared memory already created by the main process.
+ */
+ if (pool) {
+ pktio_entry->s.type = ODP_PKTIO_TYPE_IPC;
+ ret = _ipc_pktio_init_master(pktio_entry, dev, pool);
+ } else {
+ pktio_entry->s.type = ODP_PKTIO_TYPE_IPC_SLAVE;
+ ret = _ipc_pktio_init_slave(dev, pktio_entry);
+ }
+
+ return ret;
+}
+
+
+static inline void *_ipc_buffer_map(odp_buffer_hdr_t *buf,
+ uint32_t offset,
+ uint32_t *seglen,
+ uint32_t limit)
+{
+ int seg_index = offset / buf->segsize;
+ int seg_offset = offset % buf->segsize;
+ void *addr = (char *)buf - buf->ipc_addr_offset[seg_index];
+
+ if (seglen != NULL) {
+ uint32_t buf_left = limit - offset;
+ *seglen = seg_offset + buf_left <= buf->segsize ?
+ buf_left : buf->segsize - seg_offset;
+ }
+
+ return (void *)(seg_offset + (uint8_t *)addr);
+}
+
+
+static inline void *_ipc_packet_map(odp_packet_hdr_t *pkt_hdr,
+ uint32_t offset, uint32_t *seglen)
+{
+ if (offset > pkt_hdr->frame_len)
+ return NULL;
+
+ return _ipc_buffer_map(&pkt_hdr->buf_hdr,
+ pkt_hdr->headroom + offset, seglen,
+ pkt_hdr->headroom + pkt_hdr->frame_len);
+
+}
+
+int ipc_pktio_recv(pktio_entry_t *pktio_entry,
+ odp_packet_t pkt_table[], unsigned len)
+{
+ int pkts = 0;
+ int ret;
+ int i;
+ odph_ring_t *r;
+ odph_ring_t *r_p;
+ odp_packet_t remote_pkts[PKTIO_IPC_ENTRIES];
+ void **ipcbufs_p = (void *)&remote_pkts;
+ unsigned ring_len;
+ int p_free;
+
+ if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) {
+ r = pktio_entry->s.ipc_r_slave;
+ r_p = pktio_entry->s.ipc_p_slave;
+ } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) {
+ r = pktio_entry->s.ipc_r;
+ r_p = pktio_entry->s.ipc_p;
+ } else
+ ODP_ABORT("wrong type: %d\n", pktio_entry->s.type);
+
+ ring_len = odph_ring_count(r);
+
+ pkts = len;
+ if (len > ring_len)
+ pkts = ring_len;
+
+ /* Do not dequeue more than we can put to the producer ring */
+ p_free = odph_ring_free_count(r_p);
+ if (pkts > p_free)
+ pkts = p_free;
+
+ ret = odph_ring_mc_dequeue_bulk(r, ipcbufs_p, pkts);
+ if (ret != 0) {
+ ODP_DBG("error to dequeue no packets\n");
+ pkts = -1;
+ return pkts;
+ }
+
+ if (pkts == 0)
+ return 0;
+
+ for (i = 0; i < pkts; i++) {
+ odp_pool_t pool;
+ odp_packet_t pkt;
+ odp_packet_hdr_t *phdr;
+ odp_buffer_bits_t handle;
+ int idx; /* Remote packet handle encodes pool and index; we need only the index. */
+ void *pkt_data;
+ void *remote_pkt_data;
+
+ handle.handle = _odp_packet_to_buffer(remote_pkts[i]);
+ idx = handle.index;
+
+ /* Link to packet data. Up to this line we have zero-copy between
+ * processes; to keep this version simple we copy the packet here,
+ * which can be removed later with more advanced buffer management
+ * (ref counters).
+ */
+ phdr = (odp_packet_hdr_t *)((char *)pktio_entry->s.ipc_pool_mdata_base +
+ (idx * ODP_CACHE_LINE_SIZE)); /* reverse odp_buf_to_hdr() */
+
+ /* Allocate a new packet from the local pool. */
+ pool = pktio_entry->s.ipc_pool;
+ if (odp_unlikely(pool == ODP_POOL_INVALID))
+ ODP_ABORT("invalid pool");
+
+ pkt = odp_packet_alloc(pool, phdr->frame_len);
+ if (odp_unlikely(pkt == ODP_PACKET_INVALID)) {
+ /* The original pool might be smaller than PKTIO_IPC_ENTRIES.
+ * If a packet can not be allocated from the pool at this time,
+ * simply get it on the next recv() call.
+ */
+ pkts = i;
+ break;
+ }
+
+ /* Copy packet data. */
+ pkt_data = odp_packet_data(pkt);
+ if (odp_unlikely(pkt_data == NULL))
+ ODP_ABORT("unable to map pkt_data ipc_slave %d\n",
+ (ODP_PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.type));
+
+ remote_pkt_data = _ipc_packet_map(phdr, 0, NULL);
+ if (odp_unlikely(remote_pkt_data == NULL))
+ ODP_ABORT("unable to map remote_pkt_data, ipc_slave %d\n",
+ (ODP_PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.type));
+
+ /* FIXME: packet data is copied here */
+ memcpy(pkt_data, remote_pkt_data, phdr->frame_len);
+
+ /* Copy packets L2, L3 parsed offsets and size */
+ odp_packet_hdr(pkt)->l2_offset = phdr->l2_offset;
+ odp_packet_hdr(pkt)->l3_offset = phdr->l3_offset;
+ odp_packet_hdr(pkt)->l4_offset = phdr->l4_offset;
+ odp_packet_hdr(pkt)->payload_offset = phdr->payload_offset;
+
+ odp_packet_hdr(pkt)->vlan_s_tag = phdr->vlan_s_tag;
+ odp_packet_hdr(pkt)->vlan_c_tag = phdr->vlan_c_tag;
+ odp_packet_hdr(pkt)->l3_protocol = phdr->l3_protocol;
+ odp_packet_hdr(pkt)->l3_len = phdr->l3_len;
+
+ odp_packet_hdr(pkt)->frame_len = phdr->frame_len;
+ odp_packet_hdr(pkt)->headroom = phdr->headroom;
+ odp_packet_hdr(pkt)->tailroom = phdr->tailroom;
+ pkt_table[i] = pkt;
+ }
+
+ /* Now tell the other process that we no longer need those buffers. */
+ ret = odph_ring_mp_enqueue_bulk(r_p, ipcbufs_p, pkts);
+ if (ret != 0)
+ ODP_ABORT("ipc: odp_ring_mp_enqueue_bulk r_p fail\n");
+
+ return pkts;
+}
+
+int ipc_pktio_send(pktio_entry_t *pktio_entry, odp_packet_t pkt_table[],
+ unsigned len)
+{
+ odph_ring_t *r;
+ odph_ring_t *r_p;
+ void **rbuf_p;
+ int ret;
+ unsigned i;
+
+ if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC_SLAVE) {
+ r = pktio_entry->s.ipc_r_slave;
+ r_p = pktio_entry->s.ipc_p_slave;
+ } else if (pktio_entry->s.type == ODP_PKTIO_TYPE_IPC) {
+ r = pktio_entry->s.ipc_r;
+ r_p = pktio_entry->s.ipc_p;
+ } else
+ ODP_ABORT("wrong type: %d\n", pktio_entry->s.type);
+
+
+ /* Free already processed packets, if any */
+ {
+ unsigned complete_packets = odph_ring_count(r_p);
+ odp_packet_t r_p_pkts[PKTIO_IPC_ENTRIES];
+
+ if (complete_packets > 0) {
+ rbuf_p = (void *)&r_p_pkts;
+ ret = odph_ring_mc_dequeue_bulk(r_p, rbuf_p,
+ complete_packets);
+ if (ret == 0) {
+ for (i = 0; i < complete_packets; i++)
+ odp_buffer_free(_odp_packet_to_buffer(r_p_pkts[i]));
+ }
+ }
+ }
+
+ /* Wait while the second process takes packets from the ring. */
+ i = 3;
+ while (odph_ring_free_count(r) < len && i) {
+ i--;
+ sleep(1);
+ }
+
+ /* Put packets to ring to be processed in other process. */
+ for (i = 0; i < len; i++) {
+ int j;
+ odp_packet_t pkt = pkt_table[i];
+ rbuf_p = (void *)&pkt;
+ odp_packet_hdr_t *pkt_hdr = odp_packet_hdr(pkt);
+
+ /* buf_hdr.addr cannot be used directly in the remote process;
+ * convert it to an offset. */
+ for (j = 0; j < ODP_BUFFER_MAX_SEG; j++)
+ pkt_hdr->buf_hdr.ipc_addr_offset[j] = (char *)pkt_hdr -
+ (char*)pkt_hdr->buf_hdr.addr[j];
+
+ ret = odph_ring_mp_enqueue_bulk(r, rbuf_p, 1);
+ if (odp_unlikely(ret != 0)) {
+ ODP_ERR("pid %d odp_ring_mp_enqueue_bulk fail, ipc_slave %d, ret %d\n", getpid(),
+ (ODP_PKTIO_TYPE_IPC_SLAVE == pktio_entry->s.type), ret);
+ ODP_ERR("odp_ring_full: %d, odp_ring_count %d, odph_ring_free_count %d\n",
+ odph_ring_full(r), odph_ring_count(r), odph_ring_free_count(r));
+ }
+ }
+ return len;
+}
@@ -18,6 +18,7 @@
#include <odp_schedule_internal.h>
#include <odp_classification_internal.h>
#include <odp_debug_internal.h>
+#include <odp_ipc.h>
#include <string.h>
#include <sys/ioctl.h>
@@ -25,6 +26,15 @@
#include <ifaddrs.h>
#include <errno.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+/* IPC packet I/O over odph_ring */
+#include <odp/helper/ring.h>
+
+#define PKTIO_IPC_ENTRIES 4096 /**< number of ODP buffers in the
+ ODP ring queue */
+
/* MTU to be reported for the "loop" interface */
#define PKTIO_LOOP_MTU 1500
/* MAC address for the "loop" interface */
@@ -260,7 +270,12 @@ static odp_pktio_t setup_pktio_entry(const char *dev, odp_pool_t pool)
if (strcmp(dev, "loop") == 0)
ret = init_loop(pktio_entry, id);
- else
+ else if (!strncmp(dev, "ipc", 3)) {
+ ret = ipc_pktio_init(pktio_entry, dev, pool);
+ if (ret != 0)
+ ODP_ABORT("unable to init ipc for %s, pool %" PRIu64 "\n",
+ dev, pool);
+ } else
ret = init_socket(pktio_entry, dev, pool);
if (ret != 0) {
@@ -280,6 +295,10 @@ odp_pktio_t odp_pktio_open(const char *dev, odp_pool_t pool)
{
odp_pktio_t id;
+ /* no local table lookup for ipc case */
+ if (pool == NULL && !memcmp(dev, "ipc", 3))
+ goto no_local_lookup;
+
id = odp_pktio_lookup(dev);
if (id != ODP_PKTIO_INVALID) {
/* interface is already open */
@@ -287,6 +306,7 @@ odp_pktio_t odp_pktio_open(const char *dev, odp_pool_t pool)
return ODP_PKTIO_INVALID;
}
+no_local_lookup:
odp_spinlock_lock(&pktio_tbl->lock);
id = setup_pktio_entry(dev, pool);
odp_spinlock_unlock(&pktio_tbl->lock);
@@ -403,6 +423,10 @@ int odp_pktio_recv(odp_pktio_t id, odp_packet_t pkt_table[], int len)
case ODP_PKTIO_TYPE_LOOPBACK:
pkts = deq_loopback(pktio_entry, pkt_table, len);
break;
+ case ODP_PKTIO_TYPE_IPC_SLAVE:
+ case ODP_PKTIO_TYPE_IPC:
+ pkts = ipc_pktio_recv(pktio_entry, pkt_table, len);
+ break;
default:
pkts = -1;
break;
@@ -457,6 +481,10 @@ int odp_pktio_send(odp_pktio_t id, odp_packet_t pkt_table[], int len)
case ODP_PKTIO_TYPE_LOOPBACK:
pkts = enq_loopback(pktio_entry, pkt_table, len);
break;
+ case ODP_PKTIO_TYPE_IPC:
+ case ODP_PKTIO_TYPE_IPC_SLAVE:
+ pkts = ipc_pktio_send(pktio_entry, pkt_table, len);
+ break;
default:
pkts = -1;
}
@@ -219,8 +219,10 @@ odp_pool_t odp_pool_create(const char *name,
ODP_ALIGN_ROUNDUP(params->pkt.len, seg_len);
/* Reject create if pkt.len needs too many segments */
- if (blk_size / seg_len > ODP_BUFFER_MAX_SEG)
+ if (blk_size / seg_len > ODP_BUFFER_MAX_SEG) {
+ ODP_DBG("Reject create if pkt.len needs too many segments\n");
return ODP_POOL_INVALID;
+ }
buf_stride = sizeof(odp_packet_hdr_stride);
break;
@@ -237,8 +239,10 @@ odp_pool_t odp_pool_create(const char *name,
/* Validate requested number of buffers against addressable limits */
if (buf_num >
- (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
+ (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) {
+ ODP_DBG("Validate requested number of buffers against addressable limits\n");
return ODP_POOL_INVALID;
+ }
/* Find an unused buffer pool slot and iniitalize it as requested */
for (i = 0; i < ODP_CONFIG_POOLS; i++) {
@@ -290,7 +294,8 @@ odp_pool_t odp_pool_create(const char *name,
if (shm == ODP_SHM_NULL) {
shm = odp_shm_reserve(pool->s.name,
pool->s.pool_size,
- ODP_PAGE_SIZE, 0);
+ ODP_PAGE_SIZE,
+ params->shm_flags);
if (shm == ODP_SHM_INVALID) {
POOL_UNLOCK(&pool->s.lock);
return ODP_POOL_INVALID;
@@ -301,6 +306,8 @@ odp_pool_t odp_pool_create(const char *name,
if (odp_shm_info(shm, &info) != 0 ||
info.size < pool->s.pool_size) {
POOL_UNLOCK(&pool->s.lock);
+ ODP_DBG("shm info %d, info size %ld, pool size %ld\n",
+ odp_shm_info(shm, &info), info.size, pool->s.pool_size);
return ODP_POOL_INVALID;
}
pool->s.pool_base_addr = odp_shm_addr(shm);
@@ -312,6 +319,7 @@ odp_pool_t odp_pool_create(const char *name,
((size_t)page_addr -
(size_t)pool->s.pool_base_addr)) {
POOL_UNLOCK(&pool->s.lock);
+ ODP_DBG("wrong shm\n");
return ODP_POOL_INVALID;
}
pool->s.pool_base_addr = page_addr;
@@ -159,8 +159,14 @@ odph_ring_create(const char *name, unsigned count, unsigned flags)
char ring_name[ODPH_RING_NAMESIZE];
odph_ring_t *r;
size_t ring_size;
+ uint32_t shm_flag;
odp_shm_t shm;
+ if (flags & ODPH_RING_SHM_PROC)
+ shm_flag = ODP_SHM_PROC;
+ else
+ shm_flag = 0;
+
/* count must be a power of 2 */
if (!ODP_VAL_IS_POWER_2(count) || (count > ODPH_RING_SZ_MASK)) {
ODP_ERR("Requested size is invalid, must be power of 2, and do not exceed the size limit %u\n",
@@ -173,7 +179,8 @@ odph_ring_create(const char *name, unsigned count, unsigned flags)
odp_rwlock_write_lock(&qlock);
/* reserve a memory zone for this ring.*/
- shm = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE, 0);
+ shm = odp_shm_reserve(ring_name, ring_size, ODP_CACHE_LINE_SIZE,
+ shm_flag);
r = odp_shm_addr(shm);
@@ -101,6 +101,7 @@ int odp_schedule_init_global(void)
return -1;
}
+ memset(&params, 0, sizeof(params));
params.buf.size = sizeof(queue_desc_t);
params.buf.align = 0;
params.buf.num = SCHED_POOL_SIZE/sizeof(queue_desc_t);
@@ -16,6 +16,7 @@
#include <odp_debug_internal.h>
#include <odp_align_internal.h>
#include <odp/config.h>
+#include <odp/helper/ring.h>
#include <unistd.h>
#include <sys/mman.h>
@@ -189,7 +190,7 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
int fd = -1;
int map_flag = MAP_SHARED;
/* If already exists: O_EXCL: error, O_TRUNC: truncate to zero */
- int oflag = O_RDWR | O_CREAT | O_TRUNC;
+ int oflag = O_RDWR;
uint64_t alloc_size;
uint64_t page_sz, huge_sz;
#ifdef MAP_HUGETLB
@@ -207,7 +208,12 @@ odp_shm_t odp_shm_reserve(const char *name, uint64_t size, uint64_t align,
alloc_hp_size = (size + align + (huge_sz - 1)) & (-huge_sz);
#endif
- if (flags & ODP_SHM_PROC) {
+ if (flags & ODP_SHM_PROC)
+ oflag |= O_CREAT | O_TRUNC;
+
+ if (flags & (ODP_SHM_PROC | ODP_SHM_PROC_NOCREAT)) {
+ need_huge_page = 0;
+
/* Creates a file to /dev/shm */
fd = shm_open(name, oflag,
S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);
Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
I'm updating the IPC patch to the latest ODP code here:
https://git.linaro.org/people/maxim.uvarov/odp.git odp_ipc_v4

While doing that I found that it would be good to add more parameters to
pool_create(); for example, I need to pass a shared memory flag there.
Can that be accepted?

This is not the final version of the patch, and
platform/linux-generic/odp_ipc.c will be moved to
platform/linux-generic/include/odp_pktio_ipc_internal.h. But if somebody
has also started looking at IPC, I think it is reasonable to look at this
patch.

Thanks,
Maxim.

 configure.ac                                        |   1 +
 example/Makefile.am                                 |   2 +-
 example/ipc/.gitignore                              |   1 +
 example/ipc/Makefile.am                             |   7 +
 example/ipc/odp_ipc.c                               | 425 +++++++++++++++
 helper/include/odp/helper/ring.h                    |   2 +
 include/odp/api/pool.h                              |   1 +
 include/odp/api/shared_memory.h                     |   2 +-
 platform/linux-generic/Makefile.am                  |   1 +
 .../linux-generic/include/odp_buffer_internal.h     |   2 +
 platform/linux-generic/include/odp_ipc.h            |  47 ++
 .../linux-generic/include/odp_packet_io_internal.h  |  15 +
 platform/linux-generic/include/odp_pool_internal.h  |   1 +
 platform/linux-generic/odp_init.c                   |   6 +
 platform/linux-generic/odp_ipc.c                    | 597 +++++++++++++++++++++
 platform/linux-generic/odp_packet_io.c              |  30 +-
 platform/linux-generic/odp_pool.c                   |  14 +-
 platform/linux-generic/odp_ring.c                   |   9 +-
 platform/linux-generic/odp_schedule.c               |   1 +
 platform/linux-generic/odp_shared_memory.c          |  10 +-
 20 files changed, 1165 insertions(+), 9 deletions(-)
 create mode 100644 example/ipc/.gitignore
 create mode 100644 example/ipc/Makefile.am
 create mode 100644 example/ipc/odp_ipc.c
 create mode 100644 platform/linux-generic/include/odp_ipc.h
 create mode 100644 platform/linux-generic/odp_ipc.c
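For review purposes, here is a condensed, illustrative fragment of how an
application is expected to combine the new pool shm_flags field with the "ipc"
pktio name. It mirrors example/ipc/odp_ipc.c above and belongs inside the
master's main() after ODP init; the pool name and sizes are made up.

    odp_pool_param_t params;
    odp_pool_t pool;
    odp_pktio_t ipc;

    /* Packet pool whose backing shm is created with ODP_SHM_PROC so
     * another process can map it. */
    memset(&params, 0, sizeof(params));
    params.type        = ODP_POOL_PACKET;
    params.pkt.num     = 512;
    params.pkt.len     = 1856;
    params.pkt.seg_len = 1856;
    params.shm_flags   = ODP_SHM_PROC; /* new field proposed in this patch */

    pool = odp_pool_create("ipc_packet_pool", ODP_SHM_NULL, &params);
    ipc  = odp_pktio_open("ipc_pktio", pool);

    /* In the second process a NULL pool selects the IPC slave path, which
     * maps the master's pool and creates its own local copy pool:
     *     ipc = odp_pktio_open("ipc_pktio", NULL);
     */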