Message ID | 1417994664-24534-1-git-send-email-bill.fischofer@linaro.org |
---|---|
State | New |
Headers | show |
Hi, Comments inline. Regards, Bala On Monday 08 December 2014 04:54 AM, Bill Fischofer wrote: > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > --- > > Petri: Please review the following files here: > platform/linux-generic/include/api/odp_buffer.h > platform/linux-generic/include/api/odp_buffer_pool.h > platform/linux-generic/include/api/odp_config.h > > This patch is complete and compilable/testable. It is RFC pending > Petri approval of the public API headers and recommendations for > final packaging. > > example/generator/odp_generator.c | 19 +- > example/ipsec/odp_ipsec.c | 57 +- > example/l2fwd/odp_l2fwd.c | 19 +- > example/odp_example/odp_example.c | 18 +- > example/packet/odp_pktio.c | 19 +- > example/timer/odp_timer_test.c | 13 +- > platform/linux-generic/include/api/odp_buffer.h | 3 +- > .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- > platform/linux-generic/include/api/odp_config.h | 19 + > .../linux-generic/include/api/odp_platform_types.h | 12 + > .../linux-generic/include/api/odp_shared_memory.h | 10 +- > .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ > .../linux-generic/include/odp_buffer_internal.h | 150 ++-- > .../include/odp_buffer_pool_internal.h | 351 ++++++++-- > platform/linux-generic/include/odp_internal.h | 2 + > .../linux-generic/include/odp_packet_internal.h | 50 +- > .../linux-generic/include/odp_timer_internal.h | 11 +- > platform/linux-generic/odp_buffer.c | 33 +- > platform/linux-generic/odp_buffer_pool.c | 777 ++++++++++----------- > platform/linux-generic/odp_linux.c | 4 +- > platform/linux-generic/odp_packet.c | 41 +- > platform/linux-generic/odp_queue.c | 1 + > platform/linux-generic/odp_schedule.c | 20 +- > platform/linux-generic/odp_timer.c | 3 +- > test/api_test/odp_timer_ping.c | 19 +- > test/validation/odp_crypto.c | 43 +- > test/validation/odp_queue.c | 19 +- > 27 files changed, 1208 insertions(+), 758 deletions(-) > create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h > > 
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c > index 73b0369..476cbef 100644 > --- a/example/generator/odp_generator.c > +++ b/example/generator/odp_generator.c > @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c > index 76d27c5..f96338c 100644 > --- a/example/ipsec/odp_ipsec.c > +++ b/example/ipsec/odp_ipsec.c > @@ -367,8 +367,7 @@ static > void ipsec_init_pre(void) > { > odp_queue_param_t qparam; > - void *pool_base; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* > * Create queues > @@ -401,16 +400,12 @@ void ipsec_init_pre(void) > } > > /* Create output buffer pool */ > - shm = odp_shm_reserve("shm_out_pool", > - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 
0); > - > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_OUT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - out_pool = odp_buffer_pool_create("out_pool", pool_base, > - SHM_OUT_POOL_SIZE, > - SHM_OUT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, ¶ms); > > if (ODP_BUFFER_POOL_INVALID == out_pool) { > EXAMPLE_ERR("Error: message pool create failed.\n"); > @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > int stream_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet buffer pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - pool_base = odp_shm_addr(shm); > - > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > + ¶ms); > > - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (ODP_BUFFER_POOL_INVALID == pkt_pool) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > } > > /* Create context buffer pool */ > - shm = odp_shm_reserve("shm_ctx_pool", > - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - > - pool_base = odp_shm_addr(shm); > + 
params.buf_size = SHM_CTX_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, > + ¶ms); > > - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, > - SHM_CTX_POOL_SIZE, > - SHM_CTX_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > if (ODP_BUFFER_POOL_INVALID == ctx_pool) { > EXAMPLE_ERR("Error: context pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c > index ebac8c5..3c1fd6a 100644 > --- a/example/l2fwd/odp_l2fwd.c > +++ b/example/l2fwd/odp_l2fwd.c > @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_pktio_t pktio; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { 
> EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/odp_example/odp_example.c b/example/odp_example/odp_example.c > index 96a2912..8373f12 100644 > --- a/example/odp_example/odp_example.c > +++ b/example/odp_example/odp_example.c > @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int i, j; > int prios; > int first_core; > odp_shm_t shm; > test_globals_t *globals; > + odp_buffer_pool_param_t params; > > printf("\nODP example starts\n\n"); > > @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) > /* > * Create message pool > */ > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > + params.buf_size = sizeof(test_message_t); > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Shared memory reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - sizeof(test_message_t), > - ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Pool create failed.\n"); > diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c > index 7d51682..f2e7b2d 100644 > --- a/example/packet/odp_pktio.c > +++ b/example/packet/odp_pktio.c > @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -389,20 +389,13 @@ int main(int argc, char *argv[]) > printf("First 
core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c > index 9968bfe..0d6e31a 100644 > --- a/example/timer/odp_timer_test.c > +++ b/example/timer/odp_timer_test.c > @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int first_core; > uint64_t cycles, ns; > odp_queue_param_t param; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > printf("\nODP timer example starts\n"); > > @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) > */ > shm = odp_shm_reserve("msg_pool", > MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - 0, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_TIMEOUT); > + params.buf_size = 0; > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; > + > + pool = odp_buffer_pool_create("msg_pool", shm, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Pool create failed.\n"); > diff --git 
a/platform/linux-generic/include/api/odp_buffer.h b/platform/linux-generic/include/api/odp_buffer.h > index da23120..e981324 100644 > --- a/platform/linux-generic/include/api/odp_buffer.h > +++ b/platform/linux-generic/include/api/odp_buffer.h > @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return 1 if valid, otherwise 0 > + * @retval 1 Buffer handle represents a valid buffer. > + * @retval 0 Buffer handle does not represent a valid buffer. > */ > int odp_buffer_is_valid(odp_buffer_t buf); > > diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h b/platform/linux-generic/include/api/odp_buffer_pool.h > index 30b83e0..3d85066 100644 > --- a/platform/linux-generic/include/api/odp_buffer_pool.h > +++ b/platform/linux-generic/include/api/odp_buffer_pool.h > @@ -32,42 +32,114 @@ extern "C" { > /** Maximum queue name lenght in chars */ > #define ODP_BUFFER_POOL_NAME_LEN 32 > > -/** Invalid buffer pool */ > -#define ODP_BUFFER_POOL_INVALID 0 > +/** > + * Buffer pool parameters > + * Used to communicate buffer pool creation options. > + */ > +typedef struct odp_buffer_pool_param_t { > + size_t buf_size; /**< Buffer size in bytes. The maximum > + number of bytes application will > + store in each buffer. */ > + size_t buf_align; /**< Minimum buffer alignment in bytes. > + Valid values are powers of two. Use 0 > + for default alignment. Default will > + always be a multiple of 8. */ > + uint32_t num_bufs; /**< Number of buffers in the pool */ > + int buf_type; /**< Buffer type */ > +} odp_buffer_pool_param_t; > > /** > * Create a buffer pool > + * This routine is used to create a buffer pool. It take three > + * arguments: the optional name of the pool to be created, an optional shared > + * memory handle, and a parameter struct that describes the pool to be > + * created. If a name is not specified the result is an anonymous pool that > + * cannot be referenced by odp_buffer_pool_lookup(). 
> + * > + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 chars. > + * May be specified as NULL for anonymous pools. > * > - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 chars) > - * @param base_addr Pool base address > - * @param size Pool size in bytes > - * @param buf_size Buffer size in bytes > - * @param buf_align Minimum buffer alignment > - * @param buf_type Buffer type > + * @param shm The shared memory object in which to create the pool. > + * Use ODP_SHM_NULL to reserve default memory type > + * for the buffer type. > * > - * @return Buffer pool handle > + * @param params Buffer pool parameters. > + * > + * @retval Handle Buffer pool handle on success > + * @retval ODP_BUFFER_POOL_INVALID if call failed > */ > + > odp_buffer_pool_t odp_buffer_pool_create(const char *name, > - void *base_addr, uint64_t size, > - size_t buf_size, size_t buf_align, > - int buf_type); > + odp_shm_t shm, > + odp_buffer_pool_param_t *params); > > +/** > + * Destroy a buffer pool previously created by odp_buffer_pool_create() > + * > + * @param pool Handle of the buffer pool to be destroyed > + * > + * @retval 0 Success > + * @retval -1 Failure > + * > + * @note This routine destroys a previously created buffer pool. This call > + * does not destroy any shared memory object passed to > + * odp_buffer_pool_create() used to store the buffer pool contents. The caller > + * takes responsibility for that. If no shared memory object was passed as > + * part of the create call, then this routine will destroy any internal shared > + * memory objects associated with the buffer pool. Results are undefined if > + * an attempt is made to destroy a buffer pool that contains allocated or > + * otherwise active buffers. > + */ > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); > > /** > * Find a buffer pool by name > * > * @param name Name of the pool > * > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. 
> + * @retval Handle Buffer pool handle on successs > + * @retval ODP_BUFFER_POOL_INVALID if not found > + * > + * @note This routine cannot be used to look up an anonymous pool (one created > + * with no name). > */ > odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); > > +/** > + * Buffer pool information struct > + * Used to get information about a buffer pool. > + */ > +typedef struct odp_buffer_pool_info_t { > + const char *name; /**< pool name */ > + odp_buffer_pool_param_t params; /**< pool parameters */ > +} odp_buffer_pool_info_t; > + > +/** > + * Retrieve information about a buffer pool > + * > + * @param pool Buffer pool handle > + * > + * @param shm Recieves odp_shm_t supplied by caller at > + * pool creation, or ODP_SHM_NULL if the > + * pool is managed internally. > + * > + * @param[out] info Receives an odp_buffer_pool_info_t object > + * that describes the pool. > + * > + * @retval 0 Success > + * @retval -1 Failure. Info could not be retrieved. > + */ > + > +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, > + odp_buffer_pool_info_t *info); > > /** > * Print buffer pool info > * > * @param pool Pool handle > * > + * @note This routine writes implementation-defined information about the > + * specified buffer pool to the ODP log. The intended use is for debugging. 
> */ > void odp_buffer_pool_print(odp_buffer_pool_t pool); > > @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); > * The validity of a buffer can be cheked at any time with odp_buffer_is_valid() > * @param pool Pool handle > * > - * @return Buffer handle or ODP_BUFFER_INVALID > + * @retval Handle Buffer handle of allocated buffer > + * @retval ODP_BUFFER_INVALID Allocation failed > */ > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return Buffer pool the buffer was allocated from > + * @retval Handle Buffer pool handle that the buffer was allocated from > */ > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); > > diff --git a/platform/linux-generic/include/api/odp_config.h b/platform/linux-generic/include/api/odp_config.h > index 906897c..5ca5bb2 100644 > --- a/platform/linux-generic/include/api/odp_config.h > +++ b/platform/linux-generic/include/api/odp_config.h > @@ -49,6 +49,25 @@ extern "C" { > #define ODP_CONFIG_PKTIO_ENTRIES 64 > > /** > + * Buffer segment size to use > + * This is the granularity of segmented buffers. Sized for now to be large > + * enough to support 1500-byte packets since the raw socket interface does not > + * support scatter/gather I/O. ODP requires a minimum segment size of 128 > + * bytes with 256 recommended. Linux-generic code will enforce a 256 byte > + * minimum. Note that the chosen segment size must be a multiple of > + * ODP_CACHE_LINE_SIZE. > + */ > +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) > + > +/** > + * Maximum buffer size supported > + * Must be an integral number of segments and should be large enough to > + * accommodate jumbo packets. Attempts to allocate or extend buffers to sizes > + * larger than this limit will fail. 
> + */ > +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) > + > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h b/platform/linux-generic/include/api/odp_platform_types.h > index 4db47d3..2181eb6 100644 > --- a/platform/linux-generic/include/api/odp_platform_types.h > +++ b/platform/linux-generic/include/api/odp_platform_types.h > @@ -26,6 +26,9 @@ > /** ODP Buffer pool */ > typedef uint32_t odp_buffer_pool_t; > > +/** Invalid buffer pool */ > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) > + > /** ODP buffer */ > typedef uint32_t odp_buffer_t; > > @@ -65,6 +68,15 @@ typedef uint32_t odp_pktio_t; > #define ODP_PKTIO_ANY ((odp_pktio_t)~0) > > /** > + * ODP shared memory block > + */ > +typedef uint32_t odp_shm_t; > + > +/** Invalid shared memory block */ > +#define ODP_SHM_INVALID 0 > +#define ODP_SHM_NULL ODP_SHM_INVALID /**< Synonym for buffer pool use */ > + > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_shared_memory.h b/platform/linux-generic/include/api/odp_shared_memory.h > index 26e208b..f70db5a 100644 > --- a/platform/linux-generic/include/api/odp_shared_memory.h > +++ b/platform/linux-generic/include/api/odp_shared_memory.h > @@ -20,6 +20,7 @@ extern "C" { > > > #include <odp_std_types.h> > +#include <odp_platform_types.h> > > /** @defgroup odp_shared_memory ODP SHARED MEMORY > * Operations on shared memory. 
> @@ -38,15 +39,6 @@ extern "C" { > #define ODP_SHM_PROC 0x2 /**< Share with external processes */ > > /** > - * ODP shared memory block > - */ > -typedef uint32_t odp_shm_t; > - > -/** Invalid shared memory block */ > -#define ODP_SHM_INVALID 0 > - > - > -/** > * Shared memory block info > */ > typedef struct odp_shm_info_t { > diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h > new file mode 100644 > index 0000000..9eb425c > --- /dev/null > +++ b/platform/linux-generic/include/odp_buffer_inlines.h > @@ -0,0 +1,150 @@ > +/* Copyright (c) 2014, Linaro Limited > + * All rights reserved. > + * > + * SPDX-License-Identifier: BSD-3-Clause > + */ > + > +/** > + * @file > + * > + * Inline functions for ODP buffer mgmt routines - implementation internal > + */ > + > +#ifndef ODP_BUFFER_INLINES_H_ > +#define ODP_BUFFER_INLINES_H_ > + > +#ifdef __cplusplus > +extern "C" { > +#endif > + > +static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr) > +{ > + odp_buffer_bits_t handle; > + uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); > + struct pool_entry_s *pool = get_pool_entry(pool_id); > + > + handle.pool_id = pool_id; > + handle.index = ((uint8_t *)hdr - pool->pool_base_addr) / > + ODP_CACHE_LINE_SIZE; > + handle.seg = 0; > + > + return handle.u32; > +} > + > +static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) > +{ > + return hdr->handle.handle; > +} > + > +static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) > +{ > + odp_buffer_bits_t handle; > + uint32_t pool_id; > + uint32_t index; > + struct pool_entry_s *pool; > + > + handle.u32 = buf; > + pool_id = handle.pool_id; > + index = handle.index; > + > +#ifdef POOL_ERROR_CHECK > + if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { > + ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); > + return NULL; > + } > +#endif > + > + pool = get_pool_entry(pool_id); > + > +#ifdef POOL_ERROR_CHECK > + if 
(odp_unlikely(index > pool->params.num_bufs - 1)) { > + ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); > + return NULL; > + } > +#endif > + > + return (odp_buffer_hdr_t *)(void *) > + (pool->pool_base_addr + (index * ODP_CACHE_LINE_SIZE)); > +} > + > +static inline uint32_t odp_buffer_refcount(odp_buffer_hdr_t *buf) > +{ > + return odp_atomic_load_u32(&buf->ref_count); > +} > + > +static inline uint32_t odp_buffer_incr_refcount(odp_buffer_hdr_t *buf, > + uint32_t val) > +{ > + return odp_atomic_fetch_add_u32(&buf->ref_count, val) + val; > +} > + > +static inline uint32_t odp_buffer_decr_refcount(odp_buffer_hdr_t *buf, > + uint32_t val) > +{ > + uint32_t tmp; > + > + tmp = odp_atomic_fetch_sub_u32(&buf->ref_count, val); > + > + if (tmp < val) { > + odp_atomic_fetch_add_u32(&buf->ref_count, val - tmp); > + return 0; > + } else { > + return tmp - val; > + } > +} IMO, I do not see any use case where the refcount gets incremented by a value greater than 1 in a single API call. If we drop "val" from the function's input, we can simply use the odp_atomic_inc_u32 APIs. 
> + > +static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) > +{ > + odp_buffer_bits_t handle; > + odp_buffer_hdr_t *buf_hdr; > + handle.u32 = buf; > + > + /* For buffer handles, segment index must be 0 and pool id in range */ > + if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_BUFFER_POOLS) > + return NULL; > + > + pool_entry_t *pool = odp_pool_to_entry(handle.pool_id); > + > + /* If pool not created, handle is invalid */ > + if (pool->s.pool_shm == ODP_SHM_INVALID) > + return NULL; > + > + uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; > + > + /* A valid buffer index must be on stride, and must be in range */ > + if ((handle.index % buf_stride != 0) || > + ((uint32_t)(handle.index / buf_stride) >= pool->s.params.num_bufs)) > + return NULL; > + > + buf_hdr = (odp_buffer_hdr_t *)(void *) > + (pool->s.pool_base_addr + > + (handle.index * ODP_CACHE_LINE_SIZE)); > + > + /* Handle is valid, so buffer is valid if it is allocated */ > + return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr; > +} > + > +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); > + > +static inline void *buffer_map(odp_buffer_hdr_t *buf, > + uint32_t offset, > + uint32_t *seglen, > + uint32_t limit) > +{ > + int seg_index = offset / buf->segsize; > + int seg_offset = offset % buf->segsize; > + > + if (seglen != NULL) { > + uint32_t buf_left = limit - offset; > + *seglen = buf_left < buf->segsize ? 
> + buf_left : buf->segsize - seg_offset; > + } > + > + return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); > +} > + > +#ifdef __cplusplus > +} > +#endif > + > +#endif > diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h > index 0027bfc..632dcbf 100644 > --- a/platform/linux-generic/include/odp_buffer_internal.h > +++ b/platform/linux-generic/include/odp_buffer_internal.h > @@ -24,99 +24,131 @@ extern "C" { > #include <odp_buffer.h> > #include <odp_debug.h> > #include <odp_align.h> > - > -/* TODO: move these to correct files */ > - > -typedef uint64_t odp_phys_addr_t; > +#include <odp_align_internal.h> > +#include <odp_config.h> > +#include <odp_byteorder.h> > +#include <odp_thread.h> > + > + > +#define ODP_BITSIZE(x) \ > + ((x) <= 2 ? 1 : \ > + ((x) <= 4 ? 2 : \ > + ((x) <= 8 ? 3 : \ > + ((x) <= 16 ? 4 : \ > + ((x) <= 32 ? 5 : \ > + ((x) <= 64 ? 6 : \ > + ((x) <= 128 ? 7 : \ > + ((x) <= 256 ? 8 : \ > + ((x) <= 512 ? 9 : \ > + ((x) <= 1024 ? 10 : \ > + ((x) <= 2048 ? 11 : \ > + ((x) <= 4096 ? 12 : \ > + ((x) <= 8196 ? 13 : \ > + ((x) <= 16384 ? 14 : \ > + ((x) <= 32768 ? 15 : \ > + ((x) <= 65536 ? 
16 : \ > + (0/0))))))))))))))))) > + > +ODP_STATIC_ASSERT(ODP_CONFIG_BUF_SEG_SIZE >= 256, > + "ODP Segment size must be a minimum of 256 bytes"); > + > +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_SEG_SIZE % ODP_CACHE_LINE_SIZE) == 0, > + "ODP Segment size must be a multiple of cache line size"); > + > +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_MAX_SIZE % ODP_CONFIG_BUF_SEG_SIZE) == 0, > + "Buffer max size must be a multiple of segment size"); > + > +#define ODP_BUFFER_MAX_SEG (ODP_CONFIG_BUF_MAX_SIZE/ODP_CONFIG_BUF_SEG_SIZE) > + > +/* We can optimize storage of small buffers within metadata area */ > +#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - 1)) > + > +#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_BUFFER_POOLS) > +#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) > +#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS) > +#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + ODP_BUFFER_INDEX_BITS) > +#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) > +#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) > > #define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) > #define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1) > > -#define ODP_BUFS_PER_CHUNK 16 > -#define ODP_BUFS_PER_SCATTER 4 > - > -#define ODP_BUFFER_TYPE_CHUNK 0xffff > - > - > -#define ODP_BUFFER_POOL_BITS 4 > -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS) > -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) > -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) > - > typedef union odp_buffer_bits_t { > uint32_t u32; > odp_buffer_t handle; > > struct { > +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN > uint32_t pool_id:ODP_BUFFER_POOL_BITS; > uint32_t index:ODP_BUFFER_INDEX_BITS; > + uint32_t seg:ODP_BUFFER_SEG_BITS; > +#else > + uint32_t seg:ODP_BUFFER_SEG_BITS; > + uint32_t index:ODP_BUFFER_INDEX_BITS; > + uint32_t pool_id:ODP_BUFFER_POOL_BITS; > +#endif > }; > -} odp_buffer_bits_t; > > + struct { > +#if 
ODP_BYTE_ORDER == ODP_BIG_ENDIAN > + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; > + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; > +#else > + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; > + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; > +#endif > + }; > +} odp_buffer_bits_t; > > /* forward declaration */ > struct odp_buffer_hdr_t; > > - > -/* > - * Scatter/gather list of buffers > - */ > -typedef struct odp_buffer_scatter_t { > - /* buffer pointers */ > - struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER]; > - int num_bufs; /* num buffers */ > - int pos; /* position on the list */ > - size_t total_len; /* Total length */ > -} odp_buffer_scatter_t; > - > - > -/* > - * Chunk of buffers (in single pool) > - */ > -typedef struct odp_buffer_chunk_t { > - uint32_t num_bufs; /* num buffers */ > - uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */ > -} odp_buffer_chunk_t; > - > - > /* Common buffer header */ > typedef struct odp_buffer_hdr_t { > struct odp_buffer_hdr_t *next; /* next buf in a list */ > + int allocator; /* allocating thread id */ > odp_buffer_bits_t handle; /* handle */ > - odp_phys_addr_t phys_addr; /* physical data start address */ > - void *addr; /* virtual data start address */ > - uint32_t index; /* buf index in the pool */ > + union { > + uint32_t all; > + struct { > + uint32_t zeroized:1; /* Zeroize buf data on free */ > + uint32_t hdrdata:1; /* Data is in buffer hdr */ > + }; > + } flags; > + int type; /* buffer type */ > size_t size; /* max data size */ > - size_t cur_offset; /* current offset */ > odp_atomic_u32_t ref_count; /* reference count */ > - odp_buffer_scatter_t scatter; /* Scatter/gather list */ > - int type; /* type of next header */ > odp_buffer_pool_t pool_hdl; /* buffer pool handle */ nit: We can directly store pool_entry_s* inside buffer_hdr_t instead of pool handle. 
> - > + union { > + uint64_t buf_u64; /* user u64 */ > + void *buf_ctx; /* user context */ > + void *udata_addr; /* user metadata addr */ > + }; > + size_t udata_size; /* size of user metadata */ > + uint32_t segcount; /* segment count */ > + uint32_t segsize; /* segment size */ > + void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ > } odp_buffer_hdr_t; > > -/* Ensure next header starts from 8 byte align */ > -ODP_STATIC_ASSERT((sizeof(odp_buffer_hdr_t) % 8) == 0, "ODP_BUFFER_HDR_T__SIZE_ERROR"); > +typedef struct odp_buffer_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))]; > +} odp_buffer_hdr_stride; > > +typedef struct odp_buf_blk_t { > + struct odp_buf_blk_t *next; > + struct odp_buf_blk_t *prev; > +} odp_buf_blk_t; > > /* Raw buffer header */ > typedef struct { > odp_buffer_hdr_t buf_hdr; /* common buffer header */ > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_raw_buffer_hdr_t; > > +/* Free buffer marker */ > +#define ODP_FREEBUF -1 > > -/* Chunk header */ > -typedef struct odp_buffer_chunk_hdr_t { > - odp_buffer_hdr_t buf_hdr; > - odp_buffer_chunk_t chunk; > -} odp_buffer_chunk_hdr_t; > - > - > -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf); > - > -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src); > - > +/* Forward declarations */ > +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool, size_t size); > > #ifdef __cplusplus > } > diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h b/platform/linux-generic/include/odp_buffer_pool_internal.h > index e0210bd..347be39 100644 > --- a/platform/linux-generic/include/odp_buffer_pool_internal.h > +++ b/platform/linux-generic/include/odp_buffer_pool_internal.h > @@ -19,12 +19,44 @@ extern "C" { > #endif > > #include <odp_std_types.h> > +#include <odp_align.h> > +#include <odp_align_internal.h> > #include <odp_buffer_pool.h> > #include <odp_buffer_internal.h> > -#include <odp_align.h> > #include <odp_hints.h> > 
#include <odp_config.h> > #include <odp_debug.h> > +#include <odp_shared_memory.h> > +#include <odp_atomic.h> > +#include <odp_atomic_internal.h> > +#include <string.h> > + > +/** > + * Buffer initialization routine prototype > + * > + * @note Routines of this type MAY be passed as part of the > + * _odp_buffer_pool_init_t structure to be called whenever a > + * buffer is allocated to initialize the user metadata > + * associated with that buffer. > + */ > +typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); > + > +/** > + * Buffer pool initialization parameters > + * Used to communicate buffer pool initialization options. Internal for now. > + */ > +typedef struct _odp_buffer_pool_init_t { > + size_t udata_size; /**< Size of user metadata for each buffer */ > + _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */ > + void *buf_init_arg; /**< Argument to be passed to buf_init() */ > +} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */ > + > +/* Local cache for buffer alloc/free acceleration */ > +typedef struct local_cache_t { > + odp_buffer_hdr_t *buf_freelist; /* The local cache */ > + uint64_t bufallocs; /* Local buffer alloc count */ > + uint64_t buffrees; /* Local buffer free count */ > +} local_cache_t; > > /* Use ticketlock instead of spinlock */ > #define POOL_USE_TICKETLOCK > @@ -39,6 +71,17 @@ extern "C" { > #include <odp_spinlock.h> > #endif > > +#ifdef POOL_USE_TICKETLOCK > +#include <odp_ticketlock.h> > +#define LOCK(a) odp_ticketlock_lock(a) > +#define UNLOCK(a) odp_ticketlock_unlock(a) > +#define LOCK_INIT(a) odp_ticketlock_init(a) > +#else > +#include <odp_spinlock.h> > +#define LOCK(a) odp_spinlock_lock(a) > +#define UNLOCK(a) odp_spinlock_unlock(a) > +#define LOCK_INIT(a) odp_spinlock_init(a) > +#endif > > struct pool_entry_s { > #ifdef POOL_USE_TICKETLOCK > @@ -47,66 +90,292 @@ struct pool_entry_s { > odp_spinlock_t lock ODP_ALIGNED_CACHE; > #endif > > - odp_buffer_chunk_hdr_t *head; > - 
uint64_t free_bufs; > char name[ODP_BUFFER_POOL_NAME_LEN]; > - > - odp_buffer_pool_t pool_hdl ODP_ALIGNED_CACHE; > - uintptr_t buf_base; > - size_t buf_size; > - size_t buf_offset; > - uint64_t num_bufs; > - void *pool_base_addr; > - uint64_t pool_size; > - size_t user_size; > - size_t user_align; > - int buf_type; > - size_t hdr_size; > + odp_buffer_pool_param_t params; > + _odp_buffer_pool_init_t init_params; > + odp_buffer_pool_t pool_hdl; > + uint32_t pool_id; > + odp_shm_t pool_shm; > + union { > + uint32_t all; > + struct { > + uint32_t has_name:1; > + uint32_t user_supplied_shm:1; > + uint32_t unsegmented:1; > + uint32_t zeroized:1; > + uint32_t predefined:1; > + }; > + } flags; > + uint32_t quiesced; > + uint32_t low_wm_assert; > + uint8_t *pool_base_addr; > + size_t pool_size; > + uint32_t buf_stride; > + _odp_atomic_ptr_t buf_freelist; > + _odp_atomic_ptr_t blk_freelist; > + odp_atomic_u32_t bufcount; > + odp_atomic_u32_t blkcount; > + odp_atomic_u64_t bufallocs; > + odp_atomic_u64_t buffrees; > + odp_atomic_u64_t blkallocs; > + odp_atomic_u64_t blkfrees; > + odp_atomic_u64_t bufempty; > + odp_atomic_u64_t blkempty; > + odp_atomic_u64_t high_wm_count; > + odp_atomic_u64_t low_wm_count; > + uint32_t seg_size; > + uint32_t high_wm; > + uint32_t low_wm; > + uint32_t headroom; > + uint32_t tailroom; > }; > > +typedef union pool_entry_u { > + struct pool_entry_s s; > + > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; > +} pool_entry_t; > > extern void *pool_entry_ptr[]; > > +#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) > +#define buffer_is_secure(buf) (buf->flags.zeroized) > +#define pool_is_secure(pool) (pool->flags.zeroized) > +#else > +#define buffer_is_secure(buf) 0 > +#define pool_is_secure(pool) 0 > +#endif > + > +#define TAG_ALIGN ((size_t)16) > > -static inline void *get_pool_entry(uint32_t pool_id) > +#define odp_cs(ptr, old, new) \ > + _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void 
*)new, \ > + _ODP_MEMMODEL_SC, \ > + _ODP_MEMMODEL_SC) > + > +/* Helper functions for pointer tagging to avoid ABA race conditions */ > +#define odp_tag(ptr) \ > + (((size_t)ptr) & (TAG_ALIGN - 1)) > + > +#define odp_detag(ptr) \ > + ((typeof(ptr))(((size_t)ptr) & -TAG_ALIGN)) > + > +#define odp_retag(ptr, tag) \ > + ((typeof(ptr))(((size_t)ptr) | odp_tag(tag))) > + > + > +static inline void *get_blk(struct pool_entry_s *pool) > { > - return pool_entry_ptr[pool_id]; > + void *oldhead, *myhead, *newhead; > + > + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + if (odp_unlikely(myhead == NULL)) > + break; > + newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + 1); > + } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); > + > + if (odp_unlikely(myhead == NULL)) > + odp_atomic_inc_u64(&pool->blkempty); > + else > + odp_atomic_dec_u32(&pool->blkcount); > + > + return (void *)myhead; > } > > +static inline void ret_blk(struct pool_entry_s *pool, void *block) > +{ > + void *oldhead, *myhead, *myblock; > + > + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + ((odp_buf_blk_t *)block)->next = myhead; > + myblock = odp_retag(block, tag + 1); > + } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); > > -static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) > + odp_atomic_inc_u32(&pool->blkcount); > + odp_atomic_inc_u64(&pool->blkfrees); > +} > + > +static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) > { > - odp_buffer_bits_t handle; > - uint32_t pool_id; > - uint32_t index; > - struct pool_entry_s *pool; > - odp_buffer_hdr_t *hdr; > - > - handle.u32 = buf; > - pool_id = handle.pool_id; > - index = handle.index; > - > -#ifdef POOL_ERROR_CHECK > - if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { > - 
ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); > - return NULL; > + odp_buffer_hdr_t *oldhead, *myhead, *newhead; > + > + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + if (odp_unlikely(myhead == NULL)) > + break; > + newhead = odp_retag(myhead->next, tag + 1); > + } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); > + > + if (odp_unlikely(myhead == NULL)) { > + odp_atomic_inc_u64(&pool->bufempty); > + } else { > + uint64_t bufcount = > + odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; > + > + /* Check for low watermark condition */ > + if (bufcount == pool->low_wm && !pool->low_wm_assert) { > + pool->low_wm_assert = 1; > + odp_atomic_inc_u64(&pool->low_wm_count); > + } > + > + odp_atomic_inc_u64(&pool->bufallocs); > + myhead->next = myhead; /* Mark buffer allocated */ > + myhead->allocator = odp_thread_id(); > } > -#endif > > - pool = get_pool_entry(pool_id); > + return (void *)myhead; > +} > + > +static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf) > +{ > + odp_buffer_hdr_t *oldhead, *myhead, *mybuf; > + > + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ > > -#ifdef POOL_ERROR_CHECK > - if (odp_unlikely(index > pool->num_bufs - 1)) { > - ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); > - return NULL; > + if (!buf->flags.hdrdata && buf->type != ODP_BUFFER_TYPE_RAW) { > + while (buf->segcount > 0) { > + if (buffer_is_secure(buf) || pool_is_secure(pool)) > + memset(buf->addr[buf->segcount - 1], > + 0, buf->segsize); > + ret_blk(pool, buf->addr[--buf->segcount]); > + } > + buf->size = 0; > } > -#endif > > - hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * pool->buf_size); > + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + buf->next = myhead; > + mybuf = odp_retag(buf, tag + 1); > + } while 
(odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); > + > + uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1; > > - return hdr; > + /* Check if low watermark condition should be deasserted */ > + if (bufcount == pool->high_wm && pool->low_wm_assert) { > + pool->low_wm_assert = 0; > + odp_atomic_inc_u64(&pool->high_wm_count); > + } > + > + odp_atomic_inc_u64(&pool->buffrees); > +} > + > +static inline void *get_local_buf(local_cache_t *buf_cache, > + struct pool_entry_s *pool, > + size_t totsize) > +{ > + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; > + > + if (odp_likely(buf != NULL)) { > + buf_cache->buf_freelist = buf->next; > + > + if (odp_unlikely(buf->size < totsize)) { > + size_t needed = totsize - buf->size; > + > + do { > + void *blk = get_blk(pool); > + if (odp_unlikely(blk == NULL)) { > + ret_buf(pool, buf); > + buf_cache->buffrees--; > + return NULL; > + } > + buf->addr[buf->segcount++] = blk; > + needed -= pool->seg_size; > + } while ((ssize_t)needed > 0); > + > + buf->size = buf->segcount * pool->seg_size; > + } > + > + buf_cache->bufallocs++; > + buf->allocator = odp_thread_id(); /* Mark buffer allocated */ > + } > + > + return buf; > +} > + > +static inline void ret_local_buf(local_cache_t *buf_cache, > + odp_buffer_hdr_t *buf) > +{ > + buf->allocator = ODP_FREEBUF; > + buf->next = buf_cache->buf_freelist; > + buf_cache->buf_freelist = buf; > + > + buf_cache->buffrees++; > +} > + > +static inline void flush_cache(local_cache_t *buf_cache, > + struct pool_entry_s *pool) > +{ > + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; > + uint32_t flush_count = 0; > + > + while (buf != NULL) { > + odp_buffer_hdr_t *next = buf->next; > + ret_buf(pool, buf); > + buf = next; > + flush_count++; > + } > + > + odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs); > + odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - flush_count); > + > + buf_cache->buf_freelist = NULL; > + buf_cache->bufallocs = 0; > + buf_cache->buffrees = 0; > 
+} > + > +static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) > +{ > + return pool_id; > +} > + > +static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) > +{ > + return pool_hdl; > +} > + > +static inline void *get_pool_entry(uint32_t pool_id) > +{ > + return pool_entry_ptr[pool_id]; > +} > + > +static inline pool_entry_t *odp_pool_to_entry(odp_buffer_pool_t pool) > +{ > + return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool)); > +} > + > +static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) > +{ > + return odp_pool_to_entry(buf->pool_hdl); > +} > + > +static inline uint32_t odp_buffer_pool_segment_size(odp_buffer_pool_t pool) > +{ > + return odp_pool_to_entry(pool)->s.seg_size; > +} > + > +static inline uint32_t odp_buffer_pool_headroom(odp_buffer_pool_t pool) > +{ > + return odp_pool_to_entry(pool)->s.headroom; > } > > +static inline uint32_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool) > +{ > + return odp_pool_to_entry(pool)->s.tailroom; > +} > > #ifdef __cplusplus > } > diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h > index f8c1596..11d6393 100644 > --- a/platform/linux-generic/include/odp_internal.h > +++ b/platform/linux-generic/include/odp_internal.h > @@ -42,6 +42,8 @@ int odp_schedule_init_local(void); > int odp_timer_init_global(void); > int odp_timer_disarm_all(void); > > +void _odp_flush_caches(void); > + > #ifdef __cplusplus > } > #endif > diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h > index 49c59b2..f34a83d 100644 > --- a/platform/linux-generic/include/odp_packet_internal.h > +++ b/platform/linux-generic/include/odp_packet_internal.h > @@ -22,6 +22,7 @@ extern "C" { > #include <odp_debug.h> > #include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_packet.h> > #include <odp_packet_io.h> 
> 
> @@ -92,7 +93,8 @@ typedef union { > }; > } output_flags_t; > > -ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), "OUTPUT_FLAGS_SIZE_ERROR"); > +ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), > + "OUTPUT_FLAGS_SIZE_ERROR"); > > /** > * Internal Packet header > @@ -105,25 +107,23 @@ typedef struct { > error_flags_t error_flags; > output_flags_t output_flags; > > - uint32_t frame_offset; /**< offset to start of frame, even on error */ > uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */ > uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */ > uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */ > > uint32_t frame_len; > + uint32_t headroom; > + uint32_t tailroom; > > uint64_t user_ctx; /* user context */ > > odp_pktio_t input; > - > - uint32_t pad; > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_packet_hdr_t; > > -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) == ODP_OFFSETOF(odp_packet_hdr_t, buf_data), > - "ODP_PACKET_HDR_T__SIZE_ERR"); > -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0, > - "ODP_PACKET_HDR_T__SIZE_ERR2"); > +typedef struct odp_packet_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))]; > +} odp_packet_hdr_stride; > + > > /** > * Return the packet header > @@ -138,6 +138,38 @@ static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt) > */ > void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset); > > +/** > + * Initialize packet buffer > + */ > +static inline void packet_init(pool_entry_t *pool, > + odp_packet_hdr_t *pkt_hdr, > + size_t size) > +{ > + /* > + * Reset parser metadata. Note that we clear via memset to make > + * this routine independent of any additional adds to packet metadata. 
> + */ > + const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr); > + uint8_t *start; > + size_t len; > + > + start = (uint8_t *)pkt_hdr + start_offset; > + len = sizeof(odp_packet_hdr_t) - start_offset; > + memset(start, 0, len); > + > + /* > + * Packet headroom is set from the pool's headroom > + * Packet tailroom is rounded up to fill the last > + * segment occupied by the allocated length. > + */ > + pkt_hdr->frame_len = size; > + pkt_hdr->headroom = pool->s.headroom; > + pkt_hdr->tailroom = > + (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - > + (pool->s.headroom + size); > +} > + > + > #ifdef __cplusplus > } > #endif > diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h > index ad28f53..2ff36ce 100644 > --- a/platform/linux-generic/include/odp_timer_internal.h > +++ b/platform/linux-generic/include/odp_timer_internal.h > @@ -51,14 +51,9 @@ typedef struct odp_timeout_hdr_t { > uint8_t buf_data[]; > } odp_timeout_hdr_t; > > - > - > -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) == > - ODP_OFFSETOF(odp_timeout_hdr_t, buf_data), > - "ODP_TIMEOUT_HDR_T__SIZE_ERR"); > - > -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0, > - "ODP_TIMEOUT_HDR_T__SIZE_ERR2"); > +typedef struct odp_timeout_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))]; > +} odp_timeout_hdr_stride; > > > /** > diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c > index bcbb99a..c1bef54 100644 > --- a/platform/linux-generic/odp_buffer.c > +++ b/platform/linux-generic/odp_buffer.c > @@ -5,8 +5,9 @@ > */ > > #include <odp_buffer.h> > -#include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_internal.h> > +#include <odp_buffer_inlines.h> > > #include <string.h> > #include <stdio.h> > @@ -16,7 +17,7 @@ void *odp_buffer_addr(odp_buffer_t buf) > { > odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); > 
> - return hdr->addr; > + return hdr->addr[0]; > } > > > @@ -38,15 +39,11 @@ int odp_buffer_type(odp_buffer_t buf) > > int odp_buffer_is_valid(odp_buffer_t buf) > { > - odp_buffer_bits_t handle; > - > - handle.u32 = buf; > - > - return (handle.index != ODP_BUFFER_INVALID_INDEX); > + return validate_buf(buf) != NULL; > } > > > -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf) > +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) > { > odp_buffer_hdr_t *hdr; > int len = 0; > @@ -63,28 +60,14 @@ int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf) > len += snprintf(&str[len], n-len, > " pool %i\n", hdr->pool_hdl); > len += snprintf(&str[len], n-len, > - " index %"PRIu32"\n", hdr->index); > - len += snprintf(&str[len], n-len, > - " phy_addr %"PRIu64"\n", hdr->phys_addr); > - len += snprintf(&str[len], n-len, > " addr %p\n", hdr->addr); > len += snprintf(&str[len], n-len, > " size %zu\n", hdr->size); > len += snprintf(&str[len], n-len, > - " cur_offset %zu\n", hdr->cur_offset); > - len += snprintf(&str[len], n-len, > " ref_count %i\n", > odp_atomic_load_u32(&hdr->ref_count)); > len += snprintf(&str[len], n-len, > " type %i\n", hdr->type); > - len += snprintf(&str[len], n-len, > - " Scatter list\n"); > - len += snprintf(&str[len], n-len, > - " num_bufs %i\n", hdr->scatter.num_bufs); > - len += snprintf(&str[len], n-len, > - " pos %i\n", hdr->scatter.pos); > - len += snprintf(&str[len], n-len, > - " total_len %zu\n", hdr->scatter.total_len); > > return len; > } > @@ -101,9 +84,3 @@ void odp_buffer_print(odp_buffer_t buf) > > ODP_PRINT("\n%s\n", str); > } > - > -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src) > -{ > - (void)buf_dst; > - (void)buf_src; > -} > diff --git a/platform/linux-generic/odp_buffer_pool.c b/platform/linux-generic/odp_buffer_pool.c > index 83c51fa..e3f90a2 100644 > --- a/platform/linux-generic/odp_buffer_pool.c > +++ b/platform/linux-generic/odp_buffer_pool.c > @@ -6,8 +6,9 @@ > > #include 
<odp_std_types.h> > #include <odp_buffer_pool.h> > -#include <odp_buffer_pool_internal.h> > #include <odp_buffer_internal.h> > +#include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_packet_internal.h> > #include <odp_timer_internal.h> > #include <odp_align_internal.h> > @@ -17,57 +18,35 @@ > #include <odp_config.h> > #include <odp_hints.h> > #include <odp_debug_internal.h> > +#include <odp_atomic_internal.h> > > #include <string.h> > #include <stdlib.h> > > > -#ifdef POOL_USE_TICKETLOCK > -#include <odp_ticketlock.h> > -#define LOCK(a) odp_ticketlock_lock(a) > -#define UNLOCK(a) odp_ticketlock_unlock(a) > -#define LOCK_INIT(a) odp_ticketlock_init(a) > -#else > -#include <odp_spinlock.h> > -#define LOCK(a) odp_spinlock_lock(a) > -#define UNLOCK(a) odp_spinlock_unlock(a) > -#define LOCK_INIT(a) odp_spinlock_init(a) > -#endif > - > - > #if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS > #error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS > #endif > > -#define NULL_INDEX ((uint32_t)-1) > > -union buffer_type_any_u { > +typedef union buffer_type_any_u { > odp_buffer_hdr_t buf; > odp_packet_hdr_t pkt; > odp_timeout_hdr_t tmo; > -}; > - > -ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0, > - "BUFFER_TYPE_ANY_U__SIZE_ERR"); > +} odp_anybuf_t; > > /* Any buffer type header */ > typedef struct { > union buffer_type_any_u any_hdr; /* any buffer type */ > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_any_buffer_hdr_t; > > - > -typedef union pool_entry_u { > - struct pool_entry_s s; > - > - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; > - > -} pool_entry_t; > +typedef struct odp_any_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))]; > +} odp_any_hdr_stride; > > > typedef struct pool_table_t { > pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS]; > - > } pool_table_t; > > > @@ -77,38 +56,8 @@ static pool_table_t *pool_tbl; > /* Pool entry pointers (for 
inlining) */ > void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS]; > > - > -static __thread odp_buffer_chunk_hdr_t *local_chunk[ODP_CONFIG_BUFFER_POOLS]; > - > - > -static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) > -{ > - return pool_id + 1; > -} > - > - > -static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) > -{ > - return pool_hdl -1; > -} > - > - > -static inline void set_handle(odp_buffer_hdr_t *hdr, > - pool_entry_t *pool, uint32_t index) > -{ > - odp_buffer_pool_t pool_hdl = pool->s.pool_hdl; > - uint32_t pool_id = pool_handle_to_index(pool_hdl); > - > - if (pool_id >= ODP_CONFIG_BUFFER_POOLS) > - ODP_ABORT("set_handle: Bad pool handle %u\n", pool_hdl); > - > - if (index > ODP_BUFFER_MAX_INDEX) > - ODP_ERR("set_handle: Bad buffer index\n"); > - > - hdr->handle.pool_id = pool_id; > - hdr->handle.index = index; > -} > - > +/* Local cache for buffer alloc/free acceleration */ > +static __thread local_cache_t local_cache[ODP_CONFIG_BUFFER_POOLS]; > > int odp_buffer_pool_init_global(void) > { > @@ -131,7 +80,7 @@ int odp_buffer_pool_init_global(void) > pool_entry_t *pool = &pool_tbl->pool[i]; > LOCK_INIT(&pool->s.lock); > pool->s.pool_hdl = pool_index_to_handle(i); > - > + pool->s.pool_id = i; > pool_entry_ptr[i] = pool; > } > > @@ -143,269 +92,258 @@ int odp_buffer_pool_init_global(void) > return 0; > } > > +/** > + * Buffer pool creation > + */ > > -static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index) > -{ > - odp_buffer_hdr_t *hdr; > - > - hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * pool->s.buf_size); > - return hdr; > -} > - > - > -static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t index) > -{ > - uint32_t i = chunk_hdr->chunk.num_bufs; > - chunk_hdr->chunk.buf_index[i] = index; > - chunk_hdr->chunk.num_bufs++; > -} > - > - > -static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr) > +odp_buffer_pool_t odp_buffer_pool_create(const char *name, > + odp_shm_t shm, > + 
odp_buffer_pool_param_t *params) > { > - uint32_t index; > + odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; > + pool_entry_t *pool; > uint32_t i; > > - i = chunk_hdr->chunk.num_bufs - 1; > - index = chunk_hdr->chunk.buf_index[i]; > - chunk_hdr->chunk.num_bufs--; > - return index; > -} > - > - > -static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool, > - odp_buffer_chunk_hdr_t *chunk_hdr) > -{ > - uint32_t index; > + /* Default initialization parameters */ > + static _odp_buffer_pool_init_t default_init_params = { > + .udata_size = 0, > + .buf_init = NULL, > + .buf_init_arg = NULL, > + }; > > - index = chunk_hdr->chunk.buf_index[ODP_BUFS_PER_CHUNK-1]; > - if (index == NULL_INDEX) > - return NULL; > - else > - return (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); > -} > + _odp_buffer_pool_init_t *init_params = &default_init_params; > > + if (params == NULL) > + return ODP_BUFFER_POOL_INVALID; > > -static odp_buffer_chunk_hdr_t *rem_chunk(pool_entry_t *pool) > -{ > - odp_buffer_chunk_hdr_t *chunk_hdr; > - > - chunk_hdr = pool->s.head; > - if (chunk_hdr == NULL) { > - /* Pool is empty */ > - return NULL; > - } > - > - pool->s.head = next_chunk(pool, chunk_hdr); > - pool->s.free_bufs -= ODP_BUFS_PER_CHUNK; > - > - /* unlink */ > - rem_buf_index(chunk_hdr); > - return chunk_hdr; > -} > - > - > -static void add_chunk(pool_entry_t *pool, odp_buffer_chunk_hdr_t *chunk_hdr) > -{ > - if (pool->s.head) /* link pool head to the chunk */ > - add_buf_index(chunk_hdr, pool->s.head->buf_hdr.index); > - else > - add_buf_index(chunk_hdr, NULL_INDEX); > - > - pool->s.head = chunk_hdr; > - pool->s.free_bufs += ODP_BUFS_PER_CHUNK; > -} > - > - > -static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr) > -{ > - if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.user_align)) { > - ODP_ABORT("check_align: user data align error %p, align %zu\n", > - hdr->addr, pool->s.user_align); > - } > + /* Restriction for v1.0: All buffers are unsegmented */ > + const int 
unsegmented = 1; > > - if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) { > - ODP_ABORT("check_align: hdr align error %p, align %i\n", > - hdr, ODP_CACHE_LINE_SIZE); > - } > -} > + /* Restriction for v1.0: No zeroization support */ > + const int zeroized = 0; > > + /* Restriction for v1.0: No udata support */ > + uint32_t udata_stride = (init_params->udata_size > sizeof(void *)) ? > + ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size) : > + 0; > > -static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index, > - int buf_type) > -{ > - odp_buffer_hdr_t *hdr = (odp_buffer_hdr_t *)ptr; > - size_t size = pool->s.hdr_size; > - uint8_t *buf_data; > + uint32_t blk_size, buf_stride; > > - if (buf_type == ODP_BUFFER_TYPE_CHUNK) > - size = sizeof(odp_buffer_chunk_hdr_t); > + switch (params->buf_type) { > + case ODP_BUFFER_TYPE_RAW: > + blk_size = params->buf_size; > > - switch (pool->s.buf_type) { > - odp_raw_buffer_hdr_t *raw_hdr; > - odp_packet_hdr_t *packet_hdr; > - odp_timeout_hdr_t *tmo_hdr; > - odp_any_buffer_hdr_t *any_hdr; > + /* Optimize small raw buffers */ > + if (blk_size > ODP_MAX_INLINE_BUF) > + blk_size = ODP_ALIGN_ROUNDUP(blk_size, TAG_ALIGN); > > - case ODP_BUFFER_TYPE_RAW: > - raw_hdr = ptr; > - buf_data = raw_hdr->buf_data; > + buf_stride = sizeof(odp_buffer_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_PACKET: > - packet_hdr = ptr; > - buf_data = packet_hdr->buf_data; > + if (unsegmented) > + blk_size = > + ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); > + else > + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, > + ODP_CONFIG_BUF_SEG_SIZE); > + buf_stride = sizeof(odp_packet_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_TIMEOUT: > - tmo_hdr = ptr; > - buf_data = tmo_hdr->buf_data; > + blk_size = 0; /* Timeouts have no block data, only metadata */ > + buf_stride = sizeof(odp_timeout_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_ANY: > - any_hdr = ptr; > - buf_data = any_hdr->buf_data; > + if (unsegmented) > + blk_size = > + 
ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); > + else > + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, > + ODP_CONFIG_BUF_SEG_SIZE); > + buf_stride = sizeof(odp_any_hdr_stride); > break; > - default: > - ODP_ABORT("Bad buffer type\n"); > - } > - > - memset(hdr, 0, size); > - > - set_handle(hdr, pool, index); > - > - hdr->addr = &buf_data[pool->s.buf_offset - pool->s.hdr_size]; > - hdr->index = index; > - hdr->size = pool->s.user_size; > - hdr->pool_hdl = pool->s.pool_hdl; > - hdr->type = buf_type; > - > - check_align(pool, hdr); > -} > - > - > -static void link_bufs(pool_entry_t *pool) > -{ > - odp_buffer_chunk_hdr_t *chunk_hdr; > - size_t hdr_size; > - size_t data_size; > - size_t data_align; > - size_t tot_size; > - size_t offset; > - size_t min_size; > - uint64_t pool_size; > - uintptr_t buf_base; > - uint32_t index; > - uintptr_t pool_base; > - int buf_type; > - > - buf_type = pool->s.buf_type; > - data_size = pool->s.user_size; > - data_align = pool->s.user_align; > - pool_size = pool->s.pool_size; > - pool_base = (uintptr_t) pool->s.pool_base_addr; > - > - if (buf_type == ODP_BUFFER_TYPE_RAW) { > - hdr_size = sizeof(odp_raw_buffer_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_PACKET) { > - hdr_size = sizeof(odp_packet_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_TIMEOUT) { > - hdr_size = sizeof(odp_timeout_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_ANY) { > - hdr_size = sizeof(odp_any_buffer_hdr_t); > - } else > - ODP_ABORT("odp_buffer_pool_create: Bad type %i\n", buf_type); > - > - > - /* Chunk must fit into buffer data area.*/ > - min_size = sizeof(odp_buffer_chunk_hdr_t) - hdr_size; > - if (data_size < min_size) > - data_size = min_size; > - > - /* Roundup data size to full cachelines */ > - data_size = ODP_CACHE_LINE_SIZE_ROUNDUP(data_size); > - > - /* Min cacheline alignment for buffer header and data */ > - data_align = ODP_CACHE_LINE_SIZE_ROUNDUP(data_align); > - offset = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size); > - > - /* 
Multiples of cacheline size */ > - if (data_size > data_align) > - tot_size = data_size + offset; > - else > - tot_size = data_align + offset; > - > - /* First buffer */ > - buf_base = ODP_ALIGN_ROUNDUP(pool_base + offset, data_align) - offset; > - > - pool->s.hdr_size = hdr_size; > - pool->s.buf_base = buf_base; > - pool->s.buf_size = tot_size; > - pool->s.buf_offset = offset; > - index = 0; > > - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); > - pool->s.head = NULL; > - pool_size -= buf_base - pool_base; > - > - while (pool_size > ODP_BUFS_PER_CHUNK * tot_size) { > - int i; > - > - fill_hdr(chunk_hdr, pool, index, ODP_BUFFER_TYPE_CHUNK); > - > - index++; > - > - for (i = 0; i < ODP_BUFS_PER_CHUNK - 1; i++) { > - odp_buffer_hdr_t *hdr = index_to_hdr(pool, index); > - > - fill_hdr(hdr, pool, index, buf_type); > - > - add_buf_index(chunk_hdr, index); > - index++; > - } > - > - add_chunk(pool, chunk_hdr); > - > - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, > - index); > - pool->s.num_bufs += ODP_BUFS_PER_CHUNK; > - pool_size -= ODP_BUFS_PER_CHUNK * tot_size; > + default: > + return ODP_BUFFER_POOL_INVALID; > } > -} > > + /* Validate requested number of buffers against addressable limits */ > + if (params->num_bufs > > + (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) > + return ODP_BUFFER_POOL_INVALID; > > -odp_buffer_pool_t odp_buffer_pool_create(const char *name, > - void *base_addr, uint64_t size, > - size_t buf_size, size_t buf_align, > - int buf_type) > -{ > - odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; > - pool_entry_t *pool; > - uint32_t i; > - > + /* Find an unused buffer pool slot and initialize it as requested */ > for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { > pool = get_pool_entry(i); > > LOCK(&pool->s.lock); > + if (pool->s.pool_shm != ODP_SHM_INVALID) { > + UNLOCK(&pool->s.lock); > + continue; > + } > > - if (pool->s.buf_base == 0) { > - /* found free pool */ > + /* found free pool */ > + 
size_t block_size, mdata_size, udata_size; > > + pool->s.flags.all = 0; > + > + if (name == NULL) { > + pool->s.name[0] = 0; > + } else { > strncpy(pool->s.name, name, > ODP_BUFFER_POOL_NAME_LEN - 1); > pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0; > - pool->s.pool_base_addr = base_addr; > - pool->s.pool_size = size; > - pool->s.user_size = buf_size; > - pool->s.user_align = buf_align; > - pool->s.buf_type = buf_type; > - > - link_bufs(pool); > - > - UNLOCK(&pool->s.lock); > + pool->s.flags.has_name = 1; > + } > > - pool_hdl = pool->s.pool_hdl; > - break; > + pool->s.params = *params; > + pool->s.init_params = *init_params; > + > + mdata_size = params->num_bufs * buf_stride; > + udata_size = params->num_bufs * udata_stride; > + > + /* Optimize for short buffers: Data stored in buffer hdr */ > + if (blk_size <= ODP_MAX_INLINE_BUF) > + block_size = 0; > + else > + block_size = params->num_bufs * blk_size; > + > + pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(mdata_size + > + udata_size + > + block_size); > + > + if (shm == ODP_SHM_NULL) { > + shm = odp_shm_reserve(pool->s.name, > + pool->s.pool_size, > + ODP_PAGE_SIZE, 0); > + if (shm == ODP_SHM_INVALID) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_INVALID; > + } > + pool->s.pool_base_addr = odp_shm_addr(shm); > + } else { > + odp_shm_info_t info; > + if (odp_shm_info(shm, &info) != 0 || > + info.size < pool->s.pool_size) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_POOL_INVALID; > + } > + pool->s.pool_base_addr = odp_shm_addr(shm); > + void *page_addr = > + ODP_ALIGN_ROUNDUP_PTR(pool->s.pool_base_addr, > + ODP_PAGE_SIZE); > + if (pool->s.pool_base_addr != page_addr) { > + if (info.size < pool->s.pool_size + > + ((size_t)page_addr - > + (size_t)pool->s.pool_base_addr)) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_POOL_INVALID; > + } > + pool->s.pool_base_addr = page_addr; > + } > + pool->s.flags.user_supplied_shm = 1; > } > > + pool->s.pool_shm = shm; > + > + /* Now safe to unlock since pool entry has 
been allocated */ > UNLOCK(&pool->s.lock); > + > + pool->s.flags.unsegmented = unsegmented; > + pool->s.flags.zeroized = zeroized; > + pool->s.seg_size = unsegmented ? > + blk_size : ODP_CONFIG_BUF_SEG_SIZE; > + > + uint8_t *udata_base_addr = pool->s.pool_base_addr + mdata_size; > + uint8_t *block_base_addr = udata_base_addr + udata_size; > + > + pool->s.buf_stride = buf_stride; > + _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, > + _ODP_MEMMODEL_RLX); > + _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, > + _ODP_MEMMODEL_RLX); > + > + /* Initialization will increment these to their target vals */ > + odp_atomic_store_u32(&pool->s.bufcount, 0); > + odp_atomic_store_u32(&pool->s.blkcount, 0); > + > + uint8_t *buf = udata_base_addr - buf_stride; > + uint8_t *udat = udata_stride == 0 ? NULL : > + block_base_addr - udata_stride; > + > + /* Init buffer common header and add to pool buffer freelist */ > + do { > + odp_buffer_hdr_t *tmp = > + (odp_buffer_hdr_t *)(void *)buf; > + > + /* Initialize buffer metadata */ > + tmp->allocator = ODP_FREEBUF; > + tmp->flags.all = 0; > + tmp->flags.zeroized = zeroized; > + tmp->size = 0; > + odp_atomic_store_u32(&tmp->ref_count, 0); > + tmp->type = params->buf_type; > + tmp->pool_hdl = pool->s.pool_hdl; > + tmp->udata_addr = (void *)udat; > + tmp->udata_size = init_params->udata_size; > + tmp->segcount = 0; > + tmp->segsize = pool->s.seg_size; > + tmp->handle.handle = odp_buffer_encode_handle(tmp); > + > + /* Set 1st seg addr for zero-len buffers */ > + tmp->addr[0] = NULL; > + > + /* Special case for short buffer data */ > + if (blk_size <= ODP_MAX_INLINE_BUF) { > + tmp->flags.hdrdata = 1; > + if (blk_size > 0) { > + tmp->segcount = 1; > + tmp->addr[0] = &tmp->addr[1]; > + tmp->size = blk_size; > + } > + } > + > + /* Push buffer onto pool's freelist */ > + ret_buf(&pool->s, tmp); > + buf -= buf_stride; > + udat -= udata_stride; > + } while (buf >= pool->s.pool_base_addr); > + > + /* Form block freelist for pool */ > + uint8_t 
*blk = pool->s.pool_base_addr + pool->s.pool_size - > + pool->s.seg_size; > + > + if (blk_size > ODP_MAX_INLINE_BUF) > + do { > + ret_blk(&pool->s, blk); > + blk -= pool->s.seg_size; > + } while (blk >= block_base_addr); > + > + /* Initialize pool statistics counters */ > + odp_atomic_store_u64(&pool->s.bufallocs, 0); > + odp_atomic_store_u64(&pool->s.buffrees, 0); > + odp_atomic_store_u64(&pool->s.blkallocs, 0); > + odp_atomic_store_u64(&pool->s.blkfrees, 0); > + odp_atomic_store_u64(&pool->s.bufempty, 0); > + odp_atomic_store_u64(&pool->s.blkempty, 0); > + odp_atomic_store_u64(&pool->s.high_wm_count, 0); > + odp_atomic_store_u64(&pool->s.low_wm_count, 0); > + > + /* Reset other pool globals to initial state */ > + pool->s.low_wm_assert = 0; > + pool->s.quiesced = 0; > + pool->s.low_wm_assert = 0; > + pool->s.headroom = 0; > + pool->s.tailroom = 0; > + > + /* Watermarks are hard-coded for now to control caching */ > + pool->s.high_wm = params->num_bufs / 2; > + pool->s.low_wm = params->num_bufs / 4; > + > + pool_hdl = pool->s.pool_hdl; > + break; > } > > return pool_hdl; > @@ -432,145 +370,200 @@ odp_buffer_pool_t odp_buffer_pool_lookup(const char *name) > return ODP_BUFFER_POOL_INVALID; > } > > - > -odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl) > +int odp_buffer_pool_info(odp_buffer_pool_t pool_hdl, > + odp_shm_t *shm, > + odp_buffer_pool_info_t *info) > { > - pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk; > - odp_buffer_bits_t handle; > uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > > - pool = get_pool_entry(pool_id); > - chunk = local_chunk[pool_id]; > + if (pool == NULL || info == NULL) > + return -1; > > - if (chunk == NULL) { > - LOCK(&pool->s.lock); > - chunk = rem_chunk(pool); > - UNLOCK(&pool->s.lock); > + *shm = pool->s.flags.user_supplied_shm ? 
> + pool->s.pool_shm : ODP_SHM_NULL; > + info->name = pool->s.name; > + info->params.buf_size = pool->s.params.buf_size; > + info->params.buf_align = pool->s.params.buf_align; > + info->params.num_bufs = pool->s.params.num_bufs; > + info->params.buf_type = pool->s.params.buf_type; > > - if (chunk == NULL) > - return ODP_BUFFER_INVALID; > + return 0; > +} > > - local_chunk[pool_id] = chunk; > - } > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool_hdl) > +{ > + uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > > - if (chunk->chunk.num_bufs == 0) { > - /* give the chunk buffer */ > - local_chunk[pool_id] = NULL; > - chunk->buf_hdr.type = pool->s.buf_type; > + if (pool == NULL) > + return -1; > > - handle = chunk->buf_hdr.handle; > - } else { > - odp_buffer_hdr_t *hdr; > - uint32_t index; > - index = rem_buf_index(chunk); > - hdr = index_to_hdr(pool, index); > + LOCK(&pool->s.lock); > > - handle = hdr->handle; > + if (pool->s.pool_shm == ODP_SHM_INVALID || > + odp_atomic_load_u32(&pool->s.bufcount) < pool->s.params.num_bufs || > + pool->s.flags.predefined) { > + UNLOCK(&pool->s.lock); > + return -1; > } > > - return handle.u32; > -} > + if (!pool->s.flags.user_supplied_shm) > + odp_shm_free(pool->s.pool_shm); > > + pool->s.pool_shm = ODP_SHM_INVALID; > + UNLOCK(&pool->s.lock); > > -void odp_buffer_free(odp_buffer_t buf) > + return 0; > +} > + > +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool_hdl, size_t size) > { > - odp_buffer_hdr_t *hdr; > - uint32_t pool_id; > - pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk_hdr; > + uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > + size_t totsize = pool->s.headroom + size + pool->s.tailroom; > + odp_anybuf_t *buf; > > - hdr = odp_buf_to_hdr(buf); > - pool_id = pool_handle_to_index(hdr->pool_hdl); > - pool = get_pool_entry(pool_id); > - chunk_hdr = local_chunk[pool_id]; > + /* Reject oversized allocation requests 
*/ > + if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) || > + (!pool->s.flags.unsegmented && totsize > ODP_CONFIG_BUF_MAX_SIZE)) > + return ODP_BUFFER_INVALID; > > - if (chunk_hdr && chunk_hdr->chunk.num_bufs == ODP_BUFS_PER_CHUNK - 1) { > - /* Current chunk is full. Push back to the pool */ > - LOCK(&pool->s.lock); > - add_chunk(pool, chunk_hdr); > - UNLOCK(&pool->s.lock); > - chunk_hdr = NULL; > + /* Try to satisfy request from the local cache */ > + buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id], > + &pool->s, totsize); > + > + /* If cache is empty, satisfy request from the pool */ > + if (odp_unlikely(buf == NULL)) { > + buf = (odp_anybuf_t *)(void *)get_buf(&pool->s); Future consideration: Technically even when this pool is empty the buffers could be available in the local cache of individual threads. maybe we can have a mechanism in which when low_wm is reached in the pool it can request buffers from the cache. > + > + if (odp_unlikely(buf == NULL)) > + return ODP_BUFFER_INVALID; > + > + /* Get blocks for this buffer, if pool uses application data */ > + if (buf->buf.size < totsize) { > + size_t needed = totsize - buf->buf.size; > + do { > + uint8_t *blk = get_blk(&pool->s); > + if (blk == NULL) { > + ret_buf(&pool->s, &buf->buf); > + return ODP_BUFFER_INVALID; > + } > + buf->buf.addr[buf->buf.segcount++] = blk; > + needed -= pool->s.seg_size; > + } while ((ssize_t)needed > 0); > + buf->buf.size = buf->buf.segcount * pool->s.seg_size; > + } > } > > - if (chunk_hdr == NULL) { > - /* Use this buffer */ > - chunk_hdr = (odp_buffer_chunk_hdr_t *)hdr; > - local_chunk[pool_id] = chunk_hdr; > - chunk_hdr->chunk.num_bufs = 0; > - } else { > - /* Add to current chunk */ > - add_buf_index(chunk_hdr, hdr->index); > + /* By default, buffers inherit their pool's zeroization setting */ > + buf->buf.flags.zeroized = pool->s.flags.zeroized; > + > + if (buf->buf.type == ODP_BUFFER_TYPE_PACKET) { > + packet_init(pool, &buf->pkt, size); > + > + 
if (pool->s.init_params.buf_init != NULL) > + (*pool->s.init_params.buf_init) > + (buf->buf.handle.handle, > + pool->s.init_params.buf_init_arg); > } > + > + return odp_hdr_to_buf(&buf->buf); > } > > +odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl) > +{ > + return buffer_alloc(pool_hdl, > + odp_pool_to_entry(pool_hdl)->s.params.buf_size); > +} > > -odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) > +void odp_buffer_free(odp_buffer_t buf) > { > - odp_buffer_hdr_t *hdr; > + odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf); > + pool_entry_t *pool = odp_buf_to_pool(buf_hdr); > > - hdr = odp_buf_to_hdr(buf); > - return hdr->pool_hdl; > + if (odp_unlikely(pool->s.low_wm_assert)) > + ret_buf(&pool->s, buf_hdr); > + else > + ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr); > } We need to have high_wm_assert for local_cache buffer list so that a single thread does not hold all the free buffers in the system. This could happen at the receiving thread of a IPC communication which receives the buffer and frees and could potentially hold all the buffers > > +void _odp_flush_caches(void) > +{ > + int i; > + > + for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { > + pool_entry_t *pool = get_pool_entry(i); > + flush_cache(&local_cache[i], &pool->s); > + } > +} > > void odp_buffer_pool_print(odp_buffer_pool_t pool_hdl) > { > pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk_hdr; > - uint32_t i; > uint32_t pool_id; > > pool_id = pool_handle_to_index(pool_hdl); > pool = get_pool_entry(pool_id); > > - ODP_PRINT("Pool info\n"); > - ODP_PRINT("---------\n"); > - ODP_PRINT(" pool %i\n", pool->s.pool_hdl); > - ODP_PRINT(" name %s\n", pool->s.name); > - ODP_PRINT(" pool base %p\n", pool->s.pool_base_addr); > - ODP_PRINT(" buf base 0x%"PRIxPTR"\n", pool->s.buf_base); > - ODP_PRINT(" pool size 0x%"PRIx64"\n", pool->s.pool_size); > - ODP_PRINT(" buf size %zu\n", pool->s.user_size); > - ODP_PRINT(" buf align %zu\n", pool->s.user_align); > - ODP_PRINT(" hdr size %zu\n", 
pool->s.hdr_size); > - ODP_PRINT(" alloc size %zu\n", pool->s.buf_size); > - ODP_PRINT(" offset to hdr %zu\n", pool->s.buf_offset); > - ODP_PRINT(" num bufs %"PRIu64"\n", pool->s.num_bufs); > - ODP_PRINT(" free bufs %"PRIu64"\n", pool->s.free_bufs); > - > - /* first chunk */ > - chunk_hdr = pool->s.head; > - > - if (chunk_hdr == NULL) { > - ODP_ERR(" POOL EMPTY\n"); > - return; > - } > - > - ODP_PRINT("\n First chunk\n"); > - > - for (i = 0; i < chunk_hdr->chunk.num_bufs - 1; i++) { > - uint32_t index; > - odp_buffer_hdr_t *hdr; > - > - index = chunk_hdr->chunk.buf_index[i]; > - hdr = index_to_hdr(pool, index); > - > - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, hdr->addr, > - index); > - } > - > - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, chunk_hdr->buf_hdr.addr, > - chunk_hdr->buf_hdr.index); > - > - /* next chunk */ > - chunk_hdr = next_chunk(pool, chunk_hdr); > + uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount); > + uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount); > + uint64_t bufallocs = odp_atomic_load_u64(&pool->s.bufallocs); > + uint64_t buffrees = odp_atomic_load_u64(&pool->s.buffrees); > + uint64_t blkallocs = odp_atomic_load_u64(&pool->s.blkallocs); > + uint64_t blkfrees = odp_atomic_load_u64(&pool->s.blkfrees); > + uint64_t bufempty = odp_atomic_load_u64(&pool->s.bufempty); > + uint64_t blkempty = odp_atomic_load_u64(&pool->s.blkempty); > + uint64_t hiwmct = odp_atomic_load_u64(&pool->s.high_wm_count); > + uint64_t lowmct = odp_atomic_load_u64(&pool->s.low_wm_count); > + > + ODP_DBG("Pool info\n"); > + ODP_DBG("---------\n"); > + ODP_DBG(" pool %i\n", pool->s.pool_hdl); > + ODP_DBG(" name %s\n", > + pool->s.flags.has_name ? pool->s.name : "Unnamed Pool"); > + ODP_DBG(" pool type %s\n", > + pool->s.params.buf_type == ODP_BUFFER_TYPE_RAW ? "raw" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_PACKET ? "packet" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_TIMEOUT ? 
"timeout" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_ANY ? "any" : > + "unknown")))); > + ODP_DBG(" pool storage %sODP managed\n", > + pool->s.flags.user_supplied_shm ? > + "application provided, " : ""); > + ODP_DBG(" pool status %s\n", > + pool->s.quiesced ? "quiesced" : "active"); > + ODP_DBG(" pool opts %s, %s, %s\n", > + pool->s.flags.unsegmented ? "unsegmented" : "segmented", > + pool->s.flags.zeroized ? "zeroized" : "non-zeroized", > + pool->s.flags.predefined ? "predefined" : "created"); > + ODP_DBG(" pool base %p\n", pool->s.pool_base_addr); > + ODP_DBG(" pool size %zu (%zu pages)\n", > + pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE); > + ODP_DBG(" udata size %zu\n", pool->s.init_params.udata_size); > + ODP_DBG(" buf size %zu\n", pool->s.params.buf_size); > + ODP_DBG(" num bufs %u\n", pool->s.params.num_bufs); > + ODP_DBG(" bufs available %u %s\n", > + bufcount, > + pool->s.low_wm_assert ? " **low wm asserted**" : ""); > + ODP_DBG(" bufs in use %u\n", pool->s.params.num_bufs - bufcount); > + ODP_DBG(" buf allocs %lu\n", bufallocs); > + ODP_DBG(" buf frees %lu\n", buffrees); > + ODP_DBG(" buf empty %lu\n", bufempty); > + ODP_DBG(" blk size %zu\n", > + pool->s.seg_size > ODP_MAX_INLINE_BUF ? 
pool->s.seg_size : 0); > + ODP_DBG(" blks available %u\n", blkcount); > + ODP_DBG(" blk allocs %lu\n", blkallocs); > + ODP_DBG(" blk frees %lu\n", blkfrees); > + ODP_DBG(" blk empty %lu\n", blkempty); > + ODP_DBG(" high wm value %lu\n", pool->s.high_wm); > + ODP_DBG(" high wm count %lu\n", hiwmct); > + ODP_DBG(" low wm value %lu\n", pool->s.low_wm); > + ODP_DBG(" low wm count %lu\n", lowmct); > +} > > - if (chunk_hdr) { > - ODP_PRINT(" Next chunk\n"); > - ODP_PRINT(" addr %p, id %"PRIu32"\n", chunk_hdr->buf_hdr.addr, > - chunk_hdr->buf_hdr.index); > - } > > - ODP_PRINT("\n"); > +odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) > +{ > + return odp_buf_to_hdr(buf)->pool_hdl; > } > diff --git a/platform/linux-generic/odp_linux.c b/platform/linux-generic/odp_linux.c > index ecd77b3..95761a9 100644 > --- a/platform/linux-generic/odp_linux.c > +++ b/platform/linux-generic/odp_linux.c > @@ -43,7 +43,9 @@ static void *odp_run_start_routine(void *arg) > return NULL; > } > > - return start_args->start_routine(start_args->arg); > + void *ret = start_args->start_routine(start_args->arg); > + _odp_flush_caches(); > + return ret; > } > > > diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c > index a1bf18e..726e086 100644 > --- a/platform/linux-generic/odp_packet.c > +++ b/platform/linux-generic/odp_packet.c > @@ -24,17 +24,9 @@ static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, > void odp_packet_init(odp_packet_t pkt) > { > odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); > - const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr); > - uint8_t *start; > - size_t len; > - > - start = (uint8_t *)pkt_hdr + start_offset; > - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; > - memset(start, 0, len); > + pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr); > > - pkt_hdr->l2_offset = ODP_PACKET_OFFSET_INVALID; > - pkt_hdr->l3_offset = ODP_PACKET_OFFSET_INVALID; > - pkt_hdr->l4_offset = 
ODP_PACKET_OFFSET_INVALID; > + packet_init(pool, pkt_hdr, 0); > } > > odp_packet_t odp_packet_from_buffer(odp_buffer_t buf) > @@ -64,7 +56,7 @@ uint8_t *odp_packet_addr(odp_packet_t pkt) > > uint8_t *odp_packet_data(odp_packet_t pkt) > { > - return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->frame_offset; > + return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->headroom; > } > > > @@ -131,20 +123,13 @@ void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset) > > int odp_packet_is_segmented(odp_packet_t pkt) > { > - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); > - > - if (buf_hdr->scatter.num_bufs == 0) > - return 0; > - else > - return 1; > + return odp_packet_hdr(pkt)->buf_hdr.segcount > 1; > } > > > int odp_packet_seg_count(odp_packet_t pkt) > { > - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); > - > - return (int)buf_hdr->scatter.num_bufs + 1; > + return odp_packet_hdr(pkt)->buf_hdr.segcount; > } > > > @@ -170,7 +155,7 @@ void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset) > uint8_t ip_proto = 0; > > pkt_hdr->input_flags.eth = 1; > - pkt_hdr->frame_offset = frame_offset; > + pkt_hdr->l2_offset = frame_offset; > pkt_hdr->frame_len = len; > > if (len > ODPH_ETH_LEN_MAX) > @@ -330,8 +315,6 @@ void odp_packet_print(odp_packet_t pkt) > len += snprintf(&str[len], n-len, > " output_flags 0x%x\n", hdr->output_flags.all); > len += snprintf(&str[len], n-len, > - " frame_offset %u\n", hdr->frame_offset); > - len += snprintf(&str[len], n-len, > " l2_offset %u\n", hdr->l2_offset); > len += snprintf(&str[len], n-len, > " l3_offset %u\n", hdr->l3_offset); > @@ -358,14 +341,13 @@ int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src) > if (pkt_dst == ODP_PACKET_INVALID || pkt_src == ODP_PACKET_INVALID) > return -1; > > - if (pkt_hdr_dst->buf_hdr.size < > - pkt_hdr_src->frame_len + pkt_hdr_src->frame_offset) > + if (pkt_hdr_dst->buf_hdr.size < pkt_hdr_src->frame_len) > return -1; > > /* Copy packet header */ > 
start_dst = (uint8_t *)pkt_hdr_dst + start_offset; > start_src = (uint8_t *)pkt_hdr_src + start_offset; > - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; > + len = sizeof(odp_packet_hdr_t) - start_offset; > memcpy(start_dst, start_src, len); > > /* Copy frame payload */ > @@ -374,13 +356,6 @@ int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src) > len = pkt_hdr_src->frame_len; > memcpy(start_dst, start_src, len); > > - /* Copy useful things from the buffer header */ > - pkt_hdr_dst->buf_hdr.cur_offset = pkt_hdr_src->buf_hdr.cur_offset; > - > - /* Create a copy of the scatter list */ > - odp_buffer_copy_scatter(odp_packet_to_buffer(pkt_dst), > - odp_packet_to_buffer(pkt_src)); > - > return 0; > } > > diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c > index c278094..a7c5e42 100644 > --- a/platform/linux-generic/odp_queue.c > +++ b/platform/linux-generic/odp_queue.c > @@ -11,6 +11,7 @@ > #include <odp_buffer.h> > #include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_internal.h> > #include <odp_shared_memory.h> > #include <odp_schedule_internal.h> > diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c > index 7c09c23..2f0cfe4 100644 > --- a/platform/linux-generic/odp_schedule.c > +++ b/platform/linux-generic/odp_schedule.c > @@ -83,8 +83,8 @@ int odp_schedule_init_global(void) > { > odp_shm_t shm; > odp_buffer_pool_t pool; > - void *pool_base; > int i, j; > + odp_buffer_pool_param_t params; > > ODP_DBG("Schedule init ... 
"); > > @@ -99,20 +99,12 @@ int odp_schedule_init_global(void) > return -1; > } > > - shm = odp_shm_reserve("odp_sched_pool", > - SCHED_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = sizeof(queue_desc_t); > + params.buf_align = ODP_CACHE_LINE_SIZE; > + params.num_bufs = SCHED_POOL_SIZE/sizeof(queue_desc_t); > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - pool_base = odp_shm_addr(shm); > - > - if (pool_base == NULL) { > - ODP_ERR("Schedule init: Shm reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("odp_sched_pool", pool_base, > - SCHED_POOL_SIZE, sizeof(queue_desc_t), > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("odp_sched_pool", ODP_SHM_NULL, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > ODP_ERR("Schedule init: Pool create failed.\n"); > diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c > index a4fef58..7bd6874 100644 > --- a/platform/linux-generic/odp_timer.c > +++ b/platform/linux-generic/odp_timer.c > @@ -5,9 +5,10 @@ > */ > > #include <odp_timer.h> > -#include <odp_timer_internal.h> > #include <odp_time.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > +#include <odp_timer_internal.h> > #include <odp_internal.h> > #include <odp_atomic.h> > #include <odp_spinlock.h> > diff --git a/test/api_test/odp_timer_ping.c b/test/api_test/odp_timer_ping.c > index 48f1885..aa2a490 100644 > --- a/test/api_test/odp_timer_ping.c > +++ b/test/api_test/odp_timer_ping.c > @@ -321,9 +321,8 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED) > ping_arg_t pingarg; > odp_queue_t queue; > odp_buffer_pool_t pool; > - void *pool_base; > int i; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > if (odp_test_global_init() != 0) > return -1; > @@ -336,14 +335,14 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED) > /* > * Create message pool > */ > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, 
ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + > + params.buf_size = BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE/BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > + > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > + > if (pool == ODP_BUFFER_POOL_INVALID) { > LOG_ERR("Pool create failed.\n"); > return -1; > diff --git a/test/validation/odp_crypto.c b/test/validation/odp_crypto.c > index 03ca438..72cf0f0 100644 > --- a/test/validation/odp_crypto.c > +++ b/test/validation/odp_crypto.c > @@ -25,26 +25,17 @@ CU_SuiteInfo odp_testsuites[] = { > > int tests_global_init(void) > { > - odp_shm_t shm; > - void *pool_base; > + odp_buffer_pool_param_t params; > odp_buffer_pool_t pool; > odp_queue_t out_queue; > > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, > - ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - pool_base = odp_shm_addr(shm); > - if (!pool_base) { > - fprintf(stderr, "Packet pool allocation failed.\n"); > - return -1; > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (ODP_BUFFER_POOL_INVALID == pool) { > fprintf(stderr, "Packet pool creation failed.\n"); > return -1; > @@ -55,20 +46,14 @@ int tests_global_init(void) > fprintf(stderr, "Crypto outq creation failed.\n"); > return -1; > } > - shm = odp_shm_reserve("shm_compl_pool", > - SHM_COMPL_POOL_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_SHM_SW_ONLY); > - pool_base = odp_shm_addr(shm); > - if (!pool_base) { > - fprintf(stderr, "Completion 
pool allocation failed.\n"); > - return -1; > - } > - pool = odp_buffer_pool_create("compl_pool", pool_base, > - SHM_COMPL_POOL_SIZE, > - SHM_COMPL_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + > + params.buf_size = SHM_COMPL_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_COMPL_POOL_SIZE/SHM_COMPL_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > + > + pool = odp_buffer_pool_create("compl_pool", ODP_SHM_NULL, ¶ms); > + > if (ODP_BUFFER_POOL_INVALID == pool) { > fprintf(stderr, "Completion pool creation failed.\n"); > return -1; > diff --git a/test/validation/odp_queue.c b/test/validation/odp_queue.c > index 2c8fe80..6e05ad0 100644 > --- a/test/validation/odp_queue.c > +++ b/test/validation/odp_queue.c > @@ -16,21 +16,14 @@ static int queue_contest = 0xff; > static int init_queue_suite(void) > { > odp_buffer_pool_t pool; > - void *pool_base; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = 0; > + params.buf_align = ODP_CACHE_LINE_SIZE; > + params.num_bufs = 1024 * 10; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - pool_base = odp_shm_addr(shm); > - > - if (NULL == pool_base) { > - printf("Shared memory reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, 0, > - ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > if (ODP_BUFFER_POOL_INVALID == pool) { > printf("Pool create failed.\n");
See responses inline. Bill On Mon, Dec 8, 2014 at 7:58 AM, Bala <bala.manoharan@linaro.org> wrote: > Hi, > > Comments inline. > > Regards, > Bala > On Monday 08 December 2014 04:54 AM, Bill Fischofer wrote: > >> Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> >> --- >> >> Petri: Please review the following files here: >> platform/linux-generic/include/api/odp_buffer.h >> platform/linux-generic/include/api/odp_buffer_pool.h >> platform/linux-generic/include/api/odp_config.h >> >> This patch is complete and compilable/testable. It is RFC pending >> Petri approval of the public API headers and recommendations for >> final packaging. >> >> example/generator/odp_generator.c | 19 +- >> example/ipsec/odp_ipsec.c | 57 +- >> example/l2fwd/odp_l2fwd.c | 19 +- >> example/odp_example/odp_example.c | 18 +- >> example/packet/odp_pktio.c | 19 +- >> example/timer/odp_timer_test.c | 13 +- >> platform/linux-generic/include/api/odp_buffer.h | 3 +- >> .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- >> platform/linux-generic/include/api/odp_config.h | 19 + >> .../linux-generic/include/api/odp_platform_types.h | 12 + >> .../linux-generic/include/api/odp_shared_memory.h | 10 +- >> .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ >> .../linux-generic/include/odp_buffer_internal.h | 150 ++-- >> .../include/odp_buffer_pool_internal.h | 351 ++++++++-- >> platform/linux-generic/include/odp_internal.h | 2 + >> .../linux-generic/include/odp_packet_internal.h | 50 +- >> .../linux-generic/include/odp_timer_internal.h | 11 +- >> platform/linux-generic/odp_buffer.c | 33 +- >> platform/linux-generic/odp_buffer_pool.c | 777 >> ++++++++++----------- >> platform/linux-generic/odp_linux.c | 4 +- >> platform/linux-generic/odp_packet.c | 41 +- >> platform/linux-generic/odp_queue.c | 1 + >> platform/linux-generic/odp_schedule.c | 20 +- >> platform/linux-generic/odp_timer.c | 3 +- >> test/api_test/odp_timer_ping.c | 19 +- >> test/validation/odp_crypto.c | 43 +- >> 
test/validation/odp_queue.c | 19 +- >> 27 files changed, 1208 insertions(+), 758 deletions(-) >> create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h >> >> diff --git a/example/generator/odp_generator.c b/example/generator/odp_ >> generator.c >> index 73b0369..476cbef 100644 >> --- a/example/generator/odp_generator.c >> +++ b/example/generator/odp_generator.c >> @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) >> odph_linux_pthread_t thread_tbl[MAX_WORKERS]; >> odp_buffer_pool_t pool; >> int num_workers; >> - void *pool_base; >> int i; >> int first_core; >> int core_count; >> odp_shm_t shm; >> + odp_buffer_pool_param_t params; >> /* Init ODP before calling anything else */ >> if (odp_init_global(NULL, NULL)) { >> @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) >> printf("First core: %i\n\n", first_core); >> /* Create packet pool */ >> - shm = odp_shm_reserve("shm_packet_pool", >> - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = SHM_PKT_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; >> + params.buf_type = ODP_BUFFER_TYPE_PACKET; >> - if (pool_base == NULL) { >> - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); >> - exit(EXIT_FAILURE); >> - } >> + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, >> ¶ms); >> - pool = odp_buffer_pool_create("packet_pool", pool_base, >> - SHM_PKT_POOL_SIZE, >> - SHM_PKT_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_PACKET); >> if (pool == ODP_BUFFER_POOL_INVALID) { >> EXAMPLE_ERR("Error: packet pool create failed.\n"); >> exit(EXIT_FAILURE); >> diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c >> index 76d27c5..f96338c 100644 >> --- a/example/ipsec/odp_ipsec.c >> +++ b/example/ipsec/odp_ipsec.c >> @@ -367,8 +367,7 @@ static >> void ipsec_init_pre(void) >> { >> odp_queue_param_t qparam; >> - void *pool_base; >> - odp_shm_t shm; >> + 
odp_buffer_pool_param_t params; >> /* >> * Create queues >> @@ -401,16 +400,12 @@ void ipsec_init_pre(void) >> } >> /* Create output buffer pool */ >> - shm = odp_shm_reserve("shm_out_pool", >> - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = SHM_OUT_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; >> + params.buf_type = ODP_BUFFER_TYPE_PACKET; >> - out_pool = odp_buffer_pool_create("out_pool", pool_base, >> - SHM_OUT_POOL_SIZE, >> - SHM_OUT_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_PACKET); >> + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, >> ¶ms); >> if (ODP_BUFFER_POOL_INVALID == out_pool) { >> EXAMPLE_ERR("Error: message pool create failed.\n"); >> @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) >> { >> odph_linux_pthread_t thread_tbl[MAX_WORKERS]; >> int num_workers; >> - void *pool_base; >> int i; >> int first_core; >> int core_count; >> int stream_count; >> odp_shm_t shm; >> + odp_buffer_pool_param_t params; >> /* Init ODP before calling anything else */ >> if (odp_init_global(NULL, NULL)) { >> @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) >> printf("First core: %i\n\n", first_core); >> /* Create packet buffer pool */ >> - shm = odp_shm_reserve("shm_packet_pool", >> - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> + params.buf_size = SHM_PKT_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; >> + params.buf_type = ODP_BUFFER_TYPE_PACKET; >> - pool_base = odp_shm_addr(shm); >> - >> - if (NULL == pool_base) { >> - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); >> - exit(EXIT_FAILURE); >> - } >> + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, >> + ¶ms); >> - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, >> - SHM_PKT_POOL_SIZE, >> - SHM_PKT_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_PACKET); >> if (ODP_BUFFER_POOL_INVALID 
== pkt_pool) { >> EXAMPLE_ERR("Error: packet pool create failed.\n"); >> exit(EXIT_FAILURE); >> } >> /* Create context buffer pool */ >> - shm = odp_shm_reserve("shm_ctx_pool", >> - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = SHM_CTX_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; >> + params.buf_type = ODP_BUFFER_TYPE_RAW; >> - if (NULL == pool_base) { >> - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); >> - exit(EXIT_FAILURE); >> - } >> + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, >> + ¶ms); >> - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, >> - SHM_CTX_POOL_SIZE, >> - SHM_CTX_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_RAW); >> if (ODP_BUFFER_POOL_INVALID == ctx_pool) { >> EXAMPLE_ERR("Error: context pool create failed.\n"); >> exit(EXIT_FAILURE); >> diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c >> index ebac8c5..3c1fd6a 100644 >> --- a/example/l2fwd/odp_l2fwd.c >> +++ b/example/l2fwd/odp_l2fwd.c >> @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) >> { >> odph_linux_pthread_t thread_tbl[MAX_WORKERS]; >> odp_buffer_pool_t pool; >> - void *pool_base; >> int i; >> int first_core; >> int core_count; >> odp_pktio_t pktio; >> odp_shm_t shm; >> + odp_buffer_pool_param_t params; >> /* Init ODP before calling anything else */ >> if (odp_init_global(NULL, NULL)) { >> @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) >> printf("First core: %i\n\n", first_core); >> /* Create packet pool */ >> - shm = odp_shm_reserve("shm_packet_pool", >> - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = SHM_PKT_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; >> + params.buf_type = ODP_BUFFER_TYPE_PACKET; >> - if (pool_base == NULL) { >> - EXAMPLE_ERR("Error: packet pool mem alloc 
failed.\n"); >> - exit(EXIT_FAILURE); >> - } >> + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, >> ¶ms); >> - pool = odp_buffer_pool_create("packet_pool", pool_base, >> - SHM_PKT_POOL_SIZE, >> - SHM_PKT_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_PACKET); >> if (pool == ODP_BUFFER_POOL_INVALID) { >> EXAMPLE_ERR("Error: packet pool create failed.\n"); >> exit(EXIT_FAILURE); >> diff --git a/example/odp_example/odp_example.c b/example/odp_example/odp_ >> example.c >> index 96a2912..8373f12 100644 >> --- a/example/odp_example/odp_example.c >> +++ b/example/odp_example/odp_example.c >> @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) >> test_args_t args; >> int num_workers; >> odp_buffer_pool_t pool; >> - void *pool_base; >> odp_queue_t queue; >> int i, j; >> int prios; >> int first_core; >> odp_shm_t shm; >> test_globals_t *globals; >> + odp_buffer_pool_param_t params; >> printf("\nODP example starts\n\n"); >> @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) >> /* >> * Create message pool >> */ >> - shm = odp_shm_reserve("msg_pool", >> - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = sizeof(test_message_t); >> + params.buf_align = 0; >> + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); >> + params.buf_type = ODP_BUFFER_TYPE_RAW; >> - if (pool_base == NULL) { >> - EXAMPLE_ERR("Shared memory reserve failed.\n"); >> - return -1; >> - } >> - >> - pool = odp_buffer_pool_create("msg_pool", pool_base, >> MSG_POOL_SIZE, >> - sizeof(test_message_t), >> - ODP_CACHE_LINE_SIZE, >> ODP_BUFFER_TYPE_RAW); >> + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); >> if (pool == ODP_BUFFER_POOL_INVALID) { >> EXAMPLE_ERR("Pool create failed.\n"); >> diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c >> index 7d51682..f2e7b2d 100644 >> --- a/example/packet/odp_pktio.c >> +++ b/example/packet/odp_pktio.c >> @@ -331,11 +331,11 @@ int main(int argc, char 
*argv[]) >> odph_linux_pthread_t thread_tbl[MAX_WORKERS]; >> odp_buffer_pool_t pool; >> int num_workers; >> - void *pool_base; >> int i; >> int first_core; >> int core_count; >> odp_shm_t shm; >> + odp_buffer_pool_param_t params; >> /* Init ODP before calling anything else */ >> if (odp_init_global(NULL, NULL)) { >> @@ -389,20 +389,13 @@ int main(int argc, char *argv[]) >> printf("First core: %i\n\n", first_core); >> /* Create packet pool */ >> - shm = odp_shm_reserve("shm_packet_pool", >> - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - pool_base = odp_shm_addr(shm); >> + params.buf_size = SHM_PKT_POOL_BUF_SIZE; >> + params.buf_align = 0; >> + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; >> + params.buf_type = ODP_BUFFER_TYPE_PACKET; >> - if (pool_base == NULL) { >> - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); >> - exit(EXIT_FAILURE); >> - } >> + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, >> &params); >> - pool = odp_buffer_pool_create("packet_pool", pool_base, >> - SHM_PKT_POOL_SIZE, >> - SHM_PKT_POOL_BUF_SIZE, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_PACKET); >> if (pool == ODP_BUFFER_POOL_INVALID) { >> EXAMPLE_ERR("Error: packet pool create failed.\n"); >> exit(EXIT_FAILURE); >> diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_ >> test.c >> index 9968bfe..0d6e31a 100644 >> --- a/example/timer/odp_timer_test.c >> +++ b/example/timer/odp_timer_test.c >> @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) >> test_args_t args; >> int num_workers; >> odp_buffer_pool_t pool; >> - void *pool_base; >> odp_queue_t queue; >> int first_core; >> uint64_t cycles, ns; >> odp_queue_param_t param; >> odp_shm_t shm; >> + odp_buffer_pool_param_t params; >> printf("\nODP timer example starts\n"); >> @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) >> */ >> shm = odp_shm_reserve("msg_pool", >> MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); >> - pool_base = odp_shm_addr(shm); >> - pool = 
odp_buffer_pool_create("msg_pool", pool_base, >> MSG_POOL_SIZE, >> - 0, >> - ODP_CACHE_LINE_SIZE, >> - ODP_BUFFER_TYPE_TIMEOUT); >> + params.buf_size = 0; >> + params.buf_align = 0; >> + params.num_bufs = MSG_POOL_SIZE; >> + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; >> + >> + pool = odp_buffer_pool_create("msg_pool", shm, &params); >> if (pool == ODP_BUFFER_POOL_INVALID) { >> EXAMPLE_ERR("Pool create failed.\n"); >> diff --git a/platform/linux-generic/include/api/odp_buffer.h >> b/platform/linux-generic/include/api/odp_buffer.h >> index da23120..e981324 100644 >> --- a/platform/linux-generic/include/api/odp_buffer.h >> +++ b/platform/linux-generic/include/api/odp_buffer.h >> @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); >> * >> * @param buf Buffer handle >> * >> - * @return 1 if valid, otherwise 0 >> + * @retval 1 Buffer handle represents a valid buffer. >> + * @retval 0 Buffer handle does not represent a valid buffer. >> */ >> int odp_buffer_is_valid(odp_buffer_t buf); >> diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h >> b/platform/linux-generic/include/api/odp_buffer_pool.h >> index 30b83e0..3d85066 100644 >> --- a/platform/linux-generic/include/api/odp_buffer_pool.h >> +++ b/platform/linux-generic/include/api/odp_buffer_pool.h >> @@ -32,42 +32,114 @@ extern "C" { >> /** Maximum queue name lenght in chars */ >> #define ODP_BUFFER_POOL_NAME_LEN 32 >> -/** Invalid buffer pool */ >> -#define ODP_BUFFER_POOL_INVALID 0 >> +/** >> + * Buffer pool parameters >> + * Used to communicate buffer pool creation options. >> + */ >> +typedef struct odp_buffer_pool_param_t { >> + size_t buf_size; /**< Buffer size in bytes. The maximum >> + number of bytes application will >> + store in each buffer. */ >> + size_t buf_align; /**< Minimum buffer alignment in bytes. >> + Valid values are powers of two. Use 0 >> + for default alignment. Default will >> + always be a multiple of 8. 
*/ >> + uint32_t num_bufs; /**< Number of buffers in the pool */ >> + int buf_type; /**< Buffer type */ >> +} odp_buffer_pool_param_t; >> /** >> * Create a buffer pool >> + * This routine is used to create a buffer pool. It take three >> + * arguments: the optional name of the pool to be created, an optional >> shared >> + * memory handle, and a parameter struct that describes the pool to be >> + * created. If a name is not specified the result is an anonymous pool >> that >> + * cannot be referenced by odp_buffer_pool_lookup(). >> + * >> + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 >> chars. >> + * May be specified as NULL for anonymous pools. >> * >> - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 >> chars) >> - * @param base_addr Pool base address >> - * @param size Pool size in bytes >> - * @param buf_size Buffer size in bytes >> - * @param buf_align Minimum buffer alignment >> - * @param buf_type Buffer type >> + * @param shm The shared memory object in which to create the pool. >> + * Use ODP_SHM_NULL to reserve default memory type >> + * for the buffer type. >> * >> - * @return Buffer pool handle >> + * @param params Buffer pool parameters. >> + * >> + * @retval Handle Buffer pool handle on success >> + * @retval ODP_BUFFER_POOL_INVALID if call failed >> */ >> + >> odp_buffer_pool_t odp_buffer_pool_create(const char *name, >> - void *base_addr, uint64_t size, >> - size_t buf_size, size_t >> buf_align, >> - int buf_type); >> + odp_shm_t shm, >> + odp_buffer_pool_param_t *params); >> +/** >> + * Destroy a buffer pool previously created by odp_buffer_pool_create() >> + * >> + * @param pool Handle of the buffer pool to be destroyed >> + * >> + * @retval 0 Success >> + * @retval -1 Failure >> + * >> + * @note This routine destroys a previously created buffer pool. This >> call >> + * does not destroy any shared memory object passed to >> + * odp_buffer_pool_create() used to store the buffer pool contents. 
The >> caller >> + * takes responsibility for that. If no shared memory object was passed >> as >> + * part of the create call, then this routine will destroy any internal >> shared >> + * memory objects associated with the buffer pool. Results are undefined >> if >> + * an attempt is made to destroy a buffer pool that contains allocated or >> + * otherwise active buffers. >> + */ >> +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); >> /** >> * Find a buffer pool by name >> * >> * @param name Name of the pool >> * >> - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. >> + * @retval Handle Buffer pool handle on successs >> + * @retval ODP_BUFFER_POOL_INVALID if not found >> + * >> + * @note This routine cannot be used to look up an anonymous pool (one >> created >> + * with no name). >> */ >> odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); >> +/** >> + * Buffer pool information struct >> + * Used to get information about a buffer pool. >> + */ >> +typedef struct odp_buffer_pool_info_t { >> + const char *name; /**< pool name */ >> + odp_buffer_pool_param_t params; /**< pool parameters */ >> +} odp_buffer_pool_info_t; >> + >> +/** >> + * Retrieve information about a buffer pool >> + * >> + * @param pool Buffer pool handle >> + * >> + * @param shm Recieves odp_shm_t supplied by caller at >> + * pool creation, or ODP_SHM_NULL if the >> + * pool is managed internally. >> + * >> + * @param[out] info Receives an odp_buffer_pool_info_t object >> + * that describes the pool. >> + * >> + * @retval 0 Success >> + * @retval -1 Failure. Info could not be retrieved. >> + */ >> + >> +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, >> + odp_buffer_pool_info_t *info); >> /** >> * Print buffer pool info >> * >> * @param pool Pool handle >> * >> + * @note This routine writes implementation-defined information about the >> + * specified buffer pool to the ODP log. The intended use is for >> debugging. 
>> */ >> void odp_buffer_pool_print(odp_buffer_pool_t pool); >> @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); >> * The validity of a buffer can be cheked at any time with >> odp_buffer_is_valid() >> * @param pool Pool handle >> * >> - * @return Buffer handle or ODP_BUFFER_INVALID >> + * @retval Handle Buffer handle of allocated buffer >> + * @retval ODP_BUFFER_INVALID Allocation failed >> */ >> odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); >> @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); >> * >> * @param buf Buffer handle >> * >> - * @return Buffer pool the buffer was allocated from >> + * @retval Handle Buffer pool handle that the buffer was allocated from >> */ >> odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); >> diff --git a/platform/linux-generic/include/api/odp_config.h >> b/platform/linux-generic/include/api/odp_config.h >> index 906897c..5ca5bb2 100644 >> --- a/platform/linux-generic/include/api/odp_config.h >> +++ b/platform/linux-generic/include/api/odp_config.h >> @@ -49,6 +49,25 @@ extern "C" { >> #define ODP_CONFIG_PKTIO_ENTRIES 64 >> /** >> + * Buffer segment size to use >> + * This is the granularity of segmented buffers. Sized for now to be >> large >> + * enough to support 1500-byte packets since the raw socket interface >> does not >> + * support scatter/gather I/O. ODP requires a minimum segment size of 128 >> + * bytes with 256 recommended. Linux-generic code will enforce a 256 byte >> + * minimum. Note that the chosen segment size must be a multiple of >> + * ODP_CACHE_LINE_SIZE. >> + */ >> +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) >> + >> +/** >> + * Maximum buffer size supported >> + * Must be an integral number of segments and should be large enough to >> + * accommodate jumbo packets. Attempts to allocate or extend buffers to >> sizes >> + * larger than this limit will fail. 
>> + */ >> +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) >> + >> +/** >> * @} >> */ >> diff --git a/platform/linux-generic/include/api/odp_platform_types.h >> b/platform/linux-generic/include/api/odp_platform_types.h >> index 4db47d3..2181eb6 100644 >> --- a/platform/linux-generic/include/api/odp_platform_types.h >> +++ b/platform/linux-generic/include/api/odp_platform_types.h >> @@ -26,6 +26,9 @@ >> /** ODP Buffer pool */ >> typedef uint32_t odp_buffer_pool_t; >> +/** Invalid buffer pool */ >> +#define ODP_BUFFER_POOL_INVALID (0xffffffff) >> + >> /** ODP buffer */ >> typedef uint32_t odp_buffer_t; >> @@ -65,6 +68,15 @@ typedef uint32_t odp_pktio_t; >> #define ODP_PKTIO_ANY ((odp_pktio_t)~0) >> /** >> + * ODP shared memory block >> + */ >> +typedef uint32_t odp_shm_t; >> + >> +/** Invalid shared memory block */ >> +#define ODP_SHM_INVALID 0 >> +#define ODP_SHM_NULL ODP_SHM_INVALID /**< Synonym for buffer pool use */ >> + >> +/** >> * @} >> */ >> diff --git a/platform/linux-generic/include/api/odp_shared_memory.h >> b/platform/linux-generic/include/api/odp_shared_memory.h >> index 26e208b..f70db5a 100644 >> --- a/platform/linux-generic/include/api/odp_shared_memory.h >> +++ b/platform/linux-generic/include/api/odp_shared_memory.h >> @@ -20,6 +20,7 @@ extern "C" { >> #include <odp_std_types.h> >> +#include <odp_platform_types.h> >> /** @defgroup odp_shared_memory ODP SHARED MEMORY >> * Operations on shared memory. 
>> @@ -38,15 +39,6 @@ extern "C" { >> #define ODP_SHM_PROC 0x2 /**< Share with external processes */ >> /** >> - * ODP shared memory block >> - */ >> -typedef uint32_t odp_shm_t; >> - >> -/** Invalid shared memory block */ >> -#define ODP_SHM_INVALID 0 >> - >> - >> -/** >> * Shared memory block info >> */ >> typedef struct odp_shm_info_t { >> diff --git a/platform/linux-generic/include/odp_buffer_inlines.h >> b/platform/linux-generic/include/odp_buffer_inlines.h >> new file mode 100644 >> index 0000000..9eb425c >> --- /dev/null >> +++ b/platform/linux-generic/include/odp_buffer_inlines.h >> @@ -0,0 +1,150 @@ >> +/* Copyright (c) 2014, Linaro Limited >> + * All rights reserved. >> + * >> + * SPDX-License-Identifier: BSD-3-Clause >> + */ >> + >> +/** >> + * @file >> + * >> + * Inline functions for ODP buffer mgmt routines - implementation >> internal >> + */ >> + >> +#ifndef ODP_BUFFER_INLINES_H_ >> +#define ODP_BUFFER_INLINES_H_ >> + >> +#ifdef __cplusplus >> +extern "C" { >> +#endif >> + >> +static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t >> *hdr) >> +{ >> + odp_buffer_bits_t handle; >> + uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); >> + struct pool_entry_s *pool = get_pool_entry(pool_id); >> + >> + handle.pool_id = pool_id; >> + handle.index = ((uint8_t *)hdr - pool->pool_base_addr) / >> + ODP_CACHE_LINE_SIZE; >> + handle.seg = 0; >> + >> + return handle.u32; >> +} >> + >> +static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) >> +{ >> + return hdr->handle.handle; >> +} >> + >> +static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) >> +{ >> + odp_buffer_bits_t handle; >> + uint32_t pool_id; >> + uint32_t index; >> + struct pool_entry_s *pool; >> + >> + handle.u32 = buf; >> + pool_id = handle.pool_id; >> + index = handle.index; >> + >> +#ifdef POOL_ERROR_CHECK >> + if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { >> + ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); >> + return NULL; >> + } >> +#endif >> + >> + 
pool = get_pool_entry(pool_id); >> + >> +#ifdef POOL_ERROR_CHECK >> + if (odp_unlikely(index > pool->params.num_bufs - 1)) { >> + ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); >> + return NULL; >> + } >> +#endif >> + >> + return (odp_buffer_hdr_t *)(void *) >> + (pool->pool_base_addr + (index * ODP_CACHE_LINE_SIZE)); >> +} >> + >> +static inline uint32_t odp_buffer_refcount(odp_buffer_hdr_t *buf) >> +{ >> + return odp_atomic_load_u32(&buf->ref_count); >> +} >> + >> +static inline uint32_t odp_buffer_incr_refcount(odp_buffer_hdr_t *buf, >> + uint32_t val) >> +{ >> + return odp_atomic_fetch_add_u32(&buf->ref_count, val) + val; >> +} >> + >> +static inline uint32_t odp_buffer_decr_refcount(odp_buffer_hdr_t *buf, >> + uint32_t val) >> +{ >> + uint32_t tmp; >> + >> + tmp = odp_atomic_fetch_sub_u32(&buf->ref_count, val); >> + >> + if (tmp < val) { >> + odp_atomic_fetch_add_u32(&buf->ref_count, val - tmp); >> + return 0; >> + } else { >> + return tmp - val; >> + } >> +} >> > IMO, I do not see any use case where refcount gets increamented by a value > greater than 1 in a single API. > If we drop "val" from input function we can simply use odp_atomic_inc_u32 > apis. > These are internal APIs, not external (for now). The use-case for val vs. inc is multicast where a packet is going to be transmitted many times and then decremented with each transmission. 
> + >> +static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) >> +{ >> + odp_buffer_bits_t handle; >> + odp_buffer_hdr_t *buf_hdr; >> + handle.u32 = buf; >> + >> + /* For buffer handles, segment index must be 0 and pool id in >> range */ >> + if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_BUFFER_POOLS) >> + return NULL; >> + >> + pool_entry_t *pool = odp_pool_to_entry(handle.pool_id); >> + >> + /* If pool not created, handle is invalid */ >> + if (pool->s.pool_shm == ODP_SHM_INVALID) >> + return NULL; >> + >> + uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; >> + >> + /* A valid buffer index must be on stride, and must be in range */ >> + if ((handle.index % buf_stride != 0) || >> + ((uint32_t)(handle.index / buf_stride) >= >> pool->s.params.num_bufs)) >> + return NULL; >> + >> + buf_hdr = (odp_buffer_hdr_t *)(void *) >> + (pool->s.pool_base_addr + >> + (handle.index * ODP_CACHE_LINE_SIZE)); >> + >> + /* Handle is valid, so buffer is valid if it is allocated */ >> + return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr; >> +} >> + >> +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); >> + >> +static inline void *buffer_map(odp_buffer_hdr_t *buf, >> + uint32_t offset, >> + uint32_t *seglen, >> + uint32_t limit) >> +{ >> + int seg_index = offset / buf->segsize; >> + int seg_offset = offset % buf->segsize; >> + >> + if (seglen != NULL) { >> + uint32_t buf_left = limit - offset; >> + *seglen = buf_left < buf->segsize ? 
>> + buf_left : buf->segsize - seg_offset; >> + } >> + >> + return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); >> +} >> + >> +#ifdef __cplusplus >> +} >> +#endif >> + >> +#endif >> diff --git a/platform/linux-generic/include/odp_buffer_internal.h >> b/platform/linux-generic/include/odp_buffer_internal.h >> index 0027bfc..632dcbf 100644 >> --- a/platform/linux-generic/include/odp_buffer_internal.h >> +++ b/platform/linux-generic/include/odp_buffer_internal.h >> @@ -24,99 +24,131 @@ extern "C" { >> #include <odp_buffer.h> >> #include <odp_debug.h> >> #include <odp_align.h> >> - >> -/* TODO: move these to correct files */ >> - >> -typedef uint64_t odp_phys_addr_t; >> +#include <odp_align_internal.h> >> +#include <odp_config.h> >> +#include <odp_byteorder.h> >> +#include <odp_thread.h> >> + >> + >> +#define ODP_BITSIZE(x) \ >> + ((x) <= 2 ? 1 : \ >> + ((x) <= 4 ? 2 : \ >> + ((x) <= 8 ? 3 : \ >> + ((x) <= 16 ? 4 : \ >> + ((x) <= 32 ? 5 : \ >> + ((x) <= 64 ? 6 : \ >> + ((x) <= 128 ? 7 : \ >> + ((x) <= 256 ? 8 : \ >> + ((x) <= 512 ? 9 : \ >> + ((x) <= 1024 ? 10 : \ >> + ((x) <= 2048 ? 11 : \ >> + ((x) <= 4096 ? 12 : \ >> + ((x) <= 8196 ? 13 : \ >> + ((x) <= 16384 ? 14 : \ >> + ((x) <= 32768 ? 15 : \ >> + ((x) <= 65536 ? 
16 : \ >> + (0/0))))))))))))))))) >> + >> +ODP_STATIC_ASSERT(ODP_CONFIG_BUF_SEG_SIZE >= 256, >> + "ODP Segment size must be a minimum of 256 bytes"); >> + >> +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_SEG_SIZE % ODP_CACHE_LINE_SIZE) == 0, >> + "ODP Segment size must be a multiple of cache line >> size"); >> + >> +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_MAX_SIZE % ODP_CONFIG_BUF_SEG_SIZE) >> == 0, >> + "Buffer max size must be a multiple of segment size"); >> + >> +#define ODP_BUFFER_MAX_SEG (ODP_CONFIG_BUF_MAX_SIZE/ODP_ >> CONFIG_BUF_SEG_SIZE) >> + >> +/* We can optimize storage of small buffers within metadata area */ >> +#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - >> 1)) >> + >> +#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_BUFFER_POOLS) >> +#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) >> +#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - >> ODP_BUFFER_SEG_BITS) >> +#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + >> ODP_BUFFER_INDEX_BITS) >> +#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) >> +#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) >> #define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) >> #define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1) >> -#define ODP_BUFS_PER_CHUNK 16 >> -#define ODP_BUFS_PER_SCATTER 4 >> - >> -#define ODP_BUFFER_TYPE_CHUNK 0xffff >> - >> - >> -#define ODP_BUFFER_POOL_BITS 4 >> -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS) >> -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) >> -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) >> - >> typedef union odp_buffer_bits_t { >> uint32_t u32; >> odp_buffer_t handle; >> struct { >> +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN >> uint32_t pool_id:ODP_BUFFER_POOL_BITS; >> uint32_t index:ODP_BUFFER_INDEX_BITS; >> + uint32_t seg:ODP_BUFFER_SEG_BITS; >> +#else >> + uint32_t seg:ODP_BUFFER_SEG_BITS; >> + uint32_t index:ODP_BUFFER_INDEX_BITS; >> + uint32_t pool_id:ODP_BUFFER_POOL_BITS; >> 
+#endif >> }; >> -} odp_buffer_bits_t; >> + struct { >> +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN >> + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; >> + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; >> +#else >> + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; >> + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; >> +#endif >> + }; >> +} odp_buffer_bits_t; >> /* forward declaration */ >> struct odp_buffer_hdr_t; >> - >> -/* >> - * Scatter/gather list of buffers >> - */ >> -typedef struct odp_buffer_scatter_t { >> - /* buffer pointers */ >> - struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER]; >> - int num_bufs; /* num buffers */ >> - int pos; /* position on the list */ >> - size_t total_len; /* Total length */ >> -} odp_buffer_scatter_t; >> - >> - >> -/* >> - * Chunk of buffers (in single pool) >> - */ >> -typedef struct odp_buffer_chunk_t { >> - uint32_t num_bufs; /* num buffers */ >> - uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */ >> -} odp_buffer_chunk_t; >> - >> - >> /* Common buffer header */ >> typedef struct odp_buffer_hdr_t { >> struct odp_buffer_hdr_t *next; /* next buf in a list */ >> + int allocator; /* allocating thread id */ >> odp_buffer_bits_t handle; /* handle */ >> - odp_phys_addr_t phys_addr; /* physical data start >> address */ >> - void *addr; /* virtual data start >> address */ >> - uint32_t index; /* buf index in the pool */ >> + union { >> + uint32_t all; >> + struct { >> + uint32_t zeroized:1; /* Zeroize buf data on free >> */ >> + uint32_t hdrdata:1; /* Data is in buffer hdr */ >> + }; >> + } flags; >> + int type; /* buffer type */ >> size_t size; /* max data size */ >> - size_t cur_offset; /* current offset */ >> odp_atomic_u32_t ref_count; /* reference count */ >> - odp_buffer_scatter_t scatter; /* Scatter/gather list */ >> - int type; /* type of next header */ >> odp_buffer_pool_t pool_hdl; /* buffer pool handle */ >> > nit: We can directly store pool_entry_s* inside buffer_hdr_t instead of > pool handle. 
> The handle is here for efficiency since this allows the handle to be derived from the header addr without re-encoding. It's a time/space tradeoff. > - >> + union { >> + uint64_t buf_u64; /* user u64 */ >> + void *buf_ctx; /* user context */ >> + void *udata_addr; /* user metadata addr */ >> + }; >> + size_t udata_size; /* size of user metadata */ >> + uint32_t segcount; /* segment count */ >> + uint32_t segsize; /* segment size */ >> + void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs >> */ >> } odp_buffer_hdr_t; >> -/* Ensure next header starts from 8 byte align */ >> -ODP_STATIC_ASSERT((sizeof(odp_buffer_hdr_t) % 8) == 0, >> "ODP_BUFFER_HDR_T__SIZE_ERROR"); >> +typedef struct odp_buffer_hdr_stride { >> + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_ >> t))]; >> +} odp_buffer_hdr_stride; >> +typedef struct odp_buf_blk_t { >> + struct odp_buf_blk_t *next; >> + struct odp_buf_blk_t *prev; >> +} odp_buf_blk_t; >> /* Raw buffer header */ >> typedef struct { >> odp_buffer_hdr_t buf_hdr; /* common buffer header */ >> - uint8_t buf_data[]; /* start of buffer data area */ >> } odp_raw_buffer_hdr_t; >> +/* Free buffer marker */ >> +#define ODP_FREEBUF -1 >> -/* Chunk header */ >> -typedef struct odp_buffer_chunk_hdr_t { >> - odp_buffer_hdr_t buf_hdr; >> - odp_buffer_chunk_t chunk; >> -} odp_buffer_chunk_hdr_t; >> - >> - >> -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf); >> - >> -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t >> buf_src); >> - >> +/* Forward declarations */ >> +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool, size_t size); >> #ifdef __cplusplus >> } >> diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h >> b/platform/linux-generic/include/odp_buffer_pool_internal.h >> index e0210bd..347be39 100644 >> --- a/platform/linux-generic/include/odp_buffer_pool_internal.h >> +++ b/platform/linux-generic/include/odp_buffer_pool_internal.h >> @@ -19,12 +19,44 @@ extern "C" { >> #endif >> #include 
<odp_std_types.h> >> +#include <odp_align.h> >> +#include <odp_align_internal.h> >> #include <odp_buffer_pool.h> >> #include <odp_buffer_internal.h> >> -#include <odp_align.h> >> #include <odp_hints.h> >> #include <odp_config.h> >> #include <odp_debug.h> >> +#include <odp_shared_memory.h> >> +#include <odp_atomic.h> >> +#include <odp_atomic_internal.h> >> +#include <string.h> >> + >> +/** >> + * Buffer initialization routine prototype >> + * >> + * @note Routines of this type MAY be passed as part of the >> + * _odp_buffer_pool_init_t structure to be called whenever a >> + * buffer is allocated to initialize the user metadata >> + * associated with that buffer. >> + */ >> +typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); >> + >> +/** >> + * Buffer pool initialization parameters >> + * Used to communicate buffer pool initialization options. Internal for >> now. >> + */ >> +typedef struct _odp_buffer_pool_init_t { >> + size_t udata_size; /**< Size of user metadata for each >> buffer */ >> + _odp_buf_init_t *buf_init; /**< Buffer initialization routine to >> use */ >> + void *buf_init_arg; /**< Argument to be passed to >> buf_init() */ >> +} _odp_buffer_pool_init_t; /**< Type of buffer initialization >> struct */ >> + >> +/* Local cache for buffer alloc/free acceleration */ >> +typedef struct local_cache_t { >> + odp_buffer_hdr_t *buf_freelist; /* The local cache */ >> + uint64_t bufallocs; /* Local buffer alloc count */ >> + uint64_t buffrees; /* Local buffer free count */ >> +} local_cache_t; >> /* Use ticketlock instead of spinlock */ >> #define POOL_USE_TICKETLOCK >> @@ -39,6 +71,17 @@ extern "C" { >> #include <odp_spinlock.h> >> #endif >> +#ifdef POOL_USE_TICKETLOCK >> +#include <odp_ticketlock.h> >> +#define LOCK(a) odp_ticketlock_lock(a) >> +#define UNLOCK(a) odp_ticketlock_unlock(a) >> +#define LOCK_INIT(a) odp_ticketlock_init(a) >> +#else >> +#include <odp_spinlock.h> >> +#define LOCK(a) odp_spinlock_lock(a) >> +#define UNLOCK(a) 
odp_spinlock_unlock(a) >> +#define LOCK_INIT(a) odp_spinlock_init(a) >> +#endif >> struct pool_entry_s { >> #ifdef POOL_USE_TICKETLOCK >> @@ -47,66 +90,292 @@ struct pool_entry_s { >> odp_spinlock_t lock ODP_ALIGNED_CACHE; >> #endif >> - odp_buffer_chunk_hdr_t *head; >> - uint64_t free_bufs; >> char name[ODP_BUFFER_POOL_NAME_LEN]; >> - >> - odp_buffer_pool_t pool_hdl ODP_ALIGNED_CACHE; >> - uintptr_t buf_base; >> - size_t buf_size; >> - size_t buf_offset; >> - uint64_t num_bufs; >> - void *pool_base_addr; >> - uint64_t pool_size; >> - size_t user_size; >> - size_t user_align; >> - int buf_type; >> - size_t hdr_size; >> + odp_buffer_pool_param_t params; >> + _odp_buffer_pool_init_t init_params; >> + odp_buffer_pool_t pool_hdl; >> + uint32_t pool_id; >> + odp_shm_t pool_shm; >> + union { >> + uint32_t all; >> + struct { >> + uint32_t has_name:1; >> + uint32_t user_supplied_shm:1; >> + uint32_t unsegmented:1; >> + uint32_t zeroized:1; >> + uint32_t predefined:1; >> + }; >> + } flags; >> + uint32_t quiesced; >> + uint32_t low_wm_assert; >> + uint8_t *pool_base_addr; >> + size_t pool_size; >> + uint32_t buf_stride; >> + _odp_atomic_ptr_t buf_freelist; >> + _odp_atomic_ptr_t blk_freelist; >> + odp_atomic_u32_t bufcount; >> + odp_atomic_u32_t blkcount; >> + odp_atomic_u64_t bufallocs; >> + odp_atomic_u64_t buffrees; >> + odp_atomic_u64_t blkallocs; >> + odp_atomic_u64_t blkfrees; >> + odp_atomic_u64_t bufempty; >> + odp_atomic_u64_t blkempty; >> + odp_atomic_u64_t high_wm_count; >> + odp_atomic_u64_t low_wm_count; >> + uint32_t seg_size; >> + uint32_t high_wm; >> + uint32_t low_wm; >> + uint32_t headroom; >> + uint32_t tailroom; >> }; >> +typedef union pool_entry_u { >> + struct pool_entry_s s; >> + >> + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct >> pool_entry_s))]; >> +} pool_entry_t; >> extern void *pool_entry_ptr[]; >> +#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) >> +#define buffer_is_secure(buf) (buf->flags.zeroized) >> +#define 
pool_is_secure(pool) (pool->flags.zeroized) >> +#else >> +#define buffer_is_secure(buf) 0 >> +#define pool_is_secure(pool) 0 >> +#endif >> + >> +#define TAG_ALIGN ((size_t)16) >> -static inline void *get_pool_entry(uint32_t pool_id) >> +#define odp_cs(ptr, old, new) \ >> + _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void >> *)new, \ >> + _ODP_MEMMODEL_SC, \ >> + _ODP_MEMMODEL_SC) >> + >> +/* Helper functions for pointer tagging to avoid ABA race conditions */ >> +#define odp_tag(ptr) \ >> + (((size_t)ptr) & (TAG_ALIGN - 1)) >> + >> +#define odp_detag(ptr) \ >> + ((typeof(ptr))(((size_t)ptr) & -TAG_ALIGN)) >> + >> +#define odp_retag(ptr, tag) \ >> + ((typeof(ptr))(((size_t)ptr) | odp_tag(tag))) >> + >> + >> +static inline void *get_blk(struct pool_entry_s *pool) >> { >> - return pool_entry_ptr[pool_id]; >> + void *oldhead, *myhead, *newhead; >> + >> + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, >> _ODP_MEMMODEL_ACQ); >> + >> + do { >> + size_t tag = odp_tag(oldhead); >> + myhead = odp_detag(oldhead); >> + if (odp_unlikely(myhead == NULL)) >> + break; >> + newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag >> + 1); >> + } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); >> + >> + if (odp_unlikely(myhead == NULL)) >> + odp_atomic_inc_u64(&pool->blkempty); >> + else >> + odp_atomic_dec_u32(&pool->blkcount); >> + >> + return (void *)myhead; >> } >> +static inline void ret_blk(struct pool_entry_s *pool, void *block) >> +{ >> + void *oldhead, *myhead, *myblock; >> + >> + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, >> _ODP_MEMMODEL_ACQ); >> + >> + do { >> + size_t tag = odp_tag(oldhead); >> + myhead = odp_detag(oldhead); >> + ((odp_buf_blk_t *)block)->next = myhead; >> + myblock = odp_retag(block, tag + 1); >> + } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); >> -static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) >> + odp_atomic_inc_u32(&pool->blkcount); >> + odp_atomic_inc_u64(&pool->blkfrees); >> +} >> 
+ >> +static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) >> { >> - odp_buffer_bits_t handle; >> - uint32_t pool_id; >> - uint32_t index; >> - struct pool_entry_s *pool; >> - odp_buffer_hdr_t *hdr; >> - >> - handle.u32 = buf; >> - pool_id = handle.pool_id; >> - index = handle.index; >> - >> -#ifdef POOL_ERROR_CHECK >> - if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { >> - ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); >> - return NULL; >> + odp_buffer_hdr_t *oldhead, *myhead, *newhead; >> + >> + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, >> _ODP_MEMMODEL_ACQ); >> + >> + do { >> + size_t tag = odp_tag(oldhead); >> + myhead = odp_detag(oldhead); >> + if (odp_unlikely(myhead == NULL)) >> + break; >> + newhead = odp_retag(myhead->next, tag + 1); >> + } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); >> + >> + if (odp_unlikely(myhead == NULL)) { >> + odp_atomic_inc_u64(&pool->bufempty); >> + } else { >> + uint64_t bufcount = >> + odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; >> + >> + /* Check for low watermark condition */ >> + if (bufcount == pool->low_wm && !pool->low_wm_assert) { >> + pool->low_wm_assert = 1; >> + odp_atomic_inc_u64(&pool->low_wm_count); >> + } >> + >> + odp_atomic_inc_u64(&pool->bufallocs); >> + myhead->next = myhead; /* Mark buffer allocated */ >> + myhead->allocator = odp_thread_id(); >> } >> -#endif >> - pool = get_pool_entry(pool_id); >> + return (void *)myhead; >> +} >> + >> +static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t >> *buf) >> +{ >> + odp_buffer_hdr_t *oldhead, *myhead, *mybuf; >> + >> + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ >> -#ifdef POOL_ERROR_CHECK >> - if (odp_unlikely(index > pool->num_bufs - 1)) { >> - ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); >> - return NULL; >> + if (!buf->flags.hdrdata && buf->type != ODP_BUFFER_TYPE_RAW) { >> + while (buf->segcount > 0) { >> + if (buffer_is_secure(buf) || pool_is_secure(pool)) >> + 
memset(buf->addr[buf->segcount - 1], >> + 0, buf->segsize); >> + ret_blk(pool, buf->addr[--buf->segcount]); >> + } >> + buf->size = 0; >> } >> -#endif >> - hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * >> pool->buf_size); >> + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, >> _ODP_MEMMODEL_ACQ); >> + >> + do { >> + size_t tag = odp_tag(oldhead); >> + myhead = odp_detag(oldhead); >> + buf->next = myhead; >> + mybuf = odp_retag(buf, tag + 1); >> + } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); >> + >> + uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) >> + 1; >> - return hdr; >> + /* Check if low watermark condition should be deasserted */ >> + if (bufcount == pool->high_wm && pool->low_wm_assert) { >> + pool->low_wm_assert = 0; >> + odp_atomic_inc_u64(&pool->high_wm_count); >> + } >> + >> + odp_atomic_inc_u64(&pool->buffrees); >> +} >> + >> +static inline void *get_local_buf(local_cache_t *buf_cache, >> + struct pool_entry_s *pool, >> + size_t totsize) >> +{ >> + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; >> + >> + if (odp_likely(buf != NULL)) { >> + buf_cache->buf_freelist = buf->next; >> + >> + if (odp_unlikely(buf->size < totsize)) { >> + size_t needed = totsize - buf->size; >> + >> + do { >> + void *blk = get_blk(pool); >> + if (odp_unlikely(blk == NULL)) { >> + ret_buf(pool, buf); >> + buf_cache->buffrees--; >> + return NULL; >> + } >> + buf->addr[buf->segcount++] = blk; >> + needed -= pool->seg_size; >> + } while ((ssize_t)needed > 0); >> + >> + buf->size = buf->segcount * pool->seg_size; >> + } >> + >> + buf_cache->bufallocs++; >> + buf->allocator = odp_thread_id(); /* Mark buffer >> allocated */ >> + } >> + >> + return buf; >> +} >> + >> +static inline void ret_local_buf(local_cache_t *buf_cache, >> + odp_buffer_hdr_t *buf) >> +{ >> + buf->allocator = ODP_FREEBUF; >> + buf->next = buf_cache->buf_freelist; >> + buf_cache->buf_freelist = buf; >> + >> + buf_cache->buffrees++; >> +} >> + >> +static inline void 
flush_cache(local_cache_t *buf_cache, >> + struct pool_entry_s *pool) >> +{ >> + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; >> + uint32_t flush_count = 0; >> + >> + while (buf != NULL) { >> + odp_buffer_hdr_t *next = buf->next; >> + ret_buf(pool, buf); >> + buf = next; >> + flush_count++; >> + } >> + >> + odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs); >> + odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - >> flush_count); >> + >> + buf_cache->buf_freelist = NULL; >> + buf_cache->bufallocs = 0; >> + buf_cache->buffrees = 0; >> +} >> + >> +static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) >> +{ >> + return pool_id; >> +} >> + >> +static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) >> +{ >> + return pool_hdl; >> +} >> + >> +static inline void *get_pool_entry(uint32_t pool_id) >> +{ >> + return pool_entry_ptr[pool_id]; >> +} >> + >> +static inline pool_entry_t *odp_pool_to_entry(odp_buffer_pool_t pool) >> +{ >> + return (pool_entry_t *)get_pool_entry(pool_handle_ >> to_index(pool)); >> +} >> + >> +static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) >> +{ >> + return odp_pool_to_entry(buf->pool_hdl); >> +} >> + >> +static inline uint32_t odp_buffer_pool_segment_size(odp_buffer_pool_t >> pool) >> +{ >> + return odp_pool_to_entry(pool)->s.seg_size; >> +} >> + >> +static inline uint32_t odp_buffer_pool_headroom(odp_buffer_pool_t pool) >> +{ >> + return odp_pool_to_entry(pool)->s.headroom; >> } >> +static inline uint32_t odp_buffer_pool_tailroom(odp_buffer_pool_t >> pool) >> +{ >> + return odp_pool_to_entry(pool)->s.tailroom; >> +} >> #ifdef __cplusplus >> } >> diff --git a/platform/linux-generic/include/odp_internal.h >> b/platform/linux-generic/include/odp_internal.h >> index f8c1596..11d6393 100644 >> --- a/platform/linux-generic/include/odp_internal.h >> +++ b/platform/linux-generic/include/odp_internal.h >> @@ -42,6 +42,8 @@ int odp_schedule_init_local(void); >> int 
odp_timer_init_global(void); >> int odp_timer_disarm_all(void); >> +void _odp_flush_caches(void); >> + >> #ifdef __cplusplus >> } >> #endif >> diff --git a/platform/linux-generic/include/odp_packet_internal.h >> b/platform/linux-generic/include/odp_packet_internal.h >> index 49c59b2..f34a83d 100644 >> --- a/platform/linux-generic/include/odp_packet_internal.h >> +++ b/platform/linux-generic/include/odp_packet_internal.h >> @@ -22,6 +22,7 @@ extern "C" { >> #include <odp_debug.h> >> #include <odp_buffer_internal.h> >> #include <odp_buffer_pool_internal.h> >> +#include <odp_buffer_inlines.h> >> #include <odp_packet.h> >> #include <odp_packet_io.h> >> @@ -92,7 +93,8 @@ typedef union { >> }; >> } output_flags_t; >> -ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), >> "OUTPUT_FLAGS_SIZE_ERROR"); >> +ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), >> + "OUTPUT_FLAGS_SIZE_ERROR"); >> /** >> * Internal Packet header >> @@ -105,25 +107,23 @@ typedef struct { >> error_flags_t error_flags; >> output_flags_t output_flags; >> - uint32_t frame_offset; /**< offset to start of frame, even on >> error */ >> uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */ >> uint32_t l3_offset; /**< offset to L3 hdr, e.g. 
IPv4, IPv6 */ >> uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also >> ICMP) */ >> uint32_t frame_len; >> + uint32_t headroom; >> + uint32_t tailroom; >> uint64_t user_ctx; /* user context */ >> odp_pktio_t input; >> - >> - uint32_t pad; >> - uint8_t buf_data[]; /* start of buffer data area */ >> } odp_packet_hdr_t; >> -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) == >> ODP_OFFSETOF(odp_packet_hdr_t, buf_data), >> - "ODP_PACKET_HDR_T__SIZE_ERR"); >> -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0, >> - "ODP_PACKET_HDR_T__SIZE_ERR2"); >> +typedef struct odp_packet_hdr_stride { >> + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_ >> t))]; >> +} odp_packet_hdr_stride; >> + >> /** >> * Return the packet header >> @@ -138,6 +138,38 @@ static inline odp_packet_hdr_t >> *odp_packet_hdr(odp_packet_t pkt) >> */ >> void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset); >> +/** >> + * Initialize packet buffer >> + */ >> +static inline void packet_init(pool_entry_t *pool, >> + odp_packet_hdr_t *pkt_hdr, >> + size_t size) >> +{ >> + /* >> + * Reset parser metadata. Note that we clear via memset to make >> + * this routine indepenent of any additional adds to packet >> metadata. >> + */ >> + const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, >> buf_hdr); >> + uint8_t *start; >> + size_t len; >> + >> + start = (uint8_t *)pkt_hdr + start_offset; >> + len = sizeof(odp_packet_hdr_t) - start_offset; >> + memset(start, 0, len); >> + >> + /* >> + * Packet headroom is set from the pool's headroom >> + * Packet tailroom is rounded up to fill the last >> + * segment occupied by the allocated length. 
>> + */ >> + pkt_hdr->frame_len = size; >> + pkt_hdr->headroom = pool->s.headroom; >> + pkt_hdr->tailroom = >> + (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - >> + (pool->s.headroom + size); >> +} >> + >> + >> #ifdef __cplusplus >> } >> #endif >> diff --git a/platform/linux-generic/include/odp_timer_internal.h >> b/platform/linux-generic/include/odp_timer_internal.h >> index ad28f53..2ff36ce 100644 >> --- a/platform/linux-generic/include/odp_timer_internal.h >> +++ b/platform/linux-generic/include/odp_timer_internal.h >> @@ -51,14 +51,9 @@ typedef struct odp_timeout_hdr_t { >> uint8_t buf_data[]; >> } odp_timeout_hdr_t; >> - >> - >> -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) == >> - ODP_OFFSETOF(odp_timeout_hdr_t, buf_data), >> - "ODP_TIMEOUT_HDR_T__SIZE_ERR"); >> - >> -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0, >> - "ODP_TIMEOUT_HDR_T__SIZE_ERR2"); >> +typedef struct odp_timeout_hdr_stride { >> + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_ >> hdr_t))]; >> +} odp_timeout_hdr_stride; >> /** >> diff --git a/platform/linux-generic/odp_buffer.c >> b/platform/linux-generic/odp_buffer.c >> index bcbb99a..c1bef54 100644 >> --- a/platform/linux-generic/odp_buffer.c >> +++ b/platform/linux-generic/odp_buffer.c >> @@ -5,8 +5,9 @@ >> */ >> #include <odp_buffer.h> >> -#include <odp_buffer_internal.h> >> #include <odp_buffer_pool_internal.h> >> +#include <odp_buffer_internal.h> >> +#include <odp_buffer_inlines.h> >> #include <string.h> >> #include <stdio.h> >> @@ -16,7 +17,7 @@ void *odp_buffer_addr(odp_buffer_t buf) >> { >> odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); >> - return hdr->addr; >> + return hdr->addr[0]; >> } >> @@ -38,15 +39,11 @@ int odp_buffer_type(odp_buffer_t buf) >> int odp_buffer_is_valid(odp_buffer_t buf) >> { >> - odp_buffer_bits_t handle; >> - >> - handle.u32 = buf; >> - >> - return (handle.index != ODP_BUFFER_INVALID_INDEX); >> + return validate_buf(buf) != NULL; >> } >> -int odp_buffer_snprint(char *str, 
size_t n, odp_buffer_t buf) >> +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) >> { >> odp_buffer_hdr_t *hdr; >> int len = 0; >> @@ -63,28 +60,14 @@ int odp_buffer_snprint(char *str, size_t n, >> odp_buffer_t buf) >> len += snprintf(&str[len], n-len, >> " pool %i\n", hdr->pool_hdl); >> len += snprintf(&str[len], n-len, >> - " index %"PRIu32"\n", hdr->index); >> - len += snprintf(&str[len], n-len, >> - " phy_addr %"PRIu64"\n", hdr->phys_addr); >> - len += snprintf(&str[len], n-len, >> " addr %p\n", hdr->addr); >> len += snprintf(&str[len], n-len, >> " size %zu\n", hdr->size); >> len += snprintf(&str[len], n-len, >> - " cur_offset %zu\n", hdr->cur_offset); >> - len += snprintf(&str[len], n-len, >> " ref_count %i\n", >> odp_atomic_load_u32(&hdr->ref_count)); >> len += snprintf(&str[len], n-len, >> " type %i\n", hdr->type); >> - len += snprintf(&str[len], n-len, >> - " Scatter list\n"); >> - len += snprintf(&str[len], n-len, >> - " num_bufs %i\n", >> hdr->scatter.num_bufs); >> - len += snprintf(&str[len], n-len, >> - " pos %i\n", hdr->scatter.pos); >> - len += snprintf(&str[len], n-len, >> - " total_len %zu\n", >> hdr->scatter.total_len); >> return len; >> } >> @@ -101,9 +84,3 @@ void odp_buffer_print(odp_buffer_t buf) >> ODP_PRINT("\n%s\n", str); >> } >> - >> -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src) >> -{ >> - (void)buf_dst; >> - (void)buf_src; >> -} >> diff --git a/platform/linux-generic/odp_buffer_pool.c >> b/platform/linux-generic/odp_buffer_pool.c >> index 83c51fa..e3f90a2 100644 >> --- a/platform/linux-generic/odp_buffer_pool.c >> +++ b/platform/linux-generic/odp_buffer_pool.c >> @@ -6,8 +6,9 @@ >> #include <odp_std_types.h> >> #include <odp_buffer_pool.h> >> -#include <odp_buffer_pool_internal.h> >> #include <odp_buffer_internal.h> >> +#include <odp_buffer_pool_internal.h> >> +#include <odp_buffer_inlines.h> >> #include <odp_packet_internal.h> >> #include <odp_timer_internal.h> >> #include <odp_align_internal.h> 
>> @@ -17,57 +18,35 @@ >> #include <odp_config.h> >> #include <odp_hints.h> >> #include <odp_debug_internal.h> >> +#include <odp_atomic_internal.h> >> #include <string.h> >> #include <stdlib.h> >> -#ifdef POOL_USE_TICKETLOCK >> -#include <odp_ticketlock.h> >> -#define LOCK(a) odp_ticketlock_lock(a) >> -#define UNLOCK(a) odp_ticketlock_unlock(a) >> -#define LOCK_INIT(a) odp_ticketlock_init(a) >> -#else >> -#include <odp_spinlock.h> >> -#define LOCK(a) odp_spinlock_lock(a) >> -#define UNLOCK(a) odp_spinlock_unlock(a) >> -#define LOCK_INIT(a) odp_spinlock_init(a) >> -#endif >> - >> - >> #if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS >> #error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS >> #endif >> -#define NULL_INDEX ((uint32_t)-1) >> -union buffer_type_any_u { >> +typedef union buffer_type_any_u { >> odp_buffer_hdr_t buf; >> odp_packet_hdr_t pkt; >> odp_timeout_hdr_t tmo; >> -}; >> - >> -ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0, >> - "BUFFER_TYPE_ANY_U__SIZE_ERR"); >> +} odp_anybuf_t; >> /* Any buffer type header */ >> typedef struct { >> union buffer_type_any_u any_hdr; /* any buffer type */ >> - uint8_t buf_data[]; /* start of buffer data area >> */ >> } odp_any_buffer_hdr_t; >> - >> -typedef union pool_entry_u { >> - struct pool_entry_s s; >> - >> - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct >> pool_entry_s))]; >> - >> -} pool_entry_t; >> +typedef struct odp_any_hdr_stride { >> + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_ >> hdr_t))]; >> +} odp_any_hdr_stride; >> typedef struct pool_table_t { >> pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS]; >> - >> } pool_table_t; >> @@ -77,38 +56,8 @@ static pool_table_t *pool_tbl; >> /* Pool entry pointers (for inlining) */ >> void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS]; >> - >> -static __thread odp_buffer_chunk_hdr_t *local_chunk[ODP_CONFIG_ >> BUFFER_POOLS]; >> - >> - >> -static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) >> -{ >> - return pool_id + 
1; >> -} >> - >> - >> -static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) >> -{ >> - return pool_hdl -1; >> -} >> - >> - >> -static inline void set_handle(odp_buffer_hdr_t *hdr, >> - pool_entry_t *pool, uint32_t index) >> -{ >> - odp_buffer_pool_t pool_hdl = pool->s.pool_hdl; >> - uint32_t pool_id = pool_handle_to_index(pool_hdl); >> - >> - if (pool_id >= ODP_CONFIG_BUFFER_POOLS) >> - ODP_ABORT("set_handle: Bad pool handle %u\n", pool_hdl); >> - >> - if (index > ODP_BUFFER_MAX_INDEX) >> - ODP_ERR("set_handle: Bad buffer index\n"); >> - >> - hdr->handle.pool_id = pool_id; >> - hdr->handle.index = index; >> -} >> - >> +/* Local cache for buffer alloc/free acceleration */ >> +static __thread local_cache_t local_cache[ODP_CONFIG_BUFFER_POOLS]; >> int odp_buffer_pool_init_global(void) >> { >> @@ -131,7 +80,7 @@ int odp_buffer_pool_init_global(void) >> pool_entry_t *pool = &pool_tbl->pool[i]; >> LOCK_INIT(&pool->s.lock); >> pool->s.pool_hdl = pool_index_to_handle(i); >> - >> + pool->s.pool_id = i; >> pool_entry_ptr[i] = pool; >> } >> @@ -143,269 +92,258 @@ int odp_buffer_pool_init_global(void) >> return 0; >> } >> +/** >> + * Buffer pool creation >> + */ >> -static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t >> index) >> -{ >> - odp_buffer_hdr_t *hdr; >> - >> - hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * >> pool->s.buf_size); >> - return hdr; >> -} >> - >> - >> -static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t >> index) >> -{ >> - uint32_t i = chunk_hdr->chunk.num_bufs; > >
Some more comments inline. Comments got missed in previous mail by mistake. Regards, Bala On 8 December 2014 at 04:54, Bill Fischofer <bill.fischofer@linaro.org> wrote: > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > --- > > Petri: Please review the following files here: > platform/linux-generic/include/api/odp_buffer.h > platform/linux-generic/include/api/odp_buffer_pool.h > platform/linux-generic/include/api/odp_config.h > > This patch is complete and compilable/testable. It is RFC pending > Petri approval of the public API headers and recommendations for > final packaging. > > example/generator/odp_generator.c | 19 +- > example/ipsec/odp_ipsec.c | 57 +- > example/l2fwd/odp_l2fwd.c | 19 +- > example/odp_example/odp_example.c | 18 +- > example/packet/odp_pktio.c | 19 +- > example/timer/odp_timer_test.c | 13 +- > platform/linux-generic/include/api/odp_buffer.h | 3 +- > .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- > platform/linux-generic/include/api/odp_config.h | 19 + > .../linux-generic/include/api/odp_platform_types.h | 12 + > .../linux-generic/include/api/odp_shared_memory.h | 10 +- > .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ > .../linux-generic/include/odp_buffer_internal.h | 150 ++-- > .../include/odp_buffer_pool_internal.h | 351 ++++++++-- > platform/linux-generic/include/odp_internal.h | 2 + > .../linux-generic/include/odp_packet_internal.h | 50 +- > .../linux-generic/include/odp_timer_internal.h | 11 +- > platform/linux-generic/odp_buffer.c | 33 +- > platform/linux-generic/odp_buffer_pool.c | 777 > ++++++++++----------- > platform/linux-generic/odp_linux.c | 4 +- > platform/linux-generic/odp_packet.c | 41 +- > platform/linux-generic/odp_queue.c | 1 + > platform/linux-generic/odp_schedule.c | 20 +- > platform/linux-generic/odp_timer.c | 3 +- > test/api_test/odp_timer_ping.c | 19 +- > test/validation/odp_crypto.c | 43 +- > test/validation/odp_queue.c | 19 +- > 27 files changed, 1208 insertions(+), 758 deletions(-) > 
create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h > > diff --git a/example/generator/odp_generator.c > b/example/generator/odp_generator.c > index 73b0369..476cbef 100644 > --- a/example/generator/odp_generator.c > +++ b/example/generator/odp_generator.c > @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c > index 76d27c5..f96338c 100644 > --- a/example/ipsec/odp_ipsec.c > +++ b/example/ipsec/odp_ipsec.c > @@ -367,8 +367,7 @@ static > void ipsec_init_pre(void) > { > odp_queue_param_t qparam; > - void *pool_base; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* > * Create queues > @@ -401,16 +400,12 @@ void ipsec_init_pre(void) > } > > /* Create output buffer pool */ > - shm 
= odp_shm_reserve("shm_out_pool", > - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_OUT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - out_pool = odp_buffer_pool_create("out_pool", pool_base, > - SHM_OUT_POOL_SIZE, > - SHM_OUT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, > ¶ms); > > if (ODP_BUFFER_POOL_INVALID == out_pool) { > EXAMPLE_ERR("Error: message pool create failed.\n"); > @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > int stream_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet buffer pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - pool_base = odp_shm_addr(shm); > - > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > + ¶ms); > > - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (ODP_BUFFER_POOL_INVALID == pkt_pool) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > } > > /* Create context buffer pool */ > - shm = odp_shm_reserve("shm_ctx_pool", > - SHM_CTX_POOL_SIZE, 
ODP_CACHE_LINE_SIZE, 0); > - > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_CTX_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, > + ¶ms); > > - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, > - SHM_CTX_POOL_SIZE, > - SHM_CTX_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > if (ODP_BUFFER_POOL_INVALID == ctx_pool) { > EXAMPLE_ERR("Error: context pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c > index ebac8c5..3c1fd6a 100644 > --- a/example/l2fwd/odp_l2fwd.c > +++ b/example/l2fwd/odp_l2fwd.c > @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_pktio_t pktio; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, > ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, 
> - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/odp_example/odp_example.c > b/example/odp_example/odp_example.c > index 96a2912..8373f12 100644 > --- a/example/odp_example/odp_example.c > +++ b/example/odp_example/odp_example.c > @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int i, j; > int prios; > int first_core; > odp_shm_t shm; > test_globals_t *globals; > + odp_buffer_pool_param_t params; > > printf("\nODP example starts\n\n"); > > @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) > /* > * Create message pool > */ > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > + params.buf_size = sizeof(test_message_t); > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Shared memory reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - sizeof(test_message_t), > - ODP_CACHE_LINE_SIZE, > ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Pool create failed.\n"); > diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c > index 7d51682..f2e7b2d 100644 > --- a/example/packet/odp_pktio.c > +++ b/example/packet/odp_pktio.c > @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > 
@@ -389,20 +389,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/timer/odp_timer_test.c > b/example/timer/odp_timer_test.c > index 9968bfe..0d6e31a 100644 > --- a/example/timer/odp_timer_test.c > +++ b/example/timer/odp_timer_test.c > @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int first_core; > uint64_t cycles, ns; > odp_queue_param_t param; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > printf("\nODP timer example starts\n"); > > @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) > */ > shm = odp_shm_reserve("msg_pool", > MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - 0, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_TIMEOUT); > + params.buf_size = 0; > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; > + > + pool = odp_buffer_pool_create("msg_pool", shm, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > 
EXAMPLE_ERR("Pool create failed.\n"); > diff --git a/platform/linux-generic/include/api/odp_buffer.h > b/platform/linux-generic/include/api/odp_buffer.h > index da23120..e981324 100644 > --- a/platform/linux-generic/include/api/odp_buffer.h > +++ b/platform/linux-generic/include/api/odp_buffer.h > @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return 1 if valid, otherwise 0 > + * @retval 1 Buffer handle represents a valid buffer. > + * @retval 0 Buffer handle does not represent a valid buffer. > */ > int odp_buffer_is_valid(odp_buffer_t buf); > > diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h > b/platform/linux-generic/include/api/odp_buffer_pool.h > index 30b83e0..3d85066 100644 > --- a/platform/linux-generic/include/api/odp_buffer_pool.h > +++ b/platform/linux-generic/include/api/odp_buffer_pool.h > @@ -32,42 +32,114 @@ extern "C" { > /** Maximum queue name lenght in chars */ > #define ODP_BUFFER_POOL_NAME_LEN 32 > > -/** Invalid buffer pool */ > -#define ODP_BUFFER_POOL_INVALID 0 > +/** > + * Buffer pool parameters > + * Used to communicate buffer pool creation options. > + */ > +typedef struct odp_buffer_pool_param_t { > + size_t buf_size; /**< Buffer size in bytes. The maximum > + number of bytes application will > + store in each buffer. */ > + size_t buf_align; /**< Minimum buffer alignment in bytes. > + Valid values are powers of two. Use 0 > + for default alignment. Default will > + always be a multiple of 8. */ > + uint32_t num_bufs; /**< Number of buffers in the pool */ > + int buf_type; /**< Buffer type */ > +} odp_buffer_pool_param_t; > > /** > * Create a buffer pool > + * This routine is used to create a buffer pool. It take three > + * arguments: the optional name of the pool to be created, an optional > shared > + * memory handle, and a parameter struct that describes the pool to be > + * created. 
If a name is not specified the result is an anonymous pool > that > + * cannot be referenced by odp_buffer_pool_lookup(). > + * > + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 chars. > + * May be specified as NULL for anonymous pools. > * > - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 > chars) > - * @param base_addr Pool base address > - * @param size Pool size in bytes > - * @param buf_size Buffer size in bytes > - * @param buf_align Minimum buffer alignment > - * @param buf_type Buffer type > + * @param shm The shared memory object in which to create the pool. > + * Use ODP_SHM_NULL to reserve default memory type > + * for the buffer type. > * > - * @return Buffer pool handle > + * @param params Buffer pool parameters. > + * > + * @retval Handle Buffer pool handle on success > + * @retval ODP_BUFFER_POOL_INVALID if call failed > */ > + > odp_buffer_pool_t odp_buffer_pool_create(const char *name, > - void *base_addr, uint64_t size, > - size_t buf_size, size_t buf_align, > - int buf_type); > + odp_shm_t shm, > + odp_buffer_pool_param_t *params); > > +/** > + * Destroy a buffer pool previously created by odp_buffer_pool_create() > + * > + * @param pool Handle of the buffer pool to be destroyed > + * > + * @retval 0 Success > + * @retval -1 Failure > + * > + * @note This routine destroys a previously created buffer pool. This call > + * does not destroy any shared memory object passed to > + * odp_buffer_pool_create() used to store the buffer pool contents. The > caller > + * takes responsibility for that. If no shared memory object was passed as > + * part of the create call, then this routine will destroy any internal > shared > + * memory objects associated with the buffer pool. Results are undefined > if > + * an attempt is made to destroy a buffer pool that contains allocated or > + * otherwise active buffers. 
> + */ > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); > > /** > * Find a buffer pool by name > * > * @param name Name of the pool > * > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. > + * @retval Handle Buffer pool handle on successs > + * @retval ODP_BUFFER_POOL_INVALID if not found > + * > + * @note This routine cannot be used to look up an anonymous pool (one > created > + * with no name). > */ > odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); > > +/** > + * Buffer pool information struct > + * Used to get information about a buffer pool. > + */ > +typedef struct odp_buffer_pool_info_t { > + const char *name; /**< pool name */ > + odp_buffer_pool_param_t params; /**< pool parameters */ > +} odp_buffer_pool_info_t; > + > +/** > + * Retrieve information about a buffer pool > + * > + * @param pool Buffer pool handle > + * > + * @param shm Recieves odp_shm_t supplied by caller at > + * pool creation, or ODP_SHM_NULL if the > + * pool is managed internally. > + * > + * @param[out] info Receives an odp_buffer_pool_info_t object > + * that describes the pool. > + * > + * @retval 0 Success > + * @retval -1 Failure. Info could not be retrieved. > + */ > + > +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, > + odp_buffer_pool_info_t *info); > > /** > * Print buffer pool info > * > * @param pool Pool handle > * > + * @note This routine writes implementation-defined information about the > + * specified buffer pool to the ODP log. The intended use is for > debugging. 
> */ > void odp_buffer_pool_print(odp_buffer_pool_t pool); > > @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); > * The validity of a buffer can be cheked at any time with > odp_buffer_is_valid() > * @param pool Pool handle > * > - * @return Buffer handle or ODP_BUFFER_INVALID > + * @retval Handle Buffer handle of allocated buffer > + * @retval ODP_BUFFER_INVALID Allocation failed > */ > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return Buffer pool the buffer was allocated from > + * @retval Handle Buffer pool handle that the buffer was allocated from > */ > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); > > diff --git a/platform/linux-generic/include/api/odp_config.h > b/platform/linux-generic/include/api/odp_config.h > index 906897c..5ca5bb2 100644 > --- a/platform/linux-generic/include/api/odp_config.h > +++ b/platform/linux-generic/include/api/odp_config.h > @@ -49,6 +49,25 @@ extern "C" { > #define ODP_CONFIG_PKTIO_ENTRIES 64 > > /** > + * Buffer segment size to use > + * This is the granularity of segmented buffers. Sized for now to be large > + * enough to support 1500-byte packets since the raw socket interface > does not > + * support scatter/gather I/O. ODP requires a minimum segment size of 128 > + * bytes with 256 recommended. Linux-generic code will enforce a 256 byte > + * minimum. Note that the chosen segment size must be a multiple of > + * ODP_CACHE_LINE_SIZE. > + */ > +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) > + > +/** > + * Maximum buffer size supported > + * Must be an integral number of segments and should be large enough to > + * accommodate jumbo packets. Attempts to allocate or extend buffers to > sizes > + * larger than this limit will fail. 
> + */ > +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) > + > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h > b/platform/linux-generic/include/api/odp_platform_types.h > index 4db47d3..2181eb6 100644 > --- a/platform/linux-generic/include/api/odp_platform_types.h > +++ b/platform/linux-generic/include/api/odp_platform_types.h > @@ -26,6 +26,9 @@ > /** ODP Buffer pool */ > typedef uint32_t odp_buffer_pool_t; > > +/** Invalid buffer pool */ > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) > + > /** ODP buffer */ > typedef uint32_t odp_buffer_t; > > @@ -65,6 +68,15 @@ typedef uint32_t odp_pktio_t; > #define ODP_PKTIO_ANY ((odp_pktio_t)~0) > > /** > + * ODP shared memory block > + */ > +typedef uint32_t odp_shm_t; > + > +/** Invalid shared memory block */ > +#define ODP_SHM_INVALID 0 > +#define ODP_SHM_NULL ODP_SHM_INVALID /**< Synonym for buffer pool use */ > + > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_shared_memory.h > b/platform/linux-generic/include/api/odp_shared_memory.h > index 26e208b..f70db5a 100644 > --- a/platform/linux-generic/include/api/odp_shared_memory.h > +++ b/platform/linux-generic/include/api/odp_shared_memory.h > @@ -20,6 +20,7 @@ extern "C" { > > > #include <odp_std_types.h> > +#include <odp_platform_types.h> > > /** @defgroup odp_shared_memory ODP SHARED MEMORY > * Operations on shared memory. 
> @@ -38,15 +39,6 @@ extern "C" { > #define ODP_SHM_PROC 0x2 /**< Share with external processes */ > > /** > - * ODP shared memory block > - */ > -typedef uint32_t odp_shm_t; > - > -/** Invalid shared memory block */ > -#define ODP_SHM_INVALID 0 > - > - > -/** > * Shared memory block info > */ > typedef struct odp_shm_info_t { > diff --git a/platform/linux-generic/include/odp_buffer_inlines.h > b/platform/linux-generic/include/odp_buffer_inlines.h > new file mode 100644 > index 0000000..9eb425c > --- /dev/null > +++ b/platform/linux-generic/include/odp_buffer_inlines.h > @@ -0,0 +1,150 @@ > +/* Copyright (c) 2014, Linaro Limited > + * All rights reserved. > + * > + * SPDX-License-Identifier: BSD-3-Clause > + */ > + > +/** > + * @file > + * > + * Inline functions for ODP buffer mgmt routines - implementation internal > + */ > + > +#ifndef ODP_BUFFER_INLINES_H_ > +#define ODP_BUFFER_INLINES_H_ > + > +#ifdef __cplusplus > +extern "C" { > +#endif > + > +static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr) > +{ > + odp_buffer_bits_t handle; > + uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); > + struct pool_entry_s *pool = get_pool_entry(pool_id); > + > + handle.pool_id = pool_id; > + handle.index = ((uint8_t *)hdr - pool->pool_base_addr) / > + ODP_CACHE_LINE_SIZE; > + handle.seg = 0; > + > + return handle.u32; > +} > + > +static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) > +{ > + return hdr->handle.handle; > +} > + > +static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) > +{ > + odp_buffer_bits_t handle; > + uint32_t pool_id; > + uint32_t index; > + struct pool_entry_s *pool; > + > + handle.u32 = buf; > + pool_id = handle.pool_id; > + index = handle.index; > + > +#ifdef POOL_ERROR_CHECK > + if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { > + ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); > + return NULL; > + } > +#endif > + > + pool = get_pool_entry(pool_id); > + > +#ifdef POOL_ERROR_CHECK > + if 
(odp_unlikely(index > pool->params.num_bufs - 1)) { > + ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); > + return NULL; > + } > +#endif > + > + return (odp_buffer_hdr_t *)(void *) > + (pool->pool_base_addr + (index * ODP_CACHE_LINE_SIZE)); > +} > + > +static inline uint32_t odp_buffer_refcount(odp_buffer_hdr_t *buf) > +{ > + return odp_atomic_load_u32(&buf->ref_count); > +} > + > +static inline uint32_t odp_buffer_incr_refcount(odp_buffer_hdr_t *buf, > + uint32_t val) > +{ > + return odp_atomic_fetch_add_u32(&buf->ref_count, val) + val; > +} > + > +static inline uint32_t odp_buffer_decr_refcount(odp_buffer_hdr_t *buf, > + uint32_t val) > +{ > + uint32_t tmp; > + > + tmp = odp_atomic_fetch_sub_u32(&buf->ref_count, val); > + > + if (tmp < val) { > + odp_atomic_fetch_add_u32(&buf->ref_count, val - tmp); > + return 0; > + } else { > + return tmp - val; > + } > +} > + > +static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) > +{ > + odp_buffer_bits_t handle; > + odp_buffer_hdr_t *buf_hdr; > + handle.u32 = buf; > + > + /* For buffer handles, segment index must be 0 and pool id in > range */ > + if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_BUFFER_POOLS) > + return NULL; > + > + pool_entry_t *pool = odp_pool_to_entry(handle.pool_id); > + > + /* If pool not created, handle is invalid */ > + if (pool->s.pool_shm == ODP_SHM_INVALID) > + return NULL; > + > + uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; > + > + /* A valid buffer index must be on stride, and must be in range */ > + if ((handle.index % buf_stride != 0) || > + ((uint32_t)(handle.index / buf_stride) >= > pool->s.params.num_bufs)) > + return NULL; > + > + buf_hdr = (odp_buffer_hdr_t *)(void *) > + (pool->s.pool_base_addr + > + (handle.index * ODP_CACHE_LINE_SIZE)); > + > + /* Handle is valid, so buffer is valid if it is allocated */ > + return buf_hdr->allocator == ODP_FREEBUF ? 
NULL : buf_hdr; > +} > + > +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); > + > +static inline void *buffer_map(odp_buffer_hdr_t *buf, > + uint32_t offset, > + uint32_t *seglen, > + uint32_t limit) > +{ > + int seg_index = offset / buf->segsize; > + int seg_offset = offset % buf->segsize; > + > + if (seglen != NULL) { > + uint32_t buf_left = limit - offset; > + *seglen = buf_left < buf->segsize ? > + buf_left : buf->segsize - seg_offset; > + } > + > + return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); > +} > + > +#ifdef __cplusplus > +} > +#endif > + > +#endif > diff --git a/platform/linux-generic/include/odp_buffer_internal.h > b/platform/linux-generic/include/odp_buffer_internal.h > index 0027bfc..632dcbf 100644 > --- a/platform/linux-generic/include/odp_buffer_internal.h > +++ b/platform/linux-generic/include/odp_buffer_internal.h > @@ -24,99 +24,131 @@ extern "C" { > #include <odp_buffer.h> > #include <odp_debug.h> > #include <odp_align.h> > - > -/* TODO: move these to correct files */ > - > -typedef uint64_t odp_phys_addr_t; > +#include <odp_align_internal.h> > +#include <odp_config.h> > +#include <odp_byteorder.h> > +#include <odp_thread.h> > + > + > +#define ODP_BITSIZE(x) \ > + ((x) <= 2 ? 1 : \ > + ((x) <= 4 ? 2 : \ > + ((x) <= 8 ? 3 : \ > + ((x) <= 16 ? 4 : \ > + ((x) <= 32 ? 5 : \ > + ((x) <= 64 ? 6 : \ > + ((x) <= 128 ? 7 : \ > + ((x) <= 256 ? 8 : \ > + ((x) <= 512 ? 9 : \ > + ((x) <= 1024 ? 10 : \ > + ((x) <= 2048 ? 11 : \ > + ((x) <= 4096 ? 12 : \ > + ((x) <= 8196 ? 13 : \ > + ((x) <= 16384 ? 14 : \ > + ((x) <= 32768 ? 15 : \ > + ((x) <= 65536 ? 
16 : \ > + (0/0))))))))))))))))) > + > +ODP_STATIC_ASSERT(ODP_CONFIG_BUF_SEG_SIZE >= 256, > + "ODP Segment size must be a minimum of 256 bytes"); > + > +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_SEG_SIZE % ODP_CACHE_LINE_SIZE) == 0, > + "ODP Segment size must be a multiple of cache line > size"); > + > +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_MAX_SIZE % ODP_CONFIG_BUF_SEG_SIZE) == > 0, > + "Buffer max size must be a multiple of segment size"); > + > +#define ODP_BUFFER_MAX_SEG > (ODP_CONFIG_BUF_MAX_SIZE/ODP_CONFIG_BUF_SEG_SIZE) > + > +/* We can optimize storage of small buffers within metadata area */ > +#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - > 1)) > + > +#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_BUFFER_POOLS) > +#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) > +#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - > ODP_BUFFER_SEG_BITS) > +#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + > ODP_BUFFER_INDEX_BITS) > +#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) > +#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) > > #define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) > #define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1) > > -#define ODP_BUFS_PER_CHUNK 16 > -#define ODP_BUFS_PER_SCATTER 4 > - > -#define ODP_BUFFER_TYPE_CHUNK 0xffff > - > - > -#define ODP_BUFFER_POOL_BITS 4 > -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS) > -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) > -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) > - > typedef union odp_buffer_bits_t { > uint32_t u32; > odp_buffer_t handle; > > struct { > +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN > uint32_t pool_id:ODP_BUFFER_POOL_BITS; > uint32_t index:ODP_BUFFER_INDEX_BITS; > + uint32_t seg:ODP_BUFFER_SEG_BITS; > +#else > + uint32_t seg:ODP_BUFFER_SEG_BITS; > + uint32_t index:ODP_BUFFER_INDEX_BITS; > + uint32_t pool_id:ODP_BUFFER_POOL_BITS; > +#endif > }; > -} odp_buffer_bits_t; > > + struct 
{ > +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN > + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; > + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; > +#else > + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; > + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; > +#endif > + }; > +} odp_buffer_bits_t; > > /* forward declaration */ > struct odp_buffer_hdr_t; > > - > -/* > - * Scatter/gather list of buffers > - */ > -typedef struct odp_buffer_scatter_t { > - /* buffer pointers */ > - struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER]; > - int num_bufs; /* num buffers */ > - int pos; /* position on the list */ > - size_t total_len; /* Total length */ > -} odp_buffer_scatter_t; > - > - > -/* > - * Chunk of buffers (in single pool) > - */ > -typedef struct odp_buffer_chunk_t { > - uint32_t num_bufs; /* num buffers */ > - uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */ > -} odp_buffer_chunk_t; > - > - > /* Common buffer header */ > typedef struct odp_buffer_hdr_t { > struct odp_buffer_hdr_t *next; /* next buf in a list */ > + int allocator; /* allocating thread id */ > odp_buffer_bits_t handle; /* handle */ > - odp_phys_addr_t phys_addr; /* physical data start > address */ > - void *addr; /* virtual data start address > */ > - uint32_t index; /* buf index in the pool */ > + union { > + uint32_t all; > + struct { > + uint32_t zeroized:1; /* Zeroize buf data on free */ > + uint32_t hdrdata:1; /* Data is in buffer hdr */ > + }; > + } flags; > + int type; /* buffer type */ > size_t size; /* max data size */ > - size_t cur_offset; /* current offset */ > odp_atomic_u32_t ref_count; /* reference count */ > - odp_buffer_scatter_t scatter; /* Scatter/gather list */ > - int type; /* type of next header */ > odp_buffer_pool_t pool_hdl; /* buffer pool handle */ > - > + union { > + uint64_t buf_u64; /* user u64 */ > + void *buf_ctx; /* user context */ > + void *udata_addr; /* user metadata addr */ > + }; > + size_t udata_size; /* size of user metadata */ > + uint32_t segcount; /* segment count */ > + uint32_t segsize; 
/* segment size */ > + void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs > */ > } odp_buffer_hdr_t; > > -/* Ensure next header starts from 8 byte align */ > -ODP_STATIC_ASSERT((sizeof(odp_buffer_hdr_t) % 8) == 0, > "ODP_BUFFER_HDR_T__SIZE_ERROR"); > +typedef struct odp_buffer_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))]; > +} odp_buffer_hdr_stride; > > +typedef struct odp_buf_blk_t { > + struct odp_buf_blk_t *next; > + struct odp_buf_blk_t *prev; > +} odp_buf_blk_t; > > /* Raw buffer header */ > typedef struct { > odp_buffer_hdr_t buf_hdr; /* common buffer header */ > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_raw_buffer_hdr_t; > > +/* Free buffer marker */ > +#define ODP_FREEBUF -1 > > -/* Chunk header */ > -typedef struct odp_buffer_chunk_hdr_t { > - odp_buffer_hdr_t buf_hdr; > - odp_buffer_chunk_t chunk; > -} odp_buffer_chunk_hdr_t; > - > - > -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf); > - > -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src); > - > +/* Forward declarations */ > +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool, size_t size); > > #ifdef __cplusplus > } > diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h > b/platform/linux-generic/include/odp_buffer_pool_internal.h > index e0210bd..347be39 100644 > --- a/platform/linux-generic/include/odp_buffer_pool_internal.h > +++ b/platform/linux-generic/include/odp_buffer_pool_internal.h > @@ -19,12 +19,44 @@ extern "C" { > #endif > > #include <odp_std_types.h> > +#include <odp_align.h> > +#include <odp_align_internal.h> > #include <odp_buffer_pool.h> > #include <odp_buffer_internal.h> > -#include <odp_align.h> > #include <odp_hints.h> > #include <odp_config.h> > #include <odp_debug.h> > +#include <odp_shared_memory.h> > +#include <odp_atomic.h> > +#include <odp_atomic_internal.h> > +#include <string.h> > + > +/** > + * Buffer initialization routine prototype > + * > + * @note Routines of 
this type MAY be passed as part of the > + * _odp_buffer_pool_init_t structure to be called whenever a > + * buffer is allocated to initialize the user metadata > + * associated with that buffer. > + */ > +typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); > + > +/** > + * Buffer pool initialization parameters > + * Used to communicate buffer pool initialization options. Internal for > now. > + */ > +typedef struct _odp_buffer_pool_init_t { > + size_t udata_size; /**< Size of user metadata for each > buffer */ > + _odp_buf_init_t *buf_init; /**< Buffer initialization routine to > use */ > + void *buf_init_arg; /**< Argument to be passed to > buf_init() */ > +} _odp_buffer_pool_init_t; /**< Type of buffer initialization > struct */ > + > +/* Local cache for buffer alloc/free acceleration */ > +typedef struct local_cache_t { > + odp_buffer_hdr_t *buf_freelist; /* The local cache */ > + uint64_t bufallocs; /* Local buffer alloc count */ > + uint64_t buffrees; /* Local buffer free count */ > +} local_cache_t; > > /* Use ticketlock instead of spinlock */ > #define POOL_USE_TICKETLOCK > @@ -39,6 +71,17 @@ extern "C" { > #include <odp_spinlock.h> > #endif > > +#ifdef POOL_USE_TICKETLOCK > +#include <odp_ticketlock.h> > +#define LOCK(a) odp_ticketlock_lock(a) > +#define UNLOCK(a) odp_ticketlock_unlock(a) > +#define LOCK_INIT(a) odp_ticketlock_init(a) > +#else > +#include <odp_spinlock.h> > +#define LOCK(a) odp_spinlock_lock(a) > +#define UNLOCK(a) odp_spinlock_unlock(a) > +#define LOCK_INIT(a) odp_spinlock_init(a) > +#endif > > struct pool_entry_s { > #ifdef POOL_USE_TICKETLOCK > @@ -47,66 +90,292 @@ struct pool_entry_s { > odp_spinlock_t lock ODP_ALIGNED_CACHE; > #endif > > - odp_buffer_chunk_hdr_t *head; > - uint64_t free_bufs; > char name[ODP_BUFFER_POOL_NAME_LEN]; > - > - odp_buffer_pool_t pool_hdl ODP_ALIGNED_CACHE; > - uintptr_t buf_base; > - size_t buf_size; > - size_t buf_offset; > - uint64_t num_bufs; > - void *pool_base_addr; > - uint64_t 
pool_size; > - size_t user_size; > - size_t user_align; > - int buf_type; > - size_t hdr_size; > + odp_buffer_pool_param_t params; > + _odp_buffer_pool_init_t init_params; > + odp_buffer_pool_t pool_hdl; > + uint32_t pool_id; > + odp_shm_t pool_shm; > + union { > + uint32_t all; > + struct { > + uint32_t has_name:1; > + uint32_t user_supplied_shm:1; > + uint32_t unsegmented:1; > + uint32_t zeroized:1; > + uint32_t predefined:1; > + }; > + } flags; > + uint32_t quiesced; > + uint32_t low_wm_assert; > + uint8_t *pool_base_addr; > + size_t pool_size; > + uint32_t buf_stride; > + _odp_atomic_ptr_t buf_freelist; > + _odp_atomic_ptr_t blk_freelist; > + odp_atomic_u32_t bufcount; > + odp_atomic_u32_t blkcount; > + odp_atomic_u64_t bufallocs; > + odp_atomic_u64_t buffrees; > + odp_atomic_u64_t blkallocs; > + odp_atomic_u64_t blkfrees; > + odp_atomic_u64_t bufempty; > + odp_atomic_u64_t blkempty; > + odp_atomic_u64_t high_wm_count; > + odp_atomic_u64_t low_wm_count; > + uint32_t seg_size; > + uint32_t high_wm; > + uint32_t low_wm; > + uint32_t headroom; > + uint32_t tailroom; > }; > > +typedef union pool_entry_u { > + struct pool_entry_s s; > + > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct > pool_entry_s))]; > +} pool_entry_t; > > extern void *pool_entry_ptr[]; > > +#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) > +#define buffer_is_secure(buf) (buf->flags.zeroized) > +#define pool_is_secure(pool) (pool->flags.zeroized) > +#else > +#define buffer_is_secure(buf) 0 > +#define pool_is_secure(pool) 0 > +#endif > + > +#define TAG_ALIGN ((size_t)16) > > -static inline void *get_pool_entry(uint32_t pool_id) > +#define odp_cs(ptr, old, new) \ > + _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void *)new, \ > + _ODP_MEMMODEL_SC, \ > + _ODP_MEMMODEL_SC) > + > +/* Helper functions for pointer tagging to avoid ABA race conditions */ > +#define odp_tag(ptr) \ > + (((size_t)ptr) & (TAG_ALIGN - 1)) > + > +#define odp_detag(ptr) \ > + 
((typeof(ptr))(((size_t)ptr) & -TAG_ALIGN)) > + > +#define odp_retag(ptr, tag) \ > + ((typeof(ptr))(((size_t)ptr) | odp_tag(tag))) > + > + > +static inline void *get_blk(struct pool_entry_s *pool) > { > - return pool_entry_ptr[pool_id]; > + void *oldhead, *myhead, *newhead; > + > + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + if (odp_unlikely(myhead == NULL)) > + break; > + newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + > 1); > + } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); > + > + if (odp_unlikely(myhead == NULL)) > + odp_atomic_inc_u64(&pool->blkempty); > + else > + odp_atomic_dec_u32(&pool->blkcount); > + > + return (void *)myhead; > } > > +static inline void ret_blk(struct pool_entry_s *pool, void *block) > +{ > + void *oldhead, *myhead, *myblock; > + > + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, > _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + ((odp_buf_blk_t *)block)->next = myhead; > + myblock = odp_retag(block, tag + 1); > + } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); > > -static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) > + odp_atomic_inc_u32(&pool->blkcount); > + odp_atomic_inc_u64(&pool->blkfrees); > +} > + > +static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) > { > - odp_buffer_bits_t handle; > - uint32_t pool_id; > - uint32_t index; > - struct pool_entry_s *pool; > - odp_buffer_hdr_t *hdr; > - > - handle.u32 = buf; > - pool_id = handle.pool_id; > - index = handle.index; > - > -#ifdef POOL_ERROR_CHECK > - if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { > - ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); > - return NULL; > + odp_buffer_hdr_t *oldhead, *myhead, *newhead; > + > + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > 
+ myhead = odp_detag(oldhead); > + if (odp_unlikely(myhead == NULL)) > + break; > + newhead = odp_retag(myhead->next, tag + 1); > + } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); > + > + if (odp_unlikely(myhead == NULL)) { > + odp_atomic_inc_u64(&pool->bufempty); > + } else { > + uint64_t bufcount = > + odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; > + > + /* Check for low watermark condition */ > + if (bufcount == pool->low_wm && !pool->low_wm_assert) { > + pool->low_wm_assert = 1; > + odp_atomic_inc_u64(&pool->low_wm_count); > + } > + > + odp_atomic_inc_u64(&pool->bufallocs); > + myhead->next = myhead; /* Mark buffer allocated */ > + myhead->allocator = odp_thread_id(); > } > -#endif > > - pool = get_pool_entry(pool_id); > + return (void *)myhead; > +} > + > +static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t > *buf) > +{ > + odp_buffer_hdr_t *oldhead, *myhead, *mybuf; > + > + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ > > -#ifdef POOL_ERROR_CHECK > - if (odp_unlikely(index > pool->num_bufs - 1)) { > - ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); > - return NULL; > + if (!buf->flags.hdrdata && buf->type != ODP_BUFFER_TYPE_RAW) { > + while (buf->segcount > 0) { > + if (buffer_is_secure(buf) || pool_is_secure(pool)) > + memset(buf->addr[buf->segcount - 1], > + 0, buf->segsize); > + ret_blk(pool, buf->addr[--buf->segcount]); > + } > + buf->size = 0; > } > -#endif > > - hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * > pool->buf_size); > + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, > _ODP_MEMMODEL_ACQ); > + > + do { > + size_t tag = odp_tag(oldhead); > + myhead = odp_detag(oldhead); > + buf->next = myhead; > + mybuf = odp_retag(buf, tag + 1); > + } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); > + > + uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + > 1; > > - return hdr; > + /* Check if low watermark condition should be deasserted */ > + if (bufcount == pool->high_wm && 
pool->low_wm_assert) { > + pool->low_wm_assert = 0; > + odp_atomic_inc_u64(&pool->high_wm_count); > + } > + > + odp_atomic_inc_u64(&pool->buffrees); > +} > + > +static inline void *get_local_buf(local_cache_t *buf_cache, > + struct pool_entry_s *pool, > + size_t totsize) > +{ > + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; > + > + if (odp_likely(buf != NULL)) { > + buf_cache->buf_freelist = buf->next; > + > + if (odp_unlikely(buf->size < totsize)) { > + size_t needed = totsize - buf->size; > + > + do { > + void *blk = get_blk(pool); > + if (odp_unlikely(blk == NULL)) { > + ret_buf(pool, buf); > + buf_cache->buffrees--; > + return NULL; > + } > + buf->addr[buf->segcount++] = blk; > + needed -= pool->seg_size; > + } while ((ssize_t)needed > 0); > + > + buf->size = buf->segcount * pool->seg_size; > + } > + > + buf_cache->bufallocs++; > + buf->allocator = odp_thread_id(); /* Mark buffer > allocated */ > + } > + > + return buf; > +} > + > +static inline void ret_local_buf(local_cache_t *buf_cache, > + odp_buffer_hdr_t *buf) > +{ > + buf->allocator = ODP_FREEBUF; > + buf->next = buf_cache->buf_freelist; > + buf_cache->buf_freelist = buf; > + > + buf_cache->buffrees++; > +} > + > +static inline void flush_cache(local_cache_t *buf_cache, > + struct pool_entry_s *pool) > +{ > + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; > + uint32_t flush_count = 0; > + > + while (buf != NULL) { > + odp_buffer_hdr_t *next = buf->next; > + ret_buf(pool, buf); > + buf = next; > + flush_count++; > + } > + > + odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs); > + odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - > flush_count); > + > + buf_cache->buf_freelist = NULL; > + buf_cache->bufallocs = 0; > + buf_cache->buffrees = 0; > +} > + > +static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) > +{ > + return pool_id; > +} > + > +static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) > +{ > + return pool_hdl; > +} > + > +static inline 
void *get_pool_entry(uint32_t pool_id) > +{ > + return pool_entry_ptr[pool_id]; > +} > + > +static inline pool_entry_t *odp_pool_to_entry(odp_buffer_pool_t pool) > +{ > + return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool)); > +} > + > +static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) > +{ > + return odp_pool_to_entry(buf->pool_hdl); > +} > + > +static inline uint32_t odp_buffer_pool_segment_size(odp_buffer_pool_t > pool) > +{ > + return odp_pool_to_entry(pool)->s.seg_size; > +} > + > +static inline uint32_t odp_buffer_pool_headroom(odp_buffer_pool_t pool) > +{ > + return odp_pool_to_entry(pool)->s.headroom; > } > > +static inline uint32_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool) > +{ > + return odp_pool_to_entry(pool)->s.tailroom; > +} > > #ifdef __cplusplus > } > diff --git a/platform/linux-generic/include/odp_internal.h > b/platform/linux-generic/include/odp_internal.h > index f8c1596..11d6393 100644 > --- a/platform/linux-generic/include/odp_internal.h > +++ b/platform/linux-generic/include/odp_internal.h > @@ -42,6 +42,8 @@ int odp_schedule_init_local(void); > int odp_timer_init_global(void); > int odp_timer_disarm_all(void); > > +void _odp_flush_caches(void); > + > #ifdef __cplusplus > } > #endif > diff --git a/platform/linux-generic/include/odp_packet_internal.h > b/platform/linux-generic/include/odp_packet_internal.h > index 49c59b2..f34a83d 100644 > --- a/platform/linux-generic/include/odp_packet_internal.h > +++ b/platform/linux-generic/include/odp_packet_internal.h > @@ -22,6 +22,7 @@ extern "C" { > #include <odp_debug.h> > #include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_packet.h> > #include <odp_packet_io.h> > > @@ -92,7 +93,8 @@ typedef union { > }; > } output_flags_t; > > -ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), > "OUTPUT_FLAGS_SIZE_ERROR"); > +ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), > + 
"OUTPUT_FLAGS_SIZE_ERROR"); > > /** > * Internal Packet header > @@ -105,25 +107,23 @@ typedef struct { > error_flags_t error_flags; > output_flags_t output_flags; > > - uint32_t frame_offset; /**< offset to start of frame, even on > error */ > uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */ > uint32_t l3_offset; /**< offset to L3 hdr, e.g. IPv4, IPv6 */ > uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also > ICMP) */ > > uint32_t frame_len; > + uint32_t headroom; > + uint32_t tailroom; > > uint64_t user_ctx; /* user context */ > > odp_pktio_t input; > - > - uint32_t pad; > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_packet_hdr_t; > > -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) == > ODP_OFFSETOF(odp_packet_hdr_t, buf_data), > - "ODP_PACKET_HDR_T__SIZE_ERR"); > -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0, > - "ODP_PACKET_HDR_T__SIZE_ERR2"); > +typedef struct odp_packet_hdr_stride { > + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))]; > +} odp_packet_hdr_stride; > + > > /** > * Return the packet header > @@ -138,6 +138,38 @@ static inline odp_packet_hdr_t > *odp_packet_hdr(odp_packet_t pkt) > */ > void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset); > > +/** > + * Initialize packet buffer > + */ > +static inline void packet_init(pool_entry_t *pool, > + odp_packet_hdr_t *pkt_hdr, > + size_t size) > +{ > + /* > + * Reset parser metadata. Note that we clear via memset to make > + * this routine independent of any additional adds to packet > metadata. > + */ > + const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, > buf_hdr); > + uint8_t *start; > + size_t len; > + > + start = (uint8_t *)pkt_hdr + start_offset; > + len = sizeof(odp_packet_hdr_t) - start_offset; > + memset(start, 0, len); > + > + /* > + * Packet headroom is set from the pool's headroom > + * Packet tailroom is rounded up to fill the last > + * segment occupied by the allocated length. 
> + */ > + pkt_hdr->frame_len = size; > + pkt_hdr->headroom = pool->s.headroom; > + pkt_hdr->tailroom = > + (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - > + (pool->s.headroom + size); > +} > + > + > #ifdef __cplusplus > } > #endif > diff --git a/platform/linux-generic/include/odp_timer_internal.h > b/platform/linux-generic/include/odp_timer_internal.h > index ad28f53..2ff36ce 100644 > --- a/platform/linux-generic/include/odp_timer_internal.h > +++ b/platform/linux-generic/include/odp_timer_internal.h > @@ -51,14 +51,9 @@ typedef struct odp_timeout_hdr_t { > uint8_t buf_data[]; > } odp_timeout_hdr_t; > > - > - > -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) == > - ODP_OFFSETOF(odp_timeout_hdr_t, buf_data), > - "ODP_TIMEOUT_HDR_T__SIZE_ERR"); > - > -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0, > - "ODP_TIMEOUT_HDR_T__SIZE_ERR2"); > +typedef struct odp_timeout_hdr_stride { > + uint8_t > pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))]; > +} odp_timeout_hdr_stride; > > > /** > diff --git a/platform/linux-generic/odp_buffer.c > b/platform/linux-generic/odp_buffer.c > index bcbb99a..c1bef54 100644 > --- a/platform/linux-generic/odp_buffer.c > +++ b/platform/linux-generic/odp_buffer.c > @@ -5,8 +5,9 @@ > */ > > #include <odp_buffer.h> > -#include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_internal.h> > +#include <odp_buffer_inlines.h> > > #include <string.h> > #include <stdio.h> > @@ -16,7 +17,7 @@ void *odp_buffer_addr(odp_buffer_t buf) > { > odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); > > - return hdr->addr; > + return hdr->addr[0]; > } > > > @@ -38,15 +39,11 @@ int odp_buffer_type(odp_buffer_t buf) > > int odp_buffer_is_valid(odp_buffer_t buf) > { > - odp_buffer_bits_t handle; > - > - handle.u32 = buf; > - > - return (handle.index != ODP_BUFFER_INVALID_INDEX); > + return validate_buf(buf) != NULL; > } > > > -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf) > +int 
odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) > { > odp_buffer_hdr_t *hdr; > int len = 0; > @@ -63,28 +60,14 @@ int odp_buffer_snprint(char *str, size_t n, > odp_buffer_t buf) > len += snprintf(&str[len], n-len, > " pool %i\n", hdr->pool_hdl); > len += snprintf(&str[len], n-len, > - " index %"PRIu32"\n", hdr->index); > - len += snprintf(&str[len], n-len, > - " phy_addr %"PRIu64"\n", hdr->phys_addr); > - len += snprintf(&str[len], n-len, > " addr %p\n", hdr->addr); > len += snprintf(&str[len], n-len, > " size %zu\n", hdr->size); > len += snprintf(&str[len], n-len, > - " cur_offset %zu\n", hdr->cur_offset); > - len += snprintf(&str[len], n-len, > " ref_count %i\n", > odp_atomic_load_u32(&hdr->ref_count)); > len += snprintf(&str[len], n-len, > " type %i\n", hdr->type); > - len += snprintf(&str[len], n-len, > - " Scatter list\n"); > - len += snprintf(&str[len], n-len, > - " num_bufs %i\n", > hdr->scatter.num_bufs); > - len += snprintf(&str[len], n-len, > - " pos %i\n", hdr->scatter.pos); > - len += snprintf(&str[len], n-len, > - " total_len %zu\n", > hdr->scatter.total_len); > > return len; > } > @@ -101,9 +84,3 @@ void odp_buffer_print(odp_buffer_t buf) > > ODP_PRINT("\n%s\n", str); > } > - > -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src) > -{ > - (void)buf_dst; > - (void)buf_src; > -} > diff --git a/platform/linux-generic/odp_buffer_pool.c > b/platform/linux-generic/odp_buffer_pool.c > index 83c51fa..e3f90a2 100644 > --- a/platform/linux-generic/odp_buffer_pool.c > +++ b/platform/linux-generic/odp_buffer_pool.c > @@ -6,8 +6,9 @@ > > #include <odp_std_types.h> > #include <odp_buffer_pool.h> > -#include <odp_buffer_pool_internal.h> > #include <odp_buffer_internal.h> > +#include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_packet_internal.h> > #include <odp_timer_internal.h> > #include <odp_align_internal.h> > @@ -17,57 +18,35 @@ > #include <odp_config.h> > #include <odp_hints.h> > #include 
<odp_debug_internal.h> > +#include <odp_atomic_internal.h> > > #include <string.h> > #include <stdlib.h> > > > -#ifdef POOL_USE_TICKETLOCK > -#include <odp_ticketlock.h> > -#define LOCK(a) odp_ticketlock_lock(a) > -#define UNLOCK(a) odp_ticketlock_unlock(a) > -#define LOCK_INIT(a) odp_ticketlock_init(a) > -#else > -#include <odp_spinlock.h> > -#define LOCK(a) odp_spinlock_lock(a) > -#define UNLOCK(a) odp_spinlock_unlock(a) > -#define LOCK_INIT(a) odp_spinlock_init(a) > -#endif > - > - > #if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS > #error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS > #endif > > -#define NULL_INDEX ((uint32_t)-1) > > -union buffer_type_any_u { > +typedef union buffer_type_any_u { > odp_buffer_hdr_t buf; > odp_packet_hdr_t pkt; > odp_timeout_hdr_t tmo; > -}; > - > -ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0, > - "BUFFER_TYPE_ANY_U__SIZE_ERR"); > +} odp_anybuf_t; > > /* Any buffer type header */ > typedef struct { > union buffer_type_any_u any_hdr; /* any buffer type */ > - uint8_t buf_data[]; /* start of buffer data area */ > } odp_any_buffer_hdr_t; > > - > -typedef union pool_entry_u { > - struct pool_entry_s s; > - > - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct > pool_entry_s))]; > - > -} pool_entry_t; > +typedef struct odp_any_hdr_stride { > + uint8_t > pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))]; > +} odp_any_hdr_stride; > > > typedef struct pool_table_t { > pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS]; > - > } pool_table_t; > > > @@ -77,38 +56,8 @@ static pool_table_t *pool_tbl; > /* Pool entry pointers (for inlining) */ > void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS]; > > - > -static __thread odp_buffer_chunk_hdr_t > *local_chunk[ODP_CONFIG_BUFFER_POOLS]; > - > - > -static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) > -{ > - return pool_id + 1; > -} > - > - > -static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) > -{ > - return pool_hdl -1; > -} > - > 
- > -static inline void set_handle(odp_buffer_hdr_t *hdr, > - pool_entry_t *pool, uint32_t index) > -{ > - odp_buffer_pool_t pool_hdl = pool->s.pool_hdl; > - uint32_t pool_id = pool_handle_to_index(pool_hdl); > - > - if (pool_id >= ODP_CONFIG_BUFFER_POOLS) > - ODP_ABORT("set_handle: Bad pool handle %u\n", pool_hdl); > - > - if (index > ODP_BUFFER_MAX_INDEX) > - ODP_ERR("set_handle: Bad buffer index\n"); > - > - hdr->handle.pool_id = pool_id; > - hdr->handle.index = index; > -} > - > +/* Local cache for buffer alloc/free acceleration */ > +static __thread local_cache_t local_cache[ODP_CONFIG_BUFFER_POOLS]; > > int odp_buffer_pool_init_global(void) > { > @@ -131,7 +80,7 @@ int odp_buffer_pool_init_global(void) > pool_entry_t *pool = &pool_tbl->pool[i]; > LOCK_INIT(&pool->s.lock); > pool->s.pool_hdl = pool_index_to_handle(i); > - > + pool->s.pool_id = i; > pool_entry_ptr[i] = pool; > } > > @@ -143,269 +92,258 @@ int odp_buffer_pool_init_global(void) > return 0; > } > > +/** > + * Buffer pool creation > + */ > > -static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index) > -{ > - odp_buffer_hdr_t *hdr; > - > - hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * > pool->s.buf_size); > - return hdr; > -} > - > - > -static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t > index) > -{ > - uint32_t i = chunk_hdr->chunk.num_bufs; > - chunk_hdr->chunk.buf_index[i] = index; > - chunk_hdr->chunk.num_bufs++; > -} > - > - > -static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr) > +odp_buffer_pool_t odp_buffer_pool_create(const char *name, > + odp_shm_t shm, > + odp_buffer_pool_param_t *params) > { > - uint32_t index; > + odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; > + pool_entry_t *pool; > uint32_t i; > > - i = chunk_hdr->chunk.num_bufs - 1; > - index = chunk_hdr->chunk.buf_index[i]; > - chunk_hdr->chunk.num_bufs--; > - return index; > -} > - > - > -static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool, > - 
odp_buffer_chunk_hdr_t > *chunk_hdr) > -{ > - uint32_t index; > + /* Default initialization paramters */ > + static _odp_buffer_pool_init_t default_init_params = { > + .udata_size = 0, > + .buf_init = NULL, > + .buf_init_arg = NULL, > + }; > > - index = chunk_hdr->chunk.buf_index[ODP_BUFS_PER_CHUNK-1]; > - if (index == NULL_INDEX) > - return NULL; > - else > - return (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); > -} > + _odp_buffer_pool_init_t *init_params = &default_init_params; > > + if (params == NULL) > + return ODP_BUFFER_POOL_INVALID; > > -static odp_buffer_chunk_hdr_t *rem_chunk(pool_entry_t *pool) > -{ > - odp_buffer_chunk_hdr_t *chunk_hdr; > - > - chunk_hdr = pool->s.head; > - if (chunk_hdr == NULL) { > - /* Pool is empty */ > - return NULL; > - } > - > - pool->s.head = next_chunk(pool, chunk_hdr); > - pool->s.free_bufs -= ODP_BUFS_PER_CHUNK; > - > - /* unlink */ > - rem_buf_index(chunk_hdr); > - return chunk_hdr; > -} > - > - > -static void add_chunk(pool_entry_t *pool, odp_buffer_chunk_hdr_t > *chunk_hdr) > -{ > - if (pool->s.head) /* link pool head to the chunk */ > - add_buf_index(chunk_hdr, pool->s.head->buf_hdr.index); > - else > - add_buf_index(chunk_hdr, NULL_INDEX); > - > - pool->s.head = chunk_hdr; > - pool->s.free_bufs += ODP_BUFS_PER_CHUNK; > -} > - > - > -static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr) > -{ > - if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.user_align)) { > - ODP_ABORT("check_align: user data align error %p, align > %zu\n", > - hdr->addr, pool->s.user_align); > - } > + /* Restriction for v1.0: All buffers are unsegmented */ > + const int unsegmented = 1; > > - if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) { > - ODP_ABORT("check_align: hdr align error %p, align %i\n", > - hdr, ODP_CACHE_LINE_SIZE); > - } > -} > + /* Restriction for v1.0: No zeroization support */ > + const int zeroized = 0; > > + /* Restriction for v1.0: No udata support */ > + uint32_t udata_stride = 
(init_params->udata_size > sizeof(void *)) > ? > + ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size) : > + 0; > > -static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index, > - int buf_type) > -{ > - odp_buffer_hdr_t *hdr = (odp_buffer_hdr_t *)ptr; > - size_t size = pool->s.hdr_size; > - uint8_t *buf_data; > + uint32_t blk_size, buf_stride; > > - if (buf_type == ODP_BUFFER_TYPE_CHUNK) > - size = sizeof(odp_buffer_chunk_hdr_t); > + switch (params->buf_type) { > + case ODP_BUFFER_TYPE_RAW: > + blk_size = params->buf_size; > > - switch (pool->s.buf_type) { > - odp_raw_buffer_hdr_t *raw_hdr; > - odp_packet_hdr_t *packet_hdr; > - odp_timeout_hdr_t *tmo_hdr; > - odp_any_buffer_hdr_t *any_hdr; > + /* Optimize small raw buffers */ > + if (blk_size > ODP_MAX_INLINE_BUF) > + blk_size = ODP_ALIGN_ROUNDUP(blk_size, TAG_ALIGN); > > - case ODP_BUFFER_TYPE_RAW: > - raw_hdr = ptr; > - buf_data = raw_hdr->buf_data; > + buf_stride = sizeof(odp_buffer_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_PACKET: > - packet_hdr = ptr; > - buf_data = packet_hdr->buf_data; > + if (unsegmented) > + blk_size = > + > ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); > + else > + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, > + > ODP_CONFIG_BUF_SEG_SIZE); > + buf_stride = sizeof(odp_packet_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_TIMEOUT: > - tmo_hdr = ptr; > - buf_data = tmo_hdr->buf_data; > + blk_size = 0; /* Timeouts have no block data, only > metadata */ > + buf_stride = sizeof(odp_timeout_hdr_stride); > break; > + > case ODP_BUFFER_TYPE_ANY: > - any_hdr = ptr; > - buf_data = any_hdr->buf_data; > + if (unsegmented) > + blk_size = > + > ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); > + else > + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, > + > ODP_CONFIG_BUF_SEG_SIZE); > + buf_stride = sizeof(odp_any_hdr_stride); > break; > - default: > - ODP_ABORT("Bad buffer type\n"); > - } > - > - memset(hdr, 0, size); > - > - set_handle(hdr, pool, index); > - > - hdr->addr = 
&buf_data[pool->s.buf_offset - pool->s.hdr_size]; > - hdr->index = index; > - hdr->size = pool->s.user_size; > - hdr->pool_hdl = pool->s.pool_hdl; > - hdr->type = buf_type; > - > - check_align(pool, hdr); > -} > - > - > -static void link_bufs(pool_entry_t *pool) > -{ > - odp_buffer_chunk_hdr_t *chunk_hdr; > - size_t hdr_size; > - size_t data_size; > - size_t data_align; > - size_t tot_size; > - size_t offset; > - size_t min_size; > - uint64_t pool_size; > - uintptr_t buf_base; > - uint32_t index; > - uintptr_t pool_base; > - int buf_type; > - > - buf_type = pool->s.buf_type; > - data_size = pool->s.user_size; > - data_align = pool->s.user_align; > - pool_size = pool->s.pool_size; > - pool_base = (uintptr_t) pool->s.pool_base_addr; > - > - if (buf_type == ODP_BUFFER_TYPE_RAW) { > - hdr_size = sizeof(odp_raw_buffer_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_PACKET) { > - hdr_size = sizeof(odp_packet_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_TIMEOUT) { > - hdr_size = sizeof(odp_timeout_hdr_t); > - } else if (buf_type == ODP_BUFFER_TYPE_ANY) { > - hdr_size = sizeof(odp_any_buffer_hdr_t); > - } else > - ODP_ABORT("odp_buffer_pool_create: Bad type %i\n", > buf_type); > - > - > - /* Chunk must fit into buffer data area.*/ > - min_size = sizeof(odp_buffer_chunk_hdr_t) - hdr_size; > - if (data_size < min_size) > - data_size = min_size; > - > - /* Roundup data size to full cachelines */ > - data_size = ODP_CACHE_LINE_SIZE_ROUNDUP(data_size); > - > - /* Min cacheline alignment for buffer header and data */ > - data_align = ODP_CACHE_LINE_SIZE_ROUNDUP(data_align); > - offset = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size); > - > - /* Multiples of cacheline size */ > - if (data_size > data_align) > - tot_size = data_size + offset; > - else > - tot_size = data_align + offset; > - > - /* First buffer */ > - buf_base = ODP_ALIGN_ROUNDUP(pool_base + offset, data_align) - > offset; > - > - pool->s.hdr_size = hdr_size; > - pool->s.buf_base = buf_base; > - pool->s.buf_size = 
tot_size; > - pool->s.buf_offset = offset; > - index = 0; > > - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); > - pool->s.head = NULL; > - pool_size -= buf_base - pool_base; > - > - while (pool_size > ODP_BUFS_PER_CHUNK * tot_size) { > - int i; > - > - fill_hdr(chunk_hdr, pool, index, ODP_BUFFER_TYPE_CHUNK); > - > - index++; > - > - for (i = 0; i < ODP_BUFS_PER_CHUNK - 1; i++) { > - odp_buffer_hdr_t *hdr = index_to_hdr(pool, index); > - > - fill_hdr(hdr, pool, index, buf_type); > - > - add_buf_index(chunk_hdr, index); > - index++; > - } > - > - add_chunk(pool, chunk_hdr); > - > - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, > - index); > - pool->s.num_bufs += ODP_BUFS_PER_CHUNK; > - pool_size -= ODP_BUFS_PER_CHUNK * tot_size; > + default: > + return ODP_BUFFER_POOL_INVALID; > } > -} > > + /* Validate requested number of buffers against addressable limits > */ > + if (params->num_bufs > > + (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) > + return ODP_BUFFER_POOL_INVALID; > > -odp_buffer_pool_t odp_buffer_pool_create(const char *name, > - void *base_addr, uint64_t size, > - size_t buf_size, size_t buf_align, > - int buf_type) > -{ > - odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; > - pool_entry_t *pool; > - uint32_t i; > - > + /* Find an unused buffer pool slot and iniitalize it as requested > */ > for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { > pool = get_pool_entry(i); > > LOCK(&pool->s.lock); > + if (pool->s.pool_shm != ODP_SHM_INVALID) { > + UNLOCK(&pool->s.lock); > + continue; > + } > > - if (pool->s.buf_base == 0) { > - /* found free pool */ > + /* found free pool */ > + size_t block_size, mdata_size, udata_size; > > + pool->s.flags.all = 0; > + > + if (name == NULL) { > + pool->s.name[0] = 0; > + } else { > strncpy(pool->s.name, name, > ODP_BUFFER_POOL_NAME_LEN - 1); > pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0; > - pool->s.pool_base_addr = base_addr; > - pool->s.pool_size = size; > - 
pool->s.user_size = buf_size; > - pool->s.user_align = buf_align; > - pool->s.buf_type = buf_type; > - > - link_bufs(pool); > - > - UNLOCK(&pool->s.lock); > + pool->s.flags.has_name = 1; > + } > > - pool_hdl = pool->s.pool_hdl; > - break; > + pool->s.params = *params; > + pool->s.init_params = *init_params; > + > + mdata_size = params->num_bufs * buf_stride; > + udata_size = params->num_bufs * udata_stride; > + > + /* Optimize for short buffers: Data stored in buffer hdr */ > + if (blk_size <= ODP_MAX_INLINE_BUF) > + block_size = 0; > + else > + block_size = params->num_bufs * blk_size; > + > + pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(mdata_size + > + udata_size + > + block_size); > + > + if (shm == ODP_SHM_NULL) { > + shm = odp_shm_reserve(pool->s.name, > + pool->s.pool_size, > + ODP_PAGE_SIZE, 0); > + if (shm == ODP_SHM_INVALID) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_INVALID; > + } > + pool->s.pool_base_addr = odp_shm_addr(shm); > + } else { > + odp_shm_info_t info; > + if (odp_shm_info(shm, &info) != 0 || > + info.size < pool->s.pool_size) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_POOL_INVALID; > + } > + pool->s.pool_base_addr = odp_shm_addr(shm); > + void *page_addr = > + > ODP_ALIGN_ROUNDUP_PTR(pool->s.pool_base_addr, > + ODP_PAGE_SIZE); > + if (pool->s.pool_base_addr != page_addr) { > + if (info.size < pool->s.pool_size + > + ((size_t)page_addr - > + (size_t)pool->s.pool_base_addr)) { > + UNLOCK(&pool->s.lock); > + return ODP_BUFFER_POOL_INVALID; > + } > + pool->s.pool_base_addr = page_addr; > + } > + pool->s.flags.user_supplied_shm = 1; > } > > + pool->s.pool_shm = shm; > + > + /* Now safe to unlock since pool entry has been allocated > */ > UNLOCK(&pool->s.lock); > + > + pool->s.flags.unsegmented = unsegmented; > + pool->s.flags.zeroized = zeroized; > + pool->s.seg_size = unsegmented ? 
> + blk_size : ODP_CONFIG_BUF_SEG_SIZE; > + > + uint8_t *udata_base_addr = pool->s.pool_base_addr + > mdata_size; > + uint8_t *block_base_addr = udata_base_addr + udata_size; > + > + pool->s.buf_stride = buf_stride; > + _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, > + _ODP_MEMMODEL_RLX); > + _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, > + _ODP_MEMMODEL_RLX); > + > + /* Initialization will increment these to their target > vals */ > + odp_atomic_store_u32(&pool->s.bufcount, 0); > + odp_atomic_store_u32(&pool->s.blkcount, 0); > + > + uint8_t *buf = udata_base_addr - buf_stride; > + uint8_t *udat = udata_stride == 0 ? NULL : > + block_base_addr - udata_stride; > + > + /* Init buffer common header and add to pool buffer > freelist */ > + do { > + odp_buffer_hdr_t *tmp = > + (odp_buffer_hdr_t *)(void *)buf; > + > + /* Iniitalize buffer metadata */ > + tmp->allocator = ODP_FREEBUF; > + tmp->flags.all = 0; > + tmp->flags.zeroized = zeroized; > + tmp->size = 0; > + odp_atomic_store_u32(&tmp->ref_count, 0); > + tmp->type = params->buf_type; > + tmp->pool_hdl = pool->s.pool_hdl; > + tmp->udata_addr = (void *)udat; > + tmp->udata_size = init_params->udata_size; > + tmp->segcount = 0; > + tmp->segsize = pool->s.seg_size; > + tmp->handle.handle = odp_buffer_encode_handle(tmp); > + > + /* Set 1st seg addr for zero-len buffers */ > + tmp->addr[0] = NULL; > + > + /* Special case for short buffer data */ > + if (blk_size <= ODP_MAX_INLINE_BUF) { > + tmp->flags.hdrdata = 1; > + if (blk_size > 0) { > + tmp->segcount = 1; > + tmp->addr[0] = &tmp->addr[1]; > + tmp->size = blk_size; > + } > + } > + > + /* Push buffer onto pool's freelist */ > + ret_buf(&pool->s, tmp); > + buf -= buf_stride; > + udat -= udata_stride; > + } while (buf >= pool->s.pool_base_addr); > + > + /* Form block freelist for pool */ > + uint8_t *blk = pool->s.pool_base_addr + pool->s.pool_size - > + pool->s.seg_size; > + > + if (blk_size > ODP_MAX_INLINE_BUF) > + do { > + ret_blk(&pool->s, blk); > + blk 
-= pool->s.seg_size; > + } while (blk >= block_base_addr); > + > + /* Initialize pool statistics counters */ > + odp_atomic_store_u64(&pool->s.bufallocs, 0); > + odp_atomic_store_u64(&pool->s.buffrees, 0); > + odp_atomic_store_u64(&pool->s.blkallocs, 0); > + odp_atomic_store_u64(&pool->s.blkfrees, 0); > + odp_atomic_store_u64(&pool->s.bufempty, 0); > + odp_atomic_store_u64(&pool->s.blkempty, 0); > + odp_atomic_store_u64(&pool->s.high_wm_count, 0); > + odp_atomic_store_u64(&pool->s.low_wm_count, 0); > + > + /* Reset other pool globals to initial state */ > + pool->s.low_wm_assert = 0; > + pool->s.quiesced = 0; > + pool->s.low_wm_assert = 0; > + pool->s.headroom = 0; > + pool->s.tailroom = 0; > + > + /* Watermarks are hard-coded for now to control caching */ > + pool->s.high_wm = params->num_bufs / 2; > + pool->s.low_wm = params->num_bufs / 4; > + > + pool_hdl = pool->s.pool_hdl; > + break; > } > > return pool_hdl; > @@ -432,145 +370,200 @@ odp_buffer_pool_t odp_buffer_pool_lookup(const > char *name) > return ODP_BUFFER_POOL_INVALID; > } > > - > -odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl) > +int odp_buffer_pool_info(odp_buffer_pool_t pool_hdl, > + odp_shm_t *shm, > + odp_buffer_pool_info_t *info) > { > - pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk; > - odp_buffer_bits_t handle; > uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > > - pool = get_pool_entry(pool_id); > - chunk = local_chunk[pool_id]; > + if (pool == NULL || info == NULL) > + return -1; > > - if (chunk == NULL) { > - LOCK(&pool->s.lock); > - chunk = rem_chunk(pool); > - UNLOCK(&pool->s.lock); > + *shm = pool->s.flags.user_supplied_shm ? 
> + pool->s.pool_shm : ODP_SHM_NULL; > + info->name = pool->s.name; > + info->params.buf_size = pool->s.params.buf_size; > + info->params.buf_align = pool->s.params.buf_align; > + info->params.num_bufs = pool->s.params.num_bufs; > + info->params.buf_type = pool->s.params.buf_type; > > - if (chunk == NULL) > - return ODP_BUFFER_INVALID; > + return 0; > +} > > - local_chunk[pool_id] = chunk; > - } > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool_hdl) > +{ > + uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > > - if (chunk->chunk.num_bufs == 0) { > - /* give the chunk buffer */ > - local_chunk[pool_id] = NULL; > - chunk->buf_hdr.type = pool->s.buf_type; > + if (pool == NULL) > + return -1; > > - handle = chunk->buf_hdr.handle; > - } else { > - odp_buffer_hdr_t *hdr; > - uint32_t index; > - index = rem_buf_index(chunk); > - hdr = index_to_hdr(pool, index); > + LOCK(&pool->s.lock); > > - handle = hdr->handle; > + if (pool->s.pool_shm == ODP_SHM_INVALID || > + odp_atomic_load_u32(&pool->s.bufcount) < > pool->s.params.num_bufs || > + pool->s.flags.predefined) { > + UNLOCK(&pool->s.lock); > + return -1; > } > > - return handle.u32; > -} > + if (!pool->s.flags.user_supplied_shm) > + odp_shm_free(pool->s.pool_shm); > > + pool->s.pool_shm = ODP_SHM_INVALID; > + UNLOCK(&pool->s.lock); > > -void odp_buffer_free(odp_buffer_t buf) > + return 0; > +} > + > +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool_hdl, size_t size) > { > - odp_buffer_hdr_t *hdr; > - uint32_t pool_id; > - pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk_hdr; > + uint32_t pool_id = pool_handle_to_index(pool_hdl); > + pool_entry_t *pool = get_pool_entry(pool_id); > + size_t totsize = pool->s.headroom + size + pool->s.tailroom; > + odp_anybuf_t *buf; > > - hdr = odp_buf_to_hdr(buf); > - pool_id = pool_handle_to_index(hdr->pool_hdl); > - pool = get_pool_entry(pool_id); > - chunk_hdr = local_chunk[pool_id]; > + /* Reject oversized allocation requests 
*/ > + if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) || > + (!pool->s.flags.unsegmented && totsize > > ODP_CONFIG_BUF_MAX_SIZE)) > + return ODP_BUFFER_INVALID; > > - if (chunk_hdr && chunk_hdr->chunk.num_bufs == ODP_BUFS_PER_CHUNK - > 1) { > - /* Current chunk is full. Push back to the pool */ > - LOCK(&pool->s.lock); > - add_chunk(pool, chunk_hdr); > - UNLOCK(&pool->s.lock); > - chunk_hdr = NULL; > + /* Try to satisfy request from the local cache */ > + buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id], > + &pool->s, totsize); > + > + /* If cache is empty, satisfy request from the pool */ > + if (odp_unlikely(buf == NULL)) { > + buf = (odp_anybuf_t *)(void *)get_buf(&pool->s); > + > + if (odp_unlikely(buf == NULL)) > + return ODP_BUFFER_INVALID; > + > + /* Get blocks for this buffer, if pool uses application > data */ > + if (buf->buf.size < totsize) { > + size_t needed = totsize - buf->buf.size; > + do { > + uint8_t *blk = get_blk(&pool->s); > + if (blk == NULL) { > + ret_buf(&pool->s, &buf->buf); > + return ODP_BUFFER_INVALID; > + } > + buf->buf.addr[buf->buf.segcount++] = blk; > + needed -= pool->s.seg_size; > + } while ((ssize_t)needed > 0); > + buf->buf.size = buf->buf.segcount * > pool->s.seg_size; > + } > } > > - if (chunk_hdr == NULL) { > - /* Use this buffer */ > - chunk_hdr = (odp_buffer_chunk_hdr_t *)hdr; > - local_chunk[pool_id] = chunk_hdr; > - chunk_hdr->chunk.num_bufs = 0; > - } else { > - /* Add to current chunk */ > - add_buf_index(chunk_hdr, hdr->index); > + /* By default, buffers inherit their pool's zeroization setting */ > + buf->buf.flags.zeroized = pool->s.flags.zeroized; > + > + if (buf->buf.type == ODP_BUFFER_TYPE_PACKET) { > + packet_init(pool, &buf->pkt, size); > + > + if (pool->s.init_params.buf_init != NULL) > + (*pool->s.init_params.buf_init) > + (buf->buf.handle.handle, > + pool->s.init_params.buf_init_arg); > } > + > + return odp_hdr_to_buf(&buf->buf); > } > > +odp_buffer_t 
odp_buffer_alloc(odp_buffer_pool_t pool_hdl) > +{ > + return buffer_alloc(pool_hdl, > + > odp_pool_to_entry(pool_hdl)->s.params.buf_size); > +} > > -odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) > +void odp_buffer_free(odp_buffer_t buf) > { > - odp_buffer_hdr_t *hdr; > + odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf); > + pool_entry_t *pool = odp_buf_to_pool(buf_hdr); > > - hdr = odp_buf_to_hdr(buf); > - return hdr->pool_hdl; > + if (odp_unlikely(pool->s.low_wm_assert)) > + ret_buf(&pool->s, buf_hdr); > + else > + ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr); > } > IMO, We need to have high_wm_assert for local_cache buffer list so that a single thread does not hold all the free buffers in the pool. This could happen at the receiving thread of a IPC communication which receives the buffer and frees and could potentially hold all the buffers > +void _odp_flush_caches(void) > +{ > + int i; > + > + for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { > + pool_entry_t *pool = get_pool_entry(i); > + flush_cache(&local_cache[i], &pool->s); > + } > +} > > void odp_buffer_pool_print(odp_buffer_pool_t pool_hdl) > { > pool_entry_t *pool; > - odp_buffer_chunk_hdr_t *chunk_hdr; > - uint32_t i; > uint32_t pool_id; > > pool_id = pool_handle_to_index(pool_hdl); > pool = get_pool_entry(pool_id); > > - ODP_PRINT("Pool info\n"); > - ODP_PRINT("---------\n"); > - ODP_PRINT(" pool %i\n", pool->s.pool_hdl); > - ODP_PRINT(" name %s\n", pool->s.name); > - ODP_PRINT(" pool base %p\n", > pool->s.pool_base_addr); > - ODP_PRINT(" buf base 0x%"PRIxPTR"\n", pool->s.buf_base); > - ODP_PRINT(" pool size 0x%"PRIx64"\n", pool->s.pool_size); > - ODP_PRINT(" buf size %zu\n", pool->s.user_size); > - ODP_PRINT(" buf align %zu\n", pool->s.user_align); > - ODP_PRINT(" hdr size %zu\n", pool->s.hdr_size); > - ODP_PRINT(" alloc size %zu\n", pool->s.buf_size); > - ODP_PRINT(" offset to hdr %zu\n", pool->s.buf_offset); > - ODP_PRINT(" num bufs %"PRIu64"\n", pool->s.num_bufs); > - ODP_PRINT(" free 
bufs %"PRIu64"\n", pool->s.free_bufs); > - > - /* first chunk */ > - chunk_hdr = pool->s.head; > - > - if (chunk_hdr == NULL) { > - ODP_ERR(" POOL EMPTY\n"); > - return; > - } > - > - ODP_PRINT("\n First chunk\n"); > - > - for (i = 0; i < chunk_hdr->chunk.num_bufs - 1; i++) { > - uint32_t index; > - odp_buffer_hdr_t *hdr; > - > - index = chunk_hdr->chunk.buf_index[i]; > - hdr = index_to_hdr(pool, index); > - > - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, hdr->addr, > - index); > - } > - > - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, > chunk_hdr->buf_hdr.addr, > - chunk_hdr->buf_hdr.index); > - > - /* next chunk */ > - chunk_hdr = next_chunk(pool, chunk_hdr); > + uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount); > + uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount); > + uint64_t bufallocs = odp_atomic_load_u64(&pool->s.bufallocs); > + uint64_t buffrees = odp_atomic_load_u64(&pool->s.buffrees); > + uint64_t blkallocs = odp_atomic_load_u64(&pool->s.blkallocs); > + uint64_t blkfrees = odp_atomic_load_u64(&pool->s.blkfrees); > + uint64_t bufempty = odp_atomic_load_u64(&pool->s.bufempty); > + uint64_t blkempty = odp_atomic_load_u64(&pool->s.blkempty); > + uint64_t hiwmct = odp_atomic_load_u64(&pool->s.high_wm_count); > + uint64_t lowmct = odp_atomic_load_u64(&pool->s.low_wm_count); > + > + ODP_DBG("Pool info\n"); > + ODP_DBG("---------\n"); > + ODP_DBG(" pool %i\n", pool->s.pool_hdl); > + ODP_DBG(" name %s\n", > + pool->s.flags.has_name ? pool->s.name : "Unnamed Pool"); > + ODP_DBG(" pool type %s\n", > + pool->s.params.buf_type == ODP_BUFFER_TYPE_RAW ? "raw" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_PACKET ? > "packet" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_TIMEOUT ? > "timeout" : > + (pool->s.params.buf_type == ODP_BUFFER_TYPE_ANY ? "any" : > + "unknown")))); > + ODP_DBG(" pool storage %sODP managed\n", > + pool->s.flags.user_supplied_shm ? 
> + "application provided, " : ""); > + ODP_DBG(" pool status %s\n", > + pool->s.quiesced ? "quiesced" : "active"); > + ODP_DBG(" pool opts %s, %s, %s\n", > + pool->s.flags.unsegmented ? "unsegmented" : "segmented", > + pool->s.flags.zeroized ? "zeroized" : "non-zeroized", > + pool->s.flags.predefined ? "predefined" : "created"); > + ODP_DBG(" pool base %p\n", pool->s.pool_base_addr); > + ODP_DBG(" pool size %zu (%zu pages)\n", > + pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE); > + ODP_DBG(" udata size %zu\n", pool->s.init_params.udata_size); > + ODP_DBG(" buf size %zu\n", pool->s.params.buf_size); > + ODP_DBG(" num bufs %u\n", pool->s.params.num_bufs); > + ODP_DBG(" bufs available %u %s\n", > + bufcount, > + pool->s.low_wm_assert ? " **low wm asserted**" : ""); > + ODP_DBG(" bufs in use %u\n", pool->s.params.num_bufs - > bufcount); > + ODP_DBG(" buf allocs %lu\n", bufallocs); > + ODP_DBG(" buf frees %lu\n", buffrees); > + ODP_DBG(" buf empty %lu\n", bufempty); > + ODP_DBG(" blk size %zu\n", > + pool->s.seg_size > ODP_MAX_INLINE_BUF ? 
pool->s.seg_size : > 0); > + ODP_DBG(" blks available %u\n", blkcount); > + ODP_DBG(" blk allocs %lu\n", blkallocs); > + ODP_DBG(" blk frees %lu\n", blkfrees); > + ODP_DBG(" blk empty %lu\n", blkempty); > + ODP_DBG(" high wm value %lu\n", pool->s.high_wm); > + ODP_DBG(" high wm count %lu\n", hiwmct); > + ODP_DBG(" low wm value %lu\n", pool->s.low_wm); > + ODP_DBG(" low wm count %lu\n", lowmct); > +} > > - if (chunk_hdr) { > - ODP_PRINT(" Next chunk\n"); > - ODP_PRINT(" addr %p, id %"PRIu32"\n", > chunk_hdr->buf_hdr.addr, > - chunk_hdr->buf_hdr.index); > - } > > - ODP_PRINT("\n"); > +odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) > +{ > + return odp_buf_to_hdr(buf)->pool_hdl; > } > diff --git a/platform/linux-generic/odp_linux.c > b/platform/linux-generic/odp_linux.c > index ecd77b3..95761a9 100644 > --- a/platform/linux-generic/odp_linux.c > +++ b/platform/linux-generic/odp_linux.c > @@ -43,7 +43,9 @@ static void *odp_run_start_routine(void *arg) > return NULL; > } > > - return start_args->start_routine(start_args->arg); > + void *ret = start_args->start_routine(start_args->arg); > + _odp_flush_caches(); > + return ret; > } > > > diff --git a/platform/linux-generic/odp_packet.c > b/platform/linux-generic/odp_packet.c > index a1bf18e..726e086 100644 > --- a/platform/linux-generic/odp_packet.c > +++ b/platform/linux-generic/odp_packet.c > @@ -24,17 +24,9 @@ static inline uint8_t parse_ipv6(odp_packet_hdr_t > *pkt_hdr, > void odp_packet_init(odp_packet_t pkt) > { > odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); > - const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, > buf_hdr); > - uint8_t *start; > - size_t len; > - > - start = (uint8_t *)pkt_hdr + start_offset; > - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; > - memset(start, 0, len); > + pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr); > > - pkt_hdr->l2_offset = ODP_PACKET_OFFSET_INVALID; > - pkt_hdr->l3_offset = ODP_PACKET_OFFSET_INVALID; > - pkt_hdr->l4_offset = 
ODP_PACKET_OFFSET_INVALID; > + packet_init(pool, pkt_hdr, 0); > } > > odp_packet_t odp_packet_from_buffer(odp_buffer_t buf) > @@ -64,7 +56,7 @@ uint8_t *odp_packet_addr(odp_packet_t pkt) > > uint8_t *odp_packet_data(odp_packet_t pkt) > { > - return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->frame_offset; > + return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->headroom; > } > > > @@ -131,20 +123,13 @@ void odp_packet_set_l4_offset(odp_packet_t pkt, > size_t offset) > > int odp_packet_is_segmented(odp_packet_t pkt) > { > - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); > - > - if (buf_hdr->scatter.num_bufs == 0) > - return 0; > - else > - return 1; > + return odp_packet_hdr(pkt)->buf_hdr.segcount > 1; > } > > > int odp_packet_seg_count(odp_packet_t pkt) > { > - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); > - > - return (int)buf_hdr->scatter.num_bufs + 1; > + return odp_packet_hdr(pkt)->buf_hdr.segcount; > } > > > @@ -170,7 +155,7 @@ void odp_packet_parse(odp_packet_t pkt, size_t len, > size_t frame_offset) > uint8_t ip_proto = 0; > > pkt_hdr->input_flags.eth = 1; > - pkt_hdr->frame_offset = frame_offset; > + pkt_hdr->l2_offset = frame_offset; > pkt_hdr->frame_len = len; > > if (len > ODPH_ETH_LEN_MAX) > @@ -330,8 +315,6 @@ void odp_packet_print(odp_packet_t pkt) > len += snprintf(&str[len], n-len, > " output_flags 0x%x\n", hdr->output_flags.all); > len += snprintf(&str[len], n-len, > - " frame_offset %u\n", hdr->frame_offset); > - len += snprintf(&str[len], n-len, > " l2_offset %u\n", hdr->l2_offset); > len += snprintf(&str[len], n-len, > " l3_offset %u\n", hdr->l3_offset); > @@ -358,14 +341,13 @@ int odp_packet_copy(odp_packet_t pkt_dst, > odp_packet_t pkt_src) > if (pkt_dst == ODP_PACKET_INVALID || pkt_src == ODP_PACKET_INVALID) > return -1; > > - if (pkt_hdr_dst->buf_hdr.size < > - pkt_hdr_src->frame_len + pkt_hdr_src->frame_offset) > + if (pkt_hdr_dst->buf_hdr.size < pkt_hdr_src->frame_len) > return -1; > > /* Copy packet header 
*/ > start_dst = (uint8_t *)pkt_hdr_dst + start_offset; > start_src = (uint8_t *)pkt_hdr_src + start_offset; > - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; > + len = sizeof(odp_packet_hdr_t) - start_offset; > memcpy(start_dst, start_src, len); > > /* Copy frame payload */ > @@ -374,13 +356,6 @@ int odp_packet_copy(odp_packet_t pkt_dst, > odp_packet_t pkt_src) > len = pkt_hdr_src->frame_len; > memcpy(start_dst, start_src, len); > > - /* Copy useful things from the buffer header */ > - pkt_hdr_dst->buf_hdr.cur_offset = pkt_hdr_src->buf_hdr.cur_offset; > - > - /* Create a copy of the scatter list */ > - odp_buffer_copy_scatter(odp_packet_to_buffer(pkt_dst), > - odp_packet_to_buffer(pkt_src)); > - > return 0; > } > > diff --git a/platform/linux-generic/odp_queue.c > b/platform/linux-generic/odp_queue.c > index c278094..a7c5e42 100644 > --- a/platform/linux-generic/odp_queue.c > +++ b/platform/linux-generic/odp_queue.c > @@ -11,6 +11,7 @@ > #include <odp_buffer.h> > #include <odp_buffer_internal.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > #include <odp_internal.h> > #include <odp_shared_memory.h> > #include <odp_schedule_internal.h> > diff --git a/platform/linux-generic/odp_schedule.c > b/platform/linux-generic/odp_schedule.c > index 7c09c23..2f0cfe4 100644 > --- a/platform/linux-generic/odp_schedule.c > +++ b/platform/linux-generic/odp_schedule.c > @@ -83,8 +83,8 @@ int odp_schedule_init_global(void) > { > odp_shm_t shm; > odp_buffer_pool_t pool; > - void *pool_base; > int i, j; > + odp_buffer_pool_param_t params; > > ODP_DBG("Schedule init ... 
"); > > @@ -99,20 +99,12 @@ int odp_schedule_init_global(void) > return -1; > } > > - shm = odp_shm_reserve("odp_sched_pool", > - SCHED_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = sizeof(queue_desc_t); > + params.buf_align = ODP_CACHE_LINE_SIZE; > + params.num_bufs = SCHED_POOL_SIZE/sizeof(queue_desc_t); > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - pool_base = odp_shm_addr(shm); > - > - if (pool_base == NULL) { > - ODP_ERR("Schedule init: Shm reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("odp_sched_pool", pool_base, > - SCHED_POOL_SIZE, > sizeof(queue_desc_t), > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("odp_sched_pool", ODP_SHM_NULL, > ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > ODP_ERR("Schedule init: Pool create failed.\n"); > diff --git a/platform/linux-generic/odp_timer.c > b/platform/linux-generic/odp_timer.c > index a4fef58..7bd6874 100644 > --- a/platform/linux-generic/odp_timer.c > +++ b/platform/linux-generic/odp_timer.c > @@ -5,9 +5,10 @@ > */ > > #include <odp_timer.h> > -#include <odp_timer_internal.h> > #include <odp_time.h> > #include <odp_buffer_pool_internal.h> > +#include <odp_buffer_inlines.h> > +#include <odp_timer_internal.h> > #include <odp_internal.h> > #include <odp_atomic.h> > #include <odp_spinlock.h> > diff --git a/test/api_test/odp_timer_ping.c > b/test/api_test/odp_timer_ping.c > index 48f1885..aa2a490 100644 > --- a/test/api_test/odp_timer_ping.c > +++ b/test/api_test/odp_timer_ping.c > @@ -321,9 +321,8 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED) > ping_arg_t pingarg; > odp_queue_t queue; > odp_buffer_pool_t pool; > - void *pool_base; > int i; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > if (odp_test_global_init() != 0) > return -1; > @@ -336,14 +335,14 @@ int main(int argc ODP_UNUSED, char *argv[] > ODP_UNUSED) > /* > * Create message pool > */ > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, 
ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + > + params.buf_size = BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE/BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > + > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > + > if (pool == ODP_BUFFER_POOL_INVALID) { > LOG_ERR("Pool create failed.\n"); > return -1; > diff --git a/test/validation/odp_crypto.c b/test/validation/odp_crypto.c > index 03ca438..72cf0f0 100644 > --- a/test/validation/odp_crypto.c > +++ b/test/validation/odp_crypto.c > @@ -25,26 +25,17 @@ CU_SuiteInfo odp_testsuites[] = { > > int tests_global_init(void) > { > - odp_shm_t shm; > - void *pool_base; > + odp_buffer_pool_param_t params; > odp_buffer_pool_t pool; > odp_queue_t out_queue; > > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, > - ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - pool_base = odp_shm_addr(shm); > - if (!pool_base) { > - fprintf(stderr, "Packet pool allocation failed.\n"); > - return -1; > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (ODP_BUFFER_POOL_INVALID == pool) { > fprintf(stderr, "Packet pool creation failed.\n"); > return -1; > @@ -55,20 +46,14 @@ int tests_global_init(void) > fprintf(stderr, "Crypto outq creation failed.\n"); > return -1; > } > - shm = odp_shm_reserve("shm_compl_pool", > - SHM_COMPL_POOL_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_SHM_SW_ONLY); > - pool_base = odp_shm_addr(shm); > - if (!pool_base) { > - fprintf(stderr, 
"Completion pool allocation failed.\n"); > - return -1; > - } > - pool = odp_buffer_pool_create("compl_pool", pool_base, > - SHM_COMPL_POOL_SIZE, > - SHM_COMPL_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > + > + params.buf_size = SHM_COMPL_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_COMPL_POOL_SIZE/SHM_COMPL_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > + > + pool = odp_buffer_pool_create("compl_pool", ODP_SHM_NULL, ¶ms); > + > if (ODP_BUFFER_POOL_INVALID == pool) { > fprintf(stderr, "Completion pool creation failed.\n"); > return -1; > diff --git a/test/validation/odp_queue.c b/test/validation/odp_queue.c > index 2c8fe80..6e05ad0 100644 > --- a/test/validation/odp_queue.c > +++ b/test/validation/odp_queue.c > @@ -16,21 +16,14 @@ static int queue_contest = 0xff; > static int init_queue_suite(void) > { > odp_buffer_pool_t pool; > - void *pool_base; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = 0; > + params.buf_align = ODP_CACHE_LINE_SIZE; > + params.num_bufs = 1024 * 10; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - pool_base = odp_shm_addr(shm); > - > - if (NULL == pool_base) { > - printf("Shared memory reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, > MSG_POOL_SIZE, 0, > - ODP_CACHE_LINE_SIZE, > ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > if (ODP_BUFFER_POOL_INVALID == pool) { > printf("Pool create failed.\n"); > -- > 1.8.3.2 > > > _______________________________________________ > lng-odp mailing list > lng-odp@lists.linaro.org > http://lists.linaro.org/mailman/listinfo/lng-odp >
Bala and I had a hangout earlier to discuss his comments. See inline responses for Petri's. On Mon, Dec 8, 2014 at 10:38 AM, Savolainen, Petri (NSN - FI/Espoo) < petri.savolainen@nsn.com> wrote: > Didn't review the implementation. > > > -----Original Message----- > > From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp- > > bounces@lists.linaro.org] On Behalf Of ext Bill Fischofer > > Sent: Monday, December 08, 2014 1:24 AM > > To: lng-odp@lists.linaro.org > > Subject: [lng-odp] [RFC PATCH] RFC: Implement v0.5 buffer pool APIs > > > > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > > --- > > > > Petri: Please review the following files here: > > platform/linux-generic/include/api/odp_buffer.h > > platform/linux-generic/include/api/odp_buffer_pool.h > > platform/linux-generic/include/api/odp_config.h > > > > This patch is complete and compilable/testable. It is RFC pending > > Petri approval of the public API headers and recommendations for > > final packaging. > > > > example/generator/odp_generator.c | 19 +- > > example/ipsec/odp_ipsec.c | 57 +- > > example/l2fwd/odp_l2fwd.c | 19 +- > > example/odp_example/odp_example.c | 18 +- > > example/packet/odp_pktio.c | 19 +- > > example/timer/odp_timer_test.c | 13 +- > > platform/linux-generic/include/api/odp_buffer.h | 3 +- > > .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- > > platform/linux-generic/include/api/odp_config.h | 19 + > > .../linux-generic/include/api/odp_platform_types.h | 12 + > > .../linux-generic/include/api/odp_shared_memory.h | 10 +- > > .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ > > .../linux-generic/include/odp_buffer_internal.h | 150 ++-- > > .../include/odp_buffer_pool_internal.h | 351 ++++++++-- > > platform/linux-generic/include/odp_internal.h | 2 + > > .../linux-generic/include/odp_packet_internal.h | 50 +- > > .../linux-generic/include/odp_timer_internal.h | 11 +- > > platform/linux-generic/odp_buffer.c | 33 +- > > platform/linux-generic/odp_buffer_pool.c 
| 777 > ++++++++++------ > > ----- > > platform/linux-generic/odp_linux.c | 4 +- > > platform/linux-generic/odp_packet.c | 41 +- > > platform/linux-generic/odp_queue.c | 1 + > > platform/linux-generic/odp_schedule.c | 20 +- > > platform/linux-generic/odp_timer.c | 3 +- > > test/api_test/odp_timer_ping.c | 19 +- > > test/validation/odp_crypto.c | 43 +- > > test/validation/odp_queue.c | 19 +- > > 27 files changed, 1208 insertions(+), 758 deletions(-) > > create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h > > > > diff --git a/example/generator/odp_generator.c > > b/example/generator/odp_generator.c > > index 73b0369..476cbef 100644 > > --- a/example/generator/odp_generator.c > > +++ b/example/generator/odp_generator.c > > @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == 
ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c > > index 76d27c5..f96338c 100644 > > --- a/example/ipsec/odp_ipsec.c > > +++ b/example/ipsec/odp_ipsec.c > > @@ -367,8 +367,7 @@ static > > void ipsec_init_pre(void) > > { > > odp_queue_param_t qparam; > > - void *pool_base; > > - odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* > > * Create queues > > @@ -401,16 +400,12 @@ void ipsec_init_pre(void) > > } > > > > /* Create output buffer pool */ > > - shm = odp_shm_reserve("shm_out_pool", > > - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_OUT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - out_pool = odp_buffer_pool_create("out_pool", pool_base, > > - SHM_OUT_POOL_SIZE, > > - SHM_OUT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, > > ¶ms); > > > > if (ODP_BUFFER_POOL_INVALID == out_pool) { > > EXAMPLE_ERR("Error: message pool create failed.\n"); > > @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) > > { > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > int stream_count; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet buffer pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = 
SHM_PKT_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - pool_base = odp_shm_addr(shm); > > - > > - if (NULL == pool_base) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > > + ¶ms); > > > > - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (ODP_BUFFER_POOL_INVALID == pkt_pool) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > } > > > > /* Create context buffer pool */ > > - shm = odp_shm_reserve("shm_ctx_pool", > > - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_CTX_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > > > - if (NULL == pool_base) { > > - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, > > + ¶ms); > > > > - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, > > - SHM_CTX_POOL_SIZE, > > - SHM_CTX_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_RAW); > > if (ODP_BUFFER_POOL_INVALID == ctx_pool) { > > EXAMPLE_ERR("Error: context pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c > > index ebac8c5..3c1fd6a 100644 > > --- a/example/l2fwd/odp_l2fwd.c > > +++ b/example/l2fwd/odp_l2fwd.c > > @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) > > { > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > odp_pktio_t pktio; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP 
before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, > ¶ms); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/odp_example/odp_example.c > > b/example/odp_example/odp_example.c > > index 96a2912..8373f12 100644 > > --- a/example/odp_example/odp_example.c > > +++ b/example/odp_example/odp_example.c > > @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) > > test_args_t args; > > int num_workers; > > odp_buffer_pool_t pool; > > - void *pool_base; > > odp_queue_t queue; > > int i, j; > > int prios; > > int first_core; > > odp_shm_t shm; > > test_globals_t *globals; > > + odp_buffer_pool_param_t params; > > > > printf("\nODP example starts\n\n"); > > > > @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) > > /* > > * Create message pool > > */ > > - shm = odp_shm_reserve("msg_pool", > > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = sizeof(test_message_t); > > + params.buf_align = 0; > > + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); > > + params.buf_type = 
ODP_BUFFER_TYPE_RAW; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Shared memory reserve failed.\n"); > > - return -1; > > - } > > - > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > > - sizeof(test_message_t), > > - ODP_CACHE_LINE_SIZE, > ODP_BUFFER_TYPE_RAW); > > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Pool create failed.\n"); > > diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c > > index 7d51682..f2e7b2d 100644 > > --- a/example/packet/odp_pktio.c > > +++ b/example/packet/odp_pktio.c > > @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -389,20 +389,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git 
a/example/timer/odp_timer_test.c > > b/example/timer/odp_timer_test.c > > index 9968bfe..0d6e31a 100644 > > --- a/example/timer/odp_timer_test.c > > +++ b/example/timer/odp_timer_test.c > > @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) > > test_args_t args; > > int num_workers; > > odp_buffer_pool_t pool; > > - void *pool_base; > > odp_queue_t queue; > > int first_core; > > uint64_t cycles, ns; > > odp_queue_param_t param; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > printf("\nODP timer example starts\n"); > > > > @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) > > */ > > shm = odp_shm_reserve("msg_pool", > > MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > > - 0, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_TIMEOUT); > > + params.buf_size = 0; > > + params.buf_align = 0; > > + params.num_bufs = MSG_POOL_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; > > + > > + pool = odp_buffer_pool_create("msg_pool", shm, ¶ms); > > > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Pool create failed.\n"); > > diff --git a/platform/linux-generic/include/api/odp_buffer.h > > b/platform/linux-generic/include/api/odp_buffer.h > > index da23120..e981324 100644 > > --- a/platform/linux-generic/include/api/odp_buffer.h > > +++ b/platform/linux-generic/include/api/odp_buffer.h > > @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); > > * > > * @param buf Buffer handle > > * > > - * @return 1 if valid, otherwise 0 > > + * @retval 1 Buffer handle represents a valid buffer. > > + * @retval 0 Buffer handle does not represent a valid buffer. 
> > */ > > int odp_buffer_is_valid(odp_buffer_t buf); > > > > diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h > > b/platform/linux-generic/include/api/odp_buffer_pool.h > > index 30b83e0..3d85066 100644 > > --- a/platform/linux-generic/include/api/odp_buffer_pool.h > > +++ b/platform/linux-generic/include/api/odp_buffer_pool.h > > @@ -32,42 +32,114 @@ extern "C" { > > /** Maximum queue name lenght in chars */ > > #define ODP_BUFFER_POOL_NAME_LEN 32 > > > > -/** Invalid buffer pool */ > > -#define ODP_BUFFER_POOL_INVALID 0 > > +/** > > + * Buffer pool parameters > > + * Used to communicate buffer pool creation options. > > + */ > > +typedef struct odp_buffer_pool_param_t { > > + size_t buf_size; /**< Buffer size in bytes. The maximum > > + number of bytes application will > > + store in each buffer. */ > > To be in align with packet API: > uint32_t buf_len; /**< Buffer length in bytes. The maximum > number of bytes application will > store in each buffer. In case of packet > type buffers this will be rounded up to > ODP_CONFIG_PACKET_BUF_LEN_MIN. > */ > > This is what odp_buffer_len() or odp_packet_seg_buf_len(pkt, seg) would > return - so for the first segment it includes the headroom. > > ODP_CONFIG_PACKET_HEADROOM < ODP_CONFIG_PACKET_BUF_LEN_MIN <= buf_len > > Default ODP_CONFIG_PACKET_HEADROOM could be e.g. 64 bytes. > I was thinking of making the default for linux-generic be 66 since that would offset the packet by two bytes to allow the IP and TCP headers to be word-aligned. Reasonable? > > ODP_CONFIG_PACKET_BUF_LEN_MIN 128 (or 256) could be defined also in > odp_config.h. Buf_len would be rounded up into this value. > > > > + size_t buf_align; /**< Minimum buffer alignment in bytes. > > + Valid values are powers of two. Use 0 > > + for default alignment. Default will > > + always be a multiple of 8. */ > > uint32_t buf_align; > OK, I will revise these to uint32_t. 
For packets, the buf_size will be interpreted as the packet data size and the implementation will add in the configured headroom and/or tailroom to get the actual number of bytes that are reserved for the packet. I'll also add the ODP_CONFIG_PACKET_BUF_LEN_MIN value and will set it to 256. > > > > + uint32_t num_bufs; /**< Number of buffers in the pool */ > > + int buf_type; /**< Buffer type */ > > +} odp_buffer_pool_param_t; > > > > /** > > * Create a buffer pool > > + * This routine is used to create a buffer pool. It take three > > + * arguments: the optional name of the pool to be created, an optional > > shared > > + * memory handle, and a parameter struct that describes the pool to be > > + * created. If a name is not specified the result is an anonymous pool > > that > > + * cannot be referenced by odp_buffer_pool_lookup(). > > + * > > + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 > > chars. > > + * May be specified as NULL for anonymous pools. > > * > > - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 > > chars) > > - * @param base_addr Pool base address > > - * @param size Pool size in bytes > > - * @param buf_size Buffer size in bytes > > - * @param buf_align Minimum buffer alignment > > - * @param buf_type Buffer type > > + * @param shm The shared memory object in which to create the pool. > > + * Use ODP_SHM_NULL to reserve default memory type > > + * for the buffer type. > > * > > - * @return Buffer pool handle > > + * @param params Buffer pool parameters. 
> > + * > > + * @retval Handle Buffer pool handle on success > > + * @retval ODP_BUFFER_POOL_INVALID if call failed > > */ > > + > > odp_buffer_pool_t odp_buffer_pool_create(const char *name, > > - void *base_addr, uint64_t size, > > - size_t buf_size, size_t buf_align, > > - int buf_type); > > + odp_shm_t shm, > > + odp_buffer_pool_param_t *params); > > > > +/** > > + * Destroy a buffer pool previously created by odp_buffer_pool_create() > > + * > > + * @param pool Handle of the buffer pool to be destroyed > > + * > > + * @retval 0 Success > > + * @retval -1 Failure > > + * > > + * @note This routine destroys a previously created buffer pool. This > > call > > + * does not destroy any shared memory object passed to > > + * odp_buffer_pool_create() used to store the buffer pool contents. The > > caller > > + * takes responsibility for that. If no shared memory object was passed > > as > > + * part of the create call, then this routine will destroy any internal > > shared > > + * memory objects associated with the buffer pool. Results are undefined > > if > > + * an attempt is made to destroy a buffer pool that contains allocated > or > > + * otherwise active buffers. > > + */ > > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); > > > > /** > > * Find a buffer pool by name > > * > > * @param name Name of the pool > > * > > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. > > + * @retval Handle Buffer pool handle on successs > > Typos: here ^^^^^^^ and here ^ > Will correct success typo. Not sure what other typo you see. The syntax of @retval is that the first token that follows the keyword is the return value and the rest of the line is the explanation of that value, so this isn't to be read as a sentence. > > > + * @retval ODP_BUFFER_POOL_INVALID if not found > > + * > > + * @note This routine cannot be used to look up an anonymous pool (one > > created > > + * with no name). 
> > */ > > odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); > > > > +/** > > + * Buffer pool information struct > > + * Used to get information about a buffer pool. > > + */ > > +typedef struct odp_buffer_pool_info_t { > > + const char *name; /**< pool name */ > > Add SHM handle into the info struct: odp_shm_t shm; > I'm just implementing what's in the API Delta document (see pp. 6-7). When we discussed this you indicated that you wanted the shm to be returned as a separate return parameter of odp_buffer_pool_info(). If you want it integrated into the odp_buffer_pool_info_t I have no problem doing that, but it should be changed in the spec as well. As I recall, your argument for keeping them separate was that shm is a separate input parameter to odp_buffer_pool_create() rather than being part of the odp_buffer_pool_param_t struct, so having it as a separate output parameter keeps things symmetric. > > > + odp_buffer_pool_param_t params; /**< pool parameters */ > > +} odp_buffer_pool_info_t; > > + > > +/** > > + * Retrieve information about a buffer pool > > + * > > + * @param pool Buffer pool handle > > + * > > + * @param shm Recieves odp_shm_t supplied by caller at > > + * pool creation, or ODP_SHM_NULL if the > > + * pool is managed internally. > > Remove: shm as it's part of info struct > Same comment as above. No problem changing this if you've changed your mind. > > > + * > > + * @param[out] info Receives an odp_buffer_pool_info_t object > > + * that describes the pool. > > + * > > + * @retval 0 Success > > + * @retval -1 Failure. Info could not be retrieved. > > + */ > > + > > +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, > > + odp_buffer_pool_info_t *info); > > > > /** > > * Print buffer pool info > > * > > * @param pool Pool handle > > * > > + * @note This routine writes implementation-defined information about > the > > + * specified buffer pool to the ODP log. The intended use is for > > debugging. 
> > */ > > void odp_buffer_pool_print(odp_buffer_pool_t pool); > > > > @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); > > * The validity of a buffer can be cheked at any time with > > odp_buffer_is_valid() > > * @param pool Pool handle > > * > > - * @return Buffer handle or ODP_BUFFER_INVALID > > + * @retval Handle Buffer handle of allocated buffer > > Typo: ^^^^^^ > Not a typo. See previous comments about how @retval works. The generated doxygen looks fine. > > > + * @retval ODP_BUFFER_INVALID Allocation failed > > */ > > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > > * > > * @param buf Buffer handle > > * > > - * @return Buffer pool the buffer was allocated from > > + * @retval Handle Buffer pool handle that the buffer was allocated from > > */ > > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); > > > Buffer API call. Would be more logical in odp_buffer.h (can be moved > later). > I agree, however earlier you argued that these were more logically grouped within odp_buffer_pool.h. They were part of this file historically because of the circular typedef dependencies. Now that we have odp_platform_types.h this is no longer the case so I'll move odp_buffer_alloc() and odp_buffer_free() to odp_buffer.h. > > > > > diff --git a/platform/linux-generic/include/api/odp_config.h > > b/platform/linux-generic/include/api/odp_config.h > > index 906897c..5ca5bb2 100644 > > --- a/platform/linux-generic/include/api/odp_config.h > > +++ b/platform/linux-generic/include/api/odp_config.h > > @@ -49,6 +49,25 @@ extern "C" { > > #define ODP_CONFIG_PKTIO_ENTRIES 64 > > > > /** > > + * Buffer segment size to use > > + * This is the granularity of segmented buffers. Sized for now to be > > There are no segmented buffers. Packet have segmentation. Buf_len == > segment size. > In the linux-generic implementation the same buffer structures are used across all buffer types. 
I can rename this or make this an internal parameter as you prefer. Please advise. > > > large > + * enough to support 1500-byte packets since the raw socket interface > > does not > > + * support scatter/gather I/O. ODP requires a minimum segment size of > 128 > > + * bytes with 256 recommended. Linux-generic code will enforce a 256 > byte > > + * minimum. Note that the chosen segment size must be a multiple of > > + * ODP_CACHE_LINE_SIZE. > > + */ > > +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) > > Same as ODP_CONFIG_PACKET_BUF_LEN_MIN. > Not quite. ODP_CONFIG_PACKET_BUF_LEN_MIN would be the lower bound of this option and is intended to be an architectural limit. As I mentioned, currently linux-generic only supports unsegmented packets because the existing odp_packet_socket.c cannot handle scatter/gather reads/writes. Doing this naively by receiving/transmitting to a pktio-level fixed buffer which is then copied to or filled from ODP segments would impose a severe performance penalty, so until native scatter/gather support is added we're stuck. However the APIs and implementation are fully prepared to deal with segmented packets when that support is added. And of course other implementations that make use of linux-generic that don't have this restriction can use the current code directly for that purpose by just hooking it to their native I/O mechanisms. > > > + > +/** > + * Maximum buffer size supported > + * Must be an integral number of segments and should be large enough to > + * accommodate jumbo packets. Attempts to allocate or extend buffers to > > sizes > > + * larger than this limit will fail. > > + */ > > +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) > > + > > This could be instead maximum number of segments per packet: > > Add ODP_CONFIG_PACKET_NUM_SEGS_MAX into odp_config.h ?
> I think it's cleaner to specify this limit this way since packet length is independent of how a given implementation may choose to break packets into segments. Note, for example, that there is no implication that packet segments are all of equal size, even though many implementations will in fact do this. So trying to specify this in terms of numbers of segments would leave doubt as to how big a packet can be handled. This way the limit the application cares about is stated precisely and directly. > > > > +/** > > * @} > > */ > > > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h > > b/platform/linux-generic/include/api/odp_platform_types.h > > index 4db47d3..2181eb6 100644 > > --- a/platform/linux-generic/include/api/odp_platform_types.h > > +++ b/platform/linux-generic/include/api/odp_platform_types.h > > @@ -26,6 +26,9 @@ > > /** ODP Buffer pool */ > > typedef uint32_t odp_buffer_pool_t; > > > > +/** Invalid buffer pool */ > > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) > > > This was and should be defined as 0. It's easier to catch handles that are > init with memset(0) but not set. > All typedefs and associated implementation structures are explicitly implementation-defined and are not architected. This is the platform types for linux-generic. Other platforms will set these to whatever works best for that platform. In my case 0 may be a valid buffer handle and it would add additional overhead to exclude it. odp_buffer_is_valid() provides a robust check on handle validity whenever needed. In any event these are opaque types and the value(s) they contain are meaningless from an application perspective. 
> > > -Petri > > > > + > /** ODP buffer */ > typedef uint32_t odp_buffer_t; > > @@ -65,6 +68,15 @@ typedef uint32_t odp_pktio_t; > #define ODP_PKTIO_ANY ((odp_pktio_t)~0) > > /** > + * ODP shared memory block > + */ > +typedef uint32_t odp_shm_t; > + > +/** Invalid shared memory block */ > +#define ODP_SHM_INVALID 0 > +#define ODP_SHM_NULL ODP_SHM_INVALID /**< Synonym for buffer pool use */ > + > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_shared_memory.h > b/platform/linux-generic/include/api/odp_shared_memory.h > index 26e208b..f70db5a 100644 > --- a/platform/linux-generic/include/api/odp_shared_memory.h > +++ b/platform/linux-generic/include/api/odp_shared_memory.h > @@ -20,6 +20,7 @@ extern "C" { > > > #include <odp_std_types.h> > +#include <odp_platform_types.h> > > /** @defgroup odp_shared_memory ODP SHARED MEMORY > * Operations on shared memory. > @@ -38,15 +39,6 @@ extern "C" { > #define ODP_SHM_PROC 0x2 /**< Share with external processes */ > > /** > - * ODP shared memory block > - */ > -typedef uint32_t odp_shm_t; > - > -/** Invalid shared memory block */ > -#define ODP_SHM_INVALID 0 > - > - > -/** > * Shared memory block info > */ > typedef struct odp_shm_info_t { > http://lists.linaro.org/mailman/listinfo/lng-odp >
From: ext Bill Fischofer [mailto:bill.fischofer@linaro.org] Sent: Monday, December 08, 2014 8:35 PM To: Savolainen, Petri (NSN - FI/Espoo) Cc: lng-odp@lists.linaro.org Subject: Re: [lng-odp] [RFC PATCH] RFC: Implement v0.5 buffer pool APIs Bala and I had a hangout earlier to discuss his comments. See inline responses for Petri's. On Mon, Dec 8, 2014 at 10:38 AM, Savolainen, Petri (NSN - FI/Espoo) <petri.savolainen@nsn.com<mailto:petri.savolainen@nsn.com>> wrote: Didn't review the implementation. > -----Original Message----- > From: lng-odp-bounces@lists.linaro.org<mailto:lng-odp-bounces@lists.linaro.org> [mailto:lng-odp-<mailto:lng-odp-> > bounces@lists.linaro.org<mailto:bounces@lists.linaro.org>] On Behalf Of ext Bill Fischofer > Sent: Monday, December 08, 2014 1:24 AM > To: lng-odp@lists.linaro.org<mailto:lng-odp@lists.linaro.org> > Subject: [lng-odp] [RFC PATCH] RFC: Implement v0.5 buffer pool APIs > > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org<mailto:bill.fischofer@linaro.org>> > --- > > Petri: Please review the following files here: > platform/linux-generic/include/api/odp_buffer.h > platform/linux-generic/include/api/odp_buffer_pool.h > platform/linux-generic/include/api/odp_config.h > > This patch is complete and compilable/testable. It is RFC pending > Petri approval of the public API headers and recommendations for > final packaging. 
> > example/generator/odp_generator.c | 19 +- > example/ipsec/odp_ipsec.c | 57 +- > example/l2fwd/odp_l2fwd.c | 19 +- > example/odp_example/odp_example.c | 18 +- > example/packet/odp_pktio.c | 19 +- > example/timer/odp_timer_test.c | 13 +- > platform/linux-generic/include/api/odp_buffer.h | 3 +- > .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- > platform/linux-generic/include/api/odp_config.h | 19 + > .../linux-generic/include/api/odp_platform_types.h | 12 + > .../linux-generic/include/api/odp_shared_memory.h | 10 +- > .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ > .../linux-generic/include/odp_buffer_internal.h | 150 ++-- > .../include/odp_buffer_pool_internal.h | 351 ++++++++-- > platform/linux-generic/include/odp_internal.h | 2 + > .../linux-generic/include/odp_packet_internal.h | 50 +- > .../linux-generic/include/odp_timer_internal.h | 11 +- > platform/linux-generic/odp_buffer.c | 33 +- > platform/linux-generic/odp_buffer_pool.c | 777 ++++++++++------ > ----- > platform/linux-generic/odp_linux.c | 4 +- > platform/linux-generic/odp_packet.c | 41 +- > platform/linux-generic/odp_queue.c | 1 + > platform/linux-generic/odp_schedule.c | 20 +- > platform/linux-generic/odp_timer.c | 3 +- > test/api_test/odp_timer_ping.c | 19 +- > test/validation/odp_crypto.c | 43 +- > test/validation/odp_queue.c | 19 +- > 27 files changed, 1208 insertions(+), 758 deletions(-) > create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h > > diff --git a/example/generator/odp_generator.c > b/example/generator/odp_generator.c > index 73b0369..476cbef 100644 > --- a/example/generator/odp_generator.c > +++ b/example/generator/odp_generator.c > @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > 
if (odp_init_global(NULL, NULL)) { > @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c > index 76d27c5..f96338c 100644 > --- a/example/ipsec/odp_ipsec.c > +++ b/example/ipsec/odp_ipsec.c > @@ -367,8 +367,7 @@ static > void ipsec_init_pre(void) > { > odp_queue_param_t qparam; > - void *pool_base; > - odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* > * Create queues > @@ -401,16 +400,12 @@ void ipsec_init_pre(void) > } > > /* Create output buffer pool */ > - shm = odp_shm_reserve("shm_out_pool", > - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_OUT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - out_pool = odp_buffer_pool_create("out_pool", pool_base, > - SHM_OUT_POOL_SIZE, > - SHM_OUT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, > ¶ms); > > if (ODP_BUFFER_POOL_INVALID == out_pool) { > EXAMPLE_ERR("Error: 
message pool create failed.\n"); > @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > int stream_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet buffer pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - pool_base = odp_shm_addr(shm); > - > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > + ¶ms); > > - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (ODP_BUFFER_POOL_INVALID == pkt_pool) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > } > > /* Create context buffer pool */ > - shm = odp_shm_reserve("shm_ctx_pool", > - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_CTX_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (NULL == pool_base) { > - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, > + ¶ms); > > - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, > - SHM_CTX_POOL_SIZE, > - SHM_CTX_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_RAW); > if 
(ODP_BUFFER_POOL_INVALID == ctx_pool) { > EXAMPLE_ERR("Error: context pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c > index ebac8c5..3c1fd6a 100644 > --- a/example/l2fwd/odp_l2fwd.c > +++ b/example/l2fwd/odp_l2fwd.c > @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) > { > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_pktio_t pktio; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/odp_example/odp_example.c > b/example/odp_example/odp_example.c > index 96a2912..8373f12 100644 > --- a/example/odp_example/odp_example.c > +++ b/example/odp_example/odp_example.c > @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int i, j; > int prios; > int first_core; > odp_shm_t shm; > test_globals_t 
*globals; > + odp_buffer_pool_param_t params; > > printf("\nODP example starts\n\n"); > > @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) > /* > * Create message pool > */ > - shm = odp_shm_reserve("msg_pool", > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > + params.buf_size = sizeof(test_message_t); > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Shared memory reserve failed.\n"); > - return -1; > - } > - > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - sizeof(test_message_t), > - ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW); > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Pool create failed.\n"); > diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c > index 7d51682..f2e7b2d 100644 > --- a/example/packet/odp_pktio.c > +++ b/example/packet/odp_pktio.c > @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > odp_buffer_pool_t pool; > int num_workers; > - void *pool_base; > int i; > int first_core; > int core_count; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > /* Init ODP before calling anything else */ > if (odp_init_global(NULL, NULL)) { > @@ -389,20 +389,13 @@ int main(int argc, char *argv[]) > printf("First core: %i\n\n", first_core); > > /* Create packet pool */ > - shm = odp_shm_reserve("shm_packet_pool", > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > + params.buf_align = 0; > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > - if (pool_base == NULL) { > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > - exit(EXIT_FAILURE); > - } > + pool = 
odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, ¶ms); > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > - SHM_PKT_POOL_SIZE, > - SHM_PKT_POOL_BUF_SIZE, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_PACKET); > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Error: packet pool create failed.\n"); > exit(EXIT_FAILURE); > diff --git a/example/timer/odp_timer_test.c > b/example/timer/odp_timer_test.c > index 9968bfe..0d6e31a 100644 > --- a/example/timer/odp_timer_test.c > +++ b/example/timer/odp_timer_test.c > @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) > test_args_t args; > int num_workers; > odp_buffer_pool_t pool; > - void *pool_base; > odp_queue_t queue; > int first_core; > uint64_t cycles, ns; > odp_queue_param_t param; > odp_shm_t shm; > + odp_buffer_pool_param_t params; > > printf("\nODP timer example starts\n"); > > @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) > */ > shm = odp_shm_reserve("msg_pool", > MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > - pool_base = odp_shm_addr(shm); > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > - 0, > - ODP_CACHE_LINE_SIZE, > - ODP_BUFFER_TYPE_TIMEOUT); > + params.buf_size = 0; > + params.buf_align = 0; > + params.num_bufs = MSG_POOL_SIZE; > + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; > + > + pool = odp_buffer_pool_create("msg_pool", shm, ¶ms); > > if (pool == ODP_BUFFER_POOL_INVALID) { > EXAMPLE_ERR("Pool create failed.\n"); > diff --git a/platform/linux-generic/include/api/odp_buffer.h > b/platform/linux-generic/include/api/odp_buffer.h > index da23120..e981324 100644 > --- a/platform/linux-generic/include/api/odp_buffer.h > +++ b/platform/linux-generic/include/api/odp_buffer.h > @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return 1 if valid, otherwise 0 > + * @retval 1 Buffer handle represents a valid buffer. > + * @retval 0 Buffer handle does not represent a valid buffer. 
> */ > int odp_buffer_is_valid(odp_buffer_t buf); > > diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h > b/platform/linux-generic/include/api/odp_buffer_pool.h > index 30b83e0..3d85066 100644 > --- a/platform/linux-generic/include/api/odp_buffer_pool.h > +++ b/platform/linux-generic/include/api/odp_buffer_pool.h > @@ -32,42 +32,114 @@ extern "C" { > /** Maximum queue name lenght in chars */ > #define ODP_BUFFER_POOL_NAME_LEN 32 > > -/** Invalid buffer pool */ > -#define ODP_BUFFER_POOL_INVALID 0 > +/** > + * Buffer pool parameters > + * Used to communicate buffer pool creation options. > + */ > +typedef struct odp_buffer_pool_param_t { > + size_t buf_size; /**< Buffer size in bytes. The maximum > + number of bytes application will > + store in each buffer. */ To be in align with packet API: uint32_t buf_len; /**< Buffer length in bytes. The maximum number of bytes application will store in each buffer. In case of packet type buffers this will be rounded up to ODP_CONFIG_PACKET_BUF_LEN_MIN. */ This is what odp_buffer_len() or odp_packet_seg_buf_len(pkt, seg) would return - so for the first segment it includes the headroom. ODP_CONFIG_PACKET_HEADROOM < ODP_CONFIG_PACKET_BUF_LEN_MIN <= buf_len Default ODP_CONFIG_PACKET_HEADROOM could be e.g. 64 bytes. I was thinking of making the default for linux-generic be 66 since that would offset the packet by two bytes to allow the IP and TCP headers to be word-aligned. Reasonable? Yes, headroom is minimum. Packet input decides the actual headroom per incoming packet. ODP_CONFIG_PACKET_BUF_LEN_MIN 128 (or 256) could be defined also in odp_config.h. Buf_len would be rounded up into this value. > + size_t buf_align; /**< Minimum buffer alignment in bytes. > + Valid values are powers of two. Use 0 > + for default alignment. Default will > + always be a multiple of 8. */ uint32_t buf_align; OK, I will revise these to uint32_t. 
For packets, the buf_size will be interpreted as the packet data size and the implementation will add in the configured headroom and/or tailroom to get the actual number of bytes that are reserved for the packet. I'll also add the ODP_CONFIG_PACKET_BUF_LEN_MIN value and will set it to 256. For packets: --------------- pool_param.buf_len == odp_packet_seg_buf_len(pkt, seg) In first seg: odp_packet_seg_buf_len() = odp_packet_headroom() + odp_packet_seg_data_len() In last seg: odp_packet_seg_buf_len() = odp_packet_tailroom() + odp_packet_seg_data_len() In mid seg: odp_packet_seg_buf_len() >= odp_packet_seg_data_len() For raw buffers: -------------------- pool_param.buf_len == odp_buffer_len() > + uint32_t num_bufs; /**< Number of buffers in the pool */ > + int buf_type; /**< Buffer type */ > +} odp_buffer_pool_param_t; > > /** > * Create a buffer pool > + * This routine is used to create a buffer pool. It take three > + * arguments: the optional name of the pool to be created, an optional > shared > + * memory handle, and a parameter struct that describes the pool to be > + * created. If a name is not specified the result is an anonymous pool > that > + * cannot be referenced by odp_buffer_pool_lookup(). > + * > + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 > chars. > + * May be specified as NULL for anonymous pools. > * > - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 > chars) > - * @param base_addr Pool base address > - * @param size Pool size in bytes > - * @param buf_size Buffer size in bytes > - * @param buf_align Minimum buffer alignment > - * @param buf_type Buffer type > + * @param shm The shared memory object in which to create the pool. > + * Use ODP_SHM_NULL to reserve default memory type > + * for the buffer type. > * > - * @return Buffer pool handle > + * @param params Buffer pool parameters. 
> + * > + * @retval Handle Buffer pool handle on success > + * @retval ODP_BUFFER_POOL_INVALID if call failed > */ > + > odp_buffer_pool_t odp_buffer_pool_create(const char *name, > - void *base_addr, uint64_t size, > - size_t buf_size, size_t buf_align, > - int buf_type); > + odp_shm_t shm, > + odp_buffer_pool_param_t *params); > > +/** > + * Destroy a buffer pool previously created by odp_buffer_pool_create() > + * > + * @param pool Handle of the buffer pool to be destroyed > + * > + * @retval 0 Success > + * @retval -1 Failure > + * > + * @note This routine destroys a previously created buffer pool. This > call > + * does not destroy any shared memory object passed to > + * odp_buffer_pool_create() used to store the buffer pool contents. The > caller > + * takes responsibility for that. If no shared memory object was passed > as > + * part of the create call, then this routine will destroy any internal > shared > + * memory objects associated with the buffer pool. Results are undefined > if > + * an attempt is made to destroy a buffer pool that contains allocated or > + * otherwise active buffers. > + */ > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); > > /** > * Find a buffer pool by name > * > * @param name Name of the pool > * > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. > + * @retval Handle Buffer pool handle on successs Typos: here ^^^^^^^ and here ^ Will correct success typo. Not sure what other typo you see. The syntax of @retval is that the first token that follows the keyword is the return value and the rest of the line is the explanation of that value, so this isn't to be read as a sentence. OK, it wasn’t typo but a feature. Still I think documentation in header files should be easily readable. Should we use @return for handles and @retval only when there are defined values (1, 0, ODP_XYZ etc)? 
> + * @retval ODP_BUFFER_POOL_INVALID if not found > + * > + * @note This routine cannot be used to look up an anonymous pool (one > created > + * with no name). > */ > odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); > > +/** > + * Buffer pool information struct > + * Used to get information about a buffer pool. > + */ > +typedef struct odp_buffer_pool_info_t { > + const char *name; /**< pool name */ Add SHM handle into the info struct: odp_shm_t shm; I'm just implementing what's in the API Delta document (see pp. 6-7). When we discussed this you indicated that you wanted the shm to be returned as a separate return parameter of odp_buffer_pool_info(). If you want it integrated into the odp_buffer_pool_info_t I have no problem doing that, but it should be changed in the spec as well. As I recall, your argument for keeping them separate was that shm is a separate input parameter to odp_buffer_pool_create() rather than being part of the odp_buffer_pool_param_t struct, so having it as a separate output parameter keeps things symmetric. Then it just got documented the wrong way around. I wanted to keep shm separate from the pool params struct, but include it to the info struct (just like the ‘name’ parameter). > + odp_buffer_pool_param_t params; /**< pool parameters */ > +} odp_buffer_pool_info_t; > + > +/** > + * Retrieve information about a buffer pool > + * > + * @param pool Buffer pool handle > + * > + * @param shm Recieves odp_shm_t supplied by caller at > + * pool creation, or ODP_SHM_NULL if the > + * pool is managed internally. Remove: shm as it's part of info struct Same comment as above. No problem changing this if you've changed your mind. > + * > + * @param[out] info Receives an odp_buffer_pool_info_t object > + * that describes the pool. > + * > + * @retval 0 Success > + * @retval -1 Failure. Info could not be retrieved. 
> + */ > + > +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, > + odp_buffer_pool_info_t *info); > > /** > * Print buffer pool info > * > * @param pool Pool handle > * > + * @note This routine writes implementation-defined information about the > + * specified buffer pool to the ODP log. The intended use is for > debugging. > */ > void odp_buffer_pool_print(odp_buffer_pool_t pool); > > @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); > * The validity of a buffer can be cheked at any time with > odp_buffer_is_valid() > * @param pool Pool handle > * > - * @return Buffer handle or ODP_BUFFER_INVALID > + * @retval Handle Buffer handle of allocated buffer Typo: ^^^^^^ Not a typo. See previous comments about how @retval works. The generated doxygen looks fine. > + * @retval ODP_BUFFER_INVALID Allocation failed > */ > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > * > * @param buf Buffer handle > * > - * @return Buffer pool the buffer was allocated from > + * @retval Handle Buffer pool handle that the buffer was allocated from > */ > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); Buffer API call. Would be more logical in odp_buffer.h (can be moved later). I agree, however earlier you argued that these were more logically grouped within odp_buffer_pool.h. They were part of this file historically because of the circular typedef dependencies. Now that we have odp_platform_types.h this is no longer the case so I'll move odp_buffer_alloc() and odp_buffer_free() to odp_buffer.h. Free/alloc can stay, since those work on pools (which was the discussion you mentioned). This can move since it works on the buffer itself (and don’t create circular dependency any more). 
> > diff --git a/platform/linux-generic/include/api/odp_config.h > b/platform/linux-generic/include/api/odp_config.h > index 906897c..5ca5bb2 100644 > --- a/platform/linux-generic/include/api/odp_config.h > +++ b/platform/linux-generic/include/api/odp_config.h > @@ -49,6 +49,25 @@ extern "C" { > #define ODP_CONFIG_PKTIO_ENTRIES 64 > > /** > + * Buffer segment size to use > + * This is the granularity of segmented buffers. Sized for now to be There are no segmented buffers. Packet have segmentation. Buf_len == segment size. In the linux-generic implementation the same buffer structures are used across all buffer types. I can rename this or make this an internal parameter as you prefer. Please advise. It is: ODP_CONFIG_PACKET_BUF_LEN_MIN. > large > + * enough to support 1500-byte packets since the raw socket interface > does not > + * support scatter/gather I/O. ODP requires a minimum segment size of 128 > + * bytes with 256 recommended. Linux-generic code will enforce a 256 byte > + * minimum. Note that the chosen segment size must be a multiple of > + * ODP_CACHE_LINE_SIZE. > + */ > +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) Same as ODP_CONFIG_PACKET_BUF_LEN_MIN. Not quite. ODP_CONFIG_PACKET_BUF_LEN_MIN would be the lower bound of this option and is intended to be an architectural limit. As I mentioned, currently linux-generic only supports unsegmented packets because the existing odp_packet_socket.c cannot handle scatter/gather read/writes. Doing this naively by receiving/transmitting to a pktio-level fixed buffer which is then copied to or filled from ODP segments would impose a severe performance penalty, so until native scatter/gather support is added we're stuck. However the APIs and implementation are fully prepared to deal with segmented packets when that support is added. 
And of course other implementations that make use of linux-generic that don't have this restriction can use the current code directly for that purpose but just hooking it to their native I/O mechanisms. For packets: A buffer is a segment. If packet cannot fit into single segment (= single buffer from the pool), it will be stored into multiple segments (= buffers from the pool). For raw buffers: A buffer is the buffer. > + > +/** > + * Maximum buffer size supported > + * Must be an integral number of segments and should be large enough to > + * accommodate jumbo packets. Attempts to allocate or extend buffers to > sizes > + * larger than this limit will fail. > + */ > +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) > + This could be instead maximum number of segments per packet: Add ODP_CONFIG_PACKET_NUM_SEGS_MAX into odp_config.h ? I think it's cleaner to specify this limit this way since packet length is independent of how a given implementation may choose to break packets into segments. Note, for example, that there is no implication that packet segments are all of equal size, even though many implementations will in fact do this. So trying to specify this in terms of numbers of segments would leave doubt as to how big a packet can be handled. This way the limit the application cares about is stated precisely and directly. The it’s ODP_CONFIG_PACKET_BUF_LEN_MAX (sum of buffer lengths over all packet segments) > +/** > * @} > */ > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h > b/platform/linux-generic/include/api/odp_platform_types.h > index 4db47d3..2181eb6 100644 > --- a/platform/linux-generic/include/api/odp_platform_types.h > +++ b/platform/linux-generic/include/api/odp_platform_types.h > @@ -26,6 +26,9 @@ > /** ODP Buffer pool */ > typedef uint32_t odp_buffer_pool_t; > > +/** Invalid buffer pool */ > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) This was and should be defined as 0. 
It's easier to catch handles that are init with memset(0) but not set. All typedefs and associated implementation structures are explicitly implementation-defined and are not architected. This is the platform types for linux-generic. Other platforms will set these to whatever works best for that platform. In my case 0 may be a valid buffer handle and it would add additional overhead to exclude it. odp_buffer_is_valid() provides a robust check on handle validity whenever needed. In any event these are opaque types and the value(s) they contain are meaningless from an application perspective. In linux-generic, we have done some effort already to change all XXX_INVALID values into 0 for the reason I mentioned above. It’s for init code and debugging if all handles use the same invalid value (which is zero). -Petri
On Tue, Dec 9, 2014 at 5:05 AM, Savolainen, Petri (NSN - FI/Espoo) < petri.savolainen@nsn.com> wrote: > > > > > *From:* ext Bill Fischofer [mailto:bill.fischofer@linaro.org] > *Sent:* Monday, December 08, 2014 8:35 PM > *To:* Savolainen, Petri (NSN - FI/Espoo) > *Cc:* lng-odp@lists.linaro.org > *Subject:* Re: [lng-odp] [RFC PATCH] RFC: Implement v0.5 buffer pool APIs > > > > Bala and I had a hangout earlier to discuss his comments. See inline > responses for Petri's. > > > > On Mon, Dec 8, 2014 at 10:38 AM, Savolainen, Petri (NSN - FI/Espoo) < > petri.savolainen@nsn.com> wrote: > > Didn't review the implementation. > > > > -----Original Message----- > > From: lng-odp-bounces@lists.linaro.org [mailto:lng-odp- > > bounces@lists.linaro.org] On Behalf Of ext Bill Fischofer > > Sent: Monday, December 08, 2014 1:24 AM > > To: lng-odp@lists.linaro.org > > Subject: [lng-odp] [RFC PATCH] RFC: Implement v0.5 buffer pool APIs > > > > Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> > > --- > > > > Petri: Please review the following files here: > > platform/linux-generic/include/api/odp_buffer.h > > platform/linux-generic/include/api/odp_buffer_pool.h > > platform/linux-generic/include/api/odp_config.h > > > > This patch is complete and compilable/testable. It is RFC pending > > Petri approval of the public API headers and recommendations for > > final packaging. 
> > > > example/generator/odp_generator.c | 19 +- > > example/ipsec/odp_ipsec.c | 57 +- > > example/l2fwd/odp_l2fwd.c | 19 +- > > example/odp_example/odp_example.c | 18 +- > > example/packet/odp_pktio.c | 19 +- > > example/timer/odp_timer_test.c | 13 +- > > platform/linux-generic/include/api/odp_buffer.h | 3 +- > > .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- > > platform/linux-generic/include/api/odp_config.h | 19 + > > .../linux-generic/include/api/odp_platform_types.h | 12 + > > .../linux-generic/include/api/odp_shared_memory.h | 10 +- > > .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ > > .../linux-generic/include/odp_buffer_internal.h | 150 ++-- > > .../include/odp_buffer_pool_internal.h | 351 ++++++++-- > > platform/linux-generic/include/odp_internal.h | 2 + > > .../linux-generic/include/odp_packet_internal.h | 50 +- > > .../linux-generic/include/odp_timer_internal.h | 11 +- > > platform/linux-generic/odp_buffer.c | 33 +- > > platform/linux-generic/odp_buffer_pool.c | 777 > ++++++++++------ > > ----- > > platform/linux-generic/odp_linux.c | 4 +- > > platform/linux-generic/odp_packet.c | 41 +- > > platform/linux-generic/odp_queue.c | 1 + > > platform/linux-generic/odp_schedule.c | 20 +- > > platform/linux-generic/odp_timer.c | 3 +- > > test/api_test/odp_timer_ping.c | 19 +- > > test/validation/odp_crypto.c | 43 +- > > test/validation/odp_queue.c | 19 +- > > 27 files changed, 1208 insertions(+), 758 deletions(-) > > create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h > > > > diff --git a/example/generator/odp_generator.c > > b/example/generator/odp_generator.c > > index 73b0369..476cbef 100644 > > --- a/example/generator/odp_generator.c > > +++ b/example/generator/odp_generator.c > > @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > 
odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > ¶ms); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c > > index 76d27c5..f96338c 100644 > > --- a/example/ipsec/odp_ipsec.c > > +++ b/example/ipsec/odp_ipsec.c > > @@ -367,8 +367,7 @@ static > > void ipsec_init_pre(void) > > { > > odp_queue_param_t qparam; > > - void *pool_base; > > - odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* > > * Create queues > > @@ -401,16 +400,12 @@ void ipsec_init_pre(void) > > } > > > > /* Create output buffer pool */ > > - shm = odp_shm_reserve("shm_out_pool", > > - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_OUT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - out_pool = odp_buffer_pool_create("out_pool", pool_base, > > - SHM_OUT_POOL_SIZE, > > - 
SHM_OUT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, > > ¶ms); > > > > if (ODP_BUFFER_POOL_INVALID == out_pool) { > > EXAMPLE_ERR("Error: message pool create failed.\n"); > > @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) > > { > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > int stream_count; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet buffer pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - pool_base = odp_shm_addr(shm); > > - > > - if (NULL == pool_base) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > > + ¶ms); > > > > - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (ODP_BUFFER_POOL_INVALID == pkt_pool) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > } > > > > /* Create context buffer pool */ > > - shm = odp_shm_reserve("shm_ctx_pool", > > - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_CTX_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; > > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > > > - if (NULL == 
pool_base) { > > - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, > > + ¶ms); > > > > - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, > > - SHM_CTX_POOL_SIZE, > > - SHM_CTX_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_RAW); > > if (ODP_BUFFER_POOL_INVALID == ctx_pool) { > > EXAMPLE_ERR("Error: context pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c > > index ebac8c5..3c1fd6a 100644 > > --- a/example/l2fwd/odp_l2fwd.c > > +++ b/example/l2fwd/odp_l2fwd.c > > @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) > > { > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > odp_pktio_t pktio; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, > ¶ms); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > 
exit(EXIT_FAILURE); > > diff --git a/example/odp_example/odp_example.c > > b/example/odp_example/odp_example.c > > index 96a2912..8373f12 100644 > > --- a/example/odp_example/odp_example.c > > +++ b/example/odp_example/odp_example.c > > @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) > > test_args_t args; > > int num_workers; > > odp_buffer_pool_t pool; > > - void *pool_base; > > odp_queue_t queue; > > int i, j; > > int prios; > > int first_core; > > odp_shm_t shm; > > test_globals_t *globals; > > + odp_buffer_pool_param_t params; > > > > printf("\nODP example starts\n\n"); > > > > @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) > > /* > > * Create message pool > > */ > > - shm = odp_shm_reserve("msg_pool", > > - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = sizeof(test_message_t); > > + params.buf_align = 0; > > + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); > > + params.buf_type = ODP_BUFFER_TYPE_RAW; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Shared memory reserve failed.\n"); > > - return -1; > > - } > > - > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > > - sizeof(test_message_t), > > - ODP_CACHE_LINE_SIZE, > ODP_BUFFER_TYPE_RAW); > > + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, ¶ms); > > > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Pool create failed.\n"); > > diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c > > index 7d51682..f2e7b2d 100644 > > --- a/example/packet/odp_pktio.c > > +++ b/example/packet/odp_pktio.c > > @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) > > odph_linux_pthread_t thread_tbl[MAX_WORKERS]; > > odp_buffer_pool_t pool; > > int num_workers; > > - void *pool_base; > > int i; > > int first_core; > > int core_count; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > /* Init ODP before calling anything else */ > > if (odp_init_global(NULL, NULL)) { > > 
@@ -389,20 +389,13 @@ int main(int argc, char *argv[]) > > printf("First core: %i\n\n", first_core); > > > > /* Create packet pool */ > > - shm = odp_shm_reserve("shm_packet_pool", > > - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > + params.buf_size = SHM_PKT_POOL_BUF_SIZE; > > + params.buf_align = 0; > > + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_PACKET; > > > > - if (pool_base == NULL) { > > - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); > > - exit(EXIT_FAILURE); > > - } > > + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, > &params); > > > > - pool = odp_buffer_pool_create("packet_pool", pool_base, > > - SHM_PKT_POOL_SIZE, > > - SHM_PKT_POOL_BUF_SIZE, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_PACKET); > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Error: packet pool create failed.\n"); > > exit(EXIT_FAILURE); > > diff --git a/example/timer/odp_timer_test.c > > b/example/timer/odp_timer_test.c > > index 9968bfe..0d6e31a 100644 > > --- a/example/timer/odp_timer_test.c > > +++ b/example/timer/odp_timer_test.c > > @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) > > test_args_t args; > > int num_workers; > > odp_buffer_pool_t pool; > > - void *pool_base; > > odp_queue_t queue; > > int first_core; > > uint64_t cycles, ns; > > odp_queue_param_t param; > > odp_shm_t shm; > > + odp_buffer_pool_param_t params; > > > > printf("\nODP timer example starts\n"); > > > > @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) > > */ > > shm = odp_shm_reserve("msg_pool", > > MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); > > - pool_base = odp_shm_addr(shm); > > > > - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, > > - 0, > > - ODP_CACHE_LINE_SIZE, > > - ODP_BUFFER_TYPE_TIMEOUT); > > + params.buf_size = 0; > > + params.buf_align = 0; > > + params.num_bufs = MSG_POOL_SIZE; > > + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; > > + 
> > + pool = odp_buffer_pool_create("msg_pool", shm, &params); > > > > if (pool == ODP_BUFFER_POOL_INVALID) { > > EXAMPLE_ERR("Pool create failed.\n"); > > diff --git a/platform/linux-generic/include/api/odp_buffer.h > > b/platform/linux-generic/include/api/odp_buffer.h > > index da23120..e981324 100644 > > --- a/platform/linux-generic/include/api/odp_buffer.h > > +++ b/platform/linux-generic/include/api/odp_buffer.h > > @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); > > * > > * @param buf Buffer handle > > * > > - * @return 1 if valid, otherwise 0 > > + * @retval 1 Buffer handle represents a valid buffer. > > + * @retval 0 Buffer handle does not represent a valid buffer. > > */ > > int odp_buffer_is_valid(odp_buffer_t buf); > > > > diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h > > b/platform/linux-generic/include/api/odp_buffer_pool.h > > index 30b83e0..3d85066 100644 > > --- a/platform/linux-generic/include/api/odp_buffer_pool.h > > +++ b/platform/linux-generic/include/api/odp_buffer_pool.h > > @@ -32,42 +32,114 @@ extern "C" { > > /** Maximum queue name lenght in chars */ > > #define ODP_BUFFER_POOL_NAME_LEN 32 > > > > -/** Invalid buffer pool */ > > -#define ODP_BUFFER_POOL_INVALID 0 > > +/** > > + * Buffer pool parameters > > + * Used to communicate buffer pool creation options. > > + */ > > +typedef struct odp_buffer_pool_param_t { > > + size_t buf_size; /**< Buffer size in bytes. The maximum > > + number of bytes application will > > + store in each buffer. */ > > To be in align with packet API: > uint32_t buf_len; /**< Buffer length in bytes. The maximum > number of bytes application will > store in each buffer. In case of packet > type buffers this will be rounded up to > ODP_CONFIG_PACKET_BUF_LEN_MIN. > */ > > This is what odp_buffer_len() or odp_packet_seg_buf_len(pkt, seg) would > return - so for the first segment it includes the headroom. 
> > ODP_CONFIG_PACKET_HEADROOM < ODP_CONFIG_PACKET_BUF_LEN_MIN <= buf_len > > Default ODP_CONFIG_PACKET_HEADROOM could be e.g. 64 bytes. > > > > I was thinking of making the default for linux-generic be 66 since that > would offset the packet by two bytes to allow the IP and TCP headers to be > word-aligned. Reasonable? > > > > Yes, headroom is minimum. Packet input decides the actual headroom per > incoming packet. > > > > > > > ODP_CONFIG_PACKET_BUF_LEN_MIN 128 (or 256) could be defined also in > odp_config.h. Buf_len would be rounded up into this value. > > > > + size_t buf_align; /**< Minimum buffer alignment in bytes. > > + Valid values are powers of two. Use 0 > > + for default alignment. Default will > > + always be a multiple of 8. */ > > uint32_t buf_align; > > > > OK, I will revise these to uint32_t. For packets, the buf_size will be > interpreted as the packet data size and the implementation will add in the > configured headroom and/or tailroom to get the actual number of bytes that > are reserved for the packet. I'll also add the > ODP_CONFIG_PACKET_BUF_LEN_MIN value and will set it to 256. > > > > For packets: > > --------------- > > > > pool_param.buf_len == odp_packet_seg_buf_len(pkt, seg) > > > > In first seg: odp_packet_seg_buf_len() = odp_packet_headroom() + > odp_packet_seg_data_len() > > In last seg: odp_packet_seg_buf_len() = odp_packet_tailroom() + > odp_packet_seg_data_len() > > In mid seg: odp_packet_seg_buf_len() >= odp_packet_seg_data_len() > > > > For raw buffers: > > -------------------- > > pool_param.buf_len == odp_buffer_len() > > > > > That is how these behave currently. > > > > + uint32_t num_bufs; /**< Number of buffers in the pool */ > > + int buf_type; /**< Buffer type */ > > +} odp_buffer_pool_param_t; > > > > /** > > * Create a buffer pool > > + * This routine is used to create a buffer pool. 
It take three > > + * arguments: the optional name of the pool to be created, an optional > > shared > > + * memory handle, and a parameter struct that describes the pool to be > > + * created. If a name is not specified the result is an anonymous pool > > that > > + * cannot be referenced by odp_buffer_pool_lookup(). > > + * > > + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 > > chars. > > + * May be specified as NULL for anonymous pools. > > * > > - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 > > chars) > > - * @param base_addr Pool base address > > - * @param size Pool size in bytes > > - * @param buf_size Buffer size in bytes > > - * @param buf_align Minimum buffer alignment > > - * @param buf_type Buffer type > > + * @param shm The shared memory object in which to create the pool. > > + * Use ODP_SHM_NULL to reserve default memory type > > + * for the buffer type. > > * > > - * @return Buffer pool handle > > + * @param params Buffer pool parameters. > > + * > > + * @retval Handle Buffer pool handle on success > > + * @retval ODP_BUFFER_POOL_INVALID if call failed > > */ > > + > > odp_buffer_pool_t odp_buffer_pool_create(const char *name, > > - void *base_addr, uint64_t size, > > - size_t buf_size, size_t buf_align, > > - int buf_type); > > + odp_shm_t shm, > > + odp_buffer_pool_param_t *params); > > > > +/** > > + * Destroy a buffer pool previously created by odp_buffer_pool_create() > > + * > > + * @param pool Handle of the buffer pool to be destroyed > > + * > > + * @retval 0 Success > > + * @retval -1 Failure > > + * > > + * @note This routine destroys a previously created buffer pool. This > > call > > + * does not destroy any shared memory object passed to > > + * odp_buffer_pool_create() used to store the buffer pool contents. The > > caller > > + * takes responsibility for that. 
If no shared memory object was passed > > as > > + * part of the create call, then this routine will destroy any internal > > shared > > + * memory objects associated with the buffer pool. Results are undefined > > if > > + * an attempt is made to destroy a buffer pool that contains allocated > or > > + * otherwise active buffers. > > + */ > > +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); > > > > /** > > * Find a buffer pool by name > > * > > * @param name Name of the pool > > * > > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. > > + * @retval Handle Buffer pool handle on successs > > Typos: here ^^^^^^^ and here ^ > > > > Will correct success typo. Not sure what other typo you see. The syntax of > @retval is that the first token that follows the > > keyword is the return value and the rest of the line is the explanation of > that value, so this isn't to be read > > as a sentence. > > > > OK, it wasn’t typo but a feature. Still I think documentation in header > files should be easily readable. Should we use @return for handles and > @retval only when there are defined values (1, 0, ODP_XYZ etc)? > > > These were @return, but Anders felt strongly that we should standardize on @retval. I'm fine with whatever you decide. Please advise. > > > > > + * @retval ODP_BUFFER_POOL_INVALID if not found > > + * > > + * @note This routine cannot be used to look up an anonymous pool (one > > created > > + * with no name). > > */ > > odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); > > > > +/** > > + * Buffer pool information struct > > + * Used to get information about a buffer pool. > > + */ > > +typedef struct odp_buffer_pool_info_t { > > + const char *name; /**< pool name */ > > Add SHM handle into the info struct: odp_shm_t shm; > > > > I'm just implementing what's in the API Delta document (see pp. 6-7). 
> When we discussed this you indicated that you wanted the shm to be returned > as a separate return parameter of odp_buffer_pool_info(). If you want it > integrated into the odp_buffer_pool_info_t I have no problem doing that, > but it should be changed in the spec as well. > > > > As I recall, your argument for keeping them separate was that shm is a > separate input parameter to odp_buffer_pool_create() rather than being part > of the odp_buffer_pool_param_t struct, so having it as a separate output > parameter keeps things symmetric. > > > > Then it just got documented the wrong way around. I wanted to keep shm > separate from the pool params struct, but include it to the info struct > (just like the ‘name’ parameter). > > > > > This change is part of the v2 patch and the documentation in the .h file reflects this change. > > > + odp_buffer_pool_param_t params; /**< pool parameters */ > > +} odp_buffer_pool_info_t; > > + > > +/** > > + * Retrieve information about a buffer pool > > + * > > + * @param pool Buffer pool handle > > + * > > + * @param shm Recieves odp_shm_t supplied by caller at > > + * pool creation, or ODP_SHM_NULL if the > > + * pool is managed internally. > > Remove: shm as it's part of info struct > > > > Same comment as above. No problem changing this if you've changed your > mind. > > > > + * > > + * @param[out] info Receives an odp_buffer_pool_info_t object > > + * that describes the pool. > > + * > > + * @retval 0 Success > > + * @retval -1 Failure. Info could not be retrieved. > > + */ > > + > > +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, > > + odp_buffer_pool_info_t *info); > > > > /** > > * Print buffer pool info > > * > > * @param pool Pool handle > > * > > + * @note This routine writes implementation-defined information about > the > > + * specified buffer pool to the ODP log. The intended use is for > > debugging. 
> > */ > > void odp_buffer_pool_print(odp_buffer_pool_t pool); > > > > @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); > > * The validity of a buffer can be cheked at any time with > > odp_buffer_is_valid() > > * @param pool Pool handle > > * > > - * @return Buffer handle or ODP_BUFFER_INVALID > > + * @retval Handle Buffer handle of allocated buffer > > Typo: ^^^^^^ > > > > Not a typo. See previous comments about how @retval works. The generated > doxygen looks fine. > > > > + * @retval ODP_BUFFER_INVALID Allocation failed > > */ > > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > > * > > * @param buf Buffer handle > > * > > - * @return Buffer pool the buffer was allocated from > > + * @retval Handle Buffer pool handle that the buffer was allocated from > > */ > > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); > > > Buffer API call. Would be more logical in odp_buffer.h (can be moved > later). > > > > I agree, however earlier you argued that these were more logically grouped > within odp_buffer_pool.h. They were part of this file historically because > of the circular typedef dependencies. Now that we have > odp_platform_types.h this is no longer the case so I'll move > odp_buffer_alloc() and odp_buffer_free() to odp_buffer.h. > > > > Free/alloc can stay, since those work on pools (which was the discussion > you mentioned). This can move since it works on the buffer itself (and > don’t create circular dependency any more). > > > > > Sorry, I misread that. v2 moves odp_buffer_alloc() and odp_buffer_free() into odp_buffer.h. Do you want them moved back to odp_buffer_pool.h? v2 will move odp_buffer_pool() to odp_buffer.h. 
> > > > > > diff --git a/platform/linux-generic/include/api/odp_config.h > > b/platform/linux-generic/include/api/odp_config.h > > index 906897c..5ca5bb2 100644 > > --- a/platform/linux-generic/include/api/odp_config.h > > +++ b/platform/linux-generic/include/api/odp_config.h > > @@ -49,6 +49,25 @@ extern "C" { > > #define ODP_CONFIG_PKTIO_ENTRIES 64 > > > > /** > > + * Buffer segment size to use > > + * This is the granularity of segmented buffers. Sized for now to be > > There are no segmented buffers. Packet have segmentation. Buf_len == > segment size. > > > > In the linux-generic implementation the same buffer structures are used > across all buffer types. I can rename this or make this an internal > parameter as you prefer. Please advise. > > > > > > It is: ODP_CONFIG_PACKET_BUF_LEN_MIN. > > OK > > > large > > + * enough to support 1500-byte packets since the raw socket interface > > does not > > + * support scatter/gather I/O. ODP requires a minimum segment size of > 128 > > + * bytes with 256 recommended. Linux-generic code will enforce a 256 > byte > > + * minimum. Note that the chosen segment size must be a multiple of > > + * ODP_CACHE_LINE_SIZE. > > + */ > > +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) > > Same as ODP_CONFIG_PACKET_BUF_LEN_MIN. > > > > Not quite. ODP_CONFIG_PACKET_BUF_LEN_MIN would be the lower bound of this > option and is intended to be an architectural limit. As I mentioned, > currently linux-generic only supports unsegmented packets because the > existing odp_packet_socket.c cannot handle scatter/gather read/writes. > Doing this naively by receiving/transmitting to a pktio-level fixed buffer > which is then copied to or filled from ODP segments would impose a severe > performance penalty, so until native scatter/gather support is added we're > stuck. However the APIs and implementation are fully prepared to deal with > segmented packets when that support is added. 
And of course other > implementations that make use of linux-generic that don't have this > restriction can use the current code directly for that purpose but just > hooking it to their native I/O mechanisms. > > > > For packets: > > A buffer is a segment. If packet cannot fit into single segment (= single > buffer from the pool), it will be stored into multiple segments (= buffers > from the pool). > > > > For raw buffers: > > A buffer is the buffer. > > > Yes, that's exactly how it currently works, except that I distinguish between a 'buffer', which is actually the metadata associated with the object, and the data blocks that form the backing store used to store buffer/packet data. The latter follow your definition above. > > > > > + > > +/** > > + * Maximum buffer size supported > > + * Must be an integral number of segments and should be large enough to > > + * accommodate jumbo packets. Attempts to allocate or extend buffers to > > sizes > > + * larger than this limit will fail. > > + */ > > +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) > > + > > This could be instead maximum number of segments per packet: > > Add ODP_CONFIG_PACKET_NUM_SEGS_MAX into odp_config.h ? > > > > I think it's cleaner to specify this limit this way since packet length is > independent of how a given implementation may choose to break packets into > segments. Note, for example, that there is no implication that packet > segments are all of equal size, even though many implementations will in > fact do this. So trying to specify this in terms of numbers of segments > would leave doubt as to how big a packet can be handled. This way the > limit the application cares about is stated precisely and directly. > > > > > > The it’s ODP_CONFIG_PACKET_BUF_LEN_MAX (sum of buffer lengths over all > packet segments) > > > > > It's a mouthful either way, but I'll change this to your name. 
> > > > > > +/** > > * @} > > */ > > > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h > > b/platform/linux-generic/include/api/odp_platform_types.h > > index 4db47d3..2181eb6 100644 > > --- a/platform/linux-generic/include/api/odp_platform_types.h > > +++ b/platform/linux-generic/include/api/odp_platform_types.h > > @@ -26,6 +26,9 @@ > > /** ODP Buffer pool */ > > typedef uint32_t odp_buffer_pool_t; > > > > +/** Invalid buffer pool */ > > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) > > > This was and should be defined as 0. It's easier to catch handles that are > init with memset(0) but not set. > > > > All typedefs and associated implementation structures are explicitly > implementation-defined and are not architected. This is the platform types > for linux-generic. Other platforms will set these to whatever works best > for that platform. In my case 0 may be a valid buffer handle and it would > add additional overhead to exclude it. odp_buffer_is_valid() provides a > robust check on handle validity whenever needed. In any event these are > opaque types and the value(s) they contain are meaningless from an > application perspective. > > > > > > In linux-generic, we have done some effort already to change all > XXX_INVALID values into 0 for the reason I mentioned above. It’s for init > code and debugging if all handles use the same invalid value (which is > zero). > This is not an API issue, it's an implementation issue. The reason I changed this is for performance. In the original code you started pool indices at 1 rather than 0, meaning that converting to/from pool handles always involved adding or subtracting 1, which seemed unnecessary overhead in what is a performance path. We're already explicit in stating that we expect applications to use valid handles in most API calls to eliminate the overhead of spurious validation and that we provide the xxx_is_valid() APIs to do robust validation when the application requires it. 
Currently if an application says odp_buffer_alloc(ODP_BUFFER_POOL_INVALID, ...) or passes a garbage handle to any other ODP API that takes a handle results are undefined no matter what value we assign to the designated INVALIDs. These are intended for application use on output to indicate recoverable errors like resource exhaustion. If we want to state that APIs MUST validate handles we can do that, but that imposes meaningful overhead. > > -Petri > > > > >
On Tue, Dec 9, 2014 at 7:25 AM, Savolainen, Petri (NSN - FI/Espoo) < petri.savolainen@nsn.com> wrote: > > > /** > > * Find a buffer pool by name > > * > > * @param name Name of the pool > > * > > - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. > > + * @retval Handle Buffer pool handle on successs > > Typos: here ^^^^^^^ and here ^ > > > > Will correct success typo. Not sure what other typo you see. The syntax of > @retval is that the first token that follows the > > keyword is the return value and the rest of the line is the explanation of > that value, so this isn't to be read > > as a sentence. > > > > OK, it wasn’t typo but a feature. Still I think documentation in header > files should be easily readable. Should we use @return for handles and > @retval only when there are defined values (1, 0, ODP_XYZ etc)? > > > > > > These were @return, but Anders felt strongly that we should standardize on > @retval. I'm fine with whatever you decide. Please advise. > > > > > > This is generic documentation issue. I think that documentation should be > readable both in .h files and in doxygen generated documents. Maybe Mike or > Anders can propose tagging rules that work on both. > > > > This is not readable like this : “@retval Handle Buffer pool handle on > success” > > > > > > > > + * @retval ODP_BUFFER_INVALID Allocation failed > > */ > > odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); > > > > @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); > > * > > * @param buf Buffer handle > > * > > - * @return Buffer pool the buffer was allocated from > > + * @retval Handle Buffer pool handle that the buffer was allocated from > > */ > > odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); > > > Buffer API call. Would be more logical in odp_buffer.h (can be moved > later). > > > > I agree, however earlier you argued that these were more logically grouped > within odp_buffer_pool.h. 
They were part of this file historically because > of the circular typedef dependencies. Now that we have > odp_platform_types.h this is no longer the case so I'll move > odp_buffer_alloc() and odp_buffer_free() to odp_buffer.h. > > > > Free/alloc can stay, since those work on pools (which was the discussion > you mentioned). This can move since it works on the buffer itself (and > don’t create circular dependency any more). > > > > > > > > Sorry, I misread that. v2 moves odp_buffer_alloc() and odp_buffer_free() > into odp_buffer.h. Do you want them moved back to odp_buffer_pool.h? v2 > will move odp_buffer_pool() to odp_buffer.h. > > > > Yes. Back to the buffer pool header, please. > OK, look for this in v3 > > > > > > > > +/** > > * @} > > */ > > > > diff --git a/platform/linux-generic/include/api/odp_platform_types.h > > b/platform/linux-generic/include/api/odp_platform_types.h > > index 4db47d3..2181eb6 100644 > > --- a/platform/linux-generic/include/api/odp_platform_types.h > > +++ b/platform/linux-generic/include/api/odp_platform_types.h > > @@ -26,6 +26,9 @@ > > /** ODP Buffer pool */ > > typedef uint32_t odp_buffer_pool_t; > > > > +/** Invalid buffer pool */ > > +#define ODP_BUFFER_POOL_INVALID (0xffffffff) > > > This was and should be defined as 0. It's easier to catch handles that are > init with memset(0) but not set. > > > > All typedefs and associated implementation structures are explicitly > implementation-defined and are not architected. This is the platform types > for linux-generic. Other platforms will set these to whatever works best > for that platform. In my case 0 may be a valid buffer handle and it would > add additional overhead to exclude it. odp_buffer_is_valid() provides a > robust check on handle validity whenever needed. In any event these are > opaque types and the value(s) they contain are meaningless from an > application perspective. 
> > > > > > In linux-generic, we have done some effort already to change all > XXX_INVALID values into 0 for the reason I mentioned above. It’s for init > code and debugging if all handles use the same invalid value (which is > zero). > > > > This is not an API issue, it's an implementation issue. > > > > The reason I changed this is for performance. In the original code you > started pool indices at 1 rather than 0, meaning that converting to/from > pool handles always involved adding or subtracting 1, which seemed > unnecessary overhead in what is a performance path. We're already explicit > in stating that we expect applications to use valid handles in most API > calls to eliminate the overhead of spurious validation and that we provide > the xxx_is_valid() APIs to do robust validation when the application > requires it. > > > > Currently if an application says odp_buffer_alloc(ODP_BUFFER_POOL_INVALID, > ...) or passes a garbage handle to any other ODP API that takes a handle > results are undefined no matter what value we assign to the designated > INVALIDs. These are intended for application use on output to indicate > recoverable errors like resource exhaustion. If we want to state that APIs > MUST validate handles we can do that, but that imposes meaningful overhead. > > > > Yes, it’s implementation issue but it would be nice if our linux-generic > implementation would do it only in one way (#define XXX_INVALID 0) . Those > increment/decrement by ones does not really cause performance problems. > It’s max +1 cycle per operation and most likely less than that, since > compiler/out-of-order-cpu magic can very well run it in parallel to other > instructions. Number accesses to shared data and cache lines are the real > optimization hot spots (e.g. in this new implementation two CAS operations > per buffer alloc is pretty bad already) . > Classification already defines non-zero invalids, so we're not uniform here, not should we care about such things. 
That's why we're using abstract types so that implementations can define them in ways that make the most sense for them. The alloc path involves the same number of CAS operations as before since we don't have the LOCK/UNLOCK calls. With the addition of local caching, performance is equivalent to or better than the previous implementation. > > > -Petri > > >
diff --git a/example/generator/odp_generator.c b/example/generator/odp_generator.c index 73b0369..476cbef 100644 --- a/example/generator/odp_generator.c +++ b/example/generator/odp_generator.c @@ -522,11 +522,11 @@ int main(int argc, char *argv[]) odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_buffer_pool_t pool; int num_workers; - void *pool_base; int i; int first_core; int core_count; odp_shm_t shm; + odp_buffer_pool_param_t params; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { @@ -589,20 +589,13 @@ int main(int argc, char *argv[]) printf("First core: %i\n\n", first_core); /* Create packet pool */ - shm = odp_shm_reserve("shm_packet_pool", - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); + params.buf_size = SHM_PKT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - if (pool_base == NULL) { - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); - exit(EXIT_FAILURE); - } + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, &params); - pool = odp_buffer_pool_create("packet_pool", pool_base, - SHM_PKT_POOL_SIZE, - SHM_PKT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); if (pool == ODP_BUFFER_POOL_INVALID) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); diff --git a/example/ipsec/odp_ipsec.c b/example/ipsec/odp_ipsec.c index 76d27c5..f96338c 100644 --- a/example/ipsec/odp_ipsec.c +++ b/example/ipsec/odp_ipsec.c @@ -367,8 +367,7 @@ static void ipsec_init_pre(void) { odp_queue_param_t qparam; - void *pool_base; - odp_shm_t shm; + odp_buffer_pool_param_t params; /* * Create queues @@ -401,16 +400,12 @@ void ipsec_init_pre(void) } /* Create output buffer pool */ - shm = odp_shm_reserve("shm_out_pool", - SHM_OUT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - - pool_base = odp_shm_addr(shm); + params.buf_size = SHM_OUT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = 
SHM_PKT_POOL_BUF_COUNT; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - out_pool = odp_buffer_pool_create("out_pool", pool_base, - SHM_OUT_POOL_SIZE, - SHM_OUT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); + out_pool = odp_buffer_pool_create("out_pool", ODP_SHM_NULL, &params); if (ODP_BUFFER_POOL_INVALID == out_pool) { EXAMPLE_ERR("Error: message pool create failed.\n"); @@ -1176,12 +1171,12 @@ main(int argc, char *argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; int num_workers; - void *pool_base; int i; int first_core; int core_count; int stream_count; odp_shm_t shm; + odp_buffer_pool_param_t params; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { @@ -1241,42 +1236,28 @@ main(int argc, char *argv[]) printf("First core: %i\n\n", first_core); /* Create packet buffer pool */ - shm = odp_shm_reserve("shm_packet_pool", - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); + params.buf_size = SHM_PKT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_PKT_POOL_BUF_COUNT; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - pool_base = odp_shm_addr(shm); - - if (NULL == pool_base) { - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); - exit(EXIT_FAILURE); - } + pkt_pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, + &params); - pkt_pool = odp_buffer_pool_create("packet_pool", pool_base, - SHM_PKT_POOL_SIZE, - SHM_PKT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); if (ODP_BUFFER_POOL_INVALID == pkt_pool) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); } /* Create context buffer pool */ - shm = odp_shm_reserve("shm_ctx_pool", - SHM_CTX_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - - pool_base = odp_shm_addr(shm); + params.buf_size = SHM_CTX_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_CTX_POOL_BUF_COUNT; + params.buf_type = ODP_BUFFER_TYPE_RAW; - if (NULL == pool_base) { - EXAMPLE_ERR("Error: context pool mem alloc failed.\n"); - exit(EXIT_FAILURE); - } + 
ctx_pool = odp_buffer_pool_create("ctx_pool", ODP_SHM_NULL, + &params); - ctx_pool = odp_buffer_pool_create("ctx_pool", pool_base, - SHM_CTX_POOL_SIZE, - SHM_CTX_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_RAW); if (ODP_BUFFER_POOL_INVALID == ctx_pool) { EXAMPLE_ERR("Error: context pool create failed.\n"); exit(EXIT_FAILURE); diff --git a/example/l2fwd/odp_l2fwd.c b/example/l2fwd/odp_l2fwd.c index ebac8c5..3c1fd6a 100644 --- a/example/l2fwd/odp_l2fwd.c +++ b/example/l2fwd/odp_l2fwd.c @@ -314,12 +314,12 @@ int main(int argc, char *argv[]) { odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_buffer_pool_t pool; - void *pool_base; int i; int first_core; int core_count; odp_pktio_t pktio; odp_shm_t shm; + odp_buffer_pool_param_t params; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { @@ -383,20 +383,13 @@ int main(int argc, char *argv[]) printf("First core: %i\n\n", first_core); /* Create packet pool */ - shm = odp_shm_reserve("shm_packet_pool", - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); + params.buf_size = SHM_PKT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - if (pool_base == NULL) { - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); - exit(EXIT_FAILURE); - } + pool = odp_buffer_pool_create("packet pool", ODP_SHM_NULL, &params); - pool = odp_buffer_pool_create("packet_pool", pool_base, - SHM_PKT_POOL_SIZE, - SHM_PKT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); if (pool == ODP_BUFFER_POOL_INVALID) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); diff --git a/example/odp_example/odp_example.c b/example/odp_example/odp_example.c index 96a2912..8373f12 100644 --- a/example/odp_example/odp_example.c +++ b/example/odp_example/odp_example.c @@ -954,13 +954,13 @@ int main(int argc, char *argv[]) test_args_t args; int num_workers; odp_buffer_pool_t pool; - 
void *pool_base; odp_queue_t queue; int i, j; int prios; int first_core; odp_shm_t shm; test_globals_t *globals; + odp_buffer_pool_param_t params; printf("\nODP example starts\n\n"); @@ -1042,19 +1042,13 @@ int main(int argc, char *argv[]) /* * Create message pool */ - shm = odp_shm_reserve("msg_pool", - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); + params.buf_size = sizeof(test_message_t); + params.buf_align = 0; + params.num_bufs = MSG_POOL_SIZE/sizeof(test_message_t); + params.buf_type = ODP_BUFFER_TYPE_RAW; - if (pool_base == NULL) { - EXAMPLE_ERR("Shared memory reserve failed.\n"); - return -1; - } - - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, - sizeof(test_message_t), - ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW); + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, &params); if (pool == ODP_BUFFER_POOL_INVALID) { EXAMPLE_ERR("Pool create failed.\n"); diff --git a/example/packet/odp_pktio.c b/example/packet/odp_pktio.c index 7d51682..f2e7b2d 100644 --- a/example/packet/odp_pktio.c +++ b/example/packet/odp_pktio.c @@ -331,11 +331,11 @@ int main(int argc, char *argv[]) odph_linux_pthread_t thread_tbl[MAX_WORKERS]; odp_buffer_pool_t pool; int num_workers; - void *pool_base; int i; int first_core; int core_count; odp_shm_t shm; + odp_buffer_pool_param_t params; /* Init ODP before calling anything else */ if (odp_init_global(NULL, NULL)) { @@ -389,20 +389,13 @@ int main(int argc, char *argv[]) printf("First core: %i\n\n", first_core); /* Create packet pool */ - shm = odp_shm_reserve("shm_packet_pool", - SHM_PKT_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); + params.buf_size = SHM_PKT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - if (pool_base == NULL) { - EXAMPLE_ERR("Error: packet pool mem alloc failed.\n"); - exit(EXIT_FAILURE); - } + pool = odp_buffer_pool_create("packet_pool", 
ODP_SHM_NULL, &params); - pool = odp_buffer_pool_create("packet_pool", pool_base, - SHM_PKT_POOL_SIZE, - SHM_PKT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); if (pool == ODP_BUFFER_POOL_INVALID) { EXAMPLE_ERR("Error: packet pool create failed.\n"); exit(EXIT_FAILURE); diff --git a/example/timer/odp_timer_test.c b/example/timer/odp_timer_test.c index 9968bfe..0d6e31a 100644 --- a/example/timer/odp_timer_test.c +++ b/example/timer/odp_timer_test.c @@ -244,12 +244,12 @@ int main(int argc, char *argv[]) test_args_t args; int num_workers; odp_buffer_pool_t pool; - void *pool_base; odp_queue_t queue; int first_core; uint64_t cycles, ns; odp_queue_param_t param; odp_shm_t shm; + odp_buffer_pool_param_t params; printf("\nODP timer example starts\n"); @@ -313,12 +313,13 @@ int main(int argc, char *argv[]) */ shm = odp_shm_reserve("msg_pool", MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, - 0, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_TIMEOUT); + params.buf_size = 0; + params.buf_align = 0; + params.num_bufs = MSG_POOL_SIZE; + params.buf_type = ODP_BUFFER_TYPE_TIMEOUT; + + pool = odp_buffer_pool_create("msg_pool", shm, &params); if (pool == ODP_BUFFER_POOL_INVALID) { EXAMPLE_ERR("Pool create failed.\n"); diff --git a/platform/linux-generic/include/api/odp_buffer.h b/platform/linux-generic/include/api/odp_buffer.h index da23120..e981324 100644 --- a/platform/linux-generic/include/api/odp_buffer.h +++ b/platform/linux-generic/include/api/odp_buffer.h @@ -68,7 +68,8 @@ int odp_buffer_type(odp_buffer_t buf); * * @param buf Buffer handle * - * @return 1 if valid, otherwise 0 + * @retval 1 Buffer handle represents a valid buffer. + * @retval 0 Buffer handle does not represent a valid buffer. 
*/ int odp_buffer_is_valid(odp_buffer_t buf); diff --git a/platform/linux-generic/include/api/odp_buffer_pool.h b/platform/linux-generic/include/api/odp_buffer_pool.h index 30b83e0..3d85066 100644 --- a/platform/linux-generic/include/api/odp_buffer_pool.h +++ b/platform/linux-generic/include/api/odp_buffer_pool.h @@ -32,42 +32,114 @@ extern "C" { /** Maximum queue name lenght in chars */ #define ODP_BUFFER_POOL_NAME_LEN 32 -/** Invalid buffer pool */ -#define ODP_BUFFER_POOL_INVALID 0 +/** + * Buffer pool parameters + * Used to communicate buffer pool creation options. + */ +typedef struct odp_buffer_pool_param_t { + size_t buf_size; /**< Buffer size in bytes. The maximum + number of bytes application will + store in each buffer. */ + size_t buf_align; /**< Minimum buffer alignment in bytes. + Valid values are powers of two. Use 0 + for default alignment. Default will + always be a multiple of 8. */ + uint32_t num_bufs; /**< Number of buffers in the pool */ + int buf_type; /**< Buffer type */ +} odp_buffer_pool_param_t; /** * Create a buffer pool + * This routine is used to create a buffer pool. It takes three + * arguments: the optional name of the pool to be created, an optional shared + * memory handle, and a parameter struct that describes the pool to be + * created. If a name is not specified the result is an anonymous pool that + * cannot be referenced by odp_buffer_pool_lookup(). + * + * @param name Name of the pool, max ODP_BUFFER_POOL_NAME_LEN-1 chars. + * May be specified as NULL for anonymous pools. * - * @param name Name of the pool (max ODP_BUFFER_POOL_NAME_LEN - 1 chars) - * @param base_addr Pool base address - * @param size Pool size in bytes - * @param buf_size Buffer size in bytes - * @param buf_align Minimum buffer alignment - * @param buf_type Buffer type + * @param shm The shared memory object in which to create the pool. + * Use ODP_SHM_NULL to reserve default memory type + * for the buffer type. 
* - * @return Buffer pool handle + * @param params Buffer pool parameters. + * + * @retval Handle Buffer pool handle on success + * @retval ODP_BUFFER_POOL_INVALID if call failed */ + odp_buffer_pool_t odp_buffer_pool_create(const char *name, - void *base_addr, uint64_t size, - size_t buf_size, size_t buf_align, - int buf_type); + odp_shm_t shm, + odp_buffer_pool_param_t *params); +/** + * Destroy a buffer pool previously created by odp_buffer_pool_create() + * + * @param pool Handle of the buffer pool to be destroyed + * + * @retval 0 Success + * @retval -1 Failure + * + * @note This routine destroys a previously created buffer pool. This call + * does not destroy any shared memory object passed to + * odp_buffer_pool_create() used to store the buffer pool contents. The caller + * takes responsibility for that. If no shared memory object was passed as + * part of the create call, then this routine will destroy any internal shared + * memory objects associated with the buffer pool. Results are undefined if + * an attempt is made to destroy a buffer pool that contains allocated or + * otherwise active buffers. + */ +int odp_buffer_pool_destroy(odp_buffer_pool_t pool); /** * Find a buffer pool by name * * @param name Name of the pool * - * @return Buffer pool handle, or ODP_BUFFER_POOL_INVALID if not found. + * @retval Handle Buffer pool handle on success + * @retval ODP_BUFFER_POOL_INVALID if not found + * + * @note This routine cannot be used to look up an anonymous pool (one created + * with no name). */ odp_buffer_pool_t odp_buffer_pool_lookup(const char *name); +/** + * Buffer pool information struct + * Used to get information about a buffer pool. 
+ */ +typedef struct odp_buffer_pool_info_t { + const char *name; /**< pool name */ + odp_buffer_pool_param_t params; /**< pool parameters */ +} odp_buffer_pool_info_t; + +/** + * Retrieve information about a buffer pool + * + * @param pool Buffer pool handle + * + * @param shm Receives odp_shm_t supplied by caller at + * pool creation, or ODP_SHM_NULL if the + * pool is managed internally. + * + * @param[out] info Receives an odp_buffer_pool_info_t object + * that describes the pool. + * + * @retval 0 Success + * @retval -1 Failure. Info could not be retrieved. + */ + +int odp_buffer_pool_info(odp_buffer_pool_t pool, odp_shm_t *shm, + odp_buffer_pool_info_t *info); /** * Print buffer pool info * * @param pool Pool handle * + * @note This routine writes implementation-defined information about the + * specified buffer pool to the ODP log. The intended use is for debugging. */ void odp_buffer_pool_print(odp_buffer_pool_t pool); @@ -78,7 +150,8 @@ void odp_buffer_pool_print(odp_buffer_pool_t pool); * The validity of a buffer can be cheked at any time with odp_buffer_is_valid() * @param pool Pool handle * - * @return Buffer handle or ODP_BUFFER_INVALID + * @retval Handle Buffer handle of allocated buffer + * @retval ODP_BUFFER_INVALID Allocation failed */ odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool); @@ -97,7 +170,7 @@ void odp_buffer_free(odp_buffer_t buf); * * @param buf Buffer handle * - * @return Buffer pool the buffer was allocated from + * @retval Handle Buffer pool handle that the buffer was allocated from */ odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf); diff --git a/platform/linux-generic/include/api/odp_config.h b/platform/linux-generic/include/api/odp_config.h index 906897c..5ca5bb2 100644 --- a/platform/linux-generic/include/api/odp_config.h +++ b/platform/linux-generic/include/api/odp_config.h @@ -49,6 +49,25 @@ extern "C" { #define ODP_CONFIG_PKTIO_ENTRIES 64 /** + * Buffer segment size to use + * This is the granularity of segmented 
buffers. Sized for now to be large + * enough to support 1500-byte packets since the raw socket interface does not + * support scatter/gather I/O. ODP requires a minimum segment size of 128 + * bytes with 256 recommended. Linux-generic code will enforce a 256 byte + * minimum. Note that the chosen segment size must be a multiple of + * ODP_CACHE_LINE_SIZE. + */ +#define ODP_CONFIG_BUF_SEG_SIZE (512*3) + +/** + * Maximum buffer size supported + * Must be an integral number of segments and should be large enough to + * accommodate jumbo packets. Attempts to allocate or extend buffers to sizes + * larger than this limit will fail. + */ +#define ODP_CONFIG_BUF_MAX_SIZE (ODP_CONFIG_BUF_SEG_SIZE*7) + +/** * @} */ diff --git a/platform/linux-generic/include/api/odp_platform_types.h b/platform/linux-generic/include/api/odp_platform_types.h index 4db47d3..2181eb6 100644 --- a/platform/linux-generic/include/api/odp_platform_types.h +++ b/platform/linux-generic/include/api/odp_platform_types.h @@ -26,6 +26,9 @@ /** ODP Buffer pool */ typedef uint32_t odp_buffer_pool_t; +/** Invalid buffer pool */ +#define ODP_BUFFER_POOL_INVALID (0xffffffff) + /** ODP buffer */ typedef uint32_t odp_buffer_t; @@ -65,6 +68,15 @@ typedef uint32_t odp_pktio_t; #define ODP_PKTIO_ANY ((odp_pktio_t)~0) /** + * ODP shared memory block + */ +typedef uint32_t odp_shm_t; + +/** Invalid shared memory block */ +#define ODP_SHM_INVALID 0 +#define ODP_SHM_NULL ODP_SHM_INVALID /**< Synonym for buffer pool use */ + +/** * @} */ diff --git a/platform/linux-generic/include/api/odp_shared_memory.h b/platform/linux-generic/include/api/odp_shared_memory.h index 26e208b..f70db5a 100644 --- a/platform/linux-generic/include/api/odp_shared_memory.h +++ b/platform/linux-generic/include/api/odp_shared_memory.h @@ -20,6 +20,7 @@ extern "C" { #include <odp_std_types.h> +#include <odp_platform_types.h> /** @defgroup odp_shared_memory ODP SHARED MEMORY * Operations on shared memory. 
@@ -38,15 +39,6 @@ extern "C" { #define ODP_SHM_PROC 0x2 /**< Share with external processes */ /** - * ODP shared memory block - */ -typedef uint32_t odp_shm_t; - -/** Invalid shared memory block */ -#define ODP_SHM_INVALID 0 - - -/** * Shared memory block info */ typedef struct odp_shm_info_t { diff --git a/platform/linux-generic/include/odp_buffer_inlines.h b/platform/linux-generic/include/odp_buffer_inlines.h new file mode 100644 index 0000000..9eb425c --- /dev/null +++ b/platform/linux-generic/include/odp_buffer_inlines.h @@ -0,0 +1,150 @@ +/* Copyright (c) 2014, Linaro Limited + * All rights reserved. + * + * SPDX-License-Identifier: BSD-3-Clause + */ + +/** + * @file + * + * Inline functions for ODP buffer mgmt routines - implementation internal + */ + +#ifndef ODP_BUFFER_INLINES_H_ +#define ODP_BUFFER_INLINES_H_ + +#ifdef __cplusplus +extern "C" { +#endif + +static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr) +{ + odp_buffer_bits_t handle; + uint32_t pool_id = pool_handle_to_index(hdr->pool_hdl); + struct pool_entry_s *pool = get_pool_entry(pool_id); + + handle.pool_id = pool_id; + handle.index = ((uint8_t *)hdr - pool->pool_base_addr) / + ODP_CACHE_LINE_SIZE; + handle.seg = 0; + + return handle.u32; +} + +static inline odp_buffer_t odp_hdr_to_buf(odp_buffer_hdr_t *hdr) +{ + return hdr->handle.handle; +} + +static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) +{ + odp_buffer_bits_t handle; + uint32_t pool_id; + uint32_t index; + struct pool_entry_s *pool; + + handle.u32 = buf; + pool_id = handle.pool_id; + index = handle.index; + +#ifdef POOL_ERROR_CHECK + if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { + ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); + return NULL; + } +#endif + + pool = get_pool_entry(pool_id); + +#ifdef POOL_ERROR_CHECK + if (odp_unlikely(index > pool->params.num_bufs - 1)) { + ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); + return NULL; + } +#endif + + return (odp_buffer_hdr_t *)(void *) + 
(pool->pool_base_addr + (index * ODP_CACHE_LINE_SIZE)); +} + +static inline uint32_t odp_buffer_refcount(odp_buffer_hdr_t *buf) +{ + return odp_atomic_load_u32(&buf->ref_count); +} + +static inline uint32_t odp_buffer_incr_refcount(odp_buffer_hdr_t *buf, + uint32_t val) +{ + return odp_atomic_fetch_add_u32(&buf->ref_count, val) + val; +} + +static inline uint32_t odp_buffer_decr_refcount(odp_buffer_hdr_t *buf, + uint32_t val) +{ + uint32_t tmp; + + tmp = odp_atomic_fetch_sub_u32(&buf->ref_count, val); + + if (tmp < val) { + odp_atomic_fetch_add_u32(&buf->ref_count, val - tmp); + return 0; + } else { + return tmp - val; + } +} + +static inline odp_buffer_hdr_t *validate_buf(odp_buffer_t buf) +{ + odp_buffer_bits_t handle; + odp_buffer_hdr_t *buf_hdr; + handle.u32 = buf; + + /* For buffer handles, segment index must be 0 and pool id in range */ + if (handle.seg != 0 || handle.pool_id >= ODP_CONFIG_BUFFER_POOLS) + return NULL; + + pool_entry_t *pool = odp_pool_to_entry(handle.pool_id); + + /* If pool not created, handle is invalid */ + if (pool->s.pool_shm == ODP_SHM_INVALID) + return NULL; + + uint32_t buf_stride = pool->s.buf_stride / ODP_CACHE_LINE_SIZE; + + /* A valid buffer index must be on stride, and must be in range */ + if ((handle.index % buf_stride != 0) || + ((uint32_t)(handle.index / buf_stride) >= pool->s.params.num_bufs)) + return NULL; + + buf_hdr = (odp_buffer_hdr_t *)(void *) + (pool->s.pool_base_addr + + (handle.index * ODP_CACHE_LINE_SIZE)); + + /* Handle is valid, so buffer is valid if it is allocated */ + return buf_hdr->allocator == ODP_FREEBUF ? NULL : buf_hdr; +} + +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf); + +static inline void *buffer_map(odp_buffer_hdr_t *buf, + uint32_t offset, + uint32_t *seglen, + uint32_t limit) +{ + int seg_index = offset / buf->segsize; + int seg_offset = offset % buf->segsize; + + if (seglen != NULL) { + uint32_t buf_left = limit - offset; + *seglen = buf_left < buf->segsize ? 
+ buf_left : buf->segsize - seg_offset; + } + + return (void *)(seg_offset + (uint8_t *)buf->addr[seg_index]); +} + +#ifdef __cplusplus +} +#endif + +#endif diff --git a/platform/linux-generic/include/odp_buffer_internal.h b/platform/linux-generic/include/odp_buffer_internal.h index 0027bfc..632dcbf 100644 --- a/platform/linux-generic/include/odp_buffer_internal.h +++ b/platform/linux-generic/include/odp_buffer_internal.h @@ -24,99 +24,131 @@ extern "C" { #include <odp_buffer.h> #include <odp_debug.h> #include <odp_align.h> - -/* TODO: move these to correct files */ - -typedef uint64_t odp_phys_addr_t; +#include <odp_align_internal.h> +#include <odp_config.h> +#include <odp_byteorder.h> +#include <odp_thread.h> + + +#define ODP_BITSIZE(x) \ + ((x) <= 2 ? 1 : \ + ((x) <= 4 ? 2 : \ + ((x) <= 8 ? 3 : \ + ((x) <= 16 ? 4 : \ + ((x) <= 32 ? 5 : \ + ((x) <= 64 ? 6 : \ + ((x) <= 128 ? 7 : \ + ((x) <= 256 ? 8 : \ + ((x) <= 512 ? 9 : \ + ((x) <= 1024 ? 10 : \ + ((x) <= 2048 ? 11 : \ + ((x) <= 4096 ? 12 : \ + ((x) <= 8192 ? 13 : \ + ((x) <= 16384 ? 14 : \ + ((x) <= 32768 ? 15 : \ + ((x) <= 65536 ? 
16 : \ + (0/0))))))))))))))))) + +ODP_STATIC_ASSERT(ODP_CONFIG_BUF_SEG_SIZE >= 256, + "ODP Segment size must be a minimum of 256 bytes"); + +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_SEG_SIZE % ODP_CACHE_LINE_SIZE) == 0, + "ODP Segment size must be a multiple of cache line size"); + +ODP_STATIC_ASSERT((ODP_CONFIG_BUF_MAX_SIZE % ODP_CONFIG_BUF_SEG_SIZE) == 0, + "Buffer max size must be a multiple of segment size"); + +#define ODP_BUFFER_MAX_SEG (ODP_CONFIG_BUF_MAX_SIZE/ODP_CONFIG_BUF_SEG_SIZE) + +/* We can optimize storage of small buffers within metadata area */ +#define ODP_MAX_INLINE_BUF ((sizeof(void *)) * (ODP_BUFFER_MAX_SEG - 1)) + +#define ODP_BUFFER_POOL_BITS ODP_BITSIZE(ODP_CONFIG_BUFFER_POOLS) +#define ODP_BUFFER_SEG_BITS ODP_BITSIZE(ODP_BUFFER_MAX_SEG) +#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS - ODP_BUFFER_SEG_BITS) +#define ODP_BUFFER_PREFIX_BITS (ODP_BUFFER_POOL_BITS + ODP_BUFFER_INDEX_BITS) +#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) +#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) #define ODP_BUFFER_MAX_INDEX (ODP_BUFFER_MAX_BUFFERS - 2) #define ODP_BUFFER_INVALID_INDEX (ODP_BUFFER_MAX_BUFFERS - 1) -#define ODP_BUFS_PER_CHUNK 16 -#define ODP_BUFS_PER_SCATTER 4 - -#define ODP_BUFFER_TYPE_CHUNK 0xffff - - -#define ODP_BUFFER_POOL_BITS 4 -#define ODP_BUFFER_INDEX_BITS (32 - ODP_BUFFER_POOL_BITS) -#define ODP_BUFFER_MAX_POOLS (1 << ODP_BUFFER_POOL_BITS) -#define ODP_BUFFER_MAX_BUFFERS (1 << ODP_BUFFER_INDEX_BITS) - typedef union odp_buffer_bits_t { uint32_t u32; odp_buffer_t handle; struct { +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN uint32_t pool_id:ODP_BUFFER_POOL_BITS; uint32_t index:ODP_BUFFER_INDEX_BITS; + uint32_t seg:ODP_BUFFER_SEG_BITS; +#else + uint32_t seg:ODP_BUFFER_SEG_BITS; + uint32_t index:ODP_BUFFER_INDEX_BITS; + uint32_t pool_id:ODP_BUFFER_POOL_BITS; +#endif }; -} odp_buffer_bits_t; + struct { +#if ODP_BYTE_ORDER == ODP_BIG_ENDIAN + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; 
+#else + uint32_t pfxseg:ODP_BUFFER_SEG_BITS; + uint32_t prefix:ODP_BUFFER_PREFIX_BITS; +#endif + }; +} odp_buffer_bits_t; /* forward declaration */ struct odp_buffer_hdr_t; - -/* - * Scatter/gather list of buffers - */ -typedef struct odp_buffer_scatter_t { - /* buffer pointers */ - struct odp_buffer_hdr_t *buf[ODP_BUFS_PER_SCATTER]; - int num_bufs; /* num buffers */ - int pos; /* position on the list */ - size_t total_len; /* Total length */ -} odp_buffer_scatter_t; - - -/* - * Chunk of buffers (in single pool) - */ -typedef struct odp_buffer_chunk_t { - uint32_t num_bufs; /* num buffers */ - uint32_t buf_index[ODP_BUFS_PER_CHUNK]; /* buffers */ -} odp_buffer_chunk_t; - - /* Common buffer header */ typedef struct odp_buffer_hdr_t { struct odp_buffer_hdr_t *next; /* next buf in a list */ + int allocator; /* allocating thread id */ odp_buffer_bits_t handle; /* handle */ - odp_phys_addr_t phys_addr; /* physical data start address */ - void *addr; /* virtual data start address */ - uint32_t index; /* buf index in the pool */ + union { + uint32_t all; + struct { + uint32_t zeroized:1; /* Zeroize buf data on free */ + uint32_t hdrdata:1; /* Data is in buffer hdr */ + }; + } flags; + int type; /* buffer type */ size_t size; /* max data size */ - size_t cur_offset; /* current offset */ odp_atomic_u32_t ref_count; /* reference count */ - odp_buffer_scatter_t scatter; /* Scatter/gather list */ - int type; /* type of next header */ odp_buffer_pool_t pool_hdl; /* buffer pool handle */ - + union { + uint64_t buf_u64; /* user u64 */ + void *buf_ctx; /* user context */ + void *udata_addr; /* user metadata addr */ + }; + size_t udata_size; /* size of user metadata */ + uint32_t segcount; /* segment count */ + uint32_t segsize; /* segment size */ + void *addr[ODP_BUFFER_MAX_SEG]; /* block addrs */ } odp_buffer_hdr_t; -/* Ensure next header starts from 8 byte align */ -ODP_STATIC_ASSERT((sizeof(odp_buffer_hdr_t) % 8) == 0, "ODP_BUFFER_HDR_T__SIZE_ERROR"); +typedef struct 
odp_buffer_hdr_stride { + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_buffer_hdr_t))]; +} odp_buffer_hdr_stride; +typedef struct odp_buf_blk_t { + struct odp_buf_blk_t *next; + struct odp_buf_blk_t *prev; +} odp_buf_blk_t; /* Raw buffer header */ typedef struct { odp_buffer_hdr_t buf_hdr; /* common buffer header */ - uint8_t buf_data[]; /* start of buffer data area */ } odp_raw_buffer_hdr_t; +/* Free buffer marker */ +#define ODP_FREEBUF -1 -/* Chunk header */ -typedef struct odp_buffer_chunk_hdr_t { - odp_buffer_hdr_t buf_hdr; - odp_buffer_chunk_t chunk; -} odp_buffer_chunk_hdr_t; - - -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf); - -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src); - +/* Forward declarations */ +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool, size_t size); #ifdef __cplusplus } diff --git a/platform/linux-generic/include/odp_buffer_pool_internal.h b/platform/linux-generic/include/odp_buffer_pool_internal.h index e0210bd..347be39 100644 --- a/platform/linux-generic/include/odp_buffer_pool_internal.h +++ b/platform/linux-generic/include/odp_buffer_pool_internal.h @@ -19,12 +19,44 @@ extern "C" { #endif #include <odp_std_types.h> +#include <odp_align.h> +#include <odp_align_internal.h> #include <odp_buffer_pool.h> #include <odp_buffer_internal.h> -#include <odp_align.h> #include <odp_hints.h> #include <odp_config.h> #include <odp_debug.h> +#include <odp_shared_memory.h> +#include <odp_atomic.h> +#include <odp_atomic_internal.h> +#include <string.h> + +/** + * Buffer initialization routine prototype + * + * @note Routines of this type MAY be passed as part of the + * _odp_buffer_pool_init_t structure to be called whenever a + * buffer is allocated to initialize the user metadata + * associated with that buffer. + */ +typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg); + +/** + * Buffer pool initialization parameters + * Used to communicate buffer pool initialization options. 
Internal for now. + */ +typedef struct _odp_buffer_pool_init_t { + size_t udata_size; /**< Size of user metadata for each buffer */ + _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */ + void *buf_init_arg; /**< Argument to be passed to buf_init() */ +} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */ + +/* Local cache for buffer alloc/free acceleration */ +typedef struct local_cache_t { + odp_buffer_hdr_t *buf_freelist; /* The local cache */ + uint64_t bufallocs; /* Local buffer alloc count */ + uint64_t buffrees; /* Local buffer free count */ +} local_cache_t; /* Use ticketlock instead of spinlock */ #define POOL_USE_TICKETLOCK @@ -39,6 +71,17 @@ extern "C" { #include <odp_spinlock.h> #endif +#ifdef POOL_USE_TICKETLOCK +#include <odp_ticketlock.h> +#define LOCK(a) odp_ticketlock_lock(a) +#define UNLOCK(a) odp_ticketlock_unlock(a) +#define LOCK_INIT(a) odp_ticketlock_init(a) +#else +#include <odp_spinlock.h> +#define LOCK(a) odp_spinlock_lock(a) +#define UNLOCK(a) odp_spinlock_unlock(a) +#define LOCK_INIT(a) odp_spinlock_init(a) +#endif struct pool_entry_s { #ifdef POOL_USE_TICKETLOCK @@ -47,66 +90,292 @@ struct pool_entry_s { odp_spinlock_t lock ODP_ALIGNED_CACHE; #endif - odp_buffer_chunk_hdr_t *head; - uint64_t free_bufs; char name[ODP_BUFFER_POOL_NAME_LEN]; - - odp_buffer_pool_t pool_hdl ODP_ALIGNED_CACHE; - uintptr_t buf_base; - size_t buf_size; - size_t buf_offset; - uint64_t num_bufs; - void *pool_base_addr; - uint64_t pool_size; - size_t user_size; - size_t user_align; - int buf_type; - size_t hdr_size; + odp_buffer_pool_param_t params; + _odp_buffer_pool_init_t init_params; + odp_buffer_pool_t pool_hdl; + uint32_t pool_id; + odp_shm_t pool_shm; + union { + uint32_t all; + struct { + uint32_t has_name:1; + uint32_t user_supplied_shm:1; + uint32_t unsegmented:1; + uint32_t zeroized:1; + uint32_t predefined:1; + }; + } flags; + uint32_t quiesced; + uint32_t low_wm_assert; + uint8_t *pool_base_addr; + size_t 
pool_size; + uint32_t buf_stride; + _odp_atomic_ptr_t buf_freelist; + _odp_atomic_ptr_t blk_freelist; + odp_atomic_u32_t bufcount; + odp_atomic_u32_t blkcount; + odp_atomic_u64_t bufallocs; + odp_atomic_u64_t buffrees; + odp_atomic_u64_t blkallocs; + odp_atomic_u64_t blkfrees; + odp_atomic_u64_t bufempty; + odp_atomic_u64_t blkempty; + odp_atomic_u64_t high_wm_count; + odp_atomic_u64_t low_wm_count; + uint32_t seg_size; + uint32_t high_wm; + uint32_t low_wm; + uint32_t headroom; + uint32_t tailroom; }; +typedef union pool_entry_u { + struct pool_entry_s s; + + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; +} pool_entry_t; extern void *pool_entry_ptr[]; +#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1) +#define buffer_is_secure(buf) (buf->flags.zeroized) +#define pool_is_secure(pool) (pool->flags.zeroized) +#else +#define buffer_is_secure(buf) 0 +#define pool_is_secure(pool) 0 +#endif + +#define TAG_ALIGN ((size_t)16) -static inline void *get_pool_entry(uint32_t pool_id) +#define odp_cs(ptr, old, new) \ + _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void *)new, \ + _ODP_MEMMODEL_SC, \ + _ODP_MEMMODEL_SC) + +/* Helper functions for pointer tagging to avoid ABA race conditions */ +#define odp_tag(ptr) \ + (((size_t)ptr) & (TAG_ALIGN - 1)) + +#define odp_detag(ptr) \ + ((typeof(ptr))(((size_t)ptr) & -TAG_ALIGN)) + +#define odp_retag(ptr, tag) \ + ((typeof(ptr))(((size_t)ptr) | odp_tag(tag))) + + +static inline void *get_blk(struct pool_entry_s *pool) { - return pool_entry_ptr[pool_id]; + void *oldhead, *myhead, *newhead; + + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); + + do { + size_t tag = odp_tag(oldhead); + myhead = odp_detag(oldhead); + if (odp_unlikely(myhead == NULL)) + break; + newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + 1); + } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0); + + if (odp_unlikely(myhead == NULL)) + odp_atomic_inc_u64(&pool->blkempty); + 
else + odp_atomic_dec_u32(&pool->blkcount); + + return (void *)myhead; } +static inline void ret_blk(struct pool_entry_s *pool, void *block) +{ + void *oldhead, *myhead, *myblock; + + oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ); + + do { + size_t tag = odp_tag(oldhead); + myhead = odp_detag(oldhead); + ((odp_buf_blk_t *)block)->next = myhead; + myblock = odp_retag(block, tag + 1); + } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0); -static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf) + odp_atomic_inc_u32(&pool->blkcount); + odp_atomic_inc_u64(&pool->blkfrees); +} + +static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool) { - odp_buffer_bits_t handle; - uint32_t pool_id; - uint32_t index; - struct pool_entry_s *pool; - odp_buffer_hdr_t *hdr; - - handle.u32 = buf; - pool_id = handle.pool_id; - index = handle.index; - -#ifdef POOL_ERROR_CHECK - if (odp_unlikely(pool_id > ODP_CONFIG_BUFFER_POOLS)) { - ODP_ERR("odp_buf_to_hdr: Bad pool id\n"); - return NULL; + odp_buffer_hdr_t *oldhead, *myhead, *newhead; + + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ); + + do { + size_t tag = odp_tag(oldhead); + myhead = odp_detag(oldhead); + if (odp_unlikely(myhead == NULL)) + break; + newhead = odp_retag(myhead->next, tag + 1); + } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0); + + if (odp_unlikely(myhead == NULL)) { + odp_atomic_inc_u64(&pool->bufempty); + } else { + uint64_t bufcount = + odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1; + + /* Check for low watermark condition */ + if (bufcount == pool->low_wm && !pool->low_wm_assert) { + pool->low_wm_assert = 1; + odp_atomic_inc_u64(&pool->low_wm_count); + } + + odp_atomic_inc_u64(&pool->bufallocs); + myhead->next = myhead; /* Mark buffer allocated */ + myhead->allocator = odp_thread_id(); } -#endif - pool = get_pool_entry(pool_id); + return (void *)myhead; +} + +static inline void ret_buf(struct pool_entry_s *pool, 
odp_buffer_hdr_t *buf) +{ + odp_buffer_hdr_t *oldhead, *myhead, *mybuf; + + buf->allocator = ODP_FREEBUF; /* Mark buffer free */ -#ifdef POOL_ERROR_CHECK - if (odp_unlikely(index > pool->num_bufs - 1)) { - ODP_ERR("odp_buf_to_hdr: Bad buffer index\n"); - return NULL; + if (!buf->flags.hdrdata && buf->type != ODP_BUFFER_TYPE_RAW) { + while (buf->segcount > 0) { + if (buffer_is_secure(buf) || pool_is_secure(pool)) + memset(buf->addr[buf->segcount - 1], + 0, buf->segsize); + ret_blk(pool, buf->addr[--buf->segcount]); + } + buf->size = 0; } -#endif - hdr = (odp_buffer_hdr_t *)(pool->buf_base + index * pool->buf_size); + oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ); + + do { + size_t tag = odp_tag(oldhead); + myhead = odp_detag(oldhead); + buf->next = myhead; + mybuf = odp_retag(buf, tag + 1); + } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0); + + uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1; - return hdr; + /* Check if low watermark condition should be deasserted */ + if (bufcount == pool->high_wm && pool->low_wm_assert) { + pool->low_wm_assert = 0; + odp_atomic_inc_u64(&pool->high_wm_count); + } + + odp_atomic_inc_u64(&pool->buffrees); +} + +static inline void *get_local_buf(local_cache_t *buf_cache, + struct pool_entry_s *pool, + size_t totsize) +{ + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; + + if (odp_likely(buf != NULL)) { + buf_cache->buf_freelist = buf->next; + + if (odp_unlikely(buf->size < totsize)) { + size_t needed = totsize - buf->size; + + do { + void *blk = get_blk(pool); + if (odp_unlikely(blk == NULL)) { + ret_buf(pool, buf); + buf_cache->buffrees--; + return NULL; + } + buf->addr[buf->segcount++] = blk; + needed -= pool->seg_size; + } while ((ssize_t)needed > 0); + + buf->size = buf->segcount * pool->seg_size; + } + + buf_cache->bufallocs++; + buf->allocator = odp_thread_id(); /* Mark buffer allocated */ + } + + return buf; +} + +static inline void ret_local_buf(local_cache_t 
*buf_cache, + odp_buffer_hdr_t *buf) +{ + buf->allocator = ODP_FREEBUF; + buf->next = buf_cache->buf_freelist; + buf_cache->buf_freelist = buf; + + buf_cache->buffrees++; +} + +static inline void flush_cache(local_cache_t *buf_cache, + struct pool_entry_s *pool) +{ + odp_buffer_hdr_t *buf = buf_cache->buf_freelist; + uint32_t flush_count = 0; + + while (buf != NULL) { + odp_buffer_hdr_t *next = buf->next; + ret_buf(pool, buf); + buf = next; + flush_count++; + } + + odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs); + odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - flush_count); + + buf_cache->buf_freelist = NULL; + buf_cache->bufallocs = 0; + buf_cache->buffrees = 0; +} + +static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) +{ + return pool_id; +} + +static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) +{ + return pool_hdl; +} + +static inline void *get_pool_entry(uint32_t pool_id) +{ + return pool_entry_ptr[pool_id]; +} + +static inline pool_entry_t *odp_pool_to_entry(odp_buffer_pool_t pool) +{ + return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool)); +} + +static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf) +{ + return odp_pool_to_entry(buf->pool_hdl); +} + +static inline uint32_t odp_buffer_pool_segment_size(odp_buffer_pool_t pool) +{ + return odp_pool_to_entry(pool)->s.seg_size; +} + +static inline uint32_t odp_buffer_pool_headroom(odp_buffer_pool_t pool) +{ + return odp_pool_to_entry(pool)->s.headroom; } +static inline uint32_t odp_buffer_pool_tailroom(odp_buffer_pool_t pool) +{ + return odp_pool_to_entry(pool)->s.tailroom; +} #ifdef __cplusplus } diff --git a/platform/linux-generic/include/odp_internal.h b/platform/linux-generic/include/odp_internal.h index f8c1596..11d6393 100644 --- a/platform/linux-generic/include/odp_internal.h +++ b/platform/linux-generic/include/odp_internal.h @@ -42,6 +42,8 @@ int odp_schedule_init_local(void); int odp_timer_init_global(void); int 
odp_timer_disarm_all(void); +void _odp_flush_caches(void); + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_packet_internal.h b/platform/linux-generic/include/odp_packet_internal.h index 49c59b2..f34a83d 100644 --- a/platform/linux-generic/include/odp_packet_internal.h +++ b/platform/linux-generic/include/odp_packet_internal.h @@ -22,6 +22,7 @@ extern "C" { #include <odp_debug.h> #include <odp_buffer_internal.h> #include <odp_buffer_pool_internal.h> +#include <odp_buffer_inlines.h> #include <odp_packet.h> #include <odp_packet_io.h> @@ -92,7 +93,8 @@ typedef union { }; } output_flags_t; -ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), "OUTPUT_FLAGS_SIZE_ERROR"); +ODP_STATIC_ASSERT(sizeof(output_flags_t) == sizeof(uint32_t), + "OUTPUT_FLAGS_SIZE_ERROR"); /** * Internal Packet header @@ -105,25 +107,23 @@ typedef struct { error_flags_t error_flags; output_flags_t output_flags; - uint32_t frame_offset; /**< offset to start of frame, even on error */ uint32_t l2_offset; /**< offset to L2 hdr, e.g. Eth */ uint32_t l3_offset; /**< offset to L3 hdr, e.g. 
IPv4, IPv6 */ uint32_t l4_offset; /**< offset to L4 hdr (TCP, UDP, SCTP, also ICMP) */ uint32_t frame_len; + uint32_t headroom; + uint32_t tailroom; uint64_t user_ctx; /* user context */ odp_pktio_t input; - - uint32_t pad; - uint8_t buf_data[]; /* start of buffer data area */ } odp_packet_hdr_t; -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) == ODP_OFFSETOF(odp_packet_hdr_t, buf_data), - "ODP_PACKET_HDR_T__SIZE_ERR"); -ODP_STATIC_ASSERT(sizeof(odp_packet_hdr_t) % sizeof(uint64_t) == 0, - "ODP_PACKET_HDR_T__SIZE_ERR2"); +typedef struct odp_packet_hdr_stride { + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_packet_hdr_t))]; +} odp_packet_hdr_stride; + /** * Return the packet header @@ -138,6 +138,38 @@ static inline odp_packet_hdr_t *odp_packet_hdr(odp_packet_t pkt) */ void odp_packet_parse(odp_packet_t pkt, size_t len, size_t l2_offset); +/** + * Initialize packet buffer + */ +static inline void packet_init(pool_entry_t *pool, + odp_packet_hdr_t *pkt_hdr, + size_t size) +{ + /* + * Reset parser metadata. Note that we clear via memset to make + * this routine independent of any additional adds to packet metadata. + */ + const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr); + uint8_t *start; + size_t len; + + start = (uint8_t *)pkt_hdr + start_offset; + len = sizeof(odp_packet_hdr_t) - start_offset; + memset(start, 0, len); + + /* + * Packet headroom is set from the pool's headroom + * Packet tailroom is rounded up to fill the last + * segment occupied by the allocated length. 
+ */ + pkt_hdr->frame_len = size; + pkt_hdr->headroom = pool->s.headroom; + pkt_hdr->tailroom = + (pool->s.seg_size * pkt_hdr->buf_hdr.segcount) - + (pool->s.headroom + size); +} + + #ifdef __cplusplus } #endif diff --git a/platform/linux-generic/include/odp_timer_internal.h b/platform/linux-generic/include/odp_timer_internal.h index ad28f53..2ff36ce 100644 --- a/platform/linux-generic/include/odp_timer_internal.h +++ b/platform/linux-generic/include/odp_timer_internal.h @@ -51,14 +51,9 @@ typedef struct odp_timeout_hdr_t { uint8_t buf_data[]; } odp_timeout_hdr_t; - - -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) == - ODP_OFFSETOF(odp_timeout_hdr_t, buf_data), - "ODP_TIMEOUT_HDR_T__SIZE_ERR"); - -ODP_STATIC_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0, - "ODP_TIMEOUT_HDR_T__SIZE_ERR2"); +typedef struct odp_timeout_hdr_stride { + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_timeout_hdr_t))]; +} odp_timeout_hdr_stride; /** diff --git a/platform/linux-generic/odp_buffer.c b/platform/linux-generic/odp_buffer.c index bcbb99a..c1bef54 100644 --- a/platform/linux-generic/odp_buffer.c +++ b/platform/linux-generic/odp_buffer.c @@ -5,8 +5,9 @@ */ #include <odp_buffer.h> -#include <odp_buffer_internal.h> #include <odp_buffer_pool_internal.h> +#include <odp_buffer_internal.h> +#include <odp_buffer_inlines.h> #include <string.h> #include <stdio.h> @@ -16,7 +17,7 @@ void *odp_buffer_addr(odp_buffer_t buf) { odp_buffer_hdr_t *hdr = odp_buf_to_hdr(buf); - return hdr->addr; + return hdr->addr[0]; } @@ -38,15 +39,11 @@ int odp_buffer_type(odp_buffer_t buf) int odp_buffer_is_valid(odp_buffer_t buf) { - odp_buffer_bits_t handle; - - handle.u32 = buf; - - return (handle.index != ODP_BUFFER_INVALID_INDEX); + return validate_buf(buf) != NULL; } -int odp_buffer_snprint(char *str, size_t n, odp_buffer_t buf) +int odp_buffer_snprint(char *str, uint32_t n, odp_buffer_t buf) { odp_buffer_hdr_t *hdr; int len = 0; @@ -63,28 +60,14 @@ int odp_buffer_snprint(char *str, size_t n, 
odp_buffer_t buf) len += snprintf(&str[len], n-len, " pool %i\n", hdr->pool_hdl); len += snprintf(&str[len], n-len, - " index %"PRIu32"\n", hdr->index); - len += snprintf(&str[len], n-len, - " phy_addr %"PRIu64"\n", hdr->phys_addr); - len += snprintf(&str[len], n-len, " addr %p\n", hdr->addr); len += snprintf(&str[len], n-len, " size %zu\n", hdr->size); len += snprintf(&str[len], n-len, - " cur_offset %zu\n", hdr->cur_offset); - len += snprintf(&str[len], n-len, " ref_count %i\n", odp_atomic_load_u32(&hdr->ref_count)); len += snprintf(&str[len], n-len, " type %i\n", hdr->type); - len += snprintf(&str[len], n-len, - " Scatter list\n"); - len += snprintf(&str[len], n-len, - " num_bufs %i\n", hdr->scatter.num_bufs); - len += snprintf(&str[len], n-len, - " pos %i\n", hdr->scatter.pos); - len += snprintf(&str[len], n-len, - " total_len %zu\n", hdr->scatter.total_len); return len; } @@ -101,9 +84,3 @@ void odp_buffer_print(odp_buffer_t buf) ODP_PRINT("\n%s\n", str); } - -void odp_buffer_copy_scatter(odp_buffer_t buf_dst, odp_buffer_t buf_src) -{ - (void)buf_dst; - (void)buf_src; -} diff --git a/platform/linux-generic/odp_buffer_pool.c b/platform/linux-generic/odp_buffer_pool.c index 83c51fa..e3f90a2 100644 --- a/platform/linux-generic/odp_buffer_pool.c +++ b/platform/linux-generic/odp_buffer_pool.c @@ -6,8 +6,9 @@ #include <odp_std_types.h> #include <odp_buffer_pool.h> -#include <odp_buffer_pool_internal.h> #include <odp_buffer_internal.h> +#include <odp_buffer_pool_internal.h> +#include <odp_buffer_inlines.h> #include <odp_packet_internal.h> #include <odp_timer_internal.h> #include <odp_align_internal.h> @@ -17,57 +18,35 @@ #include <odp_config.h> #include <odp_hints.h> #include <odp_debug_internal.h> +#include <odp_atomic_internal.h> #include <string.h> #include <stdlib.h> -#ifdef POOL_USE_TICKETLOCK -#include <odp_ticketlock.h> -#define LOCK(a) odp_ticketlock_lock(a) -#define UNLOCK(a) odp_ticketlock_unlock(a) -#define LOCK_INIT(a) odp_ticketlock_init(a) -#else 
-#include <odp_spinlock.h> -#define LOCK(a) odp_spinlock_lock(a) -#define UNLOCK(a) odp_spinlock_unlock(a) -#define LOCK_INIT(a) odp_spinlock_init(a) -#endif - - #if ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS #error ODP_CONFIG_BUFFER_POOLS > ODP_BUFFER_MAX_POOLS #endif -#define NULL_INDEX ((uint32_t)-1) -union buffer_type_any_u { +typedef union buffer_type_any_u { odp_buffer_hdr_t buf; odp_packet_hdr_t pkt; odp_timeout_hdr_t tmo; -}; - -ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0, - "BUFFER_TYPE_ANY_U__SIZE_ERR"); +} odp_anybuf_t; /* Any buffer type header */ typedef struct { union buffer_type_any_u any_hdr; /* any buffer type */ - uint8_t buf_data[]; /* start of buffer data area */ } odp_any_buffer_hdr_t; - -typedef union pool_entry_u { - struct pool_entry_s s; - - uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))]; - -} pool_entry_t; +typedef struct odp_any_hdr_stride { + uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))]; +} odp_any_hdr_stride; typedef struct pool_table_t { pool_entry_t pool[ODP_CONFIG_BUFFER_POOLS]; - } pool_table_t; @@ -77,38 +56,8 @@ static pool_table_t *pool_tbl; /* Pool entry pointers (for inlining) */ void *pool_entry_ptr[ODP_CONFIG_BUFFER_POOLS]; - -static __thread odp_buffer_chunk_hdr_t *local_chunk[ODP_CONFIG_BUFFER_POOLS]; - - -static inline odp_buffer_pool_t pool_index_to_handle(uint32_t pool_id) -{ - return pool_id + 1; -} - - -static inline uint32_t pool_handle_to_index(odp_buffer_pool_t pool_hdl) -{ - return pool_hdl -1; -} - - -static inline void set_handle(odp_buffer_hdr_t *hdr, - pool_entry_t *pool, uint32_t index) -{ - odp_buffer_pool_t pool_hdl = pool->s.pool_hdl; - uint32_t pool_id = pool_handle_to_index(pool_hdl); - - if (pool_id >= ODP_CONFIG_BUFFER_POOLS) - ODP_ABORT("set_handle: Bad pool handle %u\n", pool_hdl); - - if (index > ODP_BUFFER_MAX_INDEX) - ODP_ERR("set_handle: Bad buffer index\n"); - - hdr->handle.pool_id = pool_id; - hdr->handle.index = index; -} - 
+/* Local cache for buffer alloc/free acceleration */ +static __thread local_cache_t local_cache[ODP_CONFIG_BUFFER_POOLS]; int odp_buffer_pool_init_global(void) { @@ -131,7 +80,7 @@ int odp_buffer_pool_init_global(void) pool_entry_t *pool = &pool_tbl->pool[i]; LOCK_INIT(&pool->s.lock); pool->s.pool_hdl = pool_index_to_handle(i); - + pool->s.pool_id = i; pool_entry_ptr[i] = pool; } @@ -143,269 +92,258 @@ int odp_buffer_pool_init_global(void) return 0; } +/** + * Buffer pool creation + */ -static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index) -{ - odp_buffer_hdr_t *hdr; - - hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * pool->s.buf_size); - return hdr; -} - - -static void add_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr, uint32_t index) -{ - uint32_t i = chunk_hdr->chunk.num_bufs; - chunk_hdr->chunk.buf_index[i] = index; - chunk_hdr->chunk.num_bufs++; -} - - -static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr) +odp_buffer_pool_t odp_buffer_pool_create(const char *name, + odp_shm_t shm, + odp_buffer_pool_param_t *params) { - uint32_t index; + odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; + pool_entry_t *pool; uint32_t i; - i = chunk_hdr->chunk.num_bufs - 1; - index = chunk_hdr->chunk.buf_index[i]; - chunk_hdr->chunk.num_bufs--; - return index; -} - - -static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool, - odp_buffer_chunk_hdr_t *chunk_hdr) -{ - uint32_t index; + /* Default initialization paramters */ + static _odp_buffer_pool_init_t default_init_params = { + .udata_size = 0, + .buf_init = NULL, + .buf_init_arg = NULL, + }; - index = chunk_hdr->chunk.buf_index[ODP_BUFS_PER_CHUNK-1]; - if (index == NULL_INDEX) - return NULL; - else - return (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); -} + _odp_buffer_pool_init_t *init_params = &default_init_params; + if (params == NULL) + return ODP_BUFFER_POOL_INVALID; -static odp_buffer_chunk_hdr_t *rem_chunk(pool_entry_t *pool) -{ - odp_buffer_chunk_hdr_t *chunk_hdr; - 
- chunk_hdr = pool->s.head; - if (chunk_hdr == NULL) { - /* Pool is empty */ - return NULL; - } - - pool->s.head = next_chunk(pool, chunk_hdr); - pool->s.free_bufs -= ODP_BUFS_PER_CHUNK; - - /* unlink */ - rem_buf_index(chunk_hdr); - return chunk_hdr; -} - - -static void add_chunk(pool_entry_t *pool, odp_buffer_chunk_hdr_t *chunk_hdr) -{ - if (pool->s.head) /* link pool head to the chunk */ - add_buf_index(chunk_hdr, pool->s.head->buf_hdr.index); - else - add_buf_index(chunk_hdr, NULL_INDEX); - - pool->s.head = chunk_hdr; - pool->s.free_bufs += ODP_BUFS_PER_CHUNK; -} - - -static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr) -{ - if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.user_align)) { - ODP_ABORT("check_align: user data align error %p, align %zu\n", - hdr->addr, pool->s.user_align); - } + /* Restriction for v1.0: All buffers are unsegmented */ + const int unsegmented = 1; - if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) { - ODP_ABORT("check_align: hdr align error %p, align %i\n", - hdr, ODP_CACHE_LINE_SIZE); - } -} + /* Restriction for v1.0: No zeroization support */ + const int zeroized = 0; + /* Restriction for v1.0: No udata support */ + uint32_t udata_stride = (init_params->udata_size > sizeof(void *)) ? 
+ ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size) : + 0; -static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index, - int buf_type) -{ - odp_buffer_hdr_t *hdr = (odp_buffer_hdr_t *)ptr; - size_t size = pool->s.hdr_size; - uint8_t *buf_data; + uint32_t blk_size, buf_stride; - if (buf_type == ODP_BUFFER_TYPE_CHUNK) - size = sizeof(odp_buffer_chunk_hdr_t); + switch (params->buf_type) { + case ODP_BUFFER_TYPE_RAW: + blk_size = params->buf_size; - switch (pool->s.buf_type) { - odp_raw_buffer_hdr_t *raw_hdr; - odp_packet_hdr_t *packet_hdr; - odp_timeout_hdr_t *tmo_hdr; - odp_any_buffer_hdr_t *any_hdr; + /* Optimize small raw buffers */ + if (blk_size > ODP_MAX_INLINE_BUF) + blk_size = ODP_ALIGN_ROUNDUP(blk_size, TAG_ALIGN); - case ODP_BUFFER_TYPE_RAW: - raw_hdr = ptr; - buf_data = raw_hdr->buf_data; + buf_stride = sizeof(odp_buffer_hdr_stride); break; + case ODP_BUFFER_TYPE_PACKET: - packet_hdr = ptr; - buf_data = packet_hdr->buf_data; + if (unsegmented) + blk_size = + ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); + else + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, + ODP_CONFIG_BUF_SEG_SIZE); + buf_stride = sizeof(odp_packet_hdr_stride); break; + case ODP_BUFFER_TYPE_TIMEOUT: - tmo_hdr = ptr; - buf_data = tmo_hdr->buf_data; + blk_size = 0; /* Timeouts have no block data, only metadata */ + buf_stride = sizeof(odp_timeout_hdr_stride); break; + case ODP_BUFFER_TYPE_ANY: - any_hdr = ptr; - buf_data = any_hdr->buf_data; + if (unsegmented) + blk_size = + ODP_CACHE_LINE_SIZE_ROUNDUP(params->buf_size); + else + blk_size = ODP_ALIGN_ROUNDUP(params->buf_size, + ODP_CONFIG_BUF_SEG_SIZE); + buf_stride = sizeof(odp_any_hdr_stride); break; - default: - ODP_ABORT("Bad buffer type\n"); - } - - memset(hdr, 0, size); - - set_handle(hdr, pool, index); - - hdr->addr = &buf_data[pool->s.buf_offset - pool->s.hdr_size]; - hdr->index = index; - hdr->size = pool->s.user_size; - hdr->pool_hdl = pool->s.pool_hdl; - hdr->type = buf_type; - - check_align(pool, hdr); -} - - 
-static void link_bufs(pool_entry_t *pool) -{ - odp_buffer_chunk_hdr_t *chunk_hdr; - size_t hdr_size; - size_t data_size; - size_t data_align; - size_t tot_size; - size_t offset; - size_t min_size; - uint64_t pool_size; - uintptr_t buf_base; - uint32_t index; - uintptr_t pool_base; - int buf_type; - - buf_type = pool->s.buf_type; - data_size = pool->s.user_size; - data_align = pool->s.user_align; - pool_size = pool->s.pool_size; - pool_base = (uintptr_t) pool->s.pool_base_addr; - - if (buf_type == ODP_BUFFER_TYPE_RAW) { - hdr_size = sizeof(odp_raw_buffer_hdr_t); - } else if (buf_type == ODP_BUFFER_TYPE_PACKET) { - hdr_size = sizeof(odp_packet_hdr_t); - } else if (buf_type == ODP_BUFFER_TYPE_TIMEOUT) { - hdr_size = sizeof(odp_timeout_hdr_t); - } else if (buf_type == ODP_BUFFER_TYPE_ANY) { - hdr_size = sizeof(odp_any_buffer_hdr_t); - } else - ODP_ABORT("odp_buffer_pool_create: Bad type %i\n", buf_type); - - - /* Chunk must fit into buffer data area.*/ - min_size = sizeof(odp_buffer_chunk_hdr_t) - hdr_size; - if (data_size < min_size) - data_size = min_size; - - /* Roundup data size to full cachelines */ - data_size = ODP_CACHE_LINE_SIZE_ROUNDUP(data_size); - - /* Min cacheline alignment for buffer header and data */ - data_align = ODP_CACHE_LINE_SIZE_ROUNDUP(data_align); - offset = ODP_CACHE_LINE_SIZE_ROUNDUP(hdr_size); - - /* Multiples of cacheline size */ - if (data_size > data_align) - tot_size = data_size + offset; - else - tot_size = data_align + offset; - - /* First buffer */ - buf_base = ODP_ALIGN_ROUNDUP(pool_base + offset, data_align) - offset; - - pool->s.hdr_size = hdr_size; - pool->s.buf_base = buf_base; - pool->s.buf_size = tot_size; - pool->s.buf_offset = offset; - index = 0; - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, index); - pool->s.head = NULL; - pool_size -= buf_base - pool_base; - - while (pool_size > ODP_BUFS_PER_CHUNK * tot_size) { - int i; - - fill_hdr(chunk_hdr, pool, index, ODP_BUFFER_TYPE_CHUNK); - - index++; - - for (i = 0; 
i < ODP_BUFS_PER_CHUNK - 1; i++) { - odp_buffer_hdr_t *hdr = index_to_hdr(pool, index); - - fill_hdr(hdr, pool, index, buf_type); - - add_buf_index(chunk_hdr, index); - index++; - } - - add_chunk(pool, chunk_hdr); - - chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool, - index); - pool->s.num_bufs += ODP_BUFS_PER_CHUNK; - pool_size -= ODP_BUFS_PER_CHUNK * tot_size; + default: + return ODP_BUFFER_POOL_INVALID; } -} + /* Validate requested number of buffers against addressable limits */ + if (params->num_bufs > + (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE))) + return ODP_BUFFER_POOL_INVALID; -odp_buffer_pool_t odp_buffer_pool_create(const char *name, - void *base_addr, uint64_t size, - size_t buf_size, size_t buf_align, - int buf_type) -{ - odp_buffer_pool_t pool_hdl = ODP_BUFFER_POOL_INVALID; - pool_entry_t *pool; - uint32_t i; - + /* Find an unused buffer pool slot and iniitalize it as requested */ for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { pool = get_pool_entry(i); LOCK(&pool->s.lock); + if (pool->s.pool_shm != ODP_SHM_INVALID) { + UNLOCK(&pool->s.lock); + continue; + } - if (pool->s.buf_base == 0) { - /* found free pool */ + /* found free pool */ + size_t block_size, mdata_size, udata_size; + pool->s.flags.all = 0; + + if (name == NULL) { + pool->s.name[0] = 0; + } else { strncpy(pool->s.name, name, ODP_BUFFER_POOL_NAME_LEN - 1); pool->s.name[ODP_BUFFER_POOL_NAME_LEN - 1] = 0; - pool->s.pool_base_addr = base_addr; - pool->s.pool_size = size; - pool->s.user_size = buf_size; - pool->s.user_align = buf_align; - pool->s.buf_type = buf_type; - - link_bufs(pool); - - UNLOCK(&pool->s.lock); + pool->s.flags.has_name = 1; + } - pool_hdl = pool->s.pool_hdl; - break; + pool->s.params = *params; + pool->s.init_params = *init_params; + + mdata_size = params->num_bufs * buf_stride; + udata_size = params->num_bufs * udata_stride; + + /* Optimize for short buffers: Data stored in buffer hdr */ + if (blk_size <= ODP_MAX_INLINE_BUF) + block_size = 0; + 
else + block_size = params->num_bufs * blk_size; + + pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(mdata_size + + udata_size + + block_size); + + if (shm == ODP_SHM_NULL) { + shm = odp_shm_reserve(pool->s.name, + pool->s.pool_size, + ODP_PAGE_SIZE, 0); + if (shm == ODP_SHM_INVALID) { + UNLOCK(&pool->s.lock); + return ODP_BUFFER_POOL_INVALID; + } + pool->s.pool_base_addr = odp_shm_addr(shm); + } else { + odp_shm_info_t info; + if (odp_shm_info(shm, &info) != 0 || + info.size < pool->s.pool_size) { + UNLOCK(&pool->s.lock); + return ODP_BUFFER_POOL_INVALID; + } + pool->s.pool_base_addr = odp_shm_addr(shm); + void *page_addr = + ODP_ALIGN_ROUNDUP_PTR(pool->s.pool_base_addr, + ODP_PAGE_SIZE); + if (pool->s.pool_base_addr != page_addr) { + if (info.size < pool->s.pool_size + + ((size_t)page_addr - + (size_t)pool->s.pool_base_addr)) { + UNLOCK(&pool->s.lock); + return ODP_BUFFER_POOL_INVALID; + } + pool->s.pool_base_addr = page_addr; + } + pool->s.flags.user_supplied_shm = 1; } + pool->s.pool_shm = shm; + + /* Now safe to unlock since pool entry has been allocated */ UNLOCK(&pool->s.lock); + + pool->s.flags.unsegmented = unsegmented; + pool->s.flags.zeroized = zeroized; + pool->s.seg_size = unsegmented ? + blk_size : ODP_CONFIG_BUF_SEG_SIZE; + + uint8_t *udata_base_addr = pool->s.pool_base_addr + mdata_size; + uint8_t *block_base_addr = udata_base_addr + udata_size; + + pool->s.buf_stride = buf_stride; + _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL, + _ODP_MEMMODEL_RLX); + _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL, + _ODP_MEMMODEL_RLX); + + /* Initialization will increment these to their target vals */ + odp_atomic_store_u32(&pool->s.bufcount, 0); + odp_atomic_store_u32(&pool->s.blkcount, 0); + + uint8_t *buf = udata_base_addr - buf_stride; + uint8_t *udat = udata_stride == 0 ? 
NULL : + block_base_addr - udata_stride; + + /* Init buffer common header and add to pool buffer freelist */ + do { + odp_buffer_hdr_t *tmp = + (odp_buffer_hdr_t *)(void *)buf; + + /* Initialize buffer metadata */ + tmp->allocator = ODP_FREEBUF; + tmp->flags.all = 0; + tmp->flags.zeroized = zeroized; + tmp->size = 0; + odp_atomic_store_u32(&tmp->ref_count, 0); + tmp->type = params->buf_type; + tmp->pool_hdl = pool->s.pool_hdl; + tmp->udata_addr = (void *)udat; + tmp->udata_size = init_params->udata_size; + tmp->segcount = 0; + tmp->segsize = pool->s.seg_size; + tmp->handle.handle = odp_buffer_encode_handle(tmp); + + /* Set 1st seg addr for zero-len buffers */ + tmp->addr[0] = NULL; + + /* Special case for short buffer data */ + if (blk_size <= ODP_MAX_INLINE_BUF) { + tmp->flags.hdrdata = 1; + if (blk_size > 0) { + tmp->segcount = 1; + tmp->addr[0] = &tmp->addr[1]; + tmp->size = blk_size; + } + } + + /* Push buffer onto pool's freelist */ + ret_buf(&pool->s, tmp); + buf -= buf_stride; + udat -= udata_stride; + } while (buf >= pool->s.pool_base_addr); + + /* Form block freelist for pool */ + uint8_t *blk = pool->s.pool_base_addr + pool->s.pool_size - + pool->s.seg_size; + + if (blk_size > ODP_MAX_INLINE_BUF) + do { + ret_blk(&pool->s, blk); + blk -= pool->s.seg_size; + } while (blk >= block_base_addr); + + /* Initialize pool statistics counters */ + odp_atomic_store_u64(&pool->s.bufallocs, 0); + odp_atomic_store_u64(&pool->s.buffrees, 0); + odp_atomic_store_u64(&pool->s.blkallocs, 0); + odp_atomic_store_u64(&pool->s.blkfrees, 0); + odp_atomic_store_u64(&pool->s.bufempty, 0); + odp_atomic_store_u64(&pool->s.blkempty, 0); + odp_atomic_store_u64(&pool->s.high_wm_count, 0); + odp_atomic_store_u64(&pool->s.low_wm_count, 0); + + /* Reset other pool globals to initial state */ + pool->s.low_wm_assert = 0; + pool->s.quiesced = 0; + pool->s.low_wm_assert = 0; + pool->s.headroom = 0; + pool->s.tailroom = 0; + + /* Watermarks are hard-coded for now to control caching */ + 
pool->s.high_wm = params->num_bufs / 2; + pool->s.low_wm = params->num_bufs / 4; + + pool_hdl = pool->s.pool_hdl; + break; } return pool_hdl; @@ -432,145 +370,200 @@ odp_buffer_pool_t odp_buffer_pool_lookup(const char *name) return ODP_BUFFER_POOL_INVALID; } - -odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl) +int odp_buffer_pool_info(odp_buffer_pool_t pool_hdl, + odp_shm_t *shm, + odp_buffer_pool_info_t *info) { - pool_entry_t *pool; - odp_buffer_chunk_hdr_t *chunk; - odp_buffer_bits_t handle; uint32_t pool_id = pool_handle_to_index(pool_hdl); + pool_entry_t *pool = get_pool_entry(pool_id); - pool = get_pool_entry(pool_id); - chunk = local_chunk[pool_id]; + if (pool == NULL || info == NULL) + return -1; - if (chunk == NULL) { - LOCK(&pool->s.lock); - chunk = rem_chunk(pool); - UNLOCK(&pool->s.lock); + *shm = pool->s.flags.user_supplied_shm ? + pool->s.pool_shm : ODP_SHM_NULL; + info->name = pool->s.name; + info->params.buf_size = pool->s.params.buf_size; + info->params.buf_align = pool->s.params.buf_align; + info->params.num_bufs = pool->s.params.num_bufs; + info->params.buf_type = pool->s.params.buf_type; - if (chunk == NULL) - return ODP_BUFFER_INVALID; + return 0; +} - local_chunk[pool_id] = chunk; - } +int odp_buffer_pool_destroy(odp_buffer_pool_t pool_hdl) +{ + uint32_t pool_id = pool_handle_to_index(pool_hdl); + pool_entry_t *pool = get_pool_entry(pool_id); - if (chunk->chunk.num_bufs == 0) { - /* give the chunk buffer */ - local_chunk[pool_id] = NULL; - chunk->buf_hdr.type = pool->s.buf_type; + if (pool == NULL) + return -1; - handle = chunk->buf_hdr.handle; - } else { - odp_buffer_hdr_t *hdr; - uint32_t index; - index = rem_buf_index(chunk); - hdr = index_to_hdr(pool, index); + LOCK(&pool->s.lock); - handle = hdr->handle; + if (pool->s.pool_shm == ODP_SHM_INVALID || + odp_atomic_load_u32(&pool->s.bufcount) < pool->s.params.num_bufs || + pool->s.flags.predefined) { + UNLOCK(&pool->s.lock); + return -1; } - return handle.u32; -} + if 
(!pool->s.flags.user_supplied_shm) + odp_shm_free(pool->s.pool_shm); + pool->s.pool_shm = ODP_SHM_INVALID; + UNLOCK(&pool->s.lock); -void odp_buffer_free(odp_buffer_t buf) + return 0; +} + +odp_buffer_t buffer_alloc(odp_buffer_pool_t pool_hdl, size_t size) { - odp_buffer_hdr_t *hdr; - uint32_t pool_id; - pool_entry_t *pool; - odp_buffer_chunk_hdr_t *chunk_hdr; + uint32_t pool_id = pool_handle_to_index(pool_hdl); + pool_entry_t *pool = get_pool_entry(pool_id); + size_t totsize = pool->s.headroom + size + pool->s.tailroom; + odp_anybuf_t *buf; - hdr = odp_buf_to_hdr(buf); - pool_id = pool_handle_to_index(hdr->pool_hdl); - pool = get_pool_entry(pool_id); - chunk_hdr = local_chunk[pool_id]; + /* Reject oversized allocation requests */ + if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) || + (!pool->s.flags.unsegmented && totsize > ODP_CONFIG_BUF_MAX_SIZE)) + return ODP_BUFFER_INVALID; - if (chunk_hdr && chunk_hdr->chunk.num_bufs == ODP_BUFS_PER_CHUNK - 1) { - /* Current chunk is full. 
Push back to the pool */ - LOCK(&pool->s.lock); - add_chunk(pool, chunk_hdr); - UNLOCK(&pool->s.lock); - chunk_hdr = NULL; + /* Try to satisfy request from the local cache */ + buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id], + &pool->s, totsize); + + /* If cache is empty, satisfy request from the pool */ + if (odp_unlikely(buf == NULL)) { + buf = (odp_anybuf_t *)(void *)get_buf(&pool->s); + + if (odp_unlikely(buf == NULL)) + return ODP_BUFFER_INVALID; + + /* Get blocks for this buffer, if pool uses application data */ + if (buf->buf.size < totsize) { + size_t needed = totsize - buf->buf.size; + do { + uint8_t *blk = get_blk(&pool->s); + if (blk == NULL) { + ret_buf(&pool->s, &buf->buf); + return ODP_BUFFER_INVALID; + } + buf->buf.addr[buf->buf.segcount++] = blk; + needed -= pool->s.seg_size; + } while ((ssize_t)needed > 0); + buf->buf.size = buf->buf.segcount * pool->s.seg_size; + } } - if (chunk_hdr == NULL) { - /* Use this buffer */ - chunk_hdr = (odp_buffer_chunk_hdr_t *)hdr; - local_chunk[pool_id] = chunk_hdr; - chunk_hdr->chunk.num_bufs = 0; - } else { - /* Add to current chunk */ - add_buf_index(chunk_hdr, hdr->index); + /* By default, buffers inherit their pool's zeroization setting */ + buf->buf.flags.zeroized = pool->s.flags.zeroized; + + if (buf->buf.type == ODP_BUFFER_TYPE_PACKET) { + packet_init(pool, &buf->pkt, size); + + if (pool->s.init_params.buf_init != NULL) + (*pool->s.init_params.buf_init) + (buf->buf.handle.handle, + pool->s.init_params.buf_init_arg); } + + return odp_hdr_to_buf(&buf->buf); } +odp_buffer_t odp_buffer_alloc(odp_buffer_pool_t pool_hdl) +{ + return buffer_alloc(pool_hdl, + odp_pool_to_entry(pool_hdl)->s.params.buf_size); +} -odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) +void odp_buffer_free(odp_buffer_t buf) { - odp_buffer_hdr_t *hdr; + odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf); + pool_entry_t *pool = odp_buf_to_pool(buf_hdr); - hdr = odp_buf_to_hdr(buf); - return hdr->pool_hdl; + if 
(odp_unlikely(pool->s.low_wm_assert)) + ret_buf(&pool->s, buf_hdr); + else + ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr); } +void _odp_flush_caches(void) +{ + int i; + + for (i = 0; i < ODP_CONFIG_BUFFER_POOLS; i++) { + pool_entry_t *pool = get_pool_entry(i); + flush_cache(&local_cache[i], &pool->s); + } +} void odp_buffer_pool_print(odp_buffer_pool_t pool_hdl) { pool_entry_t *pool; - odp_buffer_chunk_hdr_t *chunk_hdr; - uint32_t i; uint32_t pool_id; pool_id = pool_handle_to_index(pool_hdl); pool = get_pool_entry(pool_id); - ODP_PRINT("Pool info\n"); - ODP_PRINT("---------\n"); - ODP_PRINT(" pool %i\n", pool->s.pool_hdl); - ODP_PRINT(" name %s\n", pool->s.name); - ODP_PRINT(" pool base %p\n", pool->s.pool_base_addr); - ODP_PRINT(" buf base 0x%"PRIxPTR"\n", pool->s.buf_base); - ODP_PRINT(" pool size 0x%"PRIx64"\n", pool->s.pool_size); - ODP_PRINT(" buf size %zu\n", pool->s.user_size); - ODP_PRINT(" buf align %zu\n", pool->s.user_align); - ODP_PRINT(" hdr size %zu\n", pool->s.hdr_size); - ODP_PRINT(" alloc size %zu\n", pool->s.buf_size); - ODP_PRINT(" offset to hdr %zu\n", pool->s.buf_offset); - ODP_PRINT(" num bufs %"PRIu64"\n", pool->s.num_bufs); - ODP_PRINT(" free bufs %"PRIu64"\n", pool->s.free_bufs); - - /* first chunk */ - chunk_hdr = pool->s.head; - - if (chunk_hdr == NULL) { - ODP_ERR(" POOL EMPTY\n"); - return; - } - - ODP_PRINT("\n First chunk\n"); - - for (i = 0; i < chunk_hdr->chunk.num_bufs - 1; i++) { - uint32_t index; - odp_buffer_hdr_t *hdr; - - index = chunk_hdr->chunk.buf_index[i]; - hdr = index_to_hdr(pool, index); - - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, hdr->addr, - index); - } - - ODP_PRINT(" [%i] addr %p, id %"PRIu32"\n", i, chunk_hdr->buf_hdr.addr, - chunk_hdr->buf_hdr.index); - - /* next chunk */ - chunk_hdr = next_chunk(pool, chunk_hdr); + uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount); + uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount); + uint64_t bufallocs = odp_atomic_load_u64(&pool->s.bufallocs); 
+ uint64_t buffrees = odp_atomic_load_u64(&pool->s.buffrees); + uint64_t blkallocs = odp_atomic_load_u64(&pool->s.blkallocs); + uint64_t blkfrees = odp_atomic_load_u64(&pool->s.blkfrees); + uint64_t bufempty = odp_atomic_load_u64(&pool->s.bufempty); + uint64_t blkempty = odp_atomic_load_u64(&pool->s.blkempty); + uint64_t hiwmct = odp_atomic_load_u64(&pool->s.high_wm_count); + uint64_t lowmct = odp_atomic_load_u64(&pool->s.low_wm_count); + + ODP_DBG("Pool info\n"); + ODP_DBG("---------\n"); + ODP_DBG(" pool %i\n", pool->s.pool_hdl); + ODP_DBG(" name %s\n", + pool->s.flags.has_name ? pool->s.name : "Unnamed Pool"); + ODP_DBG(" pool type %s\n", + pool->s.params.buf_type == ODP_BUFFER_TYPE_RAW ? "raw" : + (pool->s.params.buf_type == ODP_BUFFER_TYPE_PACKET ? "packet" : + (pool->s.params.buf_type == ODP_BUFFER_TYPE_TIMEOUT ? "timeout" : + (pool->s.params.buf_type == ODP_BUFFER_TYPE_ANY ? "any" : + "unknown")))); + ODP_DBG(" pool storage %sODP managed\n", + pool->s.flags.user_supplied_shm ? + "application provided, " : ""); + ODP_DBG(" pool status %s\n", + pool->s.quiesced ? "quiesced" : "active"); + ODP_DBG(" pool opts %s, %s, %s\n", + pool->s.flags.unsegmented ? "unsegmented" : "segmented", + pool->s.flags.zeroized ? "zeroized" : "non-zeroized", + pool->s.flags.predefined ? "predefined" : "created"); + ODP_DBG(" pool base %p\n", pool->s.pool_base_addr); + ODP_DBG(" pool size %zu (%zu pages)\n", + pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE); + ODP_DBG(" udata size %zu\n", pool->s.init_params.udata_size); + ODP_DBG(" buf size %zu\n", pool->s.params.buf_size); + ODP_DBG(" num bufs %u\n", pool->s.params.num_bufs); + ODP_DBG(" bufs available %u %s\n", + bufcount, + pool->s.low_wm_assert ? 
" **low wm asserted**" : ""); + ODP_DBG(" bufs in use %u\n", pool->s.params.num_bufs - bufcount); + ODP_DBG(" buf allocs %lu\n", bufallocs); + ODP_DBG(" buf frees %lu\n", buffrees); + ODP_DBG(" buf empty %lu\n", bufempty); + ODP_DBG(" blk size %zu\n", + pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0); + ODP_DBG(" blks available %u\n", blkcount); + ODP_DBG(" blk allocs %lu\n", blkallocs); + ODP_DBG(" blk frees %lu\n", blkfrees); + ODP_DBG(" blk empty %lu\n", blkempty); + ODP_DBG(" high wm value %lu\n", pool->s.high_wm); + ODP_DBG(" high wm count %lu\n", hiwmct); + ODP_DBG(" low wm value %lu\n", pool->s.low_wm); + ODP_DBG(" low wm count %lu\n", lowmct); +} - if (chunk_hdr) { - ODP_PRINT(" Next chunk\n"); - ODP_PRINT(" addr %p, id %"PRIu32"\n", chunk_hdr->buf_hdr.addr, - chunk_hdr->buf_hdr.index); - } - ODP_PRINT("\n"); +odp_buffer_pool_t odp_buffer_pool(odp_buffer_t buf) +{ + return odp_buf_to_hdr(buf)->pool_hdl; } diff --git a/platform/linux-generic/odp_linux.c b/platform/linux-generic/odp_linux.c index ecd77b3..95761a9 100644 --- a/platform/linux-generic/odp_linux.c +++ b/platform/linux-generic/odp_linux.c @@ -43,7 +43,9 @@ static void *odp_run_start_routine(void *arg) return NULL; } - return start_args->start_routine(start_args->arg); + void *ret = start_args->start_routine(start_args->arg); + _odp_flush_caches(); + return ret; } diff --git a/platform/linux-generic/odp_packet.c b/platform/linux-generic/odp_packet.c index a1bf18e..726e086 100644 --- a/platform/linux-generic/odp_packet.c +++ b/platform/linux-generic/odp_packet.c @@ -24,17 +24,9 @@ static inline uint8_t parse_ipv6(odp_packet_hdr_t *pkt_hdr, void odp_packet_init(odp_packet_t pkt) { odp_packet_hdr_t *const pkt_hdr = odp_packet_hdr(pkt); - const size_t start_offset = ODP_FIELD_SIZEOF(odp_packet_hdr_t, buf_hdr); - uint8_t *start; - size_t len; - - start = (uint8_t *)pkt_hdr + start_offset; - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; - memset(start, 0, len); + 
pool_entry_t *pool = odp_buf_to_pool(&pkt_hdr->buf_hdr); - pkt_hdr->l2_offset = ODP_PACKET_OFFSET_INVALID; - pkt_hdr->l3_offset = ODP_PACKET_OFFSET_INVALID; - pkt_hdr->l4_offset = ODP_PACKET_OFFSET_INVALID; + packet_init(pool, pkt_hdr, 0); } odp_packet_t odp_packet_from_buffer(odp_buffer_t buf) @@ -64,7 +56,7 @@ uint8_t *odp_packet_addr(odp_packet_t pkt) uint8_t *odp_packet_data(odp_packet_t pkt) { - return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->frame_offset; + return odp_packet_addr(pkt) + odp_packet_hdr(pkt)->headroom; } @@ -131,20 +123,13 @@ void odp_packet_set_l4_offset(odp_packet_t pkt, size_t offset) int odp_packet_is_segmented(odp_packet_t pkt) { - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); - - if (buf_hdr->scatter.num_bufs == 0) - return 0; - else - return 1; + return odp_packet_hdr(pkt)->buf_hdr.segcount > 1; } int odp_packet_seg_count(odp_packet_t pkt) { - odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr((odp_buffer_t)pkt); - - return (int)buf_hdr->scatter.num_bufs + 1; + return odp_packet_hdr(pkt)->buf_hdr.segcount; } @@ -170,7 +155,7 @@ void odp_packet_parse(odp_packet_t pkt, size_t len, size_t frame_offset) uint8_t ip_proto = 0; pkt_hdr->input_flags.eth = 1; - pkt_hdr->frame_offset = frame_offset; + pkt_hdr->l2_offset = frame_offset; pkt_hdr->frame_len = len; if (len > ODPH_ETH_LEN_MAX) @@ -330,8 +315,6 @@ void odp_packet_print(odp_packet_t pkt) len += snprintf(&str[len], n-len, " output_flags 0x%x\n", hdr->output_flags.all); len += snprintf(&str[len], n-len, - " frame_offset %u\n", hdr->frame_offset); - len += snprintf(&str[len], n-len, " l2_offset %u\n", hdr->l2_offset); len += snprintf(&str[len], n-len, " l3_offset %u\n", hdr->l3_offset); @@ -358,14 +341,13 @@ int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src) if (pkt_dst == ODP_PACKET_INVALID || pkt_src == ODP_PACKET_INVALID) return -1; - if (pkt_hdr_dst->buf_hdr.size < - pkt_hdr_src->frame_len + pkt_hdr_src->frame_offset) + if (pkt_hdr_dst->buf_hdr.size < 
pkt_hdr_src->frame_len) return -1; /* Copy packet header */ start_dst = (uint8_t *)pkt_hdr_dst + start_offset; start_src = (uint8_t *)pkt_hdr_src + start_offset; - len = ODP_OFFSETOF(odp_packet_hdr_t, buf_data) - start_offset; + len = sizeof(odp_packet_hdr_t) - start_offset; memcpy(start_dst, start_src, len); /* Copy frame payload */ @@ -374,13 +356,6 @@ int odp_packet_copy(odp_packet_t pkt_dst, odp_packet_t pkt_src) len = pkt_hdr_src->frame_len; memcpy(start_dst, start_src, len); - /* Copy useful things from the buffer header */ - pkt_hdr_dst->buf_hdr.cur_offset = pkt_hdr_src->buf_hdr.cur_offset; - - /* Create a copy of the scatter list */ - odp_buffer_copy_scatter(odp_packet_to_buffer(pkt_dst), - odp_packet_to_buffer(pkt_src)); - return 0; } diff --git a/platform/linux-generic/odp_queue.c b/platform/linux-generic/odp_queue.c index c278094..a7c5e42 100644 --- a/platform/linux-generic/odp_queue.c +++ b/platform/linux-generic/odp_queue.c @@ -11,6 +11,7 @@ #include <odp_buffer.h> #include <odp_buffer_internal.h> #include <odp_buffer_pool_internal.h> +#include <odp_buffer_inlines.h> #include <odp_internal.h> #include <odp_shared_memory.h> #include <odp_schedule_internal.h> diff --git a/platform/linux-generic/odp_schedule.c b/platform/linux-generic/odp_schedule.c index 7c09c23..2f0cfe4 100644 --- a/platform/linux-generic/odp_schedule.c +++ b/platform/linux-generic/odp_schedule.c @@ -83,8 +83,8 @@ int odp_schedule_init_global(void) { odp_shm_t shm; odp_buffer_pool_t pool; - void *pool_base; int i, j; + odp_buffer_pool_param_t params; ODP_DBG("Schedule init ... 
"); @@ -99,20 +99,12 @@ int odp_schedule_init_global(void) return -1; } - shm = odp_shm_reserve("odp_sched_pool", - SCHED_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); + params.buf_size = sizeof(queue_desc_t); + params.buf_align = ODP_CACHE_LINE_SIZE; + params.num_bufs = SCHED_POOL_SIZE/sizeof(queue_desc_t); + params.buf_type = ODP_BUFFER_TYPE_RAW; - pool_base = odp_shm_addr(shm); - - if (pool_base == NULL) { - ODP_ERR("Schedule init: Shm reserve failed.\n"); - return -1; - } - - pool = odp_buffer_pool_create("odp_sched_pool", pool_base, - SCHED_POOL_SIZE, sizeof(queue_desc_t), - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_RAW); + pool = odp_buffer_pool_create("odp_sched_pool", ODP_SHM_NULL, &params); if (pool == ODP_BUFFER_POOL_INVALID) { ODP_ERR("Schedule init: Pool create failed.\n"); diff --git a/platform/linux-generic/odp_timer.c b/platform/linux-generic/odp_timer.c index a4fef58..7bd6874 100644 --- a/platform/linux-generic/odp_timer.c +++ b/platform/linux-generic/odp_timer.c @@ -5,9 +5,10 @@ */ #include <odp_timer.h> -#include <odp_timer_internal.h> #include <odp_time.h> #include <odp_buffer_pool_internal.h> +#include <odp_buffer_inlines.h> +#include <odp_timer_internal.h> #include <odp_internal.h> #include <odp_atomic.h> #include <odp_spinlock.h> diff --git a/test/api_test/odp_timer_ping.c b/test/api_test/odp_timer_ping.c index 48f1885..aa2a490 100644 --- a/test/api_test/odp_timer_ping.c +++ b/test/api_test/odp_timer_ping.c @@ -321,9 +321,8 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED) ping_arg_t pingarg; odp_queue_t queue; odp_buffer_pool_t pool; - void *pool_base; int i; - odp_shm_t shm; + odp_buffer_pool_param_t params; if (odp_test_global_init() != 0) return -1; @@ -336,14 +335,14 @@ int main(int argc ODP_UNUSED, char *argv[] ODP_UNUSED) /* * Create message pool */ - shm = odp_shm_reserve("msg_pool", - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); - pool_base = odp_shm_addr(shm); - - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, - BUF_SIZE, - 
ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_RAW); + + params.buf_size = BUF_SIZE; + params.buf_align = 0; + params.num_bufs = MSG_POOL_SIZE/BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_RAW; + + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, &params); + if (pool == ODP_BUFFER_POOL_INVALID) { LOG_ERR("Pool create failed.\n"); return -1; diff --git a/test/validation/odp_crypto.c b/test/validation/odp_crypto.c index 03ca438..72cf0f0 100644 --- a/test/validation/odp_crypto.c +++ b/test/validation/odp_crypto.c @@ -25,26 +25,17 @@ CU_SuiteInfo odp_testsuites[] = { int tests_global_init(void) { - odp_shm_t shm; - void *pool_base; + odp_buffer_pool_param_t params; odp_buffer_pool_t pool; odp_queue_t out_queue; - shm = odp_shm_reserve("shm_packet_pool", - SHM_PKT_POOL_SIZE, - ODP_CACHE_LINE_SIZE, 0); + params.buf_size = SHM_PKT_POOL_BUF_SIZE; + params.buf_align = 0; + params.num_bufs = SHM_PKT_POOL_SIZE/SHM_PKT_POOL_BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_PACKET; - pool_base = odp_shm_addr(shm); - if (!pool_base) { - fprintf(stderr, "Packet pool allocation failed.\n"); - return -1; - } + pool = odp_buffer_pool_create("packet_pool", ODP_SHM_NULL, &params); - pool = odp_buffer_pool_create("packet_pool", pool_base, - SHM_PKT_POOL_SIZE, - SHM_PKT_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_PACKET); if (ODP_BUFFER_POOL_INVALID == pool) { fprintf(stderr, "Packet pool creation failed.\n"); return -1; @@ -55,20 +46,14 @@ int tests_global_init(void) fprintf(stderr, "Crypto outq creation failed.\n"); return -1; } - shm = odp_shm_reserve("shm_compl_pool", - SHM_COMPL_POOL_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_SHM_SW_ONLY); - pool_base = odp_shm_addr(shm); - if (!pool_base) { - fprintf(stderr, "Completion pool allocation failed.\n"); - return -1; - } - pool = odp_buffer_pool_create("compl_pool", pool_base, - SHM_COMPL_POOL_SIZE, - SHM_COMPL_POOL_BUF_SIZE, - ODP_CACHE_LINE_SIZE, - ODP_BUFFER_TYPE_RAW); + + params.buf_size = SHM_COMPL_POOL_BUF_SIZE; + params.buf_align = 0; + 
params.num_bufs = SHM_COMPL_POOL_SIZE/SHM_COMPL_POOL_BUF_SIZE; + params.buf_type = ODP_BUFFER_TYPE_RAW; + + pool = odp_buffer_pool_create("compl_pool", ODP_SHM_NULL, &params); + if (ODP_BUFFER_POOL_INVALID == pool) { fprintf(stderr, "Completion pool creation failed.\n"); return -1; diff --git a/test/validation/odp_queue.c b/test/validation/odp_queue.c index 2c8fe80..6e05ad0 100644 --- a/test/validation/odp_queue.c +++ b/test/validation/odp_queue.c @@ -16,21 +16,14 @@ static int queue_contest = 0xff; static int init_queue_suite(void) { odp_buffer_pool_t pool; - void *pool_base; - odp_shm_t shm; + odp_buffer_pool_param_t params; - shm = odp_shm_reserve("msg_pool", - MSG_POOL_SIZE, ODP_CACHE_LINE_SIZE, 0); + params.buf_size = 0; + params.buf_align = ODP_CACHE_LINE_SIZE; + params.num_bufs = 1024 * 10; + params.buf_type = ODP_BUFFER_TYPE_RAW; - pool_base = odp_shm_addr(shm); - - if (NULL == pool_base) { - printf("Shared memory reserve failed.\n"); - return -1; - } - - pool = odp_buffer_pool_create("msg_pool", pool_base, MSG_POOL_SIZE, 0, - ODP_CACHE_LINE_SIZE, ODP_BUFFER_TYPE_RAW); + pool = odp_buffer_pool_create("msg_pool", ODP_SHM_NULL, &params); if (ODP_BUFFER_POOL_INVALID == pool) { printf("Pool create failed.\n");
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org> --- Petri: Please review the following files here: platform/linux-generic/include/api/odp_buffer.h platform/linux-generic/include/api/odp_buffer_pool.h platform/linux-generic/include/api/odp_config.h This patch is complete and compilable/testable. It is RFC pending Petri approval of the public API headers and recommendations for final packaging. example/generator/odp_generator.c | 19 +- example/ipsec/odp_ipsec.c | 57 +- example/l2fwd/odp_l2fwd.c | 19 +- example/odp_example/odp_example.c | 18 +- example/packet/odp_pktio.c | 19 +- example/timer/odp_timer_test.c | 13 +- platform/linux-generic/include/api/odp_buffer.h | 3 +- .../linux-generic/include/api/odp_buffer_pool.h | 103 ++- platform/linux-generic/include/api/odp_config.h | 19 + .../linux-generic/include/api/odp_platform_types.h | 12 + .../linux-generic/include/api/odp_shared_memory.h | 10 +- .../linux-generic/include/odp_buffer_inlines.h | 150 ++++ .../linux-generic/include/odp_buffer_internal.h | 150 ++-- .../include/odp_buffer_pool_internal.h | 351 ++++++++-- platform/linux-generic/include/odp_internal.h | 2 + .../linux-generic/include/odp_packet_internal.h | 50 +- .../linux-generic/include/odp_timer_internal.h | 11 +- platform/linux-generic/odp_buffer.c | 33 +- platform/linux-generic/odp_buffer_pool.c | 777 ++++++++++----------- platform/linux-generic/odp_linux.c | 4 +- platform/linux-generic/odp_packet.c | 41 +- platform/linux-generic/odp_queue.c | 1 + platform/linux-generic/odp_schedule.c | 20 +- platform/linux-generic/odp_timer.c | 3 +- test/api_test/odp_timer_ping.c | 19 +- test/validation/odp_crypto.c | 43 +- test/validation/odp_queue.c | 19 +- 27 files changed, 1208 insertions(+), 758 deletions(-) create mode 100644 platform/linux-generic/include/odp_buffer_inlines.h