@@ -105,7 +105,6 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_atomic_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_buffer_inlines.h \
${top_srcdir}/platform/linux-generic/include/odp_buffer_internal.h \
- ${top_srcdir}/platform/linux-generic/include/odp_buffer_pool_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_datamodel.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_inlines.h \
${top_srcdir}/platform/linux-generic/include/odp_classification_internal.h \
@@ -116,6 +115,7 @@ noinst_HEADERS = \
${top_srcdir}/platform/linux-generic/include/odp_packet_io_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_io_queue.h \
${top_srcdir}/platform/linux-generic/include/odp_packet_socket.h \
+ ${top_srcdir}/platform/linux-generic/include/odp_pool_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_queue_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_schedule_internal.h \
${top_srcdir}/platform/linux-generic/include/odp_spin_internal.h \
@@ -137,7 +137,6 @@ subdirheaders_HEADERS = \
__LIB__libodp_la_SOURCES = \
odp_barrier.c \
odp_buffer.c \
- odp_buffer_pool.c \
odp_classification.c \
odp_cpumask.c \
odp_crypto.c \
@@ -150,6 +149,7 @@ __LIB__libodp_la_SOURCES = \
odp_packet_flags.c \
odp_packet_io.c \
odp_packet_socket.c \
+ odp_pool.c \
odp_queue.c \
odp_ring.c \
odp_rwlock.c \
@@ -18,7 +18,7 @@ extern "C" {
#endif
#include <odp_buffer_internal.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
static inline odp_buffer_t odp_buffer_encode_handle(odp_buffer_hdr_t *hdr)
{
deleted file mode 100644
@@ -1,380 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-
-/**
- * @file
- *
- * ODP buffer pool - internal header
- */
-
-#ifndef ODP_BUFFER_POOL_INTERNAL_H_
-#define ODP_BUFFER_POOL_INTERNAL_H_
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-#include <odp/std_types.h>
-#include <odp/align.h>
-#include <odp_align_internal.h>
-#include <odp/pool.h>
-#include <odp_buffer_internal.h>
-#include <odp/hints.h>
-#include <odp/config.h>
-#include <odp/debug.h>
-#include <odp/shared_memory.h>
-#include <odp/atomic.h>
-#include <odp_atomic_internal.h>
-#include <string.h>
-
-/**
- * Buffer initialization routine prototype
- *
- * @note Routines of this type MAY be passed as part of the
- * _odp_buffer_pool_init_t structure to be called whenever a
- * buffer is allocated to initialize the user metadata
- * associated with that buffer.
- */
-typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg);
-
-/**
- * Buffer pool initialization parameters
- * Used to communicate buffer pool initialization options. Internal for now.
- */
-typedef struct _odp_buffer_pool_init_t {
- size_t udata_size; /**< Size of user metadata for each buffer */
- _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */
- void *buf_init_arg; /**< Argument to be passed to buf_init() */
-} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */
-
-/* Local cache for buffer alloc/free acceleration */
-typedef struct local_cache_t {
- odp_buffer_hdr_t *buf_freelist; /* The local cache */
- uint64_t bufallocs; /* Local buffer alloc count */
- uint64_t buffrees; /* Local buffer free count */
-} local_cache_t;
-
-/* Use ticketlock instead of spinlock */
-#define POOL_USE_TICKETLOCK
-
-/* Extra error checks */
-/* #define POOL_ERROR_CHECK */
-
-
-#ifdef POOL_USE_TICKETLOCK
-#include <odp/ticketlock.h>
-#define POOL_LOCK(a) odp_ticketlock_lock(a)
-#define POOL_UNLOCK(a) odp_ticketlock_unlock(a)
-#define POOL_LOCK_INIT(a) odp_ticketlock_init(a)
-#else
-#include <odp/spinlock.h>
-#define POOL_LOCK(a) odp_spinlock_lock(a)
-#define POOL_UNLOCK(a) odp_spinlock_unlock(a)
-#define POOL_LOCK_INIT(a) odp_spinlock_init(a)
-#endif
-
-struct pool_entry_s {
-#ifdef POOL_USE_TICKETLOCK
- odp_ticketlock_t lock ODP_ALIGNED_CACHE;
-#else
- odp_spinlock_t lock ODP_ALIGNED_CACHE;
-#endif
-
- char name[ODP_POOL_NAME_LEN];
- odp_pool_param_t params;
- _odp_buffer_pool_init_t init_params;
- odp_pool_t pool_hdl;
- uint32_t pool_id;
- odp_shm_t pool_shm;
- union {
- uint32_t all;
- struct {
- uint32_t has_name:1;
- uint32_t user_supplied_shm:1;
- uint32_t unsegmented:1;
- uint32_t zeroized:1;
- uint32_t predefined:1;
- };
- } flags;
- uint32_t quiesced;
- uint32_t low_wm_assert;
- uint8_t *pool_base_addr;
- uint8_t *pool_mdata_addr;
- size_t pool_size;
- uint32_t buf_align;
- uint32_t buf_stride;
- _odp_atomic_ptr_t buf_freelist;
- _odp_atomic_ptr_t blk_freelist;
- odp_atomic_u32_t bufcount;
- odp_atomic_u32_t blkcount;
- odp_atomic_u64_t bufallocs;
- odp_atomic_u64_t buffrees;
- odp_atomic_u64_t blkallocs;
- odp_atomic_u64_t blkfrees;
- odp_atomic_u64_t bufempty;
- odp_atomic_u64_t blkempty;
- odp_atomic_u64_t high_wm_count;
- odp_atomic_u64_t low_wm_count;
- uint32_t seg_size;
- uint32_t high_wm;
- uint32_t low_wm;
- uint32_t headroom;
- uint32_t tailroom;
-};
-
-typedef union pool_entry_u {
- struct pool_entry_s s;
-
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
-} pool_entry_t;
-
-extern void *pool_entry_ptr[];
-
-#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1)
-#define buffer_is_secure(buf) (buf->flags.zeroized)
-#define pool_is_secure(pool) (pool->flags.zeroized)
-#else
-#define buffer_is_secure(buf) 0
-#define pool_is_secure(pool) 0
-#endif
-
-#define TAG_ALIGN ((size_t)16)
-
-#define odp_cs(ptr, old, new) \
- _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void *)new, \
- _ODP_MEMMODEL_SC, \
- _ODP_MEMMODEL_SC)
-
-/* Helper functions for pointer tagging to avoid ABA race conditions */
-#define odp_tag(ptr) \
- (((size_t)ptr) & (TAG_ALIGN - 1))
-
-#define odp_detag(ptr) \
- ((void *)(((size_t)ptr) & -TAG_ALIGN))
-
-#define odp_retag(ptr, tag) \
- ((void *)(((size_t)ptr) | odp_tag(tag)))
-
-
-static inline void *get_blk(struct pool_entry_s *pool)
-{
- void *oldhead, *myhead, *newhead;
-
- oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ);
-
- do {
- size_t tag = odp_tag(oldhead);
- myhead = odp_detag(oldhead);
- if (odp_unlikely(myhead == NULL))
- break;
- newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + 1);
- } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0);
-
- if (odp_unlikely(myhead == NULL))
- odp_atomic_inc_u64(&pool->blkempty);
- else
- odp_atomic_dec_u32(&pool->blkcount);
-
- return (void *)myhead;
-}
-
-static inline void ret_blk(struct pool_entry_s *pool, void *block)
-{
- void *oldhead, *myhead, *myblock;
-
- oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ);
-
- do {
- size_t tag = odp_tag(oldhead);
- myhead = odp_detag(oldhead);
- ((odp_buf_blk_t *)block)->next = myhead;
- myblock = odp_retag(block, tag + 1);
- } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0);
-
- odp_atomic_inc_u32(&pool->blkcount);
- odp_atomic_inc_u64(&pool->blkfrees);
-}
-
-static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool)
-{
- odp_buffer_hdr_t *oldhead, *myhead, *newhead;
-
- oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ);
-
- do {
- size_t tag = odp_tag(oldhead);
- myhead = odp_detag(oldhead);
- if (odp_unlikely(myhead == NULL))
- break;
- newhead = odp_retag(myhead->next, tag + 1);
- } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0);
-
- if (odp_unlikely(myhead == NULL)) {
- odp_atomic_inc_u64(&pool->bufempty);
- } else {
- uint64_t bufcount =
- odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1;
-
- /* Check for low watermark condition */
- if (bufcount == pool->low_wm && !pool->low_wm_assert) {
- pool->low_wm_assert = 1;
- odp_atomic_inc_u64(&pool->low_wm_count);
- }
-
- odp_atomic_inc_u64(&pool->bufallocs);
- myhead->next = myhead; /* Mark buffer allocated */
- myhead->allocator = odp_thread_id();
- }
-
- return (void *)myhead;
-}
-
-static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
-{
- odp_buffer_hdr_t *oldhead, *myhead, *mybuf;
-
- buf->allocator = ODP_FREEBUF; /* Mark buffer free */
-
- if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) {
- while (buf->segcount > 0) {
- if (buffer_is_secure(buf) || pool_is_secure(pool))
- memset(buf->addr[buf->segcount - 1],
- 0, buf->segsize);
- ret_blk(pool, buf->addr[--buf->segcount]);
- }
- buf->size = 0;
- }
-
- oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ);
-
- do {
- size_t tag = odp_tag(oldhead);
- myhead = odp_detag(oldhead);
- buf->next = myhead;
- mybuf = odp_retag(buf, tag + 1);
- } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0);
-
- uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1;
-
- /* Check if low watermark condition should be deasserted */
- if (bufcount == pool->high_wm && pool->low_wm_assert) {
- pool->low_wm_assert = 0;
- odp_atomic_inc_u64(&pool->high_wm_count);
- }
-
- odp_atomic_inc_u64(&pool->buffrees);
-}
-
-static inline void *get_local_buf(local_cache_t *buf_cache,
- struct pool_entry_s *pool,
- size_t totsize)
-{
- odp_buffer_hdr_t *buf = buf_cache->buf_freelist;
-
- if (odp_likely(buf != NULL)) {
- buf_cache->buf_freelist = buf->next;
-
- if (odp_unlikely(buf->size < totsize)) {
- intmax_t needed = totsize - buf->size;
-
- do {
- void *blk = get_blk(pool);
- if (odp_unlikely(blk == NULL)) {
- ret_buf(pool, buf);
- buf_cache->buffrees--;
- return NULL;
- }
- buf->addr[buf->segcount++] = blk;
- needed -= pool->seg_size;
- } while (needed > 0);
-
- buf->size = buf->segcount * pool->seg_size;
- }
-
- buf_cache->bufallocs++;
- buf->allocator = odp_thread_id(); /* Mark buffer allocated */
- }
-
- return buf;
-}
-
-static inline void ret_local_buf(local_cache_t *buf_cache,
- odp_buffer_hdr_t *buf)
-{
- buf->allocator = ODP_FREEBUF;
- buf->next = buf_cache->buf_freelist;
- buf_cache->buf_freelist = buf;
-
- buf_cache->buffrees++;
-}
-
-static inline void flush_cache(local_cache_t *buf_cache,
- struct pool_entry_s *pool)
-{
- odp_buffer_hdr_t *buf = buf_cache->buf_freelist;
- uint32_t flush_count = 0;
-
- while (buf != NULL) {
- odp_buffer_hdr_t *next = buf->next;
- ret_buf(pool, buf);
- buf = next;
- flush_count++;
- }
-
- odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs);
- odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - flush_count);
-
- buf_cache->buf_freelist = NULL;
- buf_cache->bufallocs = 0;
- buf_cache->buffrees = 0;
-}
-
-static inline odp_pool_t pool_index_to_handle(uint32_t pool_id)
-{
- return _odp_cast_scalar(odp_pool_t, pool_id);
-}
-
-static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl)
-{
- return _odp_typeval(pool_hdl);
-}
-
-static inline void *get_pool_entry(uint32_t pool_id)
-{
- return pool_entry_ptr[pool_id];
-}
-
-static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool)
-{
- return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool));
-}
-
-static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf)
-{
- return odp_pool_to_entry(buf->pool_hdl);
-}
-
-static inline uint32_t odp_buffer_pool_segment_size(odp_pool_t pool)
-{
- return odp_pool_to_entry(pool)->s.seg_size;
-}
-
-static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool)
-{
- return odp_pool_to_entry(pool)->s.headroom;
-}
-
-static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool)
-{
- return odp_pool_to_entry(pool)->s.tailroom;
-}
-
-#ifdef __cplusplus
-}
-#endif
-
-#endif
@@ -21,7 +21,7 @@ extern "C" {
#include <odp/spinlock.h>
#include <odp/classification.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp_packet_internal.h>
#include <odp_packet_io_internal.h>
#include <odp_queue_internal.h>
@@ -21,7 +21,7 @@ extern "C" {
#include <odp/align.h>
#include <odp/debug.h>
#include <odp_buffer_internal.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp_buffer_inlines.h>
#include <odp/packet.h>
#include <odp/packet_io.h>
new file mode 100644
@@ -0,0 +1,380 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP buffer pool - internal header
+ */
+
+#ifndef ODP_POOL_INTERNAL_H_
+#define ODP_POOL_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp/std_types.h>
+#include <odp/align.h>
+#include <odp_align_internal.h>
+#include <odp/pool.h>
+#include <odp_buffer_internal.h>
+#include <odp/hints.h>
+#include <odp/config.h>
+#include <odp/debug.h>
+#include <odp/shared_memory.h>
+#include <odp/atomic.h>
+#include <odp_atomic_internal.h>
+#include <string.h>
+
+/**
+ * Buffer initialization routine prototype
+ *
+ * @note Routines of this type MAY be passed as part of the
+ * _odp_buffer_pool_init_t structure to be called whenever a
+ * buffer is allocated to initialize the user metadata
+ * associated with that buffer.
+ */
+typedef void (_odp_buf_init_t)(odp_buffer_t buf, void *buf_init_arg);
+
+/**
+ * Buffer pool initialization parameters
+ * Used to communicate buffer pool initialization options. Internal for now.
+ */
+typedef struct _odp_buffer_pool_init_t {
+ size_t udata_size; /**< Size of user metadata for each buffer */
+ _odp_buf_init_t *buf_init; /**< Buffer initialization routine to use */
+ void *buf_init_arg; /**< Argument to be passed to buf_init() */
+} _odp_buffer_pool_init_t; /**< Type of buffer initialization struct */
+
+/* Local cache for buffer alloc/free acceleration */
+typedef struct local_cache_t {
+ odp_buffer_hdr_t *buf_freelist; /* The local cache */
+ uint64_t bufallocs; /* Local buffer alloc count */
+ uint64_t buffrees; /* Local buffer free count */
+} local_cache_t;
+
+/* Use ticketlock instead of spinlock */
+#define POOL_USE_TICKETLOCK
+
+/* Extra error checks */
+/* #define POOL_ERROR_CHECK */
+
+
+#ifdef POOL_USE_TICKETLOCK
+#include <odp/ticketlock.h>
+#define POOL_LOCK(a) odp_ticketlock_lock(a)
+#define POOL_UNLOCK(a) odp_ticketlock_unlock(a)
+#define POOL_LOCK_INIT(a) odp_ticketlock_init(a)
+#else
+#include <odp/spinlock.h>
+#define POOL_LOCK(a) odp_spinlock_lock(a)
+#define POOL_UNLOCK(a) odp_spinlock_unlock(a)
+#define POOL_LOCK_INIT(a) odp_spinlock_init(a)
+#endif
+
+struct pool_entry_s {
+#ifdef POOL_USE_TICKETLOCK
+ odp_ticketlock_t lock ODP_ALIGNED_CACHE;
+#else
+ odp_spinlock_t lock ODP_ALIGNED_CACHE;
+#endif
+
+ char name[ODP_POOL_NAME_LEN];
+ odp_pool_param_t params;
+ _odp_buffer_pool_init_t init_params;
+ odp_pool_t pool_hdl;
+ uint32_t pool_id;
+ odp_shm_t pool_shm;
+ union {
+ uint32_t all;
+ struct {
+ uint32_t has_name:1;
+ uint32_t user_supplied_shm:1;
+ uint32_t unsegmented:1;
+ uint32_t zeroized:1;
+ uint32_t predefined:1;
+ };
+ } flags;
+ uint32_t quiesced;
+ uint32_t low_wm_assert;
+ uint8_t *pool_base_addr;
+ uint8_t *pool_mdata_addr;
+ size_t pool_size;
+ uint32_t buf_align;
+ uint32_t buf_stride;
+ _odp_atomic_ptr_t buf_freelist;
+ _odp_atomic_ptr_t blk_freelist;
+ odp_atomic_u32_t bufcount;
+ odp_atomic_u32_t blkcount;
+ odp_atomic_u64_t bufallocs;
+ odp_atomic_u64_t buffrees;
+ odp_atomic_u64_t blkallocs;
+ odp_atomic_u64_t blkfrees;
+ odp_atomic_u64_t bufempty;
+ odp_atomic_u64_t blkempty;
+ odp_atomic_u64_t high_wm_count;
+ odp_atomic_u64_t low_wm_count;
+ uint32_t seg_size;
+ uint32_t high_wm;
+ uint32_t low_wm;
+ uint32_t headroom;
+ uint32_t tailroom;
+};
+
+typedef union pool_entry_u {
+ struct pool_entry_s s;
+
+ uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(struct pool_entry_s))];
+} pool_entry_t;
+
+extern void *pool_entry_ptr[];
+
+#if defined(ODP_CONFIG_SECURE_POOLS) && (ODP_CONFIG_SECURE_POOLS == 1)
+#define buffer_is_secure(buf) (buf->flags.zeroized)
+#define pool_is_secure(pool) (pool->flags.zeroized)
+#else
+#define buffer_is_secure(buf) 0
+#define pool_is_secure(pool) 0
+#endif
+
+#define TAG_ALIGN ((size_t)16)
+
+#define odp_cs(ptr, old, new) \
+ _odp_atomic_ptr_cmp_xchg_strong(&ptr, (void **)&old, (void *)new, \
+ _ODP_MEMMODEL_SC, \
+ _ODP_MEMMODEL_SC)
+
+/* Helper functions for pointer tagging to avoid ABA race conditions */
+#define odp_tag(ptr) \
+ (((size_t)ptr) & (TAG_ALIGN - 1))
+
+#define odp_detag(ptr) \
+ ((void *)(((size_t)ptr) & -TAG_ALIGN))
+
+#define odp_retag(ptr, tag) \
+ ((void *)(((size_t)ptr) | odp_tag(tag)))
+
+
+static inline void *get_blk(struct pool_entry_s *pool)
+{
+ void *oldhead, *myhead, *newhead;
+
+ oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ);
+
+ do {
+ size_t tag = odp_tag(oldhead);
+ myhead = odp_detag(oldhead);
+ if (odp_unlikely(myhead == NULL))
+ break;
+ newhead = odp_retag(((odp_buf_blk_t *)myhead)->next, tag + 1);
+ } while (odp_cs(pool->blk_freelist, oldhead, newhead) == 0);
+
+ if (odp_unlikely(myhead == NULL))
+ odp_atomic_inc_u64(&pool->blkempty);
+ else
+ odp_atomic_dec_u32(&pool->blkcount);
+
+ return (void *)myhead;
+}
+
+static inline void ret_blk(struct pool_entry_s *pool, void *block)
+{
+ void *oldhead, *myhead, *myblock;
+
+ oldhead = _odp_atomic_ptr_load(&pool->blk_freelist, _ODP_MEMMODEL_ACQ);
+
+ do {
+ size_t tag = odp_tag(oldhead);
+ myhead = odp_detag(oldhead);
+ ((odp_buf_blk_t *)block)->next = myhead;
+ myblock = odp_retag(block, tag + 1);
+ } while (odp_cs(pool->blk_freelist, oldhead, myblock) == 0);
+
+ odp_atomic_inc_u32(&pool->blkcount);
+ odp_atomic_inc_u64(&pool->blkfrees);
+}
+
+static inline odp_buffer_hdr_t *get_buf(struct pool_entry_s *pool)
+{
+ odp_buffer_hdr_t *oldhead, *myhead, *newhead;
+
+ oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ);
+
+ do {
+ size_t tag = odp_tag(oldhead);
+ myhead = odp_detag(oldhead);
+ if (odp_unlikely(myhead == NULL))
+ break;
+ newhead = odp_retag(myhead->next, tag + 1);
+ } while (odp_cs(pool->buf_freelist, oldhead, newhead) == 0);
+
+ if (odp_unlikely(myhead == NULL)) {
+ odp_atomic_inc_u64(&pool->bufempty);
+ } else {
+ uint64_t bufcount =
+ odp_atomic_fetch_sub_u32(&pool->bufcount, 1) - 1;
+
+ /* Check for low watermark condition */
+ if (bufcount == pool->low_wm && !pool->low_wm_assert) {
+ pool->low_wm_assert = 1;
+ odp_atomic_inc_u64(&pool->low_wm_count);
+ }
+
+ odp_atomic_inc_u64(&pool->bufallocs);
+ myhead->next = myhead; /* Mark buffer allocated */
+ myhead->allocator = odp_thread_id();
+ }
+
+ return (void *)myhead;
+}
+
+static inline void ret_buf(struct pool_entry_s *pool, odp_buffer_hdr_t *buf)
+{
+ odp_buffer_hdr_t *oldhead, *myhead, *mybuf;
+
+ buf->allocator = ODP_FREEBUF; /* Mark buffer free */
+
+ if (!buf->flags.hdrdata && buf->type != ODP_EVENT_BUFFER) {
+ while (buf->segcount > 0) {
+ if (buffer_is_secure(buf) || pool_is_secure(pool))
+ memset(buf->addr[buf->segcount - 1],
+ 0, buf->segsize);
+ ret_blk(pool, buf->addr[--buf->segcount]);
+ }
+ buf->size = 0;
+ }
+
+ oldhead = _odp_atomic_ptr_load(&pool->buf_freelist, _ODP_MEMMODEL_ACQ);
+
+ do {
+ size_t tag = odp_tag(oldhead);
+ myhead = odp_detag(oldhead);
+ buf->next = myhead;
+ mybuf = odp_retag(buf, tag + 1);
+ } while (odp_cs(pool->buf_freelist, oldhead, mybuf) == 0);
+
+ uint64_t bufcount = odp_atomic_fetch_add_u32(&pool->bufcount, 1) + 1;
+
+ /* Check if low watermark condition should be deasserted */
+ if (bufcount == pool->high_wm && pool->low_wm_assert) {
+ pool->low_wm_assert = 0;
+ odp_atomic_inc_u64(&pool->high_wm_count);
+ }
+
+ odp_atomic_inc_u64(&pool->buffrees);
+}
+
+static inline void *get_local_buf(local_cache_t *buf_cache,
+ struct pool_entry_s *pool,
+ size_t totsize)
+{
+ odp_buffer_hdr_t *buf = buf_cache->buf_freelist;
+
+ if (odp_likely(buf != NULL)) {
+ buf_cache->buf_freelist = buf->next;
+
+ if (odp_unlikely(buf->size < totsize)) {
+ intmax_t needed = totsize - buf->size;
+
+ do {
+ void *blk = get_blk(pool);
+ if (odp_unlikely(blk == NULL)) {
+ ret_buf(pool, buf);
+ buf_cache->buffrees--;
+ return NULL;
+ }
+ buf->addr[buf->segcount++] = blk;
+ needed -= pool->seg_size;
+ } while (needed > 0);
+
+ buf->size = buf->segcount * pool->seg_size;
+ }
+
+ buf_cache->bufallocs++;
+ buf->allocator = odp_thread_id(); /* Mark buffer allocated */
+ }
+
+ return buf;
+}
+
+static inline void ret_local_buf(local_cache_t *buf_cache,
+ odp_buffer_hdr_t *buf)
+{
+ buf->allocator = ODP_FREEBUF;
+ buf->next = buf_cache->buf_freelist;
+ buf_cache->buf_freelist = buf;
+
+ buf_cache->buffrees++;
+}
+
+static inline void flush_cache(local_cache_t *buf_cache,
+ struct pool_entry_s *pool)
+{
+ odp_buffer_hdr_t *buf = buf_cache->buf_freelist;
+ uint32_t flush_count = 0;
+
+ while (buf != NULL) {
+ odp_buffer_hdr_t *next = buf->next;
+ ret_buf(pool, buf);
+ buf = next;
+ flush_count++;
+ }
+
+ odp_atomic_add_u64(&pool->bufallocs, buf_cache->bufallocs);
+ odp_atomic_add_u64(&pool->buffrees, buf_cache->buffrees - flush_count);
+
+ buf_cache->buf_freelist = NULL;
+ buf_cache->bufallocs = 0;
+ buf_cache->buffrees = 0;
+}
+
+static inline odp_pool_t pool_index_to_handle(uint32_t pool_id)
+{
+ return _odp_cast_scalar(odp_pool_t, pool_id);
+}
+
+static inline uint32_t pool_handle_to_index(odp_pool_t pool_hdl)
+{
+ return _odp_typeval(pool_hdl);
+}
+
+static inline void *get_pool_entry(uint32_t pool_id)
+{
+ return pool_entry_ptr[pool_id];
+}
+
+static inline pool_entry_t *odp_pool_to_entry(odp_pool_t pool)
+{
+ return (pool_entry_t *)get_pool_entry(pool_handle_to_index(pool));
+}
+
+static inline pool_entry_t *odp_buf_to_pool(odp_buffer_hdr_t *buf)
+{
+ return odp_pool_to_entry(buf->pool_hdl);
+}
+
+static inline uint32_t odp_buffer_pool_segment_size(odp_pool_t pool)
+{
+ return odp_pool_to_entry(pool)->s.seg_size;
+}
+
+static inline uint32_t odp_buffer_pool_headroom(odp_pool_t pool)
+{
+ return odp_pool_to_entry(pool)->s.headroom;
+}
+
+static inline uint32_t odp_buffer_pool_tailroom(odp_pool_t pool)
+{
+ return odp_pool_to_entry(pool)->s.tailroom;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
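
The TAG_ALIGN and odp_tag/odp_detag/odp_retag helpers above fold a small update counter into the otherwise unused low bits of a 16-byte-aligned freelist head, so the compare-and-swap loops in get_blk() and get_buf() can detect ABA reuse of a node. A minimal standalone sketch of the same idea, using plain C and invented names in place of the ODP atomic wrappers:

#include <stdalign.h>
#include <stddef.h>
#include <stdio.h>

#define TAG_ALIGN ((size_t)16)	/* low 4 bits of an aligned pointer are free */

/* Pack a 4-bit counter into the unused low bits of a 16-byte-aligned pointer */
static void *retag(void *ptr, size_t tag)
{
	return (void *)(((size_t)ptr) | (tag & (TAG_ALIGN - 1)));
}

/* Strip the counter to recover the real node address */
static void *detag(void *tagged)
{
	return (void *)(((size_t)tagged) & ~(TAG_ALIGN - 1));
}

/* Read back the counter carried in the low bits */
static size_t tag_of(void *tagged)
{
	return ((size_t)tagged) & (TAG_ALIGN - 1);
}

int main(void)
{
	alignas(16) int block;		/* stands in for a freelist node */
	void *head = retag(&block, 0);

	/* Each successful list update bumps the tag, so a node that is
	 * popped and pushed back between the head load and the CAS no
	 * longer compares equal to the stale head value. */
	head = retag(detag(head), tag_of(head) + 1);

	printf("node %p tag %zu\n", detag(head), tag_of(head));
	return 0;
}
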
@@ -17,7 +17,7 @@
#include <odp/align.h>
#include <odp/debug.h>
#include <odp_buffer_internal.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp/timer.h>
/**
@@ -5,7 +5,7 @@
*/
#include <odp/buffer.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp_buffer_internal.h>
#include <odp_buffer_inlines.h>
#include <odp_debug_internal.h>
deleted file mode 100644
@@ -1,617 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp/std_types.h>
-#include <odp/pool.h>
-#include <odp_buffer_internal.h>
-#include <odp_buffer_pool_internal.h>
-#include <odp_buffer_inlines.h>
-#include <odp_packet_internal.h>
-#include <odp_timer_internal.h>
-#include <odp_align_internal.h>
-#include <odp/shared_memory.h>
-#include <odp/align.h>
-#include <odp_internal.h>
-#include <odp/config.h>
-#include <odp/hints.h>
-#include <odp_debug_internal.h>
-#include <odp_atomic_internal.h>
-
-#include <string.h>
-#include <stdlib.h>
-
-
-#if ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
-#error ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
-#endif
-
-
-typedef union buffer_type_any_u {
- odp_buffer_hdr_t buf;
- odp_packet_hdr_t pkt;
- odp_timeout_hdr_t tmo;
-} odp_anybuf_t;
-
-_ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0,
- "BUFFER_TYPE_ANY_U__SIZE_ERR");
-
-/* Any buffer type header */
-typedef struct {
- union buffer_type_any_u any_hdr; /* any buffer type */
-} odp_any_buffer_hdr_t;
-
-typedef struct odp_any_hdr_stride {
- uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))];
-} odp_any_hdr_stride;
-
-
-typedef struct pool_table_t {
- pool_entry_t pool[ODP_CONFIG_POOLS];
-} pool_table_t;
-
-
-/* The pool table */
-static pool_table_t *pool_tbl;
-
-/* Pool entry pointers (for inlining) */
-void *pool_entry_ptr[ODP_CONFIG_POOLS];
-
-/* Local cache for buffer alloc/free acceleration */
-static __thread local_cache_t local_cache[ODP_CONFIG_POOLS];
-
-int odp_buffer_pool_init_global(void)
-{
- uint32_t i;
- odp_shm_t shm;
-
- shm = odp_shm_reserve("odp_buffer_pools",
- sizeof(pool_table_t),
- sizeof(pool_entry_t), 0);
-
- pool_tbl = odp_shm_addr(shm);
-
- if (pool_tbl == NULL)
- return -1;
-
- memset(pool_tbl, 0, sizeof(pool_table_t));
-
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- /* init locks */
- pool_entry_t *pool = &pool_tbl->pool[i];
- POOL_LOCK_INIT(&pool->s.lock);
- pool->s.pool_hdl = pool_index_to_handle(i);
- pool->s.pool_id = i;
- pool_entry_ptr[i] = pool;
- }
-
- ODP_DBG("\nBuffer pool init global\n");
- ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
- ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
- ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
- ODP_DBG("\n");
- return 0;
-}
-
-/**
- * Pool creation
- */
-
-odp_pool_t odp_pool_create(const char *name,
- odp_shm_t shm,
- odp_pool_param_t *params)
-{
- odp_pool_t pool_hdl = ODP_POOL_INVALID;
- pool_entry_t *pool;
- uint32_t i, headroom = 0, tailroom = 0;
-
- /* Default size and align for timeouts */
- if (params->type == ODP_POOL_TIMEOUT) {
- params->buf.size = 0; /* tmo.__res1 */
- params->buf.align = 0; /* tmo.__res2 */
- }
-
- /* Default initialization paramters */
- static _odp_buffer_pool_init_t default_init_params = {
- .udata_size = 0,
- .buf_init = NULL,
- .buf_init_arg = NULL,
- };
-
- _odp_buffer_pool_init_t *init_params = &default_init_params;
-
- if (params == NULL)
- return ODP_POOL_INVALID;
-
- /* Restriction for v1.0: All non-packet buffers are unsegmented */
- int unseg = 1;
-
- /* Restriction for v1.0: No zeroization support */
- const int zeroized = 0;
-
- /* Restriction for v1.0: No udata support */
- uint32_t udata_stride = (init_params->udata_size > sizeof(void *)) ?
- ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size) :
- 0;
-
- uint32_t blk_size, buf_stride;
- uint32_t buf_align;
-
- if (params->type == ODP_POOL_PACKET)
- buf_align = 0;
- else
- buf_align = params->buf.align;
-
- /* Validate requested buffer alignment */
- if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
- buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align))
- return ODP_POOL_INVALID;
-
- /* Set correct alignment based on input request */
- if (buf_align == 0)
- buf_align = ODP_CACHE_LINE_SIZE;
- else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
- buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
-
- /* Calculate space needed for buffer blocks and metadata */
- switch (params->type) {
- case ODP_POOL_BUFFER:
- case ODP_POOL_TIMEOUT:
- blk_size = params->buf.size;
-
- /* Optimize small raw buffers */
- if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0)
- blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align);
-
- buf_stride = params->type == ODP_POOL_BUFFER ?
- sizeof(odp_buffer_hdr_stride) :
- sizeof(odp_timeout_hdr_stride);
- break;
-
- case ODP_POOL_PACKET:
- headroom = ODP_CONFIG_PACKET_HEADROOM;
- tailroom = ODP_CONFIG_PACKET_TAILROOM;
- unseg = params->pkt.seg_len > ODP_CONFIG_PACKET_BUF_LEN_MAX;
-
- if (unseg)
- blk_size = ODP_ALIGN_ROUNDUP(
- headroom + params->pkt.seg_len + tailroom,
- buf_align);
- else
- blk_size = ODP_ALIGN_ROUNDUP(
- headroom + params->pkt.seg_len + tailroom,
- ODP_CONFIG_PACKET_SEG_LEN_MIN);
-
- buf_stride = params->type == ODP_POOL_PACKET ?
- sizeof(odp_packet_hdr_stride) :
- sizeof(odp_any_hdr_stride);
- break;
-
- default:
- return ODP_POOL_INVALID;
- }
-
- /* Validate requested number of buffers against addressable limits */
- if (params->buf.num >
- (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
- return ODP_POOL_INVALID;
-
- /* Find an unused buffer pool slot and iniitalize it as requested */
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
-
- POOL_LOCK(&pool->s.lock);
- if (pool->s.pool_shm != ODP_SHM_INVALID) {
- POOL_UNLOCK(&pool->s.lock);
- continue;
- }
-
- /* found free pool */
- size_t block_size, pad_size, mdata_size, udata_size;
-
- pool->s.flags.all = 0;
-
- if (name == NULL) {
- pool->s.name[0] = 0;
- } else {
- strncpy(pool->s.name, name,
- ODP_POOL_NAME_LEN - 1);
- pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
- pool->s.flags.has_name = 1;
- }
-
- pool->s.params = *params;
- pool->s.init_params = *init_params;
- pool->s.buf_align = buf_align;
-
- /* Optimize for short buffers: Data stored in buffer hdr */
- if (blk_size <= ODP_MAX_INLINE_BUF) {
- block_size = 0;
- pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *);
- } else {
- block_size = params->buf.num * blk_size;
- pool->s.buf_align = buf_align;
- }
-
- pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
- mdata_size = params->buf.num * buf_stride;
- udata_size = params->buf.num * udata_stride;
-
- pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
- pad_size +
- mdata_size +
- udata_size);
-
- if (shm == ODP_SHM_NULL) {
- shm = odp_shm_reserve(pool->s.name,
- pool->s.pool_size,
- ODP_PAGE_SIZE, 0);
- if (shm == ODP_SHM_INVALID) {
- POOL_UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- }
- pool->s.pool_base_addr = odp_shm_addr(shm);
- } else {
- odp_shm_info_t info;
- if (odp_shm_info(shm, &info) != 0 ||
- info.size < pool->s.pool_size) {
- POOL_UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- }
- pool->s.pool_base_addr = odp_shm_addr(shm);
- void *page_addr =
- ODP_ALIGN_ROUNDUP_PTR(pool->s.pool_base_addr,
- ODP_PAGE_SIZE);
- if (pool->s.pool_base_addr != page_addr) {
- if (info.size < pool->s.pool_size +
- ((size_t)page_addr -
- (size_t)pool->s.pool_base_addr)) {
- POOL_UNLOCK(&pool->s.lock);
- return ODP_POOL_INVALID;
- }
- pool->s.pool_base_addr = page_addr;
- }
- pool->s.flags.user_supplied_shm = 1;
- }
-
- pool->s.pool_shm = shm;
-
- /* Now safe to unlock since pool entry has been allocated */
- POOL_UNLOCK(&pool->s.lock);
-
- pool->s.flags.unsegmented = unseg;
- pool->s.flags.zeroized = zeroized;
- pool->s.seg_size = unseg ?
- blk_size : ODP_CONFIG_PACKET_SEG_LEN_MIN;
-
-
- uint8_t *block_base_addr = pool->s.pool_base_addr;
- uint8_t *mdata_base_addr =
- block_base_addr + block_size + pad_size;
- uint8_t *udata_base_addr = mdata_base_addr + mdata_size;
-
- /* Pool mdata addr is used for indexing buffer metadata */
- pool->s.pool_mdata_addr = mdata_base_addr;
-
- pool->s.buf_stride = buf_stride;
- _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL,
- _ODP_MEMMODEL_RLX);
- _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL,
- _ODP_MEMMODEL_RLX);
-
- /* Initialization will increment these to their target vals */
- odp_atomic_store_u32(&pool->s.bufcount, 0);
- odp_atomic_store_u32(&pool->s.blkcount, 0);
-
- uint8_t *buf = udata_base_addr - buf_stride;
- uint8_t *udat = udata_stride == 0 ? NULL :
- block_base_addr - udata_stride;
-
- /* Init buffer common header and add to pool buffer freelist */
- do {
- odp_buffer_hdr_t *tmp =
- (odp_buffer_hdr_t *)(void *)buf;
-
- /* Iniitalize buffer metadata */
- tmp->allocator = ODP_FREEBUF;
- tmp->flags.all = 0;
- tmp->flags.zeroized = zeroized;
- tmp->size = 0;
- odp_atomic_store_u32(&tmp->ref_count, 0);
- tmp->type = params->type;
- tmp->pool_hdl = pool->s.pool_hdl;
- tmp->udata_addr = (void *)udat;
- tmp->udata_size = init_params->udata_size;
- tmp->segcount = 0;
- tmp->segsize = pool->s.seg_size;
- tmp->handle.handle = odp_buffer_encode_handle(tmp);
-
- /* Set 1st seg addr for zero-len buffers */
- tmp->addr[0] = NULL;
-
- /* Special case for short buffer data */
- if (blk_size <= ODP_MAX_INLINE_BUF) {
- tmp->flags.hdrdata = 1;
- if (blk_size > 0) {
- tmp->segcount = 1;
- tmp->addr[0] = &tmp->addr[1];
- tmp->size = blk_size;
- }
- }
-
- /* Push buffer onto pool's freelist */
- ret_buf(&pool->s, tmp);
- buf -= buf_stride;
- udat -= udata_stride;
- } while (buf >= mdata_base_addr);
-
- /* Form block freelist for pool */
- uint8_t *blk =
- block_base_addr + block_size - pool->s.seg_size;
-
- if (blk_size > ODP_MAX_INLINE_BUF)
- do {
- ret_blk(&pool->s, blk);
- blk -= pool->s.seg_size;
- } while (blk >= block_base_addr);
-
- /* Initialize pool statistics counters */
- odp_atomic_store_u64(&pool->s.bufallocs, 0);
- odp_atomic_store_u64(&pool->s.buffrees, 0);
- odp_atomic_store_u64(&pool->s.blkallocs, 0);
- odp_atomic_store_u64(&pool->s.blkfrees, 0);
- odp_atomic_store_u64(&pool->s.bufempty, 0);
- odp_atomic_store_u64(&pool->s.blkempty, 0);
- odp_atomic_store_u64(&pool->s.high_wm_count, 0);
- odp_atomic_store_u64(&pool->s.low_wm_count, 0);
-
- /* Reset other pool globals to initial state */
- pool->s.low_wm_assert = 0;
- pool->s.quiesced = 0;
- pool->s.low_wm_assert = 0;
- pool->s.headroom = headroom;
- pool->s.tailroom = tailroom;
-
- /* Watermarks are hard-coded for now to control caching */
- pool->s.high_wm = params->buf.num / 2;
- pool->s.low_wm = params->buf.num / 4;
-
- pool_hdl = pool->s.pool_hdl;
- break;
- }
-
- return pool_hdl;
-}
-
-
-odp_pool_t odp_pool_lookup(const char *name)
-{
- uint32_t i;
- pool_entry_t *pool;
-
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool = get_pool_entry(i);
-
- POOL_LOCK(&pool->s.lock);
- if (strcmp(name, pool->s.name) == 0) {
- /* found it */
- POOL_UNLOCK(&pool->s.lock);
- return pool->s.pool_hdl;
- }
- POOL_UNLOCK(&pool->s.lock);
- }
-
- return ODP_POOL_INVALID;
-}
-
-int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
-{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
-
- if (pool == NULL || info == NULL)
- return -1;
-
- info->name = pool->s.name;
- info->shm = pool->s.flags.user_supplied_shm ?
- pool->s.pool_shm : ODP_SHM_INVALID;
- info->params.buf.size = pool->s.params.buf.size;
- info->params.buf.align = pool->s.params.buf.align;
- info->params.buf.num = pool->s.params.buf.num;
- info->params.type = pool->s.params.type;
-
- return 0;
-}
-
-int odp_pool_destroy(odp_pool_t pool_hdl)
-{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
-
- if (pool == NULL)
- return -1;
-
- POOL_LOCK(&pool->s.lock);
-
- /* Call fails if pool is not allocated or predefined*/
- if (pool->s.pool_shm == ODP_SHM_INVALID ||
- pool->s.flags.predefined) {
- POOL_UNLOCK(&pool->s.lock);
- return -1;
- }
-
- /* Make sure local cache is empty */
- flush_cache(&local_cache[pool_id], &pool->s);
-
- /* Call fails if pool has allocated buffers */
- if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.params.buf.num) {
- POOL_UNLOCK(&pool->s.lock);
- return -1;
- }
-
- if (!pool->s.flags.user_supplied_shm)
- odp_shm_free(pool->s.pool_shm);
-
- pool->s.pool_shm = ODP_SHM_INVALID;
- POOL_UNLOCK(&pool->s.lock);
-
- return 0;
-}
-
-odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size)
-{
- uint32_t pool_id = pool_handle_to_index(pool_hdl);
- pool_entry_t *pool = get_pool_entry(pool_id);
- uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
- odp_anybuf_t *buf;
-
- /* Reject oversized allocation requests */
- if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
- (!pool->s.flags.unsegmented &&
- totsize > ODP_CONFIG_PACKET_BUF_LEN_MAX))
- return ODP_BUFFER_INVALID;
-
- /* Try to satisfy request from the local cache */
- buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id],
- &pool->s, totsize);
-
- /* If cache is empty, satisfy request from the pool */
- if (odp_unlikely(buf == NULL)) {
- buf = (odp_anybuf_t *)(void *)get_buf(&pool->s);
-
- if (odp_unlikely(buf == NULL))
- return ODP_BUFFER_INVALID;
-
- /* Get blocks for this buffer, if pool uses application data */
- if (buf->buf.size < totsize) {
- intmax_t needed = totsize - buf->buf.size;
- do {
- uint8_t *blk = get_blk(&pool->s);
- if (blk == NULL) {
- ret_buf(&pool->s, &buf->buf);
- return ODP_BUFFER_INVALID;
- }
- buf->buf.addr[buf->buf.segcount++] = blk;
- needed -= pool->s.seg_size;
- } while (needed > 0);
- buf->buf.size = buf->buf.segcount * pool->s.seg_size;
- }
- }
-
- /* By default, buffers inherit their pool's zeroization setting */
- buf->buf.flags.zeroized = pool->s.flags.zeroized;
-
- if (buf->buf.type == ODP_EVENT_PACKET) {
- packet_init(pool, &buf->pkt, size);
-
- if (pool->s.init_params.buf_init != NULL)
- (*pool->s.init_params.buf_init)
- (buf->buf.handle.handle,
- pool->s.init_params.buf_init_arg);
- }
-
- return odp_hdr_to_buf(&buf->buf);
-}
-
-odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
-{
- return buffer_alloc(pool_hdl,
- odp_pool_to_entry(pool_hdl)->s.params.buf.size);
-}
-
-void odp_buffer_free(odp_buffer_t buf)
-{
- odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf);
- pool_entry_t *pool = odp_buf_to_pool(buf_hdr);
-
- if (odp_unlikely(pool->s.low_wm_assert))
- ret_buf(&pool->s, buf_hdr);
- else
- ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr);
-}
-
-void _odp_flush_caches(void)
-{
- int i;
-
- for (i = 0; i < ODP_CONFIG_POOLS; i++) {
- pool_entry_t *pool = get_pool_entry(i);
- flush_cache(&local_cache[i], &pool->s);
- }
-}
-
-void odp_pool_print(odp_pool_t pool_hdl)
-{
- pool_entry_t *pool;
- uint32_t pool_id;
-
- pool_id = pool_handle_to_index(pool_hdl);
- pool = get_pool_entry(pool_id);
-
- uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount);
- uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount);
- uint64_t bufallocs = odp_atomic_load_u64(&pool->s.bufallocs);
- uint64_t buffrees = odp_atomic_load_u64(&pool->s.buffrees);
- uint64_t blkallocs = odp_atomic_load_u64(&pool->s.blkallocs);
- uint64_t blkfrees = odp_atomic_load_u64(&pool->s.blkfrees);
- uint64_t bufempty = odp_atomic_load_u64(&pool->s.bufempty);
- uint64_t blkempty = odp_atomic_load_u64(&pool->s.blkempty);
- uint64_t hiwmct = odp_atomic_load_u64(&pool->s.high_wm_count);
- uint64_t lowmct = odp_atomic_load_u64(&pool->s.low_wm_count);
-
- ODP_DBG("Pool info\n");
- ODP_DBG("---------\n");
- ODP_DBG(" pool %" PRIu64 "\n",
- odp_pool_to_u64(pool->s.pool_hdl));
- ODP_DBG(" name %s\n",
- pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
- ODP_DBG(" pool type %s\n",
- pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
- (pool->s.params.type == ODP_POOL_PACKET ? "packet" :
- (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
- "unknown")));
- ODP_DBG(" pool storage %sODP managed\n",
- pool->s.flags.user_supplied_shm ?
- "application provided, " : "");
- ODP_DBG(" pool status %s\n",
- pool->s.quiesced ? "quiesced" : "active");
- ODP_DBG(" pool opts %s, %s, %s\n",
- pool->s.flags.unsegmented ? "unsegmented" : "segmented",
- pool->s.flags.zeroized ? "zeroized" : "non-zeroized",
- pool->s.flags.predefined ? "predefined" : "created");
- ODP_DBG(" pool base %p\n", pool->s.pool_base_addr);
- ODP_DBG(" pool size %zu (%zu pages)\n",
- pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE);
- ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr);
- ODP_DBG(" udata size %zu\n", pool->s.init_params.udata_size);
- ODP_DBG(" headroom %u\n", pool->s.headroom);
- ODP_DBG(" buf size %zu\n", pool->s.params.buf.size);
- ODP_DBG(" tailroom %u\n", pool->s.tailroom);
- ODP_DBG(" buf align %u requested, %u used\n",
- pool->s.params.buf.align, pool->s.buf_align);
- ODP_DBG(" num bufs %u\n", pool->s.params.buf.num);
- ODP_DBG(" bufs available %u %s\n", bufcount,
- pool->s.low_wm_assert ? " **low wm asserted**" : "");
- ODP_DBG(" bufs in use %u\n", pool->s.params.buf.num - bufcount);
- ODP_DBG(" buf allocs %lu\n", bufallocs);
- ODP_DBG(" buf frees %lu\n", buffrees);
- ODP_DBG(" buf empty %lu\n", bufempty);
- ODP_DBG(" blk size %zu\n",
- pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
- ODP_DBG(" blks available %u\n", blkcount);
- ODP_DBG(" blk allocs %lu\n", blkallocs);
- ODP_DBG(" blk frees %lu\n", blkfrees);
- ODP_DBG(" blk empty %lu\n", blkempty);
- ODP_DBG(" high wm value %lu\n", pool->s.high_wm);
- ODP_DBG(" high wm count %lu\n", hiwmct);
- ODP_DBG(" low wm value %lu\n", pool->s.low_wm);
- ODP_DBG(" low wm count %lu\n", lowmct);
-}
-
-
-odp_pool_t odp_buffer_pool(odp_buffer_t buf)
-{
- return odp_buf_to_hdr(buf)->pool_hdl;
-}
@@ -16,7 +16,7 @@
#include <odp_classification_datamodel.h>
#include <odp_classification_inlines.h>
#include <odp_classification_internal.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp/shared_memory.h>
#include <odp/helper/eth.h>
#include <string.h>
new file mode 100644
@@ -0,0 +1,617 @@
+/* Copyright (c) 2013, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <odp/std_types.h>
+#include <odp/pool.h>
+#include <odp_buffer_internal.h>
+#include <odp_pool_internal.h>
+#include <odp_buffer_inlines.h>
+#include <odp_packet_internal.h>
+#include <odp_timer_internal.h>
+#include <odp_align_internal.h>
+#include <odp/shared_memory.h>
+#include <odp/align.h>
+#include <odp_internal.h>
+#include <odp/config.h>
+#include <odp/hints.h>
+#include <odp_debug_internal.h>
+#include <odp_atomic_internal.h>
+
+#include <string.h>
+#include <stdlib.h>
+
+
+#if ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
+#error ODP_CONFIG_POOLS > ODP_BUFFER_MAX_POOLS
+#endif
+
+
+typedef union buffer_type_any_u {
+ odp_buffer_hdr_t buf;
+ odp_packet_hdr_t pkt;
+ odp_timeout_hdr_t tmo;
+} odp_anybuf_t;
+
+_ODP_STATIC_ASSERT((sizeof(union buffer_type_any_u) % 8) == 0,
+ "BUFFER_TYPE_ANY_U__SIZE_ERR");
+
+/* Any buffer type header */
+typedef struct {
+ union buffer_type_any_u any_hdr; /* any buffer type */
+} odp_any_buffer_hdr_t;
+
+typedef struct odp_any_hdr_stride {
+ uint8_t pad[ODP_CACHE_LINE_SIZE_ROUNDUP(sizeof(odp_any_buffer_hdr_t))];
+} odp_any_hdr_stride;
+
+
+typedef struct pool_table_t {
+ pool_entry_t pool[ODP_CONFIG_POOLS];
+} pool_table_t;
+
+
+/* The pool table */
+static pool_table_t *pool_tbl;
+
+/* Pool entry pointers (for inlining) */
+void *pool_entry_ptr[ODP_CONFIG_POOLS];
+
+/* Local cache for buffer alloc/free acceleration */
+static __thread local_cache_t local_cache[ODP_CONFIG_POOLS];
+
+int odp_buffer_pool_init_global(void)
+{
+ uint32_t i;
+ odp_shm_t shm;
+
+ shm = odp_shm_reserve("odp_buffer_pools",
+ sizeof(pool_table_t),
+ sizeof(pool_entry_t), 0);
+
+ pool_tbl = odp_shm_addr(shm);
+
+ if (pool_tbl == NULL)
+ return -1;
+
+ memset(pool_tbl, 0, sizeof(pool_table_t));
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ /* init locks */
+ pool_entry_t *pool = &pool_tbl->pool[i];
+ POOL_LOCK_INIT(&pool->s.lock);
+ pool->s.pool_hdl = pool_index_to_handle(i);
+ pool->s.pool_id = i;
+ pool_entry_ptr[i] = pool;
+ }
+
+ ODP_DBG("\nBuffer pool init global\n");
+ ODP_DBG(" pool_entry_s size %zu\n", sizeof(struct pool_entry_s));
+ ODP_DBG(" pool_entry_t size %zu\n", sizeof(pool_entry_t));
+ ODP_DBG(" odp_buffer_hdr_t size %zu\n", sizeof(odp_buffer_hdr_t));
+ ODP_DBG("\n");
+ return 0;
+}
+
+/**
+ * Pool creation
+ */
+
+odp_pool_t odp_pool_create(const char *name,
+ odp_shm_t shm,
+ odp_pool_param_t *params)
+{
+ odp_pool_t pool_hdl = ODP_POOL_INVALID;
+ pool_entry_t *pool;
+ uint32_t i, headroom = 0, tailroom = 0;
+
+ /* Default size and align for timeouts */
+ if (params->type == ODP_POOL_TIMEOUT) {
+ params->buf.size = 0; /* tmo.__res1 */
+ params->buf.align = 0; /* tmo.__res2 */
+ }
+
+	/* Default initialization parameters */
+ static _odp_buffer_pool_init_t default_init_params = {
+ .udata_size = 0,
+ .buf_init = NULL,
+ .buf_init_arg = NULL,
+ };
+
+ _odp_buffer_pool_init_t *init_params = &default_init_params;
+
+ if (params == NULL)
+ return ODP_POOL_INVALID;
+
+ /* Restriction for v1.0: All non-packet buffers are unsegmented */
+ int unseg = 1;
+
+ /* Restriction for v1.0: No zeroization support */
+ const int zeroized = 0;
+
+ /* Restriction for v1.0: No udata support */
+ uint32_t udata_stride = (init_params->udata_size > sizeof(void *)) ?
+ ODP_CACHE_LINE_SIZE_ROUNDUP(init_params->udata_size) :
+ 0;
+
+ uint32_t blk_size, buf_stride;
+ uint32_t buf_align;
+
+ if (params->type == ODP_POOL_PACKET)
+ buf_align = 0;
+ else
+ buf_align = params->buf.align;
+
+ /* Validate requested buffer alignment */
+ if (buf_align > ODP_CONFIG_BUFFER_ALIGN_MAX ||
+ buf_align != ODP_ALIGN_ROUNDDOWN_POWER_2(buf_align, buf_align))
+ return ODP_POOL_INVALID;
+
+ /* Set correct alignment based on input request */
+ if (buf_align == 0)
+ buf_align = ODP_CACHE_LINE_SIZE;
+ else if (buf_align < ODP_CONFIG_BUFFER_ALIGN_MIN)
+ buf_align = ODP_CONFIG_BUFFER_ALIGN_MIN;
+
+ /* Calculate space needed for buffer blocks and metadata */
+ switch (params->type) {
+ case ODP_POOL_BUFFER:
+ case ODP_POOL_TIMEOUT:
+ blk_size = params->buf.size;
+
+ /* Optimize small raw buffers */
+ if (blk_size > ODP_MAX_INLINE_BUF || params->buf.align != 0)
+ blk_size = ODP_ALIGN_ROUNDUP(blk_size, buf_align);
+
+ buf_stride = params->type == ODP_POOL_BUFFER ?
+ sizeof(odp_buffer_hdr_stride) :
+ sizeof(odp_timeout_hdr_stride);
+ break;
+
+ case ODP_POOL_PACKET:
+ headroom = ODP_CONFIG_PACKET_HEADROOM;
+ tailroom = ODP_CONFIG_PACKET_TAILROOM;
+ unseg = params->pkt.seg_len > ODP_CONFIG_PACKET_BUF_LEN_MAX;
+
+ if (unseg)
+ blk_size = ODP_ALIGN_ROUNDUP(
+ headroom + params->pkt.seg_len + tailroom,
+ buf_align);
+ else
+ blk_size = ODP_ALIGN_ROUNDUP(
+ headroom + params->pkt.seg_len + tailroom,
+ ODP_CONFIG_PACKET_SEG_LEN_MIN);
+
+ buf_stride = params->type == ODP_POOL_PACKET ?
+ sizeof(odp_packet_hdr_stride) :
+ sizeof(odp_any_hdr_stride);
+ break;
+
+ default:
+ return ODP_POOL_INVALID;
+ }
+
+ /* Validate requested number of buffers against addressable limits */
+ if (params->buf.num >
+ (ODP_BUFFER_MAX_BUFFERS / (buf_stride / ODP_CACHE_LINE_SIZE)))
+ return ODP_POOL_INVALID;
+
+	/* Find an unused buffer pool slot and initialize it as requested */
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool = get_pool_entry(i);
+
+ POOL_LOCK(&pool->s.lock);
+ if (pool->s.pool_shm != ODP_SHM_INVALID) {
+ POOL_UNLOCK(&pool->s.lock);
+ continue;
+ }
+
+ /* found free pool */
+ size_t block_size, pad_size, mdata_size, udata_size;
+
+ pool->s.flags.all = 0;
+
+ if (name == NULL) {
+ pool->s.name[0] = 0;
+ } else {
+ strncpy(pool->s.name, name,
+ ODP_POOL_NAME_LEN - 1);
+ pool->s.name[ODP_POOL_NAME_LEN - 1] = 0;
+ pool->s.flags.has_name = 1;
+ }
+
+ pool->s.params = *params;
+ pool->s.init_params = *init_params;
+ pool->s.buf_align = buf_align;
+
+ /* Optimize for short buffers: Data stored in buffer hdr */
+ if (blk_size <= ODP_MAX_INLINE_BUF) {
+ block_size = 0;
+ pool->s.buf_align = blk_size == 0 ? 0 : sizeof(void *);
+ } else {
+ block_size = params->buf.num * blk_size;
+ pool->s.buf_align = buf_align;
+ }
+
+ pad_size = ODP_CACHE_LINE_SIZE_ROUNDUP(block_size) - block_size;
+ mdata_size = params->buf.num * buf_stride;
+ udata_size = params->buf.num * udata_stride;
+
+ pool->s.pool_size = ODP_PAGE_SIZE_ROUNDUP(block_size +
+ pad_size +
+ mdata_size +
+ udata_size);
+
+ if (shm == ODP_SHM_NULL) {
+ shm = odp_shm_reserve(pool->s.name,
+ pool->s.pool_size,
+ ODP_PAGE_SIZE, 0);
+ if (shm == ODP_SHM_INVALID) {
+ POOL_UNLOCK(&pool->s.lock);
+ return ODP_POOL_INVALID;
+ }
+ pool->s.pool_base_addr = odp_shm_addr(shm);
+ } else {
+ odp_shm_info_t info;
+ if (odp_shm_info(shm, &info) != 0 ||
+ info.size < pool->s.pool_size) {
+ POOL_UNLOCK(&pool->s.lock);
+ return ODP_POOL_INVALID;
+ }
+ pool->s.pool_base_addr = odp_shm_addr(shm);
+ void *page_addr =
+ ODP_ALIGN_ROUNDUP_PTR(pool->s.pool_base_addr,
+ ODP_PAGE_SIZE);
+ if (pool->s.pool_base_addr != page_addr) {
+ if (info.size < pool->s.pool_size +
+ ((size_t)page_addr -
+ (size_t)pool->s.pool_base_addr)) {
+ POOL_UNLOCK(&pool->s.lock);
+ return ODP_POOL_INVALID;
+ }
+ pool->s.pool_base_addr = page_addr;
+ }
+ pool->s.flags.user_supplied_shm = 1;
+ }
+
+ pool->s.pool_shm = shm;
+
+ /* Now safe to unlock since pool entry has been allocated */
+ POOL_UNLOCK(&pool->s.lock);
+
+ pool->s.flags.unsegmented = unseg;
+ pool->s.flags.zeroized = zeroized;
+ pool->s.seg_size = unseg ?
+ blk_size : ODP_CONFIG_PACKET_SEG_LEN_MIN;
+
+
+ uint8_t *block_base_addr = pool->s.pool_base_addr;
+ uint8_t *mdata_base_addr =
+ block_base_addr + block_size + pad_size;
+ uint8_t *udata_base_addr = mdata_base_addr + mdata_size;
+
+ /* Pool mdata addr is used for indexing buffer metadata */
+ pool->s.pool_mdata_addr = mdata_base_addr;
+
+ pool->s.buf_stride = buf_stride;
+ _odp_atomic_ptr_store(&pool->s.buf_freelist, NULL,
+ _ODP_MEMMODEL_RLX);
+ _odp_atomic_ptr_store(&pool->s.blk_freelist, NULL,
+ _ODP_MEMMODEL_RLX);
+
+ /* Initialization will increment these to their target vals */
+ odp_atomic_store_u32(&pool->s.bufcount, 0);
+ odp_atomic_store_u32(&pool->s.blkcount, 0);
+
+ uint8_t *buf = udata_base_addr - buf_stride;
+ uint8_t *udat = udata_stride == 0 ? NULL :
+ block_base_addr - udata_stride;
+
+ /* Init buffer common header and add to pool buffer freelist */
+ do {
+ odp_buffer_hdr_t *tmp =
+ (odp_buffer_hdr_t *)(void *)buf;
+
+			/* Initialize buffer metadata */
+ tmp->allocator = ODP_FREEBUF;
+ tmp->flags.all = 0;
+ tmp->flags.zeroized = zeroized;
+ tmp->size = 0;
+ odp_atomic_store_u32(&tmp->ref_count, 0);
+ tmp->type = params->type;
+ tmp->pool_hdl = pool->s.pool_hdl;
+ tmp->udata_addr = (void *)udat;
+ tmp->udata_size = init_params->udata_size;
+ tmp->segcount = 0;
+ tmp->segsize = pool->s.seg_size;
+ tmp->handle.handle = odp_buffer_encode_handle(tmp);
+
+ /* Set 1st seg addr for zero-len buffers */
+ tmp->addr[0] = NULL;
+
+ /* Special case for short buffer data */
+ if (blk_size <= ODP_MAX_INLINE_BUF) {
+ tmp->flags.hdrdata = 1;
+ if (blk_size > 0) {
+ tmp->segcount = 1;
+ tmp->addr[0] = &tmp->addr[1];
+ tmp->size = blk_size;
+ }
+ }
+
+ /* Push buffer onto pool's freelist */
+ ret_buf(&pool->s, tmp);
+ buf -= buf_stride;
+ udat -= udata_stride;
+ } while (buf >= mdata_base_addr);
+
+ /* Form block freelist for pool */
+ uint8_t *blk =
+ block_base_addr + block_size - pool->s.seg_size;
+
+ if (blk_size > ODP_MAX_INLINE_BUF)
+ do {
+ ret_blk(&pool->s, blk);
+ blk -= pool->s.seg_size;
+ } while (blk >= block_base_addr);
+
+ /* Initialize pool statistics counters */
+ odp_atomic_store_u64(&pool->s.bufallocs, 0);
+ odp_atomic_store_u64(&pool->s.buffrees, 0);
+ odp_atomic_store_u64(&pool->s.blkallocs, 0);
+ odp_atomic_store_u64(&pool->s.blkfrees, 0);
+ odp_atomic_store_u64(&pool->s.bufempty, 0);
+ odp_atomic_store_u64(&pool->s.blkempty, 0);
+ odp_atomic_store_u64(&pool->s.high_wm_count, 0);
+ odp_atomic_store_u64(&pool->s.low_wm_count, 0);
+
+ /* Reset other pool globals to initial state */
+ pool->s.low_wm_assert = 0;
+ pool->s.quiesced = 0;
+ pool->s.low_wm_assert = 0;
+ pool->s.headroom = headroom;
+ pool->s.tailroom = tailroom;
+
+ /* Watermarks are hard-coded for now to control caching */
+ pool->s.high_wm = params->buf.num / 2;
+ pool->s.low_wm = params->buf.num / 4;
+
+ pool_hdl = pool->s.pool_hdl;
+ break;
+ }
+
+ return pool_hdl;
+}
+
+
+odp_pool_t odp_pool_lookup(const char *name)
+{
+ uint32_t i;
+ pool_entry_t *pool;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool = get_pool_entry(i);
+
+ POOL_LOCK(&pool->s.lock);
+ if (strcmp(name, pool->s.name) == 0) {
+ /* found it */
+ POOL_UNLOCK(&pool->s.lock);
+ return pool->s.pool_hdl;
+ }
+ POOL_UNLOCK(&pool->s.lock);
+ }
+
+ return ODP_POOL_INVALID;
+}
+
+int odp_pool_info(odp_pool_t pool_hdl, odp_pool_info_t *info)
+{
+ uint32_t pool_id = pool_handle_to_index(pool_hdl);
+ pool_entry_t *pool = get_pool_entry(pool_id);
+
+ if (pool == NULL || info == NULL)
+ return -1;
+
+ info->name = pool->s.name;
+ info->shm = pool->s.flags.user_supplied_shm ?
+ pool->s.pool_shm : ODP_SHM_INVALID;
+ info->params.buf.size = pool->s.params.buf.size;
+ info->params.buf.align = pool->s.params.buf.align;
+ info->params.buf.num = pool->s.params.buf.num;
+ info->params.type = pool->s.params.type;
+
+ return 0;
+}
+
+int odp_pool_destroy(odp_pool_t pool_hdl)
+{
+ uint32_t pool_id = pool_handle_to_index(pool_hdl);
+ pool_entry_t *pool = get_pool_entry(pool_id);
+
+ if (pool == NULL)
+ return -1;
+
+ POOL_LOCK(&pool->s.lock);
+
+	/* Call fails if pool is not allocated or predefined */
+ if (pool->s.pool_shm == ODP_SHM_INVALID ||
+ pool->s.flags.predefined) {
+ POOL_UNLOCK(&pool->s.lock);
+ return -1;
+ }
+
+ /* Make sure local cache is empty */
+ flush_cache(&local_cache[pool_id], &pool->s);
+
+ /* Call fails if pool has allocated buffers */
+ if (odp_atomic_load_u32(&pool->s.bufcount) < pool->s.params.buf.num) {
+ POOL_UNLOCK(&pool->s.lock);
+ return -1;
+ }
+
+ if (!pool->s.flags.user_supplied_shm)
+ odp_shm_free(pool->s.pool_shm);
+
+ pool->s.pool_shm = ODP_SHM_INVALID;
+ POOL_UNLOCK(&pool->s.lock);
+
+ return 0;
+}
+
+odp_buffer_t buffer_alloc(odp_pool_t pool_hdl, size_t size)
+{
+ uint32_t pool_id = pool_handle_to_index(pool_hdl);
+ pool_entry_t *pool = get_pool_entry(pool_id);
+ uintmax_t totsize = pool->s.headroom + size + pool->s.tailroom;
+ odp_anybuf_t *buf;
+
+ /* Reject oversized allocation requests */
+ if ((pool->s.flags.unsegmented && totsize > pool->s.seg_size) ||
+ (!pool->s.flags.unsegmented &&
+ totsize > ODP_CONFIG_PACKET_BUF_LEN_MAX))
+ return ODP_BUFFER_INVALID;
+
+ /* Try to satisfy request from the local cache */
+ buf = (odp_anybuf_t *)(void *)get_local_buf(&local_cache[pool_id],
+ &pool->s, totsize);
+
+ /* If cache is empty, satisfy request from the pool */
+ if (odp_unlikely(buf == NULL)) {
+ buf = (odp_anybuf_t *)(void *)get_buf(&pool->s);
+
+ if (odp_unlikely(buf == NULL))
+ return ODP_BUFFER_INVALID;
+
+ /* Get blocks for this buffer, if pool uses application data */
+ if (buf->buf.size < totsize) {
+ intmax_t needed = totsize - buf->buf.size;
+ do {
+ uint8_t *blk = get_blk(&pool->s);
+ if (blk == NULL) {
+ ret_buf(&pool->s, &buf->buf);
+ return ODP_BUFFER_INVALID;
+ }
+ buf->buf.addr[buf->buf.segcount++] = blk;
+ needed -= pool->s.seg_size;
+ } while (needed > 0);
+ buf->buf.size = buf->buf.segcount * pool->s.seg_size;
+ }
+ }
+
+ /* By default, buffers inherit their pool's zeroization setting */
+ buf->buf.flags.zeroized = pool->s.flags.zeroized;
+
+ if (buf->buf.type == ODP_EVENT_PACKET) {
+ packet_init(pool, &buf->pkt, size);
+
+ if (pool->s.init_params.buf_init != NULL)
+ (*pool->s.init_params.buf_init)
+ (buf->buf.handle.handle,
+ pool->s.init_params.buf_init_arg);
+ }
+
+ return odp_hdr_to_buf(&buf->buf);
+}
+
+odp_buffer_t odp_buffer_alloc(odp_pool_t pool_hdl)
+{
+ return buffer_alloc(pool_hdl,
+ odp_pool_to_entry(pool_hdl)->s.params.buf.size);
+}
+
+void odp_buffer_free(odp_buffer_t buf)
+{
+ odp_buffer_hdr_t *buf_hdr = odp_buf_to_hdr(buf);
+ pool_entry_t *pool = odp_buf_to_pool(buf_hdr);
+
+ if (odp_unlikely(pool->s.low_wm_assert))
+ ret_buf(&pool->s, buf_hdr);
+ else
+ ret_local_buf(&local_cache[pool->s.pool_id], buf_hdr);
+}
+
+void _odp_flush_caches(void)
+{
+ int i;
+
+ for (i = 0; i < ODP_CONFIG_POOLS; i++) {
+ pool_entry_t *pool = get_pool_entry(i);
+ flush_cache(&local_cache[i], &pool->s);
+ }
+}
+
+void odp_pool_print(odp_pool_t pool_hdl)
+{
+ pool_entry_t *pool;
+ uint32_t pool_id;
+
+ pool_id = pool_handle_to_index(pool_hdl);
+ pool = get_pool_entry(pool_id);
+
+ uint32_t bufcount = odp_atomic_load_u32(&pool->s.bufcount);
+ uint32_t blkcount = odp_atomic_load_u32(&pool->s.blkcount);
+ uint64_t bufallocs = odp_atomic_load_u64(&pool->s.bufallocs);
+ uint64_t buffrees = odp_atomic_load_u64(&pool->s.buffrees);
+ uint64_t blkallocs = odp_atomic_load_u64(&pool->s.blkallocs);
+ uint64_t blkfrees = odp_atomic_load_u64(&pool->s.blkfrees);
+ uint64_t bufempty = odp_atomic_load_u64(&pool->s.bufempty);
+ uint64_t blkempty = odp_atomic_load_u64(&pool->s.blkempty);
+ uint64_t hiwmct = odp_atomic_load_u64(&pool->s.high_wm_count);
+ uint64_t lowmct = odp_atomic_load_u64(&pool->s.low_wm_count);
+
+ ODP_DBG("Pool info\n");
+ ODP_DBG("---------\n");
+ ODP_DBG(" pool %" PRIu64 "\n",
+ odp_pool_to_u64(pool->s.pool_hdl));
+ ODP_DBG(" name %s\n",
+ pool->s.flags.has_name ? pool->s.name : "Unnamed Pool");
+ ODP_DBG(" pool type %s\n",
+ pool->s.params.type == ODP_POOL_BUFFER ? "buffer" :
+ (pool->s.params.type == ODP_POOL_PACKET ? "packet" :
+ (pool->s.params.type == ODP_POOL_TIMEOUT ? "timeout" :
+ "unknown")));
+ ODP_DBG(" pool storage %sODP managed\n",
+ pool->s.flags.user_supplied_shm ?
+ "application provided, " : "");
+ ODP_DBG(" pool status %s\n",
+ pool->s.quiesced ? "quiesced" : "active");
+ ODP_DBG(" pool opts %s, %s, %s\n",
+ pool->s.flags.unsegmented ? "unsegmented" : "segmented",
+ pool->s.flags.zeroized ? "zeroized" : "non-zeroized",
+ pool->s.flags.predefined ? "predefined" : "created");
+ ODP_DBG(" pool base %p\n", pool->s.pool_base_addr);
+ ODP_DBG(" pool size %zu (%zu pages)\n",
+ pool->s.pool_size, pool->s.pool_size / ODP_PAGE_SIZE);
+ ODP_DBG(" pool mdata base %p\n", pool->s.pool_mdata_addr);
+ ODP_DBG(" udata size %zu\n", pool->s.init_params.udata_size);
+ ODP_DBG(" headroom %u\n", pool->s.headroom);
+ ODP_DBG(" buf size %zu\n", pool->s.params.buf.size);
+ ODP_DBG(" tailroom %u\n", pool->s.tailroom);
+ ODP_DBG(" buf align %u requested, %u used\n",
+ pool->s.params.buf.align, pool->s.buf_align);
+ ODP_DBG(" num bufs %u\n", pool->s.params.buf.num);
+ ODP_DBG(" bufs available %u %s\n", bufcount,
+ pool->s.low_wm_assert ? " **low wm asserted**" : "");
+ ODP_DBG(" bufs in use %u\n", pool->s.params.buf.num - bufcount);
+ ODP_DBG(" buf allocs %lu\n", bufallocs);
+ ODP_DBG(" buf frees %lu\n", buffrees);
+ ODP_DBG(" buf empty %lu\n", bufempty);
+ ODP_DBG(" blk size %zu\n",
+ pool->s.seg_size > ODP_MAX_INLINE_BUF ? pool->s.seg_size : 0);
+ ODP_DBG(" blks available %u\n", blkcount);
+ ODP_DBG(" blk allocs %lu\n", blkallocs);
+ ODP_DBG(" blk frees %lu\n", blkfrees);
+ ODP_DBG(" blk empty %lu\n", blkempty);
+ ODP_DBG(" high wm value %lu\n", pool->s.high_wm);
+ ODP_DBG(" high wm count %lu\n", hiwmct);
+ ODP_DBG(" low wm value %lu\n", pool->s.low_wm);
+ ODP_DBG(" low wm count %lu\n", lowmct);
+}
+
+
+odp_pool_t odp_buffer_pool(odp_buffer_t buf)
+{
+ return odp_buf_to_hdr(buf)->pool_hdl;
+}
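
For context, the renamed API keeps the odp_pool_create(name, shm, params) signature used throughout odp_pool.c above. The sketch below shows the post-rename call sequence end to end; it is illustrative only: the helper name and the buffer sizes are arbitrary, and error handling is minimal.

#include <odp/pool.h>
#include <odp/buffer.h>
#include <odp/shared_memory.h>
#include <string.h>

/* Hypothetical helper exercising the renamed pool API */
static int pool_smoke_test(void)
{
	odp_pool_param_t params;
	odp_pool_t pool;
	odp_buffer_t buf;

	memset(&params, 0, sizeof(params));
	params.type      = ODP_POOL_BUFFER;
	params.buf.size  = 256;	/* arbitrary example sizes */
	params.buf.align = 0;	/* let the implementation pick */
	params.buf.num   = 32;

	/* ODP_SHM_NULL asks odp_pool_create() to reserve its own shm */
	pool = odp_pool_create("example_pool", ODP_SHM_NULL, &params);
	if (pool == ODP_POOL_INVALID)
		return -1;

	buf = odp_buffer_alloc(pool);
	if (buf == ODP_BUFFER_INVALID) {
		odp_pool_destroy(pool);
		return -1;
	}

	odp_buffer_free(buf);
	return odp_pool_destroy(pool);
}
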
@@ -10,7 +10,7 @@
#include <odp/align.h>
#include <odp/buffer.h>
#include <odp_buffer_internal.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp_buffer_inlines.h>
#include <odp_internal.h>
#include <odp/shared_memory.h>
@@ -36,7 +36,7 @@
#include <odp/buffer.h>
#include <odp_buffer_inlines.h>
#include <odp/pool.h>
-#include <odp_buffer_pool_internal.h>
+#include <odp_pool_internal.h>
#include <odp/debug.h>
#include <odp_debug_internal.h>
#include <odp/event.h>
Signed-off-by: Bill Fischofer <bill.fischofer@linaro.org>
---
 platform/linux-generic/Makefile.am                 |   4 +-
 .../linux-generic/include/odp_buffer_inlines.h     |   2 +-
 .../include/odp_buffer_pool_internal.h             | 380 -------------
 .../include/odp_classification_datamodel.h         |   2 +-
 .../linux-generic/include/odp_packet_internal.h    |   2 +-
 platform/linux-generic/include/odp_pool_internal.h | 380 +++++++++++++
 .../linux-generic/include/odp_timer_internal.h     |   2 +-
 platform/linux-generic/odp_buffer.c                |   2 +-
 platform/linux-generic/odp_buffer_pool.c           | 617 ---------------------
 platform/linux-generic/odp_classification.c        |   2 +-
 platform/linux-generic/odp_pool.c                  | 617 +++++++++++++++++++++
 platform/linux-generic/odp_queue.c                 |   2 +-
 platform/linux-generic/odp_timer.c                 |   2 +-
 13 files changed, 1007 insertions(+), 1007 deletions(-)
 delete mode 100644 platform/linux-generic/include/odp_buffer_pool_internal.h
 create mode 100644 platform/linux-generic/include/odp_pool_internal.h
 delete mode 100644 platform/linux-generic/odp_buffer_pool.c
 create mode 100644 platform/linux-generic/odp_pool.c