@@ -64,7 +64,7 @@ int odp_buffer_type(odp_buffer_t buf);
#define ODP_BUFFER_TYPE_INVALID (-1) /**< Buffer type invalid */
#define ODP_BUFFER_TYPE_RAW 0 /**< Raw buffer */
#define ODP_BUFFER_TYPE_PACKET 1 /**< Packet buffer */
-#define ODP_BUFFER_TYPE_TIMER 2 /**< Timer buffer */
+#define ODP_BUFFER_TYPE_TIMEOUT 2 /**< Timer timeout buffer */
/**
* Tests if buffer is part of a scatter/gather list
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013, Linaro Limited
+/* Copyright (c) 2014, Linaro Limited
* All rights reserved.
*
* SPDX-License-Identifier: BSD-3-Clause
@@ -8,7 +8,175 @@
/**
* @file
*
- * ODP timer
+ * ODP timer service
+ *
+
+//Example #1 Retransmission timer (e.g. for reliable connections)
+
+//Create timer pool for reliable connections
+#define SEC 1000000000ULL //1s expressed in nanoseconds
+odp_timer_pool_t tcp_tpid =
+ odp_timer_pool_create("TCP",
+ buffer_pool,
+ 1000000,//resolution 1ms
+ 7200 * SEC,//max tmo length 2hours
+ 40000,//num_timers
+ true,//shared
+ ODP_CLOCK_DEFAULT
+ );
+if (tcp_tpid == ODP_TIMER_POOL_INVALID)
+{
+ //Failed to create timer pool => fatal error
+}
+
+
+//Setting up a new connection
+//Allocate retransmission timeout (identical for supervision timeout)
+//The user pointer points back to the connection context
+conn->ret_tim = odp_timer_alloc(tcp_tpid, queue, conn);
+//Check if all resources were successfully allocated
+if (conn->ret_tim == ODP_TIMER_INVALID)
+{
+ //Failed to allocate timer for connection => tear down
+ //(nothing to free, the timer handle is invalid)
+ //Tear down connection
+ ...
+ return false;
+}
+//All necessary resources successfully allocated
+//Compute initial retransmission length in timer ticks
+conn->ret_len = odp_timer_ns_to_tick(tcp_tpid, 3 * SEC);//Per RFC1122
+//Arm the timer
+odp_timer_set_rel(conn->ret_tim, conn->ret_len);
+return true;
+
+
+//A packet for the connection has just been transmitted
+//Reset the retransmission timer
+odp_timer_set_rel(conn->ret_tim, conn->ret_len);
+
+
+//A retransmission timeout for the connection has been received
+//Check if timeout is fresh or stale, for stale timeouts we need to reset the
+//timer
+switch (odp_timer_tmo_status(tmo))
+{
+ case ODP_TMO_FRESH :
+ //Fresh timeout, last transmitted packet not acked in time => retransmit
+ //Get connection from timeout event
+ conn = odp_timer_get_userptr(tmo);
+ //Retransmit last packet (e.g. TCP segment)
+ ...
+ //Re-arm timer using original delta value
+ odp_timer_set_rel(conn->ret_tim, conn->ret_len);
+ break;
+ case ODP_TMO_STALE :
+ break;//Do nothing
+ case ODP_TMO_ORPHAN :
+ odp_buffer_free(tmo);
+ return;//Get out of here
+}
+
+
+//Example #2 Periodic tick
+
+//Create timer pool for periodic ticks
+odp_timer_pool_t per_tpid =
+ odp_timer_pool_create("periodic-tick",
+ buffer_pool,
+ 1,//resolution 1ns
+ 1000000000,//maximum timeout length 1s
+ 10,//num_timers
+ false,//not shared
+ ODP_CLOCK_DEFAULT
+ );
+if (per_tpid == ODP_TIMER_POOL_INVALID)
+{
+ //Failed to create timer pool => fatal error
+}
+
+
+//Allocate periodic timer
+tim_1733 = odp_timer_alloc(per_tpid, queue, NULL);
+//Check if all resources were successfully allocated
+if (tim_1733 == ODP_TIMER_INVALID)
+{
+ //Failed to allocate timer => tear down
+ //(nothing to free, the timer handle is invalid)
+ //Tear down other state
+ ...
+ return false;
+}
+//All necessary resources successfully allocated
+//Compute tick period in timer ticks
+period_1733 = odp_timer_ns_to_tick(per_tpid, 1000000000U / 1733U);//1733Hz
+//Compute when next tick should expire
+next_1733 = odp_timer_current_tick(per_tpid) + period_1733;
+//Arm the periodic timer
+odp_timer_set_abs(tim_1733, next_1733);
+return true;
+
+
+
+//A periodic timer timeout has been received
+//Must call odp_timer_tmo_status() on timeout!
+ret = odp_timer_tmo_status(tmo);
+//We expect the timeout to be fresh since we are not calling set or cancel on
+//active or expired timers in this example
+assert(ret == ODP_TMO_FRESH);
+//Do processing driven by timeout *before*
+...
+do {
+ //Compute when the timer should expire next
+ next_1733 += period_1733;
+ //Check that this is in the future
+ if (odp_likely(next_1733 > odp_timer_current_tick(per_tpid)))
+ break;//Yes, done
+ //Else we missed a timeout
+ //Optionally attempt some recovery and/or logging of the problem
+ ...
+} while (0);
+//Re-arm periodic timer
+odp_timer_set_abs(tim_1733, next_1733);
+//Or do processing driven by timeout *after*
+...
+return;
+
+//Example #3 Tear down of flow
+//ctx points to flow context data structure owned by application
+//Free the timer, cancelling any timeout
+odp_timer_free(ctx->tim);//Any enqueued timeout will become orphaned
+//Continue tearing down and eventually freeing context
+...
+return;
+
+//A timeout has been received, check status
+switch (odp_timer_tmo_status(tmo))
+{
+ case ODP_TMO_FRESH :
+ //A flow has timed out, tear it down
+ //Find flow context from timeout
+ ctx = (context *)odp_timer_get_userptr(tmo);
+ //Free the supervision timer, any enqueued timeout will remain
+ odp_timer_free(ctx->tim);
+ //Free other flow related resources
+ ...
+ //Flow torn down
+ break;
+ case ODP_TMO_STALE :
+ //A stale timeout was received, timer automatically reset
+ break;
+ case ODP_TMO_ORPHAN :
+ //Orphaned timeout (from previously torn down flow)
+ //No corresponding timer or flow context
+ //Free the timeout
+ odp_buffer_free(tmo);
+ break;
+}
+
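+//Example #4 Timer with a user-defined timeout buffer
+//(illustrative sketch; app_tpid, app_tim and app_pool are assumed to have
+//been created/allocated in the same way as in the examples above)
+
+//Allocate an application buffer to be delivered when the timer expires
+odp_buffer_t app_buf = odp_buffer_alloc(app_pool);
+if (app_buf == ODP_BUFFER_INVALID)
+{
+ //Failed to allocate buffer => handle error
+}
+//Arm the timer with the user-defined buffer, expiring 10ms from now
+odp_timer_set_abs_w_buf(app_tim,
+ odp_timer_current_tick(app_tpid) +
+ odp_timer_ns_to_tick(app_tpid, 10000000ULL),
+ app_buf);
+//Note: odp_timer_tmo_status() is not called on user-defined buffers,
+//the application recognises and processes them directly when received
+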
*/
#ifndef ODP_TIMER_H_
@@ -23,116 +191,325 @@ extern "C" {
#include <odp_buffer_pool.h>
#include <odp_queue.h>
-
/**
-* ODP timer handle
+* ODP timer pool handle (platform dependent)
*/
-typedef uint32_t odp_timer_t;
+struct odp_timer_pool;
+typedef struct odp_timer_pool *odp_timer_pool_t;
-/** Invalid timer */
-#define ODP_TIMER_INVALID 0
+/**
+ * Invalid timer pool handle (platform dependent)
+ */
+#define ODP_TIMER_POOL_INVALID NULL
+typedef enum odp_timer_pool_clock_source_e {
+ ODP_CLOCK_DEFAULT = 0,
+ /* Platform dependent which clock sources exist beyond
+ ODP_CLOCK_DEFAULT */
+ ODP_CLOCK_NONE = 1
+} odp_timer_pool_clock_source_t;
/**
-* ODP timeout handle
+* ODP timer handle (platform dependent)
*/
+struct odp_timer;
+typedef struct odp_timer *odp_timer_t;
+
+/**
+ * Invalid timer handle (platform dependent)
+ */
+#define ODP_TIMER_INVALID NULL
+
+/**
+ * ODP timeout event handle
+ */
typedef odp_buffer_t odp_timer_tmo_t;
-/** Invalid timeout */
-#define ODP_TIMER_TMO_INVALID 0
+/**
+ * ODP timeout status
+ */
+typedef enum odp_timer_tmo_status_e {
+ ODP_TMO_FRESH, /* Timeout is fresh, process it */
+ ODP_TMO_STALE, /* Timer reset or cancelled, do nothing */
+ ODP_TMO_ORPHAN,/* Timer deleted, free timeout */
+} odp_timer_tmo_status_t;
+
+/**
+* ODP tick value
+*/
+typedef uint64_t odp_timer_tick_t;
/**
- * Create a timer
+ * Create a timer pool
*
- * Creates a new timer with requested properties.
+ * Create a new timer pool.
+ * odp_timer_pool_create() is typically called once or a couple of times during
+ * application initialisation.
*
* @param name Name
- * @param pool Buffer pool for allocating timeout notifications
+ * @param buf_pool Buffer pool for allocating timeout events (one per timer)
* @param resolution Timeout resolution in nanoseconds
- * @param min_tmo Minimum timeout duration in nanoseconds
- * @param max_tmo Maximum timeout duration in nanoseconds
+ * @param max_tmo Maximum relative timeout in nanoseconds
+ * @param num_timers Number of supported timers (minimum)
+ * @param shared Shared or private timer pool.
+ * Operations on shared timers will include the necessary
+ * mutual exclusion, operations on private timers may not
+ * (mutual exclusion is the responsibility of the caller).
+ * @param clk_src Clock source to use
+ *
+ * @return Timer pool handle if successful, otherwise ODP_TIMER_POOL_INVALID
+ * and errno set
+ */
+odp_timer_pool_t
+odp_timer_pool_create(const char *name,
+ odp_buffer_pool_t buf_pool,
+ uint64_t resolution,
+ uint64_t max_tmo,
+ uint32_t num_timers,
+ bool shared,
+ odp_timer_pool_clock_source_t clk_src);
+
+/**
+ * Start a timer pool
*
- * @return Timer handle if successful, otherwise ODP_TIMER_INVALID
+ * Start all created timer pools, enabling the allocation of timers.
+ * The purpose of this call is to coordinate the creation of multiple timer
+ * pools that may use the same underlying HW resources.
+ * This function may be called multiple times.
*/
-odp_timer_t odp_timer_create(const char *name, odp_buffer_pool_t pool,
- uint64_t resolution, uint64_t min_tmo,
- uint64_t max_tmo);
+void odp_timer_pool_start(void);
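+
+/*
+ * Typical initialisation sequence (illustrative sketch, the pool parameters
+ * are application specific):
+ *
+ * odp_timer_pool_t tp = odp_timer_pool_create("app", buf_pool,
+ *                                             1000000, //resolution 1ms
+ *                                             10000000000ULL, //max tmo 10s
+ *                                             1000, true, ODP_CLOCK_DEFAULT);
+ * odp_timer_pool_start(); //start all created pools before allocating timers
+ */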
+
+/**
+ * Destroy a timer pool
+ *
+ * Destroy a timer pool, freeing all resources.
+ * All timers must have been freed.
+ *
+ * @param tpid Timer pool identifier
+ */
+void odp_timer_pool_destroy(odp_timer_pool_t tpid);
/**
* Convert timer ticks to nanoseconds
*
- * @param timer Timer
+ * @param tpid Timer pool identifier
* @param ticks Timer ticks
*
* @return Nanoseconds
*/
-uint64_t odp_timer_tick_to_ns(odp_timer_t timer, uint64_t ticks);
+uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tpid, odp_timer_tick_t ticks);
/**
* Convert nanoseconds to timer ticks
*
- * @param timer Timer
+ * @param tpid Timer pool identifier
* @param ns Nanoseconds
*
* @return Timer ticks
*/
-uint64_t odp_timer_ns_to_tick(odp_timer_t timer, uint64_t ns);
+odp_timer_tick_t odp_timer_ns_to_tick(odp_timer_pool_t tpid, uint64_t ns);
+
+/**
+ * Current tick value
+ *
+ * @param tpid Timer pool identifier
+ *
+ * @return Current time in timer ticks
+ */
+odp_timer_tick_t odp_timer_current_tick(odp_timer_pool_t tpid);
/**
- * Timer resolution in nanoseconds
+ * ODP timer configurations
+ */
+
+typedef enum odp_timer_pool_conf_e {
+ ODP_TIMER_NAME, /* Return name of timer pool */
+ ODP_TIMER_RESOLUTION,/* Return the timer resolution (in ns) */
+ ODP_TIMER_MAX_TMO, /* Return the maximum supported timeout (in ns) */
+ ODP_TIMER_NUM_TIMERS,/* Return number of supported timers */
+ ODP_TIMER_SHARED /* Return shared flag */
+} odp_timer_pool_conf_t;
+
+/**
+ * Query different timer pool configurations, e.g.
+ * Timer resolution in nanoseconds
+ * Maximum timeout in nanoseconds
+ * Number of supported timers
+ * Shared or private timer pool
*
- * @param timer Timer
+ * @param tpid Timer pool identifier
+ * @param item Configuration item being queried
*
- * @return Resolution in nanoseconds
+ * @return The requested piece of information or 0 for an unknown item.
*/
-uint64_t odp_timer_resolution(odp_timer_t timer);
+uint64_t odp_timer_pool_query_conf(odp_timer_pool_t tpid,
+ odp_timer_pool_conf_t item);
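+
+/*
+ * Example queries (illustrative sketch):
+ *
+ * uint64_t res_ns = odp_timer_pool_query_conf(tpid, ODP_TIMER_RESOLUTION);
+ * const char *nm =
+ *	(const char *)odp_timer_pool_query_conf(tpid, ODP_TIMER_NAME);
+ */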
/**
- * Maximum timeout in timer ticks
+ * Allocate a timer
+ *
+ * Create a timer (allocating all necessary resources e.g. timeout event) from
+ * the timer pool.
*
- * @param timer Timer
+ * @param tpid Timer pool identifier
+ * @param queue Destination queue for timeout notifications
+ * @param user_ptr User defined pointer or NULL (copied to timeouts)
*
- * @return Maximum timeout in timer ticks
+ * @return Timer handle if successful, otherwise ODP_TIMER_INVALID and
+ * errno set.
*/
-uint64_t odp_timer_maximum_tmo(odp_timer_t timer);
+odp_timer_t odp_timer_alloc(odp_timer_pool_t tpid,
+ odp_queue_t queue,
+ void *user_ptr);
/**
- * Current timer tick
+ * Free a timer
*
- * @param timer Timer
+ * Free (destroy) a timer, freeing all associated resources (e.g. default
+ * timeout event). An expired and enqueued timeout event will not be freed.
+ * It is the responsibility of the application to free this timeout when it
+ * is received.
*
- * @return Current time in timer ticks
+ * @param tim Timer handle
+ */
+void odp_timer_free(odp_timer_t tim);
+
+/**
+ * Set a timer (absolute time) with a user-defined timeout buffer
+ *
+ * Set (arm) the timer to expire at specific time. The user-defined
+ * buffer will be enqueued when the timer expires.
+ * Arming may fail (if the timer is in state EXPIRED), an earlier timeout
+ * will then be received. odp_timer_tmo_status() must be used to check if
+ * the received timeout is valid.
+ *
+ * Note: any invalid parameters will be treated as programming errors and will
+ * cause the application to abort.
+ * Note: a timeout too near in time may be delivered immediately.
+ * Note: a timeout too far away in time (beyond max_timeout) might be delivered
+ * early.
+ *
+ * @param tim Timer
+ * @param abs_tck Expiration time in absolute timer ticks
+ * @param user_buf The buffer to use as timeout event
+ */
+void odp_timer_set_abs_w_buf(odp_timer_t tim,
+ odp_timer_tick_t abs_tck,
+ odp_buffer_t user_buf);
+
+/**
+ * Set a timer with an absolute expiration time
+ *
+ * Set (arm) the timer to expire at a specific time.
+ * Arming may fail (if the timer is in state EXPIRED), an earlier timeout
+ * will then be received. odp_timer_tmo_status() must be used to check if
+ * the received timeout is valid.
+ *
+ * Note: any invalid parameters will be treated as programming errors and will
+ * cause the application to abort.
+ * Note: a timeout too near in time may be delivered immediately.
+ * Note: a timeout too far away in time (beyond max_timeout) might be delivered
+ * early, it will automatically be reset by odp_timer_tmo_status().
+ *
+ * @param tim Timer
+ * @param abs_tck Expiration time in absolute timer ticks
+ */
+void odp_timer_set_abs(odp_timer_t tim, odp_timer_tick_t abs_tck);
+
+/**
+ * Set a timer with a relative expiration time
+ *
+ * Set (arm) the timer to expire at a relative future time.
+ * Arming may fail (if the timer is in state EXPIRED),
+ * an earlier timeout will then be received. odp_timer_tmo_status() must
+ * be used to check if the received timeout is valid.
+ *
+ * Note: any invalid parameters will be treated as programming errors and will
+ * cause the application to abort.
+ * Note: a timeout too near in time may be delivered immediately.
+ * Note: a timeout too far away in time (beyond max_timeout) might be delivered
+ * early, it will automatically be reset by odp_timer_tmo_status().
+ *
+ * @param tim Timer
+ * @param rel_tck Expiration time in timer ticks relative to current time of
+ * the timer pool the timer belongs to
*/
-uint64_t odp_timer_current_tick(odp_timer_t timer);
+void odp_timer_set_rel(odp_timer_t tim, odp_timer_tick_t rel_tck);
/**
- * Request timeout with an absolute timer tick
+ * Cancel a timer
*
- * When tick reaches tmo_tick, the timer enqueues the timeout notification into
- * the destination queue.
+ * Cancel a timer, preventing future expiration and delivery.
*
- * @param timer Timer
- * @param tmo_tick Absolute timer tick value which triggers the timeout
- * @param queue Destination queue for the timeout notification
- * @param buf User defined timeout notification buffer. When
- * ODP_BUFFER_INVALID, default timeout notification is used.
+ * A timer that has already expired and been enqueued for delivery may be
+ * impossible to cancel and will instead be delivered to the destination queue.
+ * Use odp_timer_tmo_status() to check whether a received timeout is fresh or
+ * stale (cancelled). Stale timeouts will automatically be recycled.
*
- * @return Timeout handle if successful, otherwise ODP_TIMER_TMO_INVALID
+ * Note: any invalid parameters will be treated as programming errors and will
+ * cause the application to abort.
+ *
+ * @param tim Timer handle
*/
-odp_timer_tmo_t odp_timer_absolute_tmo(odp_timer_t timer, uint64_t tmo_tick,
- odp_queue_t queue, odp_buffer_t buf);
+void odp_timer_cancel(odp_timer_t tim);
/**
- * Cancel a timeout
+ * Return fresh/stale/orphan status of timeout.
+ *
+ * Check a received timeout for orphan status (i.e. parent timer freed) and
+ * staleness (i.e. parent timer has been reset or cancelled after timeout
+ * was enqueued).
+ * If the timeout is fresh, it should be processed.
+ * If the timeout is stale, the timer will automatically be reset unless it
+ * was cancelled.
+ * If the timeout is orphaned, it should be freed (by the caller).
+ *
+ * Note: odp_timer_tmo_status() must be called on all received (not
+ * user-defined) timeouts!
+ *
+ * @param tmo Timeout
+ *
+ * @return ODP_TMO_FRESH, ODP_TMO_STALE, ODP_TMO_ORPHAN
+ */
+odp_timer_tmo_status_t odp_timer_tmo_status(odp_timer_tmo_t tmo);
+
+/**
+ * Get timer handle
+ *
+ * Return the handle of the parent timer.
+ *
+ * @param tmo Timeout
+ *
+ * @return Timer handle or ODP_TIMER_INVALID for orphaned timeouts
+ */
+odp_timer_t odp_timer_get_handle(odp_timer_tmo_t tmo);
+
+/**
+ * Get expiration time
+ *
+ * Return (actual) expiration time of timeout.
+ *
+ * @param tmo Timeout
+ *
+ * @return Expiration time
+ */
+odp_timer_tick_t odp_timer_get_expiry(odp_timer_tmo_t tmo);
+
+/**
+ * Get user pointer
+ *
+ * Return the user pointer of the timer associated with the timeout.
+ * The user pointer is often used to point to some associated context.
*
- * @param timer Timer
- * @param tmo Timeout to cancel
+ * @param tmo Timeout
*
- * @return 0 if successful
+ * @return User pointer
*/
-int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo);
+void *odp_timer_get_userptr(odp_timer_tmo_t tmo);
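+
+/*
+ * Example use of the timeout accessors when processing a fresh timeout
+ * (illustrative sketch; conn_t is an application defined type):
+ *
+ * if (odp_timer_tmo_status(tmo) == ODP_TMO_FRESH) {
+ *	conn_t *conn = (conn_t *)odp_timer_get_userptr(tmo);
+ *	odp_timer_tick_t when = odp_timer_get_expiry(tmo);
+ *	odp_timer_t tim = odp_timer_get_handle(tmo);
+ *	//process the expiration for conn at tick 'when', possibly re-arm tim
+ * }
+ */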
+/* Helper functions */
+unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, odp_timer_tick_t tick);
#ifdef __cplusplus
}
b/platform/linux-generic/Makefile
@@ -72,6 +72,7 @@ OBJS += $(OBJ_DIR)/odp_thread.o
OBJS += $(OBJ_DIR)/odp_ticketlock.o
OBJS += $(OBJ_DIR)/odp_time.o
OBJS += $(OBJ_DIR)/odp_timer.o
+OBJS += $(OBJ_DIR)/priority_queue.o
OBJS += $(OBJ_DIR)/odp_ring.o
OBJS += $(OBJ_DIR)/odp_rwlock.o
ifeq ($(ODP_HAVE_NETMAP),yes)
@@ -98,6 +99,10 @@ $(DOC_DIR):
$(OBJ_DIR)/%.o: ./source/%.c
$(QUIET_CC)$(CC) -c -MD $(EXTRA_CFLAGS) $(CFLAGS) -o $@ $<
+$(OBJ_DIR)/%.o: ./source/%.cc
+#filter out some compiler options not valid for C++
+	$(QUIET_CC)$(CC) -c -MD $(filter-out -Wstrict-prototypes -Wmissing-prototypes -Wold-style-definition -Wnested-externs,$(EXTRA_CFLAGS)) $(CFLAGS) -o $@ $<
+
#
# Lib rule
#
b/platform/linux-generic/include/odp_buffer_pool_internal.h
@@ -93,7 +93,7 @@ static inline odp_buffer_hdr_t *odp_buf_to_hdr(odp_buffer_t buf)
}
#endif
- pool = get_pool_entry(pool_id);
+ pool = (struct pool_entry_s *)get_pool_entry(pool_id);
#ifdef POOL_ERROR_CHECK
if (odp_unlikely(index > pool->num_bufs - 1)) {
b/platform/linux-generic/include/odp_timer_internal.h
new file mode 100644
@@ -0,0 +1,81 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP timeout descriptor - implementation internal
+ */
+
+#ifndef ODP_TIMER_INTERNAL_H_
+#define ODP_TIMER_INTERNAL_H_
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <odp_align.h>
+#include <odp_debug.h>
+#include <odp_buffer_internal.h>
+#include <odp_buffer_pool_internal.h>
+#include <odp_timer.h>
+
+/**
+ * Internal Timeout header
+ */
+#ifdef __cplusplus
+ /* Inheritance required for static_cast from "odp_buffer_hdr_t *" to
+ "odp_timeout_hdr_t *" to work. */
+ struct odp_timeout_hdr_t : public odp_buffer_hdr_t {
+#else
+ typedef struct {
+ /* common buffer header */
+ odp_buffer_hdr_t buf_hdr;
+#endif
+
+ /* Requested expiration time */
+ odp_timer_tick_t expiration;
+ /* User ptr inherited from parent timer */
+ void *user_ptr;
+ /* Parent timer */
+ odp_timer_t timer;
+ /* Tag inherited from parent timer at time of expiration */
+ uint32_t tag;
+ /* Gen-cnt inherited from parent timer at time of creation */
+ uint32_t gc;
+ uint8_t payload[];
+#ifdef __cplusplus
+ };
+#else
+ }
+ odp_timeout_hdr_t;
+#endif
+
+#ifndef __cplusplus
+ /* C++ doesn't allow offsetof() on "non-POD" datatypes. Don't know why
+ odp_timeout_hdr_t is classified as non-POD, perhaps because of the
+ inheritance? */
+ ODP_ASSERT(sizeof(odp_timeout_hdr_t) ==
+ ODP_OFFSETOF(odp_timeout_hdr_t, payload),
+ ODP_TIMEOUT_HDR_T__SIZE_ERR);
+#endif
+ ODP_ASSERT(sizeof(odp_timeout_hdr_t) % sizeof(uint64_t) == 0,
+ ODP_TIMEOUT_HDR_T__SIZE_ERR2);
+
+ /**
+ * Return the timeout header
+ */
+ static inline odp_timeout_hdr_t *odp_timeout_hdr(odp_buffer_t buf)
+ {
+ return (odp_timeout_hdr_t *)odp_buf_to_hdr(buf);
+ }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
b/platform/linux-generic/source/odp_buffer_pool.c
@@ -9,6 +9,7 @@
#include <odp_buffer_pool_internal.h>
#include <odp_buffer_internal.h>
#include <odp_packet_internal.h>
+#include <odp_timer_internal.h>
#include <odp_shared_memory.h>
#include <odp_align.h>
#include <odp_internal.h>
@@ -115,7 +116,8 @@ static odp_buffer_hdr_t *index_to_hdr(pool_entry_t *pool, uint32_t index)
{
odp_buffer_hdr_t *hdr;
- hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index * pool->s.buf_size);
+ hdr = (odp_buffer_hdr_t *)(pool->s.buf_base + index *
+ pool->s.buf_size);
return hdr;
}
@@ -141,7 +143,7 @@ static uint32_t rem_buf_index(odp_buffer_chunk_hdr_t *chunk_hdr)
static odp_buffer_chunk_hdr_t *next_chunk(pool_entry_t *pool,
- odp_buffer_chunk_hdr_t *chunk_hdr)
+ odp_buffer_chunk_hdr_t *chunk_hdr)
{
uint32_t index;
@@ -190,13 +192,13 @@ static void check_align(pool_entry_t *pool, odp_buffer_hdr_t *hdr)
if (!ODP_ALIGNED_CHECK_POWER_2(hdr->addr, pool->s.payload_align)) {
ODP_ERR("check_align: payload align error %p, align %zu\n",
hdr->addr, pool->s.payload_align);
- exit(0);
+ abort();
}
if (!ODP_ALIGNED_CHECK_POWER_2(hdr, ODP_CACHE_LINE_SIZE)) {
ODP_ERR("check_align: hdr align error %p, align %i\n",
hdr, ODP_CACHE_LINE_SIZE);
- exit(0);
+ abort();
}
}
@@ -208,12 +210,17 @@ static void fill_hdr(void *ptr, pool_entry_t *pool, uint32_t index,
size_t size = pool->s.hdr_size;
uint8_t *payload = hdr->payload;
- if (buf_type == ODP_BUFFER_TYPE_CHUNK)
+ if (buf_type == ODP_BUFFER_TYPE_CHUNK) {
size = sizeof(odp_buffer_chunk_hdr_t);
-
- if (pool->s.buf_type == ODP_BUFFER_TYPE_PACKET) {
+ } else if (pool->s.buf_type == ODP_BUFFER_TYPE_PACKET) {
odp_packet_hdr_t *packet_hdr = ptr;
payload = packet_hdr->payload;
+ } else if (pool->s.buf_type == ODP_BUFFER_TYPE_TIMEOUT) {
+ odp_timeout_hdr_t *timeout_hdr = ptr;
+ payload = timeout_hdr->payload;
+ } else if (pool->s.buf_type != ODP_BUFFER_TYPE_RAW) {
+ ODP_ERR("Unknown buffer type %u\n", pool->s.buf_type);
+ abort();
}
memset(hdr, 0, size);
@@ -255,6 +262,8 @@ static void link_bufs(pool_entry_t *pool)
hdr_size = sizeof(odp_buffer_hdr_t);
else if (buf_type == ODP_BUFFER_TYPE_PACKET)
hdr_size = sizeof(odp_packet_hdr_t);
+ else if (buf_type == ODP_BUFFER_TYPE_TIMEOUT)
+ hdr_size = sizeof(odp_timeout_hdr_t);
else {
ODP_ERR("odp_buffer_pool_create: Bad type %i\n",
buf_type);
@@ -312,7 +321,7 @@ static void link_bufs(pool_entry_t *pool)
add_chunk(pool, chunk_hdr);
chunk_hdr = (odp_buffer_chunk_hdr_t *)index_to_hdr(pool,
- index);
+ index);
pool->s.num_bufs += ODP_BUFS_PER_CHUNK;
pool_size -= ODP_BUFS_PER_CHUNK * size;
}
@@ -320,9 +329,9 @@ static void link_bufs(pool_entry_t *pool)
odp_buffer_pool_t odp_buffer_pool_create(const char *name,
- void *base_addr, uint64_t size,
- size_t buf_size, size_t buf_align,
- int buf_type)
+ void *base_addr, uint64_t size,
+ size_t buf_size, size_t buf_align,
+ int buf_type)
{
odp_buffer_pool_t i;
pool_entry_t *pool;
b/platform/linux-generic/source/odp_timer.c
deleted file mode 100644
@@ -1,332 +0,0 @@
-/* Copyright (c) 2013, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#include <odp_timer.h>
-#include <odp_internal.h>
-#include <odp_atomic.h>
-#include <odp_spinlock.h>
-#include <odp_sync.h>
-#include <odp_debug.h>
-
-#include <signal.h>
-#include <time.h>
-
-#include <string.h>
-
-#define NUM_TIMERS 1
-#define MAX_TICKS 1024
-#define RESOLUTION_NS 1000000
-
-struct timeout_t;
-
-typedef struct timeout_t {
- struct timeout_t *next;
- int timer_id;
- int tick;
- uint64_t tmo_tick;
- odp_queue_t queue;
- odp_buffer_t buf;
- odp_buffer_t tmo_buf;
-} timeout_t;
-
-typedef struct {
- odp_spinlock_t lock;
- timeout_t *list;
-} tick_t;
-
-typedef struct {
- volatile int active;
- volatile uint64_t cur_tick;
- timer_t timerid;
- odp_buffer_pool_t pool;
- uint64_t resolution_ns;
- uint64_t max_ticks;
- tick_t tick[MAX_TICKS];
-
-} timer_ring_t;
-
-typedef struct {
- timer_ring_t timer[NUM_TIMERS];
- odp_atomic_int_t num_timers;
-} timer_global_t;
-
-/* Global */
-timer_global_t odp_timer;
-
-static void add_tmo(tick_t *tick, timeout_t *tmo)
-{
- odp_spinlock_lock(&tick->lock);
-
- tmo->next = tick->list;
- tick->list = tmo;
-
- odp_spinlock_unlock(&tick->lock);
-}
-
-static timeout_t *rem_tmo(tick_t *tick)
-{
- timeout_t *tmo;
-
- odp_spinlock_lock(&tick->lock);
-
- tmo = tick->list;
-
- if (tmo)
- tick->list = tmo->next;
-
- odp_spinlock_unlock(&tick->lock);
-
- if (tmo)
- tmo->next = NULL;
-
- return tmo;
-}
-
-/**
- * Search and delete tmo entry from timeout list
- * return -1 : on error.. handle not in list
- * 0 : success
- */
-static int find_and_del_tmo(timeout_t **tmo, odp_timer_tmo_t handle)
-{
- timeout_t *cur, *prev;
- prev = NULL;
-
- for (cur = *tmo; cur != NULL; prev = cur, cur = cur->next) {
- if (cur->tmo_buf == handle) {
- if (prev == NULL)
- *tmo = cur->next;
- else
- prev->next = cur->next;
-
- break;
- }
- }
-
- if (!cur)
- /* couldn't find tmo in list */
- return -1;
-
- /* application to free tmo_buf provided by absolute_tmo call */
- return 0;
-}
-
-int odp_timer_cancel_tmo(odp_timer_t timer, odp_timer_tmo_t tmo)
-{
- int id;
- uint64_t tick_idx;
- timeout_t *cancel_tmo;
- tick_t *tick;
-
- /* get id */
- id = timer - 1;
-
- /* get tmo_buf to cancel */
- cancel_tmo = (timeout_t *)odp_buffer_addr(tmo);
- tick_idx = cancel_tmo->tick;
- tick = &odp_timer.timer[id].tick[tick_idx];
-
- odp_spinlock_lock(&tick->lock);
- /* search and delete tmo from tick list */
- if (find_and_del_tmo(&tick->list, tmo) != 0) {
- odp_spinlock_unlock(&tick->lock);
- ODP_DBG("Couldn't find the tmo (%d) in tick list\n", (int)tmo);
- return -1;
- }
- odp_spinlock_unlock(&tick->lock);
-
- return 0;
-}
-
-static void notify_function(union sigval sigval)
-{
- (void) sigval;
- uint64_t cur_tick;
- timeout_t *tmo;
- tick_t *tick;
-
- if (odp_timer.timer[0].active == 0)
- return;
-
- /* ODP_DBG("Tick\n"); */
-
- cur_tick = odp_timer.timer[0].cur_tick++;
-
- tick = &odp_timer.timer[0].tick[cur_tick % MAX_TICKS];
-
- while ((tmo = rem_tmo(tick)) != NULL) {
- odp_queue_t queue;
- odp_buffer_t buf;
-
- queue = tmo->queue;
- buf = tmo->buf;
-
- if (buf != tmo->tmo_buf)
- odp_buffer_free(tmo->tmo_buf);
-
- odp_queue_enq(queue, buf);
- }
-}
-
-static void timer_init(void)
-{
- struct sigevent sigev;
- struct itimerspec ispec;
-
- ODP_DBG("Timer thread starts\n");
-
- memset(&sigev, 0, sizeof(sigev));
- memset(&ispec, 0, sizeof(ispec));
-
- sigev.sigev_notify = SIGEV_THREAD;
- sigev.sigev_notify_function = notify_function;
-
- if (timer_create(CLOCK_MONOTONIC, &sigev,
- &odp_timer.timer[0].timerid)) {
- ODP_DBG("Timer create failed\n");
- return;
- }
-
- ispec.it_interval.tv_sec = 0;
- ispec.it_interval.tv_nsec = RESOLUTION_NS;
- ispec.it_value.tv_sec = 0;
- ispec.it_value.tv_nsec = RESOLUTION_NS;
-
- if (timer_settime(odp_timer.timer[0].timerid, 0, &ispec, NULL)) {
- ODP_DBG("Timer set failed\n");
- return;
- }
-
- return;
-}
-
-int odp_timer_init_global(void)
-{
- int i;
-
- memset(&odp_timer, 0, sizeof(timer_global_t));
-
- for (i = 0; i < MAX_TICKS; i++)
- odp_spinlock_init(&odp_timer.timer[0].tick[i].lock);
-
- timer_init();
-
- return 0;
-}
-
-odp_timer_t odp_timer_create(const char *name, odp_buffer_pool_t pool,
- uint64_t resolution, uint64_t min_tmo,
- uint64_t max_tmo)
-{
- uint32_t id;
- (void) name; (void) resolution; (void) min_tmo; (void) max_tmo;
-
- if (odp_timer.num_timers >= NUM_TIMERS)
- return ODP_TIMER_INVALID;
-
- id = odp_atomic_fetch_inc_int(&odp_timer.num_timers);
- if (id >= NUM_TIMERS)
- return ODP_TIMER_INVALID;
-
- odp_timer.timer[id].pool = pool;
- odp_timer.timer[id].resolution_ns = RESOLUTION_NS;
- odp_timer.timer[id].max_ticks = MAX_TICKS;
-
- odp_sync_stores();
-
- odp_timer.timer[id].active = 1;
-
- return id + 1;
-}
-
-odp_timer_tmo_t odp_timer_absolute_tmo(odp_timer_t timer, uint64_t tmo_tick,
- odp_queue_t queue, odp_buffer_t buf)
-{
- int id;
- uint64_t tick;
- uint64_t cur_tick;
- timeout_t *new_tmo;
- odp_buffer_t tmo_buf;
-
- id = timer - 1;
-
- cur_tick = odp_timer.timer[id].cur_tick;
- if (tmo_tick <= cur_tick) {
- ODP_DBG("timeout too close\n");
- return ODP_TIMER_TMO_INVALID;
- }
-
- tick = tmo_tick - cur_tick;
- if (tick > MAX_TICKS) {
- ODP_DBG("timeout too far\n");
- return ODP_TIMER_TMO_INVALID;
- }
-
- tick = (cur_tick + tick) % MAX_TICKS;
-
- tmo_buf = odp_buffer_alloc(odp_timer.timer[id].pool);
- if (tmo_buf == ODP_BUFFER_INVALID) {
- ODP_DBG("alloc failed\n");
- return ODP_TIMER_TMO_INVALID;
- }
-
- new_tmo = (timeout_t *)odp_buffer_addr(tmo_buf);
-
- new_tmo->timer_id = id;
- new_tmo->tick = (int)tick;
- new_tmo->tmo_tick = tmo_tick;
- new_tmo->queue = queue;
- new_tmo->tmo_buf = tmo_buf;
-
- if (buf != ODP_BUFFER_INVALID)
- new_tmo->buf = buf;
- else
- new_tmo->buf = tmo_buf;
-
- add_tmo(&odp_timer.timer[id].tick[tick], new_tmo);
-
- return tmo_buf;
-}
-
-uint64_t odp_timer_tick_to_ns(odp_timer_t timer, uint64_t ticks)
-{
- uint32_t id;
-
- id = timer - 1;
- return ticks * odp_timer.timer[id].resolution_ns;
-}
-
-uint64_t odp_timer_ns_to_tick(odp_timer_t timer, uint64_t ns)
-{
- uint32_t id;
-
- id = timer - 1;
- return ns / odp_timer.timer[id].resolution_ns;
-}
-
-uint64_t odp_timer_resolution(odp_timer_t timer)
-{
- uint32_t id;
-
- id = timer - 1;
- return odp_timer.timer[id].resolution_ns;
-}
-
-uint64_t odp_timer_maximum_tmo(odp_timer_t timer)
-{
- uint32_t id;
-
- id = timer - 1;
- return odp_timer.timer[id].max_ticks;
-}
-
-uint64_t odp_timer_current_tick(odp_timer_t timer)
-{
- uint32_t id;
-
- id = timer - 1;
- return odp_timer.timer[id].cur_tick;
-}
b/platform/linux-generic/source/odp_timer.cc
new file mode 100644
@@ -0,0 +1,554 @@
+/* Copyright (c) 2014, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+
+/**
+ * @file
+ *
+ * ODP timer service
+ *
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <signal.h>
+#include "odp_std_types.h"
+#include "odp_buffer.h"
+#include "odp_buffer_pool.h"
+#include "odp_queue.h"
+#include "odp_hints.h"
+#include "odp_sync.h"
+#include "odp_spinlock.h"
+#include "odp_debug.h"
+#include "odp_align.h"
+#include "odp_shared_memory.h"
+#include "odp_hints.h"
+#include "odp_internal.h"
+#include "odp_timer.h"
+#include "odp_timer_internal.h"
+#include "priority_queue.h"
+
+#define RESOLUTION_NS 1000000U
+
+struct odp_timer : public pq_element {
+public:
+ odp_timer_tick_t req_tmo;//Requested timeout tick
+ odp_buffer_t tmo_buf;//ODP_BUFFER_INVALID if timeout enqueued
+ odp_queue_t queue;//ODP_QUEUE_INVALID if timer is free
+ uint32_t tag;//Reusing tag as next pointer/index when timer is free
+ uint32_t gc;
+ bool user_buf; //User-defined buffer?
+ //Constructor for array of objects
+ odp_timer() :
+ pq_element(),
+ tmo_buf(ODP_BUFFER_INVALID),
+ queue(ODP_QUEUE_INVALID) {
+ gc = 0;
+ }
+ ~odp_timer() {
+ assert(tmo_buf == ODP_BUFFER_INVALID);
+ assert(queue == ODP_QUEUE_INVALID);
+ }
+ //Setup when timer is allocated
+ void setup(odp_queue_t _q, void *_up, odp_buffer_t _tmo) {
+ req_tmo = INVALID_PRIORITY;
+ tmo_buf = _tmo;
+ queue = _q;
+ tag = 0;
+ user_buf = false;
+ //Initialise constant fields of timeout event
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>
+ (odp_buf_to_hdr(tmo_buf));
+ tmo_hdr->gc = gc;
+ tmo_hdr->timer = this;
+ tmo_hdr->user_ptr = _up;
+ //tmo_hdr->tag set at expiration time
+ //tmo_hdr->expiration set at expiration time
+ assert(queue != ODP_QUEUE_INVALID);
+ }
+ //Teardown when timer is freed
+ odp_buffer_t teardown() {
+ //Increase generation count to make pending timeout orphaned
+ ++gc;
+ odp_buffer_t buf = tmo_buf;
+ tmo_buf = ODP_BUFFER_INVALID;
+ queue = ODP_QUEUE_INVALID;
+ return buf;
+ }
+ inline uint32_t get_next_free() {
+ assert(queue == ODP_QUEUE_INVALID);
+ return tag;
+ }
+ inline void set_next_free(uint32_t nf) {
+ assert(queue == ODP_QUEUE_INVALID);
+ tag = nf;
+ }
+ inline void expire(odp_timer_tick_t tick, bool shared) {
+ //Timer expired, is there actually any timeout event
+ //we can enqueue?
+ if (odp_likely(tmo_buf != ODP_BUFFER_INVALID)) {
+ //Swap out timeout buffer
+ odp_buffer_t buf = tmo_buf;
+ tmo_buf = ODP_BUFFER_INVALID;
+ if (odp_likely(!user_buf)) {
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>
+ (odp_buf_to_hdr(buf));
+ //Copy tag from timer
+ //and actual expiration tick from timer pool
+ tmo_hdr->tag = tag;
+ tmo_hdr->expiration = tick;
+ }
+ if (shared)
+ odp_sync_stores();
+ //Else don't touch user-defined buffer
+ int rc = odp_queue_enq(queue, buf);
+ if (rc != 0) {
+ abort();
+ }
+ }
+ //No, timeout event already enqueued
+ }
+};
+
+//Forward declarations
+static void timer_init(odp_timer_pool *tp);
+static void timer_exit(odp_timer_pool *tp);
+
+struct odp_timer_pool : public priority_queue {
+public :
+ uint64_t tick;
+ bool shared;
+ odp_spinlock_t lock;
+ const char *name;
+ odp_buffer_pool_t buf_pool;
+ uint64_t resolution_ns;
+ uint64_t max_timeout;
+ odp_timer *timers;
+ uint32_t num_alloc;//Current number of allocated timers
+ uint32_t max_timers;//Max number of timers
+ uint32_t first_free;//0..max_timers-1 => free timer
+ timer_t timerid;
+ odp_timer_pool_clock_source_t clk_src;
+
+ odp_timer_pool(const char *_n,
+ odp_buffer_pool_t _bp,
+ uint64_t _r,
+ uint64_t _m,
+ uint32_t _mt,
+ bool _s,
+ odp_timer_pool_clock_source_t _cs) :
+ priority_queue(_mt),
+ tick(0),
+ shared(_s),
+ name(strdup(_n)),
+ buf_pool(_bp),
+ resolution_ns(_r),
+ max_timeout(_m),
+ num_alloc(0),
+ max_timers(_mt),
+ first_free(0),
+ clk_src(_cs) {
+ timers = new odp_timer[_mt];
+ for (uint32_t i = 0; i < max_timers; i++)
+ timers[i].set_next_free(i + 1);
+ odp_spinlock_init(&lock);
+ if (clk_src == ODP_CLOCK_DEFAULT)
+ timer_init(this);
+ //Make sure timer pool initialisation is globally observable
+ //before we return a pointer to it
+ odp_sync_stores();
+ }
+ ~odp_timer_pool() {
+ if (shared)
+ odp_spinlock_lock(&lock);
+ if (num_alloc != 0) {
+ //It's a programming error to attempt to destroy a
+ //timer pool which is still in use
+ ODP_ERR("%s: timers in use\n", name);
+ abort();
+ }
+ if (clk_src == ODP_CLOCK_DEFAULT)
+ timer_exit(this);
+ delete[] timers;
+ odp_sync_stores();
+ }
+ inline odp_timer *timer_alloc(odp_queue_t queue,
+ void *user_ptr,
+ odp_buffer_t tmo_buf) {
+ odp_timer *tim = ODP_TIMER_INVALID;
+ if (odp_likely(shared))
+ odp_spinlock_lock(&lock);
+ if (odp_likely(num_alloc < max_timers)) {
+ num_alloc++;
+ //Remove first unused timer from free list
+ assert(first_free != max_timers);
+ tim = &timers[first_free];
+ first_free = tim->get_next_free();
+ //Insert timer into priority queue
+ if (odp_unlikely(!register_element(tim))) {
+ //Unexpected internal error
+ abort();
+ }
+ //Create timer
+ tim->setup(queue, user_ptr, tmo_buf);
+ }
+ if (odp_likely(shared))
+ odp_spinlock_unlock(&lock);
+ return tim;
+ }
+ inline void timer_free(odp_timer *tim) {
+ if (odp_likely(shared))
+ odp_spinlock_lock(&lock);
+ //Destroy timer
+ odp_buffer_t buf = tim->teardown();
+ //Remove timer from priority queue
+ unregister_element(tim);
+ //Insert timer into free list
+ tim->set_next_free(first_free);
+ first_free = tim - &timers[0];
+ assert(num_alloc != 0);
+ num_alloc--;
+ if (odp_likely(shared))
+ odp_spinlock_unlock(&lock);
+ if (buf != ODP_BUFFER_INVALID)
+ odp_buffer_free(buf);
+ }
+ inline void timer_reset(odp_timer *tim, odp_timer_tick_t abs_tck) {
+ if (odp_likely(shared))
+ odp_spinlock_lock(&lock);
+ //Increase timer tag to make any pending timeout stale
+ tim->tag++;
+ //Save requested timeout
+ tim->req_tmo = abs_tck;
+ //Update timer position in priority queue
+ reset_element(tim, abs_tck);
+ if (odp_likely(shared))
+ odp_spinlock_unlock(&lock);
+ }
+ inline void timer_reset_w_buf(odp_timer *tim,
+ odp_timer_tick_t abs_tck,
+ odp_buffer_t user_buf) {
+ if (odp_likely(shared))
+ odp_spinlock_lock(&lock);
+ //Increase timer tag to make any pending timeout stale
+ tim->tag++;
+ //Save requested timeout
+ tim->req_tmo = abs_tck;
+ //Set flag indicating presence of user defined buffer
+ tim->user_buf = true;
+ //Swap in new buffer, get any old buffer pointer
+ odp_buffer_t old_buf = tim->tmo_buf;
+ tim->tmo_buf = user_buf;
+ //Update timer position in priority queue
+ reset_element(tim, abs_tck);
+ if (odp_likely(shared))
+ odp_spinlock_unlock(&lock);
+ //Free old buffer if present
+ if (odp_unlikely(old_buf != ODP_BUFFER_INVALID))
+ odp_buffer_free(old_buf);
+ }
+ inline void timer_cancel(odp_timer *tim) {
+ odp_buffer_t tmo_buf = ODP_BUFFER_INVALID;
+ if (odp_likely(shared))
+ odp_spinlock_lock(&lock);
+ if (odp_unlikely(tim->user_buf)) {
+ //Swap out old user buffer
+ tmo_buf = tim->tmo_buf;
+ tim->tmo_buf = ODP_BUFFER_INVALID;
+ tim->user_buf = false;
+ }
+ //Else a normal timer (no user-defined buffer)
+ //Increase timer tag to make any pending timeout stale
+ tim->tag++;
+ //Clear requested timeout
+ tim->req_tmo = INVALID_PRIORITY;
+ //Remove timer from the priority queue
+ deactivate_element(tim);
+ if (odp_likely(shared))
+ odp_spinlock_unlock(&lock);
+ //Free user-defined buffer if present
+ if (odp_unlikely(tmo_buf != ODP_BUFFER_INVALID))
+ odp_buffer_free(tmo_buf);
+ }
+};
+
+unsigned odp_timer_pool_expire(odp_timer_pool_t tpid, odp_timer_tick_t tick)
+{
+ if (tpid->shared)
+ odp_spinlock_lock(&tpid->lock);
+ unsigned nexp = 0;
+ odp_timer_t tim;
+ tpid->tick = tick;
+ while ((tim = static_cast<odp_timer_t>(tpid->release_element(tick))) !=
+ ODP_TIMER_INVALID) {
+ assert(tim->get_prio() <= tick);
+ tim->expire(tick, tpid->shared);
+ nexp++;
+ }
+ if (tpid->shared)
+ odp_spinlock_unlock(&tpid->lock);
+ return nexp;
+}
+
+//Functions that use Linux/POSIX per-process timers and related facilities
+static void timer_notify(union sigval sigval)
+{
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(sigval.sival_ptr);
+ uint64_t new_tick = tp->tick + 1;
+ (void)odp_timer_pool_expire(tp, new_tick);
+}
+
+static void timer_init(odp_timer_pool *tp)
+{
+ struct sigevent sigev;
+ struct itimerspec ispec;
+
+ ODP_DBG("Creating Linux timer for timer pool %s\n", tp->name);
+
+ memset(&sigev, 0, sizeof sigev);
+ memset(&ispec, 0, sizeof ispec);
+
+ sigev.sigev_notify = SIGEV_THREAD;
+ sigev.sigev_notify_function = timer_notify;
+ sigev.sigev_value.sival_ptr = tp;
+
+ if (timer_create(CLOCK_MONOTONIC, &sigev, &tp->timerid)) {
+ perror("timer_create");
+ abort();
+ }
+
+ ispec.it_interval.tv_sec = 0;
+ ispec.it_interval.tv_nsec = RESOLUTION_NS;
+ ispec.it_value.tv_sec = 0;
+ ispec.it_value.tv_nsec = RESOLUTION_NS;
+
+ if (timer_settime(tp->timerid, 0, &ispec, NULL)) {
+ perror("timer_settime");
+ abort();
+ }
+}
+
+static void timer_exit(odp_timer_pool *tp)
+{
+ if (timer_delete(tp->timerid) != 0) {
+ perror("timer_delete");
+ abort();
+ }
+}
+
+odp_timer_pool_t
+odp_timer_pool_create(const char *name,
+ odp_buffer_pool_t buf_pool,
+ uint64_t resolution_ns,
+ uint64_t max_timeout,
+ uint32_t num_timers,
+ bool shared,
+ odp_timer_pool_clock_source_t clk_src)
+{
+ //Verify that buffer pool can be used for timeouts
+ odp_buffer_t buf = odp_buffer_alloc(buf_pool);
+ if (buf == ODP_BUFFER_INVALID) {
+ ODP_ERR("%s: Failed to allocate buffer\n", name);
+ abort();
+ }
+ if (odp_buffer_type(buf) != ODP_BUFFER_TYPE_TIMEOUT) {
+ ODP_ERR("%s: Buffer pool wrong type\n", name);
+ abort();
+ }
+ odp_buffer_free(buf);
+ odp_timer_pool_t tp = new odp_timer_pool(name, buf_pool, resolution_ns,
+ max_timeout, num_timers,
+ shared, clk_src);
+ return tp;
+}
+
+void odp_timer_pool_start(void)
+{
+ //Nothing to do here
+}
+
+void odp_timer_pool_destroy(odp_timer_pool_t tpid)
+{
+ delete tpid;
+}
+
+uint64_t odp_timer_tick_to_ns(odp_timer_pool_t tpid, odp_timer_tick_t ticks)
+{
+ return ticks * tpid->resolution_ns;
+}
+
+odp_timer_tick_t odp_timer_ns_to_tick(odp_timer_pool_t tpid, uint64_t ns)
+{
+ return (odp_timer_tick_t)(ns / tpid->resolution_ns);
+}
+
+odp_timer_tick_t odp_timer_current_tick(odp_timer_pool_t tpid)
+{
+ return tpid->tick;
+}
+
+uint64_t odp_timer_pool_query_conf(odp_timer_pool_t tpid,
+ odp_timer_pool_conf_t item)
+{
+ switch (item) {
+ case ODP_TIMER_NAME :
+ return (uint64_t)(tpid->name);
+ case ODP_TIMER_RESOLUTION :
+ return tpid->resolution_ns;
+ case ODP_TIMER_MAX_TMO :
+ return tpid->max_timeout;
+ case ODP_TIMER_NUM_TIMERS :
+ return tpid->max_timers;
+ case ODP_TIMER_SHARED :
+ return tpid->shared;
+ default :
+ return 0;
+ }
+}
+
+odp_timer_t odp_timer_alloc(odp_timer_pool_t tpid,
+ odp_queue_t queue,
+ void *user_ptr)
+{
+ //We check this because ODP_QUEUE_INVALID is used
+ //to indicate a free timer
+ if (odp_unlikely(queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("%s: Invalid queue identifier\n", tpid->name);
+ abort();
+ }
+ odp_buffer_t tmo_buf = odp_buffer_alloc(tpid->buf_pool);
+ if (odp_likely(tmo_buf != ODP_BUFFER_INVALID)) {
+ odp_timer *tim = tpid->timer_alloc(queue, user_ptr, tmo_buf);
+ if (tim != ODP_TIMER_INVALID) {
+ //Success
+ assert(tim->queue != ODP_QUEUE_INVALID);
+ return tim;
+ }
+ odp_buffer_free(tmo_buf);
+ assert(tim == ODP_TIMER_INVALID);
+ }
+ //Else failed to allocate timeout event
+ //TODO set errno = ENOMEM
+ return ODP_TIMER_INVALID;
+}
+
+void odp_timer_free(odp_timer_t tim)
+{
+ if (odp_unlikely(tim->queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("Invalid timer %p\n", tim);
+ abort();
+ }
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(tim->get_pq());
+ tp->timer_free(tim);
+}
+
+void odp_timer_set_abs_w_buf(odp_timer_t tim,
+ odp_timer_tick_t abs_tck,
+ odp_buffer_t user_buf)
+{
+ if (odp_unlikely(tim->queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("Invalid timer %p\n", tim);
+ abort();
+ }
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(tim->get_pq());
+ tp->timer_reset_w_buf(tim, abs_tck, user_buf);
+}
+
+void odp_timer_set_abs(odp_timer_t tim, odp_timer_tick_t abs_tck)
+{
+ if (odp_unlikely(tim->queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("Invalid timer %p\n", tim);
+ abort();
+ }
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(tim->get_pq());
+ tp->timer_reset(tim, abs_tck);
+}
+
+void odp_timer_set_rel(odp_timer_t tim, odp_timer_tick_t rel_tck)
+{
+ if (odp_unlikely(tim->queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("Invalid timer %p\n", tim);
+ abort();
+ }
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(tim->get_pq());
+ tp->timer_reset(tim, tp->tick + rel_tck);
+}
+
+void odp_timer_cancel(odp_timer_t tim)
+{
+ if (odp_unlikely(tim->queue == ODP_QUEUE_INVALID)) {
+ ODP_ERR("Invalid timer %p\n", tim);
+ abort();
+ }
+ odp_timer_pool *tp = static_cast<odp_timer_pool *>(tim->get_pq());
+ tp->timer_cancel(tim);
+}
+
+odp_timer_tmo_status_t odp_timer_tmo_status(odp_timer_tmo_t tmo_buf)
+{
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>(odp_buf_to_hdr(tmo_buf));
+ odp_timer *tim = tmo_hdr->timer;
+
+ //Compare generation count (gc) of timeout and parent timer (if any)
+ if (odp_unlikely(tim == ODP_TIMER_INVALID ||
+ tmo_hdr->gc != tim->gc)) {
+ //Generation counters differ => timeout is orphaned
+ return ODP_TMO_ORPHAN;
+ }
+ //Else gen-cnts match => parent timer exists
+
+ //Return timeout to timer so that it can be delivered again
+ tim->tmo_buf = tmo_buf;
+ //FIXME do we need some kind of synchronisation or locking here?
+
+ //Compare tags of timeout and parent timer
+ //Compare requested and actual timeout time
+ if (odp_likely(tim->tag == tmo_hdr->tag &&
+ tim->req_tmo <= tmo_hdr->expiration)) {
+ //Tags match, actual timeout is after requested => good!
+ return ODP_TMO_FRESH;
+ } else {//Tags don't match or actual timeout time is before requested
+ //Timer has been reset or cancelled and timeout is stale
+ //or timeout expired too early
+ if (tim->req_tmo != INVALID_PRIORITY) {
+ //Reset the timer for requested timeout
+ odp_timer_set_abs(tim, tim->req_tmo);
+ }
+ //Else timer was cancelled, do nothing
+ return ODP_TMO_STALE;
+ }
+}
+
+odp_timer_t odp_timer_get_handle(odp_timer_tmo_t tmo_buf)
+{
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>(odp_buf_to_hdr(tmo_buf));
+ return tmo_hdr->timer;
+}
+
+odp_timer_tick_t odp_timer_get_expiry(odp_timer_tmo_t tmo_buf)
+{
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>(odp_buf_to_hdr(tmo_buf));
+ return tmo_hdr->expiration;
+}
+
+void *odp_timer_get_userptr(odp_timer_tmo_t tmo_buf)
+{
+ odp_timeout_hdr_t *tmo_hdr =
+ static_cast<odp_timeout_hdr_t *>(odp_buf_to_hdr(tmo_buf));
+ return tmo_hdr->user_ptr;
+}
+
+int odp_timer_init_global()
+{
+ return 0;
+}
b/platform/linux-generic/source/priority_queue.cc
new file mode 100644
@@ -0,0 +1,322 @@
+#define NDEBUG /* Enabled by default by ODP build system */
+#include <assert.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <strings.h>
+#include <odp_hints.h>
+#include <odp_align.h>
+
+#include "priority_queue.h"
+
+
+#define NUM_CHILDREN 4
+#define CHILD(n) (NUM_CHILDREN * (n) + 1)
+#define PARENT(n) (((n) - 1) / NUM_CHILDREN)
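+
+//With NUM_CHILDREN == 4 the heap is laid out as
+//node 0 -> children 1..4, node 1 -> children 5..8, node 2 -> children 9..12, ...
+//so once heap[1] is cache line aligned, the four children of any node start at
+//a 64-byte boundary (sizeof(heap_node) is 16 on typical 64-bit targets)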
+
+//Internal nodes in the array
+struct heap_node
+{
+ class pq_element *elem;
+ pq_priority_t prio;//Copy of elem->prio so we avoid unnecessary
dereferencing
+};
+
+#define ALIGNMENT(p) (1U << ((unsigned)ffs((int)p) - 1U))
+
+priority_queue::priority_queue(uint32_t _max_elems) :
+ max_elems(_max_elems),
+ reg_elems(0),
+ num_elems(0)
+{
+ heap = org_ptr = new heap_node[_max_elems + 64 / sizeof(heap_node)];
+ assert((size_t)&heap[1] % 8 == 0);
+ //Increment base address until first child (index 1) is cache line aligned
+ //and thus all children (e.g. index 1-4) stored in the same cache line
+ //We are not interested in the alignment of heap[0] as this is a lone node
+ while ((size_t)&heap[1] % ODP_CACHE_LINE_SIZE != 0)
+ {
+ //Cast to ptr to struct member with the greatest alignment requirement
+ heap = (heap_node *)((pq_priority_t *)heap + 1);
+ }
+// printf("Alignment of heap[1]=%u\n", ALIGNMENT((size_t)&heap[1]));
+// printf("Alignment of heap[CHILD(1)]=%u\n",
ALIGNMENT((size_t)&heap[CHILD(1)]));
+ assert_heap();
+}
+
+priority_queue::~priority_queue()
+{
+ assert_heap();
+ delete[] org_ptr;
+}
+
+uint32_t
+priority_queue::assert_elem(uint32_t index, bool recurse)
+{
+#ifndef NDEBUG
+ uint32_t num = 1;
+ const pq_element *elem = heap[index].elem;
+ assert(elem->index == index);
+ assert(elem->prio == heap[index].prio);
+ uint32_t child = CHILD(index);
+ for (uint32_t i = 0; i < NUM_CHILDREN; i++, child++)
+ {
+ if (valid_index(child))
+ {
+ assert(heap[child].elem != NULL);
+ assert(heap[child].prio >= elem->prio);
+ if (recurse)
+ {
+ num += assert_elem(child, recurse);
+ }
+ }
+ }
+ return num;
+#else
+ (void)index;
+ (void)recurse;
+ return 0;
+#endif
+}
+
+void
+priority_queue::assert_heap()
+{
+#ifndef NDEBUG
+ uint32_t num = 0;
+ if (odp_likely(num_elems != 0))
+ {
+ assert(heap[0].elem != NULL);
+ num += assert_elem(0, true);
+ }
+ assert(num == num_elems);
+ for (unsigned i = 0; i < num_elems; i++)
+ {
+ assert(heap[i].elem != NULL);
+ assert(heap[i].prio != INVALID_PRIORITY);
+ }
+#endif
+}
+
+unsigned nswaps;
+
+//Bubble up to proper position
+void
+priority_queue::bubble_up(pq_element *elem)
+{
+ assert(heap[elem->index].elem == elem);
+ assert(heap[elem->index].prio == elem->prio);
+ uint32_t current = elem->index;
+ pq_priority_t prio = elem->prio;
+ assert(current == 0 || heap[PARENT(current)].elem != NULL);
+ //Move up into proper position
+ while (current != 0 && heap[PARENT(current)].prio > prio)
+ {
+ nswaps++;
+ uint32_t parent = PARENT(current);
+ assert(heap[parent].elem != NULL);
+ //Swap current with parent
+ //1) Move parent down
+ heap[current].elem = heap[parent].elem;
+ heap[current].prio = heap[parent].prio;
+ heap[current].elem->index = current;
+ //2) Move current up to parent
+ heap[parent].elem = elem;
+ heap[parent].prio = prio;
+ heap[parent].elem->index = parent;
+ //Continue moving elem until it is in the right place
+ current = parent;
+ }
+ assert_heap();
+}
+
+//Find the smallest child that is smaller than the specified priority
+//TODO very hot function!
+uint32_t priority_queue::smallest_child(uint32_t index, pq_priority_t val)
+{
+ uint32_t smallest = index;
+ uint32_t child = CHILD(index);
+#if 1
+ //Unroll loop when all children exist
+ if (odp_likely(valid_index(child + 3)))
+ {
+ if (heap[child + 0].prio < val) //TODO: cache misses!
+ {
+ val = heap[smallest = child + 0].prio;
+ }
+ if (heap[child + 1].prio < val)
+ {
+ val = heap[smallest = child + 1].prio;
+ }
+ if (heap[child + 2].prio < val)
+ {
+ val = heap[smallest = child + 2].prio;
+ }
+ if (heap[child + 3].prio < val)
+ {
+ val = heap[smallest = child + 3].prio;
+ }
+ return smallest;
+ }
+#endif
+ for (uint32_t i = 0; i < NUM_CHILDREN; i++)
+ {
+ if (odp_unlikely(!valid_index(child + i)))
+ break;
+ if (heap[child + i].prio < val)
+ {
+ smallest = child + i;
+ val = heap[smallest].prio;
+ }
+ }
+ return smallest;
+}
+
+//TODO very hot function, can it be optimised?
+void
+priority_queue::bubble_down(pq_element *elem)
+{
+ assert(heap[elem->index].elem == elem);
+ assert(heap[elem->index].prio == elem->prio);
+ uint32_t current = elem->index;
+ pq_priority_t prio = elem->prio;
+ for (;;)
+ {
+ uint32_t child = smallest_child(current, prio);
+ if (current == child)
+ {
+ //No smaller child, we are done
+ assert_heap();
+ return;
+ }
+ //Element larger than smaller child, must move down
+ nswaps++;
+ assert(heap[child].elem != NULL);
+ //1) Move child up to current
+ heap[current].elem = heap[child].elem;
+ heap[current].prio = heap[child].prio;
+ //2) Move current down to child
+ heap[child].elem = elem;
+ heap[child].prio = prio;
+ heap[child].elem->index = child;
+
+ heap[current].elem->index = current; //TODO cache misses!
+ //Continue moving element until it is in the right place
+ current = child;
+ }
+}
+
+bool
+priority_queue::register_element(pq_element *elem)
+{
+ if (odp_likely(reg_elems < max_elems))
+ {
+ elem->pq = this;
+ reg_elems++;
+ return true;
+ }
+ return false;
+}
+
+void
+priority_queue::unregister_element(pq_element *elem)
+{
+ assert(elem->pq == this);
+ if (elem->is_active())
+ {
+ deactivate_element(elem);
+ }
+ elem->pq = NULL;
+ reg_elems--;
+}
+
+void
+priority_queue::activate_element(pq_element *elem, pq_priority_t prio)
+{
+ assert(elem->pq == this);
+ //Insert element at end
+ uint32_t index = num_elems++;
+ heap[index].elem = elem;
+ heap[index].prio = prio;
+ elem->index = index;
+ elem->prio = prio;
+ bubble_up(elem);
+}
+
+void
+priority_queue::deactivate_element(pq_element *elem)
+{
+ assert(elem->pq == this);
+ if (odp_likely(elem->is_active()))
+ {
+ //Swap element with last element
+ uint32_t current = elem->index;
+ uint32_t last = --num_elems;
+ if (odp_likely(last != current))
+ {
+ //Move last element to current
+ heap[current].elem = heap[last].elem;
+ heap[current].prio = heap[last].prio;
+ heap[current].elem->index = current;
+ //Remove last element
+#if 0
+ heap[last].elem = NULL;
+ heap[last].prio = INVALID_PRIORITY;
+#endif
+ //Bubble down old 'last' element to its proper place
+ if (heap[current].prio < elem->prio)
+ {
+ bubble_up(heap[current].elem);
+ }
+ else
+ {
+ bubble_down(heap[current].elem);
+ }
+ }
+ else
+ {
+#if 0
+ heap[last].elem = NULL;
+ heap[last].prio = INVALID_PRIORITY;
+#endif
+ }
+ elem->index = INVALID_INDEX;
+ assert_heap();
+ }
+}
+
+void
+priority_queue::reset_element(pq_element *elem, pq_priority_t prio)
+{
+ assert(prio != INVALID_PRIORITY);
+ if (odp_likely(elem->is_active()))
+ {
+ assert(prio >= elem->prio);
+ elem->prio = prio;
+ heap[elem->index].prio = prio;//TODO cache misses here!
+ bubble_down(elem);
+ assert_heap();
+ }
+ else
+ {
+ activate_element(elem, prio);
+ }
+}
+
+pq_priority_t priority_queue::first_priority() const
+{
+ return num_elems != 0 ? heap[0].prio : INVALID_PRIORITY;
+}
+
+pq_element *
+priority_queue::release_element(pq_priority_t threshold)
+{
+ if (odp_likely(num_elems != 0 && heap[0].prio <= threshold))
+ {
+ pq_element *elem = heap[0].elem;
+ //Remove element from heap
+ deactivate_element(elem);
+ assert(elem->prio <= threshold);
+ return elem;
+ }
+ return NULL;
+}
b/platform/linux-generic/source/priority_queue.h
new file mode 100644
@@ -0,0 +1,101 @@
+#ifndef _PRIORITY_QUEUE_H
+#define _PRIORITY_QUEUE_H
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdbool.h>
+
+#define INVALID_INDEX ~0U
+#define INVALID_PRIORITY ((pq_priority_t)~0ULL)
+
+typedef uint64_t pq_priority_t;
+
+class priority_queue;
+
+/* The user gets a pointer to this structure */
+class pq_element
+{
+ /* Set when pq_element registered with priority queue */
+ priority_queue *pq;
+ uint32_t index;/* Index into heap array */
+ pq_priority_t prio;
+public:
+
+ inline pq_element()
+ {
+ pq = NULL;
+ index = INVALID_INDEX;
+ prio = 0U;
+ }
+ inline ~pq_element()
+ {
+ assert(index == INVALID_INDEX);
+ }
+ inline priority_queue *get_pq() const
+ {
+ return pq;
+ }
+ inline pq_priority_t get_prio() const
+ {
+ return prio;
+ }
+ inline uint32_t get_index() const
+ {
+ return index;
+ }
+ inline bool is_active() const
+ {
+ return index != INVALID_INDEX;
+ }
+ friend class priority_queue;
+};
+
+
+struct heap_node;
+
+class priority_queue
+{
+ uint32_t max_elems;/* Maximum number of elements in heap */
+ /* Number of registered elements (active + inactive) */
+ uint32_t reg_elems;
+ uint32_t num_elems;/* Number of active elements */
+ heap_node *heap;
+ heap_node *org_ptr;
+
+ uint32_t smallest_child(uint32_t, pq_priority_t);
+ void bubble_down(pq_element *);
+ void bubble_up(pq_element *);
+ inline bool valid_index(uint32_t idx)
+ {
+ return idx < num_elems;
+ }
+public:
+ priority_queue(uint32_t _max_elems);
+ ~priority_queue();
+
+ /* Register pq_element with priority queue */
+ /* Return false if priority queue full */
+ bool register_element(pq_element *);
+ /* Activate and add pq_element to priority queue */
+ /* Element must be disarmed */
+ void activate_element(pq_element *, pq_priority_t _val);
+ /* Reset (increase) priority for pq_element */
+ /* Element may be active or inactive (released) */
+ void reset_element(pq_element *, pq_priority_t _val);
+ /* Deactivate and remove element from priority queue */
+ /* Element may be active or inactive (released) */
+ void deactivate_element(pq_element *);
+ /* Unregister pq_element */
+ void unregister_element(pq_element *);
+
+ /* Return priority of first element (lowest numerical value) */
+ pq_priority_t first_priority() const;
+ /* Deactivate and return the first element if its prio is <= threshold */
+ pq_element *release_element(pq_priority_t threshold);
+
+ uint32_t assert_elem(uint32_t index, bool recurse);
+ void assert_heap();
+};
+
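+/*
+ * Usage sketch (illustrative; the timer implementation derives odp_timer from
+ * pq_element, a plain pq_element is used here):
+ *
+ *	priority_queue pq(1024);
+ *	pq_element elem;
+ *	pq.register_element(&elem);      //reserve a slot in the queue
+ *	pq.activate_element(&elem, 100); //insert with priority (e.g. expiration tick) 100
+ *	pq.release_element(100);         //returns &elem since 100 <= 100
+ *	pq.unregister_element(&elem);
+ */
+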
+#endif /* _PRIORITY_QUEUE_H */