[PATCHv8,08/12] linux-generic: ring: remove ODPH_ prefix

Message ID: 1439895698-18597-9-git-send-email-maxim.uvarov@linaro.org
State: New

Commit Message

Maxim Uvarov Aug. 18, 2015, 11:01 a.m. UTC
Drop the ODPH_ prefix from the internal ring definitions (RING_NAMESIZE,
RING_QUEUE_FIXED/VARIABLE, RING_F_SP_ENQ, RING_F_SC_DEQ, RING_SHM_PROC,
RING_NO_LIST, RING_QUOT_EXCEED, RING_SZ_MASK) and switch ring.c from the
helper's odph_debug.h macros to ODP_DBG/ODP_ERR from odp_debug_internal.h.

Signed-off-by: Maxim Uvarov <maxim.uvarov@linaro.org>
---
 platform/linux-generic/include/odp_ring_internal.h | 24 +++----
 platform/linux-generic/pktio/ring.c                | 84 +++++++++++-----------
 platform/linux-generic/test/ring/odp_ring_test.c   | 16 ++---
 3 files changed, 62 insertions(+), 62 deletions(-)
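
For reference, a minimal sketch of how the renamed internal ring API is
exercised after this patch. It is not part of the change itself; the calls
and flag names are taken from the diff below, and the ring name, size and
object tables are illustrative only:

#include <odp_ring_internal.h>

static int ring_rename_smoke_test(void)
{
	void *objs[8] = { NULL };	/* dummy object pointers */
	void *deq[8];
	shm_ring_t *r;
	int ret;

	/* count must be a power of 2 and must not exceed RING_SZ_MASK */
	r = shm_ring_create("demo_ring", 1024,
			    RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (r == NULL)
		return -1;

	/* burst calls return the number of objects handled; as in the
	 * test code, mask the result with RING_SZ_MASK before comparing */
	ret = shm_ring_sp_enqueue_burst(r, objs, 8);
	if ((ret & RING_SZ_MASK) != 8)
		return -1;

	ret = shm_ring_sc_dequeue_burst(r, deq, 8);
	if ((ret & RING_SZ_MASK) != 8)
		return -1;

	shm_ring_dump(r);	/* now logs via ODP_DBG instead of ODPH_DBG */
	return 0;
}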

Patch

diff --git a/platform/linux-generic/include/odp_ring_internal.h b/platform/linux-generic/include/odp_ring_internal.h
index 04f2b25..c3c2790 100644
--- a/platform/linux-generic/include/odp_ring_internal.h
+++ b/platform/linux-generic/include/odp_ring_internal.h
@@ -89,8 +89,8 @@ 
  *
  */
 
-#ifndef ODPH_RING_H_
-#define ODPH_RING_H_
+#ifndef RING_H_
+#define RING_H_
 
 #ifdef __cplusplus
 extern "C" {
@@ -104,14 +104,14 @@  extern "C" {
 #include <sys/queue.h>
 
 enum shm_ring_queue_behavior {
-	ODPH_RING_QUEUE_FIXED = 0, /**< Enq/Deq a fixed number
+	RING_QUEUE_FIXED = 0, /**< Enq/Deq a fixed number
 				of items from a ring */
-	ODPH_RING_QUEUE_VARIABLE   /**< Enq/Deq as many items
+	RING_QUEUE_VARIABLE   /**< Enq/Deq as many items
 				a possible from ring */
 };
 
 
-#define ODPH_RING_NAMESIZE 32 /**< The maximum length of a ring name. */
+#define RING_NAMESIZE 32 /**< The maximum length of a ring name. */
 
 /**
  * An ODP ring structure.
@@ -128,7 +128,7 @@  typedef struct shm_ring {
 	TAILQ_ENTRY(shm_ring) next;
 
 	/** @private Name of the ring. */
-	char name[ODPH_RING_NAMESIZE];
+	char name[RING_NAMESIZE];
 	/** @private Flags supplied at creation. */
 	int flags;
 
@@ -156,13 +156,13 @@  typedef struct shm_ring {
 } shm_ring_t;
 
 
-#define ODPH_RING_F_SP_ENQ (1 << 0) /* The default enqueue is "single-producer".*/
-#define ODPH_RING_F_SC_DEQ (1 << 1) /* The default dequeue is "single-consumer".*/
-#define ODPH_RING_SHM_PROC (1 << 2) /* If set - ring is visible from different
+#define RING_F_SP_ENQ (1 << 0) /* The default enqueue is "single-producer".*/
+#define RING_F_SC_DEQ (1 << 1) /* The default dequeue is "single-consumer".*/
+#define RING_SHM_PROC (1 << 2) /* If set - ring is visible from different
 				    processes. Default is thread visible.     */
-#define ODPH_RING_NO_LIST  (1 << 3) /* Do not link ring to linked list. */
-#define ODPH_RING_QUOT_EXCEED (1 << 31)  /* Quota exceed for burst ops */
-#define ODPH_RING_SZ_MASK  (unsigned)(0x0fffffff) /* Ring size mask */
+#define RING_NO_LIST  (1 << 3) /* Do not link ring to linked list. */
+#define RING_QUOT_EXCEED (1 << 31)  /* Quota exceed for burst ops */
+#define RING_SZ_MASK  (unsigned)(0x0fffffff) /* Ring size mask */
 
 
 /**
diff --git a/platform/linux-generic/pktio/ring.c b/platform/linux-generic/pktio/ring.c
index fed7f63..07ab13e 100644
--- a/platform/linux-generic/pktio/ring.c
+++ b/platform/linux-generic/pktio/ring.c
@@ -78,7 +78,7 @@ 
 #include <string.h>
 #include <odp/rwlock.h>
 #include <odp_ring_internal.h>
-#include <odph_debug.h>
+#include <odp_debug_internal.h>
 
 static TAILQ_HEAD(, shm_ring) odp_ring_list;
 
@@ -155,21 +155,21 @@  void shm_ring_tailq_init(void)
 shm_ring_t *
 shm_ring_create(const char *name, unsigned count, unsigned flags)
 {
-	char ring_name[ODPH_RING_NAMESIZE];
+	char ring_name[RING_NAMESIZE];
 	shm_ring_t *r;
 	size_t ring_size;
 	uint32_t shm_flag;
 	odp_shm_t shm;
 
-	if (flags & ODPH_RING_SHM_PROC)
+	if (flags & RING_SHM_PROC)
 		shm_flag = ODP_SHM_PROC;
 	else
 		shm_flag = 0;
 
 	/* count must be a power of 2 */
-	if (!RING_VAL_IS_POWER_2(count) || (count > ODPH_RING_SZ_MASK)) {
-		ODPH_ERR("Requested size is invalid, must be power of 2, and do not exceed the size limit %u\n",
-			 ODPH_RING_SZ_MASK);
+	if (!RING_VAL_IS_POWER_2(count) || (count > RING_SZ_MASK)) {
+		ODP_ERR("Requested size is invalid, must be power of 2, and do not exceed the size limit %u\n",
+			 RING_SZ_MASK);
 		return NULL;
 	}
 
@@ -188,8 +188,8 @@  shm_ring_create(const char *name, unsigned count, unsigned flags)
 		snprintf(r->name, sizeof(r->name), "%s", name);
 		r->flags = flags;
 		r->prod.watermark = count;
-		r->prod.sp_enqueue = !!(flags & ODPH_RING_F_SP_ENQ);
-		r->cons.sc_dequeue = !!(flags & ODPH_RING_F_SC_DEQ);
+		r->prod.sp_enqueue = !!(flags & RING_F_SP_ENQ);
+		r->cons.sc_dequeue = !!(flags & RING_F_SC_DEQ);
 		r->prod.size = count;
 		r->cons.size = count;
 		r->prod.mask = count-1;
@@ -199,10 +199,10 @@  shm_ring_create(const char *name, unsigned count, unsigned flags)
 		r->prod.tail = 0;
 		r->cons.tail = 0;
 
-		if (!(flags & ODPH_RING_NO_LIST))
+		if (!(flags & RING_NO_LIST))
 			TAILQ_INSERT_TAIL(&odp_ring_list, r, next);
 	} else {
-		ODPH_ERR("Cannot reserve memory\n");
+		ODP_ERR("Cannot reserve memory\n");
 	}
 
 	odp_rwlock_write_unlock(&qlock);
@@ -255,7 +255,7 @@  int __shm_ring_mp_do_enqueue(shm_ring_t *r, void * const *obj_table,
 
 		/* check that we have enough room in ring */
 		if (odp_unlikely(n > free_entries)) {
-			if (behavior == ODPH_RING_QUEUE_FIXED) {
+			if (behavior == RING_QUEUE_FIXED) {
 				return -ENOBUFS;
 			} else {
 				/* No free entry available */
@@ -280,10 +280,10 @@  int __shm_ring_mp_do_enqueue(shm_ring_t *r, void * const *obj_table,
 
 	/* if we exceed the watermark */
 	if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
-		ret = (behavior == ODPH_RING_QUEUE_FIXED) ? -EDQUOT :
-				(int)(n | ODPH_RING_QUOT_EXCEED);
+		ret = (behavior == RING_QUEUE_FIXED) ? -EDQUOT :
+				(int)(n | RING_QUOT_EXCEED);
 	} else {
-		ret = (behavior == ODPH_RING_QUEUE_FIXED) ? 0 : n;
+		ret = (behavior == RING_QUEUE_FIXED) ? 0 : n;
 	}
 
 	/*
@@ -321,7 +321,7 @@  int __shm_ring_sp_do_enqueue(shm_ring_t *r, void * const *obj_table,
 
 	/* check that we have enough room in ring */
 	if (odp_unlikely(n > free_entries)) {
-		if (behavior == ODPH_RING_QUEUE_FIXED) {
+		if (behavior == RING_QUEUE_FIXED) {
 			return -ENOBUFS;
 		} else {
 			/* No free entry available */
@@ -340,10 +340,10 @@  int __shm_ring_sp_do_enqueue(shm_ring_t *r, void * const *obj_table,
 
 	/* if we exceed the watermark */
 	if (odp_unlikely(((mask + 1) - free_entries + n) > r->prod.watermark)) {
-		ret = (behavior == ODPH_RING_QUEUE_FIXED) ? -EDQUOT :
-			(int)(n | ODPH_RING_QUOT_EXCEED);
+		ret = (behavior == RING_QUEUE_FIXED) ? -EDQUOT :
+			(int)(n | RING_QUOT_EXCEED);
 	} else {
-		ret = (behavior == ODPH_RING_QUEUE_FIXED) ? 0 : n;
+		ret = (behavior == RING_QUEUE_FIXED) ? 0 : n;
 	}
 
 	/* Release our entries and the memory they refer to */
@@ -381,7 +381,7 @@  int __shm_ring_mc_do_dequeue(shm_ring_t *r, void **obj_table,
 
 		/* Set the actual entries for dequeue */
 		if (n > entries) {
-			if (behavior == ODPH_RING_QUEUE_FIXED) {
+			if (behavior == RING_QUEUE_FIXED) {
 				return -ENOENT;
 			} else {
 				if (odp_unlikely(entries == 0))
@@ -414,7 +414,7 @@  int __shm_ring_mc_do_dequeue(shm_ring_t *r, void **obj_table,
 	__atomic_thread_fence(__ATOMIC_RELEASE);
 	r->cons.tail = cons_next;
 
-	return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;
+	return behavior == RING_QUEUE_FIXED ? 0 : n;
 }
 
 /**
@@ -437,7 +437,7 @@  int __shm_ring_sc_do_dequeue(shm_ring_t *r, void **obj_table,
 	entries = prod_tail - cons_head;
 
 	if (n > entries) {
-		if (behavior == ODPH_RING_QUEUE_FIXED) {
+		if (behavior == RING_QUEUE_FIXED) {
 			return -ENOENT;
 		} else {
 			if (odp_unlikely(entries == 0))
@@ -456,7 +456,7 @@  int __shm_ring_sc_do_dequeue(shm_ring_t *r, void **obj_table,
 	DEQUEUE_PTRS();
 
 	r->cons.tail = cons_next;
-	return behavior == ODPH_RING_QUEUE_FIXED ? 0 : n;
+	return behavior == RING_QUEUE_FIXED ? 0 : n;
 }
 
 /**
@@ -466,7 +466,7 @@  int shm_ring_mp_enqueue_bulk(shm_ring_t *r, void * const *obj_table,
 				unsigned n)
 {
 	return __shm_ring_mp_do_enqueue(r, obj_table, n,
-					 ODPH_RING_QUEUE_FIXED);
+					 RING_QUEUE_FIXED);
 }
 
 /**
@@ -476,7 +476,7 @@  int shm_ring_sp_enqueue_bulk(shm_ring_t *r, void * const *obj_table,
 			     unsigned n)
 {
 	return __shm_ring_sp_do_enqueue(r, obj_table, n,
-					 ODPH_RING_QUEUE_FIXED);
+					 RING_QUEUE_FIXED);
 }
 
 /**
@@ -485,7 +485,7 @@  int shm_ring_sp_enqueue_bulk(shm_ring_t *r, void * const *obj_table,
 int shm_ring_mc_dequeue_bulk(shm_ring_t *r, void **obj_table, unsigned n)
 {
 	return __shm_ring_mc_do_dequeue(r, obj_table, n,
-					 ODPH_RING_QUEUE_FIXED);
+					 RING_QUEUE_FIXED);
 }
 
 /**
@@ -494,7 +494,7 @@  int shm_ring_mc_dequeue_bulk(shm_ring_t *r, void **obj_table, unsigned n)
 int shm_ring_sc_dequeue_bulk(shm_ring_t *r, void **obj_table, unsigned n)
 {
 	return __shm_ring_sc_do_dequeue(r, obj_table, n,
-					 ODPH_RING_QUEUE_FIXED);
+					 RING_QUEUE_FIXED);
 }
 
 /**
@@ -540,19 +540,19 @@  unsigned shm_ring_free_count(const shm_ring_t *r)
 /* dump the status of the ring on the console */
 void shm_ring_dump(const shm_ring_t *r)
 {
-	ODPH_DBG("ring <%s>@%p\n", r->name, r);
-	ODPH_DBG("  flags=%x\n", r->flags);
-	ODPH_DBG("  size=%" PRIu32 "\n", r->prod.size);
-	ODPH_DBG("  ct=%" PRIu32 "\n", r->cons.tail);
-	ODPH_DBG("  ch=%" PRIu32 "\n", r->cons.head);
-	ODPH_DBG("  pt=%" PRIu32 "\n", r->prod.tail);
-	ODPH_DBG("  ph=%" PRIu32 "\n", r->prod.head);
-	ODPH_DBG("  used=%u\n", shm_ring_count(r));
-	ODPH_DBG("  avail=%u\n", shm_ring_free_count(r));
+	ODP_DBG("ring <%s>@%p\n", r->name, r);
+	ODP_DBG("  flags=%x\n", r->flags);
+	ODP_DBG("  size=%" PRIu32 "\n", r->prod.size);
+	ODP_DBG("  ct=%" PRIu32 "\n", r->cons.tail);
+	ODP_DBG("  ch=%" PRIu32 "\n", r->cons.head);
+	ODP_DBG("  pt=%" PRIu32 "\n", r->prod.tail);
+	ODP_DBG("  ph=%" PRIu32 "\n", r->prod.head);
+	ODP_DBG("  used=%u\n", shm_ring_count(r));
+	ODP_DBG("  avail=%u\n", shm_ring_free_count(r));
 	if (r->prod.watermark == r->prod.size)
-		ODPH_DBG("  watermark=0\n");
+		ODP_DBG("  watermark=0\n");
 	else
-		ODPH_DBG("  watermark=%" PRIu32 "\n", r->prod.watermark);
+		ODP_DBG("  watermark=%" PRIu32 "\n", r->prod.watermark);
 }
 
 /* dump the status of all rings on the console */
@@ -576,7 +576,7 @@  shm_ring_t *shm_ring_lookup(const char *name)
 
 	odp_rwlock_read_lock(&qlock);
 	TAILQ_FOREACH(r, &odp_ring_list, next) {
-		if (strncmp(name, r->name, ODPH_RING_NAMESIZE) == 0)
+		if (strncmp(name, r->name, RING_NAMESIZE) == 0)
 			break;
 	}
 	odp_rwlock_read_unlock(&qlock);
@@ -591,7 +591,7 @@  int shm_ring_mp_enqueue_burst(shm_ring_t *r, void * const *obj_table,
 			      unsigned n)
 {
 	return __shm_ring_mp_do_enqueue(r, obj_table, n,
-					 ODPH_RING_QUEUE_VARIABLE);
+					 RING_QUEUE_VARIABLE);
 }
 
 /**
@@ -601,7 +601,7 @@  int shm_ring_sp_enqueue_burst(shm_ring_t *r, void * const *obj_table,
 			      unsigned n)
 {
 	return __shm_ring_sp_do_enqueue(r, obj_table, n,
-					ODPH_RING_QUEUE_VARIABLE);
+					RING_QUEUE_VARIABLE);
 }
 
 /**
@@ -622,7 +622,7 @@  int shm_ring_enqueue_burst(shm_ring_t *r, void * const *obj_table,
 int shm_ring_mc_dequeue_burst(shm_ring_t *r, void **obj_table, unsigned n)
 {
 	return __shm_ring_mc_do_dequeue(r, obj_table, n,
-					ODPH_RING_QUEUE_VARIABLE);
+					RING_QUEUE_VARIABLE);
 }
 
 /**
@@ -631,7 +631,7 @@  int shm_ring_mc_dequeue_burst(shm_ring_t *r, void **obj_table, unsigned n)
 int shm_ring_sc_dequeue_burst(shm_ring_t *r, void **obj_table, unsigned n)
 {
 	return __shm_ring_sc_do_dequeue(r, obj_table, n,
-					 ODPH_RING_QUEUE_VARIABLE);
+					 RING_QUEUE_VARIABLE);
 }
 
 /**
diff --git a/platform/linux-generic/test/ring/odp_ring_test.c b/platform/linux-generic/test/ring/odp_ring_test.c
index 799f5c6..7b9a81e 100644
--- a/platform/linux-generic/test/ring/odp_ring_test.c
+++ b/platform/linux-generic/test/ring/odp_ring_test.c
@@ -88,7 +88,7 @@  static int test_ring_basic(shm_ring_t *r)
 	printf("enqueue 1 obj\n");
 	ret = shm_ring_sp_enqueue_burst(r, cur_src, 1);
 	cur_src += 1;
-	if ((ret & ODPH_RING_SZ_MASK) != 1) {
+	if ((ret & RING_SZ_MASK) != 1) {
 		LOG_ERR("sp_enq for 1 obj failed\n");
 		goto fail;
 	}
@@ -96,14 +96,14 @@  static int test_ring_basic(shm_ring_t *r)
 	printf("enqueue 2 objs\n");
 	ret = shm_ring_sp_enqueue_burst(r, cur_src, 2);
 	cur_src += 2;
-	if ((ret & ODPH_RING_SZ_MASK) != 2) {
+	if ((ret & RING_SZ_MASK) != 2) {
 		LOG_ERR("sp_enq for 2 obj failed\n");
 		goto fail;
 	}
 
 	printf("enqueue MAX_BULK objs\n");
 	ret = shm_ring_sp_enqueue_burst(r, cur_src, MAX_BULK);
-	if ((ret & ODPH_RING_SZ_MASK) != MAX_BULK) {
+	if ((ret & RING_SZ_MASK) != MAX_BULK) {
 		LOG_ERR("sp_enq for %d obj failed\n", MAX_BULK);
 		goto fail;
 	}
@@ -111,7 +111,7 @@  static int test_ring_basic(shm_ring_t *r)
 	printf("dequeue 1 obj\n");
 	ret = shm_ring_sc_dequeue_burst(r, cur_dst, 1);
 	cur_dst += 1;
-	if ((ret & ODPH_RING_SZ_MASK) != 1) {
+	if ((ret & RING_SZ_MASK) != 1) {
 		LOG_ERR("sc_deq for 1 obj failed\n");
 		goto fail;
 	}
@@ -119,7 +119,7 @@  static int test_ring_basic(shm_ring_t *r)
 	printf("dequeue 2 objs\n");
 	ret = shm_ring_sc_dequeue_burst(r, cur_dst, 2);
 	cur_dst += 2;
-	if ((ret & ODPH_RING_SZ_MASK) != 2) {
+	if ((ret & RING_SZ_MASK) != 2) {
 		LOG_ERR("sc_deq for 2 obj failed\n");
 		goto fail;
 	}
@@ -127,7 +127,7 @@  static int test_ring_basic(shm_ring_t *r)
 	printf("dequeue MAX_BULK objs\n");
 	ret = shm_ring_sc_dequeue_burst(r, cur_dst, MAX_BULK);
 	cur_dst += MAX_BULK;
-	if ((ret & ODPH_RING_SZ_MASK) != MAX_BULK) {
+	if ((ret & RING_SZ_MASK) != MAX_BULK) {
 		LOG_ERR("sc_deq for %d obj failed\n", MAX_BULK);
 		goto fail;
 	}
@@ -355,7 +355,7 @@  static void *test_ring(void *arg)
 {
 	ring_arg_t *parg = (ring_arg_t *)arg;
 	int thr;
-	char ring_name[ODPH_RING_NAMESIZE];
+	char ring_name[RING_NAMESIZE];
 	shm_ring_t *r;
 	int result = 0;
 
@@ -438,7 +438,7 @@  int main(int argc __attribute__((__unused__)),
 	rarg.thrdarg.testcase = ODP_RING_TEST_STRESS;
 	rarg.stress_type = one_enq_one_deq;
 /*	rarg.stress_type = multi_enq_multi_deq;*/
-	char ring_name[ODPH_RING_NAMESIZE];
+	char ring_name[RING_NAMESIZE];
 
 	printf("starting stess test type : %d..\n", rarg.stress_type);
 	/* create a ring */