@@ -109,177 +109,254 @@ _ODP_INLINE void odp_atomic_min_u32(odp_atomic_u32_t *atom, uint32_t new_min)
}
}
+#ifdef ODP_ATOMIC_U64_LOCK
+
+/**
+ * @internal
+ * CAS operation expression for the ATOMIC_OP macro
+ */
+#define ATOMIC_CAS_OP(ret_ptr, old_val, new_val) \
+({ \
+ if (atom->v == (old_val)) { \
+ atom->v = (new_val); \
+ *(ret_ptr) = 1; \
+ } else { \
+ *(ret_ptr) = 0; \
+ } \
+})
+
+/**
+ * @internal
+ * Helper macro for lock-based atomic operations on 64-bit integers
+ * @param[in,out] atom Pointer to the 64-bit atomic variable
+ * @param expr Expression used to update the variable.
+ * @return The old value of the variable.
+ */
+#define ATOMIC_OP(atom, expr) \
+({ \
+ uint64_t _old_val; \
+ /* Loop while lock is already taken, stop when lock becomes clear */ \
+ while (__atomic_test_and_set(&(atom)->lock, __ATOMIC_ACQUIRE)) \
+ (void)0; \
+ _old_val = (atom)->v; \
+ (expr); /* Perform whatever update is desired */ \
+ __atomic_clear(&(atom)->lock, __ATOMIC_RELEASE); \
+ _old_val; /* Return old value */ \
+})
+
_ODP_INLINE void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
{
atom->v = val;
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
__atomic_clear(&atom->lock, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_load_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, (void)0);
-#else
- return __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_store_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v = val);
-#else
- __atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_add_u64(odp_atomic_u64_t *atom,
uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v += val);
-#else
- return __atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v += val);
-#else
- (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_sub_u64(odp_atomic_u64_t *atom,
uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v -= val);
-#else
- return __atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v -= val);
-#else
- (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_inc_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v++);
-#else
- return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_inc_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v++);
-#else
- (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_fetch_dec_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v--);
-#else
- return __atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE void odp_atomic_dec_u64(odp_atomic_u64_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP(atom, atom->v--);
-#else
- (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
int ret;
*old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
return ret;
-#else
- return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
- 0 /* strong */,
- __ATOMIC_RELAXED,
- __ATOMIC_RELAXED);
-#endif
}
_ODP_INLINE uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom,
uint64_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP(atom, atom->v = new_val);
-#else
- return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
-#endif
}
-_ODP_INLINE void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_max)
+_ODP_INLINE uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
{
- uint64_t old_val;
+ return ATOMIC_OP(atom, (void)0);
+}
- old_val = odp_atomic_load_u64(atom);
+_ODP_INLINE void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)ATOMIC_OP(atom, atom->v = val);
+}
- while (new_max > old_val) {
- if (odp_atomic_cas_u64(atom, &old_val, new_max))
- break;
- }
+_ODP_INLINE void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)ATOMIC_OP(atom, atom->v += val);
}
-_ODP_INLINE void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_min)
+_ODP_INLINE void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
- uint64_t old_val;
+ (void)ATOMIC_OP(atom, atom->v -= val);
+}
- old_val = odp_atomic_load_u64(atom);
+_ODP_INLINE int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+}
- while (new_min < old_val) {
- if (odp_atomic_cas_u64(atom, &old_val, new_min))
- break;
- }
+_ODP_INLINE int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
}
-_ODP_INLINE uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom)
+_ODP_INLINE int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val,
+ uint64_t new_val)
+{
+ int ret;
+ *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
+ return ret;
+}
+
+#else /* !ODP_ATOMIC_U64_LOCK */
+
+_ODP_INLINE void odp_atomic_init_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ atom->v = val;
+}
+
+_ODP_INLINE uint64_t odp_atomic_load_u64(odp_atomic_u64_t *atom)
+{
+ return __atomic_load_n(&atom->v, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_store_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ __atomic_store_n(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_add_u64(odp_atomic_u64_t *atom,
+ uint64_t val)
+{
+ return __atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_add_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_sub_u64(odp_atomic_u64_t *atom,
+ uint64_t val)
+{
+ return __atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_sub_u64(odp_atomic_u64_t *atom, uint64_t val)
+{
+ (void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_inc_u64(odp_atomic_u64_t *atom)
+{
+ return __atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_inc_u64(odp_atomic_u64_t *atom)
+{
+ (void)__atomic_fetch_add(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_fetch_dec_u64(odp_atomic_u64_t *atom)
+{
+ return __atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE void odp_atomic_dec_u64(odp_atomic_u64_t *atom)
+{
+ (void)__atomic_fetch_sub(&atom->v, 1, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE int odp_atomic_cas_u64(odp_atomic_u64_t *atom, uint64_t *old_val,
+ uint64_t new_val)
+{
+ return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
+ 0 /* strong */,
+ __ATOMIC_RELAXED,
+ __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_xchg_u64(odp_atomic_u64_t *atom,
+ uint64_t new_val)
+{
+ return __atomic_exchange_n(&atom->v, new_val, __ATOMIC_RELAXED);
+}
+
+_ODP_INLINE uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
{
return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
}
-_ODP_INLINE void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
__atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
}
-_ODP_INLINE void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
(void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
}
-_ODP_INLINE void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
+_ODP_INLINE void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
{
(void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
}
-_ODP_INLINE int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val, uint32_t new_val)
+_ODP_INLINE int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
{
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
@@ -287,8 +364,8 @@ _ODP_INLINE int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
__ATOMIC_RELAXED);
}
-_ODP_INLINE int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val, uint32_t new_val)
+_ODP_INLINE int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val, uint64_t new_val)
{
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
@@ -296,9 +373,9 @@ _ODP_INLINE int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
__ATOMIC_RELAXED);
}
-_ODP_INLINE int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
- uint32_t *old_val,
- uint32_t new_val)
+_ODP_INLINE int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
+ uint64_t *old_val,
+ uint64_t new_val)
{
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
@@ -306,86 +383,78 @@ _ODP_INLINE int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
__ATOMIC_RELAXED);
}
-_ODP_INLINE uint64_t odp_atomic_load_acq_u64(odp_atomic_u64_t *atom)
+#endif /* !ODP_ATOMIC_U64_LOCK */
+
+_ODP_INLINE void odp_atomic_max_u64(odp_atomic_u64_t *atom, uint64_t new_max)
+{
+ uint64_t old_val;
+
+ old_val = odp_atomic_load_u64(atom);
+
+ while (new_max > old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_max))
+ break;
+ }
+}
+
+_ODP_INLINE void odp_atomic_min_u64(odp_atomic_u64_t *atom, uint64_t new_min)
+{
+ uint64_t old_val;
+
+ old_val = odp_atomic_load_u64(atom);
+
+ while (new_min < old_val) {
+ if (odp_atomic_cas_u64(atom, &old_val, new_min))
+ break;
+ }
+}
+
+_ODP_INLINE uint32_t odp_atomic_load_acq_u32(odp_atomic_u32_t *atom)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- return ATOMIC_OP(atom, (void)0);
-#else
return __atomic_load_n(&atom->v, __ATOMIC_ACQUIRE);
-#endif
}
-_ODP_INLINE void odp_atomic_store_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+_ODP_INLINE void odp_atomic_store_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v = val);
-#else
__atomic_store_n(&atom->v, val, __ATOMIC_RELEASE);
-#endif
}
-_ODP_INLINE void odp_atomic_add_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+_ODP_INLINE void odp_atomic_add_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v += val);
-#else
(void)__atomic_fetch_add(&atom->v, val, __ATOMIC_RELEASE);
-#endif
}
-_ODP_INLINE void odp_atomic_sub_rel_u64(odp_atomic_u64_t *atom, uint64_t val)
+_ODP_INLINE void odp_atomic_sub_rel_u32(odp_atomic_u32_t *atom, uint32_t val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- (void)ATOMIC_OP(atom, atom->v -= val);
-#else
(void)__atomic_fetch_sub(&atom->v, val, __ATOMIC_RELEASE);
-#endif
}
-_ODP_INLINE int odp_atomic_cas_acq_u64(odp_atomic_u64_t *atom,
- uint64_t *old_val, uint64_t new_val)
+_ODP_INLINE int odp_atomic_cas_acq_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_ACQUIRE,
__ATOMIC_RELAXED);
-#endif
}
-_ODP_INLINE int odp_atomic_cas_rel_u64(odp_atomic_u64_t *atom,
- uint64_t *old_val, uint64_t new_val)
+_ODP_INLINE int odp_atomic_cas_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val, uint32_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_RELEASE,
__ATOMIC_RELAXED);
-#endif
}
-_ODP_INLINE int odp_atomic_cas_acq_rel_u64(odp_atomic_u64_t *atom,
- uint64_t *old_val,
- uint64_t new_val)
+_ODP_INLINE int odp_atomic_cas_acq_rel_u32(odp_atomic_u32_t *atom,
+ uint32_t *old_val,
+ uint32_t new_val)
{
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- int ret;
- *old_val = ATOMIC_OP(atom, ATOMIC_CAS_OP(&ret, *old_val, new_val));
- return ret;
-#else
return __atomic_compare_exchange_n(&atom->v, old_val, new_val,
0 /* strong */,
__ATOMIC_ACQ_REL,
__ATOMIC_RELAXED);
-#endif
}
#endif
@@ -20,61 +20,39 @@ extern "C" {
#include <odp/api/std_types.h>
#include <odp/api/align.h>
-/**
- * @internal
- * Atomic 64-bit unsigned integer
- */
-struct odp_atomic_u64_s {
- uint64_t v; /**< Actual storage for the atomic variable */
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
- /* Some architectures do not support lock-free operations on 64-bit
- * data types. We use a spin lock to ensure atomicity. */
- char lock; /**< Spin lock (if needed) used to ensure atomic access */
-#endif
-} ODP_ALIGNED(sizeof(uint64_t)); /* Enforce alignement! */
-
/**
* @internal
* Atomic 32-bit unsigned integer
*/
struct odp_atomic_u32_s {
uint32_t v; /**< Actual storage for the atomic variable */
-} ODP_ALIGNED(sizeof(uint32_t)); /* Enforce alignement! */
+} ODP_ALIGNED(sizeof(uint32_t)); /* Enforce alignment! */
-#if __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+#if __GCC_ATOMIC_LLONG_LOCK_FREE >= 2
/**
* @internal
- * CAS operation expression for the ATOMIC_OP macro
+ * Atomic 64-bit unsigned integer
*/
-#define ATOMIC_CAS_OP(ret_ptr, old_val, new_val) \
-({ \
- if (atom->v == (old_val)) { \
- atom->v = (new_val); \
- *(ret_ptr) = 1; \
- } else { \
- *(ret_ptr) = 0; \
- } \
-})
+struct odp_atomic_u64_s {
+ uint64_t v; /**< Actual storage for the atomic variable */
+} ODP_ALIGNED(sizeof(uint64_t)); /* Enforce alignment! */
+
+#else
+
+#define ODP_ATOMIC_U64_LOCK 1
/**
* @internal
- * Helper macro for lock-based atomic operations on 64-bit integers
- * @param[in,out] atom Pointer to the 64-bit atomic variable
- * @param expr Expression used update the variable.
- * @return The old value of the variable.
+ * Atomic 64-bit unsigned integer
*/
-#define ATOMIC_OP(atom, expr) \
-({ \
- uint64_t _old_val; \
- /* Loop while lock is already taken, stop when lock becomes clear */ \
- while (__atomic_test_and_set(&(atom)->lock, __ATOMIC_ACQUIRE)) \
- (void)0; \
- _old_val = (atom)->v; \
- (expr); /* Perform whatever update is desired */ \
- __atomic_clear(&(atom)->lock, __ATOMIC_RELEASE); \
- _old_val; /* Return old value */ \
-})
+struct odp_atomic_u64_s {
+ uint64_t v; /**< Actual storage for the atomic variable */
+ /* Some architectures do not support lock-free operations on 64-bit
+ * data types. We use a spin lock to ensure atomicity. */
+ char lock; /**< Spin lock (if needed) used to ensure atomic access */
+} ODP_ALIGNED(sizeof(uint64_t)); /* Enforce alignment! */
+
#endif
typedef struct odp_atomic_u64_s odp_atomic_u64_t;
@@ -223,7 +223,7 @@ static inline void _odp_atomic_u32_sub_mm(odp_atomic_u32_t *atom,
*****************************************************************************/
/* Check if the compiler support lock-less atomic operations on 64-bit types */
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
+#ifdef ODP_ATOMIC_U64_LOCK
/**
* @internal
* Helper macro for lock-based atomic operations on 64-bit integers
@@ -247,7 +247,6 @@ static inline void _odp_atomic_u32_sub_mm(odp_atomic_u32_t *atom,
__ATOMIC_SEQ_CST : __ATOMIC_RELEASE); \
old_val; /* Return old value */ \
})
-#endif
/**
* Atomic load of 64-bit atomic variable
@@ -258,13 +257,9 @@ static inline void _odp_atomic_u32_sub_mm(odp_atomic_u32_t *atom,
* @return Value of the variable
*/
static inline uint64_t _odp_atomic_u64_load_mm(odp_atomic_u64_t *atom,
- _odp_memmodel_t mmodel)
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP_MM(atom, (void)0, mmodel);
-#else
- return __atomic_load_n(&atom->v, mmodel);
-#endif
}
/**
@@ -275,14 +270,10 @@ static inline uint64_t _odp_atomic_u64_load_mm(odp_atomic_u64_t *atom,
* @param mmodel Memory order associated with the store operation
*/
static inline void _odp_atomic_u64_store_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP_MM(atom, atom->v = val, mmodel);
-#else
- __atomic_store_n(&atom->v, val, mmodel);
-#endif
}
/**
@@ -295,15 +286,11 @@ static inline void _odp_atomic_u64_store_mm(odp_atomic_u64_t *atom,
* @return Old value of variable
*/
static inline uint64_t _odp_atomic_u64_xchg_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP_MM(atom, atom->v = val, mmodel);
-#else
- return __atomic_exchange_n(&atom->v, val, mmodel);
-#endif
}
/**
@@ -322,12 +309,11 @@ static inline uint64_t _odp_atomic_u64_xchg_mm(odp_atomic_u64_t *atom,
* @retval 0 exchange failed and '*exp' updated with current value
*/
static inline int _odp_atomic_u64_cmp_xchg_strong_mm(odp_atomic_u64_t *atom,
- uint64_t *exp,
- uint64_t val,
- _odp_memmodel_t success,
- _odp_memmodel_t failure)
+ uint64_t *exp,
+ uint64_t val,
+ _odp_memmodel_t success,
+ _odp_memmodel_t failure)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
/* Possibly we are a bit pessimistic with the memory models */
odp_bool_t ret_succ;
/* Loop while lock is already taken, stop when lock becomes clear */
@@ -346,10 +332,6 @@ static inline int _odp_atomic_u64_cmp_xchg_strong_mm(odp_atomic_u64_t *atom,
(ret_succ ? success : failure) == _ODP_MEMMODEL_SC ?
__ATOMIC_SEQ_CST : __ATOMIC_RELEASE);
return ret_succ;
-#else
- return __atomic_compare_exchange_n(&atom->v, exp, val,
- false/*strong*/, success, failure);
-#endif
}
/**
@@ -362,14 +344,10 @@ static inline int _odp_atomic_u64_cmp_xchg_strong_mm(odp_atomic_u64_t *atom,
* @return Value of the atomic variable before the addition
*/
static inline uint64_t _odp_atomic_u64_fetch_add_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP_MM(atom, atom->v += val, mmodel);
-#else
- return __atomic_fetch_add(&atom->v, val, mmodel);
-#endif
}
/**
@@ -380,15 +358,11 @@ static inline uint64_t _odp_atomic_u64_fetch_add_mm(odp_atomic_u64_t *atom,
* @param mmodel Memory order associated with the add operation.
*/
static inline void _odp_atomic_u64_add_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP_MM(atom, atom->v += val, mmodel);
-#else
- (void)__atomic_fetch_add(&atom->v, val, mmodel);
-#endif
}
/**
@@ -401,14 +375,10 @@ static inline void _odp_atomic_u64_add_mm(odp_atomic_u64_t *atom,
* @return Value of the atomic variable before the subtraction
*/
static inline uint64_t _odp_atomic_u64_fetch_sub_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
return ATOMIC_OP_MM(atom, atom->v -= val, mmodel);
-#else
- return __atomic_fetch_sub(&atom->v, val, mmodel);
-#endif
}
/**
@@ -419,20 +389,150 @@ static inline uint64_t _odp_atomic_u64_fetch_sub_mm(odp_atomic_u64_t *atom,
* @param mmodel Memory order associated with the subtract operation
*/
static inline void _odp_atomic_u64_sub_mm(odp_atomic_u64_t *atom,
- uint64_t val,
- _odp_memmodel_t mmodel)
+ uint64_t val,
+ _odp_memmodel_t mmodel)
{
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
(void)ATOMIC_OP_MM(atom, atom->v -= val, mmodel);
-#else
- (void)__atomic_fetch_sub(&atom->v, val, mmodel);
-#endif
}
-#if !defined __GCC_ATOMIC_LLONG_LOCK_FREE || __GCC_ATOMIC_LLONG_LOCK_FREE < 2
#undef ATOMIC_OP_MM
-#endif
+
+#else /* ! ODP_ATOMIC_U64_LOCK */
+
+/**
+ * Atomic load of 64-bit atomic variable
+ *
+ * @param atom Pointer to a 64-bit atomic variable
+ * @param mmodel Memory order associated with the load operation
+ *
+ * @return Value of the variable
+ */
+static inline uint64_t _odp_atomic_u64_load_mm(odp_atomic_u64_t *atom,
+ _odp_memmodel_t mmodel)
+{
+ return __atomic_load_n(&atom->v, mmodel);
+}
+
+/**
+ * Atomic store to 64-bit atomic variable
+ *
+ * @param[out] atom Pointer to a 64-bit atomic variable
+ * @param val Value to write to the atomic variable
+ * @param mmodel Memory order associated with the store operation
+ */
+static inline void _odp_atomic_u64_store_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+{
+ __atomic_store_n(&atom->v, val, mmodel);
+}
+
+/**
+ * Atomic exchange (swap) of 64-bit atomic variable
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param val New value to write to the atomic variable
+ * @param mmodel Memory order associated with the exchange operation
+ *
+ * @return Old value of variable
+ */
+static inline uint64_t _odp_atomic_u64_xchg_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+
+{
+ return __atomic_exchange_n(&atom->v, val, mmodel);
+}
+
+/**
+ * Atomic compare and exchange (swap) of 64-bit atomic variable
+ * "Strong" semantics, will not fail spuriously.
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param[in,out] exp Pointer to expected value (updated on failure)
+ * @param val New value to write
+ * @param success Memory order associated with a successful compare-and-swap
+ * operation
+ * @param failure Memory order associated with a failed compare-and-swap
+ * operation
+ *
+ * @retval 1 exchange successful
+ * @retval 0 exchange failed and '*exp' updated with current value
+ */
+static inline int _odp_atomic_u64_cmp_xchg_strong_mm(odp_atomic_u64_t *atom,
+ uint64_t *exp,
+ uint64_t val,
+ _odp_memmodel_t success,
+ _odp_memmodel_t failure)
+{
+ return __atomic_compare_exchange_n(&atom->v, exp, val,
+ false/*strong*/, success, failure);
+}
+
+/**
+ * Atomic fetch and add of 64-bit atomic variable
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param val Value to add to the atomic variable
+ * @param mmodel Memory order associated with the add operation
+ *
+ * @return Value of the atomic variable before the addition
+ */
+static inline uint64_t _odp_atomic_u64_fetch_add_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+{
+ return __atomic_fetch_add(&atom->v, val, mmodel);
+}
+
+/**
+ * Atomic add of 64-bit atomic variable
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param val Value to add to the atomic variable
+ * @param mmodel Memory order associated with the add operation.
+ */
+static inline void _odp_atomic_u64_add_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+
+{
+ (void)__atomic_fetch_add(&atom->v, val, mmodel);
+}
+
+/**
+ * Atomic fetch and subtract of 64-bit atomic variable
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param val Value to subtract from the atomic variable
+ * @param mmodel Memory order associated with the subtract operation
+ *
+ * @return Value of the atomic variable before the subtraction
+ */
+static inline uint64_t _odp_atomic_u64_fetch_sub_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+{
+ return __atomic_fetch_sub(&atom->v, val, mmodel);
+}
+
+/**
+ * Atomic subtract of 64-bit atomic variable
+ *
+ * @param[in,out] atom Pointer to a 64-bit atomic variable
+ * @param val Value to subtract from the atomic variable
+ * @param mmodel Memory order associated with the subtract operation
+ */
+static inline void _odp_atomic_u64_sub_mm(odp_atomic_u64_t *atom,
+ uint64_t val,
+ _odp_memmodel_t mmodel)
+
+{
+ (void)__atomic_fetch_sub(&atom->v, val, mmodel);
+}
+
+#endif /* ! ODP_ATOMIC_U64_LOCK */
/*****************************************************************************
* Operations on pointer atomics
Rewrite atomic_types.h/atomic_inlines.h to clearly separate simple (common) and locked 64-bit cases. This allows us to ease switching of the atomic header to the ABI setup. Signed-off-by: Dmitry Eremin-Solenikov <dmitry.ereminsolenikov@linaro.org> --- .../include/odp/api/plat/atomic_inlines.h | 315 +++++++++++++-------- .../include/odp/api/plat/atomic_types.h | 58 ++-- .../linux-generic/include/odp_atomic_internal.h | 208 ++++++++++---- 3 files changed, 364 insertions(+), 217 deletions(-) -- 2.14.2