@@ -9,6 +9,7 @@ AM_CPPFLAGS += -I$(top_srcdir)/include/odp/arch/@ARCH_ABI@
AM_CPPFLAGS += -I$(top_builddir)/include
AM_CPPFLAGS += -Iinclude
AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/$(ARCH_DIR)
+AM_CPPFLAGS += -I$(top_srcdir)/platform/$(with_platform)/arch/default
AM_CPPFLAGS += -Iinclude
AM_CPPFLAGS += -DSYSCONFDIR=\"@sysconfdir@\"
@@ -195,22 +196,6 @@ noinst_HEADERS = \
${srcdir}/include/protocols/thash.h \
${srcdir}/include/protocols/udp.h
-if ARCH_IS_ARM
-noinst_HEADERS += ${srcdir}/arch/arm/odp_atomic.h \
- ${srcdir}/arch/arm/odp_cpu.h \
- ${srcdir}/arch/arm/odp_cpu_idling.h \
- ${srcdir}/arch/arm/odp_llsc.h
-endif
-if ARCH_IS_MIPS64
-noinst_HEADERS += ${srcdir}/arch/mips64/odp_cpu.h
-endif
-if ARCH_IS_POWERPC
-noinst_HEADERS += ${srcdir}/arch/powerpc/odp_cpu.h
-endif
-if ARCH_IS_X86
-noinst_HEADERS += ${srcdir}/arch/x86/odp_cpu.h
-endif
-
__LIB__libodp_linux_la_SOURCES = \
_fdserver.c \
_ishm.c \
@@ -290,6 +275,10 @@ __LIB__libodp_linux_la_SOURCES += arch/default/odp_cpu_arch.c \
arch/default/odp_global_time.c \
arch/default/odp_sysinfo_parse.c
arch_odp_headers = $(srcdir)/arch/arm/odp/api/cpu_arch.h
+noinst_HEADERS += ${srcdir}/arch/arm/odp_atomic.h \
+ ${srcdir}/arch/arm/odp_cpu.h \
+ ${srcdir}/arch/arm/odp_cpu_idling.h \
+ ${srcdir}/arch/arm/odp_llsc.h
endif
if ARCH_IS_AARCH64
__LIB__libodp_linux_la_SOURCES += arch/default/odp_cpu_arch.c \
@@ -297,6 +286,10 @@ __LIB__libodp_linux_la_SOURCES += arch/default/odp_cpu_arch.c \
arch/aarch64/odp_global_time.c \
arch/default/odp_sysinfo_parse.c
arch_odp_headers = $(srcdir)/arch/aarch64/odp/api/cpu_arch.h
+noinst_HEADERS += ${srcdir}/arch/aarch64/odp_atomic.h \
+ ${srcdir}/arch/aarch64/odp_cpu.h \
+ ${srcdir}/arch/aarch64/odp_cpu_idling.h \
+ ${srcdir}/arch/aarch64/odp_llsc.h
endif
if ARCH_IS_MIPS64
__LIB__libodp_linux_la_SOURCES += arch/mips64/odp_cpu_arch.c \
@@ -304,6 +297,7 @@ __LIB__libodp_linux_la_SOURCES += arch/mips64/odp_cpu_arch.c \
arch/default/odp_global_time.c \
arch/mips64/odp_sysinfo_parse.c
arch_odp_headers = $(srcdir)/arch/mips64/odp/api/cpu_arch.h
+noinst_HEADERS += ${srcdir}/arch/default/odp_cpu.h
endif
if ARCH_IS_POWERPC
__LIB__libodp_linux_la_SOURCES += arch/default/odp_cpu_arch.c \
@@ -311,6 +305,7 @@ __LIB__libodp_linux_la_SOURCES += arch/default/odp_cpu_arch.c \
arch/default/odp_global_time.c \
arch/powerpc/odp_sysinfo_parse.c
arch_odp_headers = $(srcdir)/arch/powerpc/odp/api/cpu_arch.h
+noinst_HEADERS += ${srcdir}/arch/default/odp_cpu.h
endif
if ARCH_IS_X86
__LIB__libodp_linux_la_SOURCES += arch/x86/cpu_flags.c \
@@ -320,6 +315,7 @@ __LIB__libodp_linux_la_SOURCES += arch/x86/cpu_flags.c \
arch/x86/odp_sysinfo_parse.c
arch_odp_headers = $(srcdir)/arch/x86/odp/api/cpu_arch.h
noinst_HEADERS += $(srcdir)/arch/x86/cpu_flags.h
+noinst_HEADERS += ${srcdir}/arch/default/odp_cpu.h
endif
noinst_HEADERS += $(srcdir)/arch/default/odp/api/cpu_arch.h
new file mode 100644
@@ -0,0 +1,217 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#error This file should not be included directly, please include odp_cpu.h
+#endif
+
+#ifdef CONFIG_DMBSTR
+
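+/* With CONFIG_DMBSTR, store-release is emulated with a release barrier
+ * followed by a relaxed store (DMB; STR), which is faster than a native
+ * store-release (STLR) on some early ARMv8 cores (see odp_cpu.h). */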
+#define atomic_store_release(loc, val, ro) \
+do { \
+ _odp_release_barrier(ro); \
+ __atomic_store_n(loc, val, __ATOMIC_RELAXED); \
+} while (0)
+
+#else
+
+#define atomic_store_release(loc, val, ro) \
+ __atomic_store_n(loc, val, __ATOMIC_RELEASE)
+
+#endif /* CONFIG_DMBSTR */
+
+#define HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
+#define HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \
+ (mo) == __ATOMIC_SEQ_CST)
+
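+/* Split a C11 memory order into the orderings used for the load-exclusive
+ * (LL) and store-exclusive (SC) halves of an LL/SC sequence. */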
+#define LL_MO(mo) (HAS_ACQ((mo)) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED)
+#define SC_MO(mo) (HAS_RLS((mo)) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED)
+
+#ifndef __ARM_FEATURE_QRDMX /* Feature only available in v8.1a and beyond */
+static inline bool
+__lockfree_compare_exchange_16(register __int128 *var, __int128 *exp,
+ register __int128 neu, bool weak, int mo_success,
+ int mo_failure)
+{
+ (void)weak; /* Always do strong CAS or we can't perform atomic read */
+ /* Ignore memory ordering for failure, memory order for
+ * success must be stronger or equal. */
+ (void)mo_failure;
+ register __int128 old;
+ register __int128 expected;
+ int ll_mo = LL_MO(mo_success);
+ int sc_mo = SC_MO(mo_success);
+
+ expected = *exp;
+ __asm__ volatile("" ::: "memory");
+ do {
+ /* Atomicity of LLD is not guaranteed */
+ old = lld(var, ll_mo);
+ /* Must write back neu or old to verify atomicity of LLD */
+ } while (odp_unlikely(scd(var, old == expected ? neu : old, sc_mo)));
+ *exp = old; /* Always update, atomically read value */
+ return old == expected;
+}
+
+static inline __int128 __lockfree_exchange_16(__int128 *var, __int128 neu,
+ int mo)
+{
+ register __int128 old;
+ int ll_mo = LL_MO(mo);
+ int sc_mo = SC_MO(mo);
+
+ do {
+ /* Atomicity of LLD is not guaranteed */
+ old = lld(var, ll_mo);
+ /* Must successfully write back to verify atomicity of LLD */
+ } while (odp_unlikely(scd(var, neu, sc_mo)));
+ return old;
+}
+
+static inline __int128 __lockfree_fetch_and_16(__int128 *var, __int128 mask,
+ int mo)
+{
+ register __int128 old;
+ int ll_mo = LL_MO(mo);
+ int sc_mo = SC_MO(mo);
+
+ do {
+ /* Atomicity of LLD is not guaranteed */
+ old = lld(var, ll_mo);
+ /* Must successfully write back to verify atomicity of LLD */
+ } while (odp_unlikely(scd(var, old & mask, sc_mo)));
+ return old;
+}
+
+static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
+ int mo)
+{
+ register __int128 old;
+ int ll_mo = LL_MO(mo);
+ int sc_mo = SC_MO(mo);
+
+ do {
+ /* Atomicity of LLD is not guaranteed */
+ old = lld(var, ll_mo);
+ /* Must successfully write back to verify atomicity of LLD */
+ } while (odp_unlikely(scd(var, old | mask, sc_mo)));
+ return old;
+}
+
+#else
+
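+/* __ARM_FEATURE_QRDMX implies an ARMv8.1-a (or later) target, so the CASP
+ * compare-and-swap-pair instructions can be used instead of LL/SC. */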
+static inline __int128 casp(__int128 *var, __int128 old, __int128 neu, int mo)
+{
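+	/* %H0 and %H1 name the higher-numbered register of each 128-bit
+	 * operand's register pair (GCC's 'H' operand modifier). */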
+ if (mo == __ATOMIC_RELAXED) {
+ __asm__ volatile("casp %0, %H0, %1, %H1, [%2]"
+ : "+r" (old)
+ : "r" (neu), "r" (var)
+ : "memory");
+ } else if (mo == __ATOMIC_ACQUIRE) {
+ __asm__ volatile("caspa %0, %H0, %1, %H1, [%2]"
+ : "+r" (old)
+ : "r" (neu), "r" (var)
+ : "memory");
+ } else if (mo == __ATOMIC_ACQ_REL) {
+ __asm__ volatile("caspal %0, %H0, %1, %H1, [%2]"
+ : "+r" (old)
+ : "r" (neu), "r" (var)
+ : "memory");
+ } else if (mo == __ATOMIC_RELEASE) {
+ __asm__ volatile("caspl %0, %H0, %1, %H1, [%2]"
+ : "+r" (old)
+ : "r" (neu), "r" (var)
+ : "memory");
+ } else {
+ abort();
+ }
+ return old;
+}
+
+static inline bool
+__lockfree_compare_exchange_16(register __int128 *var, __int128 *exp,
+ register __int128 neu, bool weak, int mo_success,
+ int mo_failure)
+{
+ (void)weak;
+ (void)mo_failure;
+ __int128 old;
+ __int128 expected;
+
+ expected = *exp;
+ old = casp(var, expected, neu, mo_success);
+ *exp = old; /* Always update, atomically read value */
+ return old == expected;
+}
+
+static inline __int128 __lockfree_exchange_16(__int128 *var, __int128 neu,
+ int mo)
+{
+ __int128 old;
+ __int128 expected;
+
+ do {
+ expected = *var;
+ old = casp(var, expected, neu, mo);
+ } while (old != expected);
+ return old;
+}
+
+static inline __int128 __lockfree_fetch_and_16(__int128 *var, __int128 mask,
+ int mo)
+{
+ __int128 old;
+ __int128 expected;
+
+ do {
+ expected = *var;
+ old = casp(var, expected, expected & mask, mo);
+ } while (old != expected);
+ return old;
+}
+
+static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
+ int mo)
+{
+ __int128 old;
+ __int128 expected;
+
+ do {
+ expected = *var;
+ old = casp(var, expected, expected | mask, mo);
+ } while (old != expected);
+ return old;
+}
+
+#endif /* __ARM_FEATURE_QRDMX */
+
+static inline __int128 __lockfree_load_16(__int128 *var, int mo)
+{
+ __int128 old = *var; /* Possibly torn read */
+
+ /* Do CAS to ensure atomicity
+ * Either CAS succeeds (writing back the same value)
+ * Or CAS fails and returns the old value (atomic read)
+ */
+ (void)__lockfree_compare_exchange_16(var, &old, old, false, mo, mo);
+ return old;
+}
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H */
new file mode 100644
@@ -0,0 +1,62 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+
+#if !defined(__aarch64__)
+#error Use this file only when compiling for ARMv8 architecture
+#endif
+
+#include <odp_debug_internal.h>
+
+/*
+ * Use LLD/SCD atomic primitives instead of the lock-based code path in
+ * llqueue. On ARM, LLD/SCD is the fastest way to enqueue and dequeue
+ * elements from a linked-list queue.
+ */
+#define CONFIG_LLDSCD
+
+/*
+ * Use DMB;STR instead of STLR on ARM.
+ * On early ARMv8 implementations (e.g. Cortex-A57) this is noticeably more
+ * performant than using store-release.
+ * It also allows for load-only barriers (DMB ISHLD), which are much cheaper
+ * than a full barrier.
+ */
+#define CONFIG_DMBSTR
+
+/*
+ * Use the ARM event signalling mechanism (SEVL/WFE).
+ * Event signalling minimises spinning (busy waiting), which decreases
+ * cache coherency traffic when waiting on shared locations (thus faster and
+ * more scalable) and enables the CPU to enter a sleep state (lower power
+ * consumption).
+ */
+#define CONFIG_WFE
+
+static inline void dmb(void)
+{
+ __asm__ volatile("dmb" : : : "memory");
+}
+
+/* Only ARMv8 supports DMB ISHLD */
+/* A load-only barrier is much cheaper than a full barrier */
+#define _odp_release_barrier(ro) \
+do { \
+ if (ro) \
+ __asm__ volatile("dmb ishld" ::: "memory"); \
+ else \
+ __asm__ volatile("dmb ish" ::: "memory"); \
+} while (0)
+
+#include "odp_llsc.h"
+#include "odp_atomic.h"
+#include "odp_cpu_idling.h"
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H */
new file mode 100644
@@ -0,0 +1,54 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_CPU_IDLING_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_CPU_IDLING_H
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#error This file should not be included directly, please include odp_cpu.h
+#endif
+
+static inline void sevl(void)
+{
+#ifdef CONFIG_WFE
+ __asm__ volatile("sevl" : : : );
+#endif
+}
+
+static inline int wfe(void)
+{
+#ifdef CONFIG_WFE
+ __asm__ volatile("wfe" : : : "memory");
+#endif
+ return 1;
+}
+
+static inline void doze(void)
+{
+#ifndef CONFIG_WFE
+ /* When using WFE do not stall the pipeline using other means */
+ odp_cpu_pause();
+#endif
+}
+
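+/* monitorXX() reads the location and, with CONFIG_WFE, also arms the
+ * exclusive monitor so that a subsequent wfe() wakes up when another CPU
+ * writes the location. */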
+#ifdef CONFIG_WFE
+#define monitor128(addr, mo) lld((addr), (mo))
+#define monitor64(addr, mo) ll64((addr), (mo))
+#define monitor32(addr, mo) ll32((addr), (mo))
+#define monitor8(addr, mo) ll8((addr), (mo))
+#else
+#define monitor128(addr, mo) __atomic_load_n((addr), (mo))
+#define monitor64(addr, mo) __atomic_load_n((addr), (mo))
+#define monitor32(addr, mo) __atomic_load_n((addr), (mo))
+#define monitor8(addr, mo) __atomic_load_n((addr), (mo))
+#endif
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_CPU_IDLING_H */
new file mode 100644
@@ -0,0 +1,168 @@
+/* Copyright (c) 2017, ARM Limited. All rights reserved.
+ *
+ * Copyright (c) 2017, Linaro Limited
+ * All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_LLSC_H
+#define PLATFORM_LINUXGENERIC_ARCH_ARM_LLSC_H
+
+#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
+#error This file should not be included directly, please include odp_cpu.h
+#endif
+
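+/* Load-exclusive/store-exclusive wrappers: the llXX() functions perform a
+ * load-exclusive of the given width; the scXX()/scd() functions attempt the
+ * matching store-exclusive and return 0 on success, 1 on failure. */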
+static inline uint16_t ll8(uint8_t *var, int mm)
+{
+ uint16_t old;
+
+ if (mm == __ATOMIC_ACQUIRE)
+ __asm__ volatile("ldaxrb %w0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("ldxrb %w0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return old;
+}
+
+static inline uint32_t ll32(uint32_t *var, int mm)
+{
+ uint32_t old;
+
+ if (mm == __ATOMIC_ACQUIRE)
+ __asm__ volatile("ldaxr %w0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("ldxr %w0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return old;
+}
+
+/* Return 0 on success, 1 on failure */
+static inline uint32_t sc32(uint32_t *var, uint32_t neu, int mm)
+{
+ uint32_t ret;
+
+ if (mm == __ATOMIC_RELEASE)
+ __asm__ volatile("stlxr %w0, %w1, [%2]"
+ : "=&r" (ret)
+ : "r" (neu), "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("stxr %w0, %w1, [%2]"
+ : "=&r" (ret)
+ : "r" (neu), "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return ret;
+}
+
+static inline uint64_t ll(uint64_t *var, int mm)
+{
+ uint64_t old;
+
+ if (mm == __ATOMIC_ACQUIRE)
+ __asm__ volatile("ldaxr %0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("ldxr %0, [%1]"
+ : "=&r" (old)
+ : "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return old;
+}
+
+#define ll64(a, b) ll((a), (b))
+
+/* Return 0 on success, 1 on failure */
+static inline uint32_t sc(uint64_t *var, uint64_t neu, int mm)
+{
+ uint32_t ret;
+
+ if (mm == __ATOMIC_RELEASE)
+ __asm__ volatile("stlxr %w0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (neu), "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("stxr %w0, %1, [%2]"
+ : "=&r" (ret)
+ : "r" (neu), "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return ret;
+}
+
+#define sc64(a, b, c) sc((a), (b), (c))
+
+union i128 {
+ __int128 i128;
+ int64_t i64[2];
+};
+
+static inline __int128 lld(__int128 *var, int mm)
+{
+ union i128 old;
+
+ if (mm == __ATOMIC_ACQUIRE)
+ __asm__ volatile("ldaxp %0, %1, [%2]"
+ : "=&r" (old.i64[0]), "=&r" (old.i64[1])
+ : "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("ldxp %0, %1, [%2]"
+ : "=&r" (old.i64[0]), "=&r" (old.i64[1])
+ : "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return old.i128;
+}
+
+/* Return 0 on success, 1 on failure */
+static inline uint32_t scd(__int128 *var, __int128 neu, int mm)
+{
+ uint32_t ret;
+
+ if (mm == __ATOMIC_RELEASE)
+ __asm__ volatile("stlxp %w0, %1, %2, [%3]"
+ : "=&r" (ret)
+ : "r" (((union i128)neu).i64[0]),
+ "r" (((union i128)neu).i64[1]),
+ "r" (var)
+ : "memory");
+ else if (mm == __ATOMIC_RELAXED)
+ __asm__ volatile("stxp %w0, %1, %2, [%3]"
+ : "=&r" (ret)
+ : "r" (((union i128)neu).i64[0]),
+ "r" (((union i128)neu).i64[1]),
+ "r" (var)
+ : );
+ else
+ ODP_ABORT();
+ return ret;
+}
+
+#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_LLSC_H */
@@ -28,185 +28,4 @@ do { \
#endif /* CONFIG_DMBSTR */
-#ifdef __aarch64__
-
-#define HAS_ACQ(mo) ((mo) != __ATOMIC_RELAXED && (mo) != __ATOMIC_RELEASE)
-#define HAS_RLS(mo) ((mo) == __ATOMIC_RELEASE || (mo) == __ATOMIC_ACQ_REL || \
- (mo) == __ATOMIC_SEQ_CST)
-
-#define LL_MO(mo) (HAS_ACQ((mo)) ? __ATOMIC_ACQUIRE : __ATOMIC_RELAXED)
-#define SC_MO(mo) (HAS_RLS((mo)) ? __ATOMIC_RELEASE : __ATOMIC_RELAXED)
-
-#ifndef __ARM_FEATURE_QRDMX /* Feature only available in v8.1a and beyond */
-static inline bool
-__lockfree_compare_exchange_16(register __int128 *var, __int128 *exp,
- register __int128 neu, bool weak, int mo_success,
- int mo_failure)
-{
- (void)weak; /* Always do strong CAS or we can't perform atomic read */
- /* Ignore memory ordering for failure, memory order for
- * success must be stronger or equal. */
- (void)mo_failure;
- register __int128 old;
- register __int128 expected;
- int ll_mo = LL_MO(mo_success);
- int sc_mo = SC_MO(mo_success);
-
- expected = *exp;
- __asm__ volatile("" ::: "memory");
- do {
- /* Atomicity of LLD is not guaranteed */
- old = lld(var, ll_mo);
- /* Must write back neu or old to verify atomicity of LLD */
- } while (odp_unlikely(scd(var, old == expected ? neu : old, sc_mo)));
- *exp = old; /* Always update, atomically read value */
- return old == expected;
-}
-
-static inline __int128 __lockfree_exchange_16(__int128 *var, __int128 neu,
- int mo)
-{
- register __int128 old;
- int ll_mo = LL_MO(mo);
- int sc_mo = SC_MO(mo);
-
- do {
- /* Atomicity of LLD is not guaranteed */
- old = lld(var, ll_mo);
- /* Must successfully write back to verify atomicity of LLD */
- } while (odp_unlikely(scd(var, neu, sc_mo)));
- return old;
-}
-
-static inline __int128 __lockfree_fetch_and_16(__int128 *var, __int128 mask,
- int mo)
-{
- register __int128 old;
- int ll_mo = LL_MO(mo);
- int sc_mo = SC_MO(mo);
-
- do {
- /* Atomicity of LLD is not guaranteed */
- old = lld(var, ll_mo);
- /* Must successfully write back to verify atomicity of LLD */
- } while (odp_unlikely(scd(var, old & mask, sc_mo)));
- return old;
-}
-
-static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
- int mo)
-{
- register __int128 old;
- int ll_mo = LL_MO(mo);
- int sc_mo = SC_MO(mo);
-
- do {
- /* Atomicity of LLD is not guaranteed */
- old = lld(var, ll_mo);
- /* Must successfully write back to verify atomicity of LLD */
- } while (odp_unlikely(scd(var, old | mask, sc_mo)));
- return old;
-}
-
-#else
-
-static inline __int128 casp(__int128 *var, __int128 old, __int128 neu, int mo)
-{
- if (mo == __ATOMIC_RELAXED) {
- __asm__ volatile("casp %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
- : "memory");
- } else if (mo == __ATOMIC_ACQUIRE) {
- __asm__ volatile("caspa %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
- : "memory");
- } else if (mo == __ATOMIC_ACQ_REL) {
- __asm__ volatile("caspal %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
- : "memory");
- } else if (mo == __ATOMIC_RELEASE) {
- __asm__ volatile("caspl %0, %H0, %1, %H1, [%2]"
- : "+r" (old)
- : "r" (neu), "r" (var)
- : "memory");
- } else {
- abort();
- }
- return old;
-}
-
-static inline bool
-__lockfree_compare_exchange_16(register __int128 *var, __int128 *exp,
- register __int128 neu, bool weak, int mo_success,
- int mo_failure)
-{
- (void)weak;
- (void)mo_failure;
- __int128 old;
- __int128 expected;
-
- expected = *exp;
- old = casp(var, expected, neu, mo_success);
- *exp = old; /* Always update, atomically read value */
- return old == expected;
-}
-
-static inline __int128 __lockfree_exchange_16(__int128 *var, __int128 neu,
- int mo)
-{
- __int128 old;
- __int128 expected;
-
- do {
- expected = *var;
- old = casp(var, expected, neu, mo);
- } while (old != expected);
- return old;
-}
-
-static inline __int128 __lockfree_fetch_and_16(__int128 *var, __int128 mask,
- int mo)
-{
- __int128 old;
- __int128 expected;
-
- do {
- expected = *var;
- old = casp(var, expected, expected & mask, mo);
- } while (old != expected);
- return old;
-}
-
-static inline __int128 __lockfree_fetch_or_16(__int128 *var, __int128 mask,
- int mo)
-{
- __int128 old;
- __int128 expected;
-
- do {
- expected = *var;
- old = casp(var, expected, expected | mask, mo);
- } while (old != expected);
- return old;
-}
-
-#endif /* __ARM_FEATURE_QRDMX */
-
-static inline __int128 __lockfree_load_16(__int128 *var, int mo)
-{
- __int128 old = *var; /* Possibly torn read */
-
- /* Do CAS to ensure atomicity
- * Either CAS succeeds (writing back the same value)
- * Or CAS fails and returns the old value (atomic read)
- */
- (void)__lockfree_compare_exchange_16(var, &old, old, false, mo, mo);
- return old;
-}
-
-#endif /* __aarch64__ */
-
#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_ATOMIC_H */
@@ -9,7 +9,7 @@
#ifndef PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
#define PLATFORM_LINUXGENERIC_ARCH_ARM_ODP_CPU_H
-#if !defined(__arm__) && !defined(__aarch64__)
+#if !defined(__arm__)
#error Use this file only when compiling for ARM architecture
#endif
@@ -38,34 +38,16 @@
* more scalable) and enables the CPU to enter a sleep state (lower power
* consumption).
*/
-#ifdef __aarch64__
-#define CONFIG_WFE
-#endif
+/* #define CONFIG_WFE */
static inline void dmb(void)
{
__asm__ volatile("dmb" : : : "memory");
}
-#ifdef __aarch64__
-
-/* Only ARMv8 supports DMB ISHLD */
-/* A load only barrier is much cheaper than full barrier */
-#define _odp_release_barrier(ro) \
-do { \
- if (ro) \
- __asm__ volatile("dmb ishld" ::: "memory"); \
- else \
- __asm__ volatile("dmb ish" ::: "memory"); \
-} while (0)
-
-#else
-
#define _odp_release_barrier(ro) \
__atomic_thread_fence(__ATOMIC_RELEASE)
-#endif /* __aarch64__ */
-
#include "odp_llsc.h"
#include "odp_atomic.h"
#include "odp_cpu_idling.h"
@@ -13,8 +13,6 @@
#error This file should not be included directly, please include odp_cpu.h
#endif
-#ifdef __arm__
-
static inline uint32_t ll8(uint8_t *var, int mm)
{
uint8_t old;
@@ -95,159 +93,4 @@ static inline uint32_t scd(uint64_t *var, uint64_t neu, int mm)
#define sc64(a, b, c) scd((a), (b), (c))
-#endif /* __arm__ */
-
-#ifdef __aarch64__
-
-static inline uint16_t ll8(uint8_t *var, int mm)
-{
- uint16_t old;
-
- if (mm == __ATOMIC_ACQUIRE)
- __asm__ volatile("ldaxrb %w0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("ldxrb %w0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : );
- else
- ODP_ABORT();
- return old;
-}
-
-static inline uint32_t ll32(uint32_t *var, int mm)
-{
- uint32_t old;
-
- if (mm == __ATOMIC_ACQUIRE)
- __asm__ volatile("ldaxr %w0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("ldxr %w0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : );
- else
- ODP_ABORT();
- return old;
-}
-
-/* Return 0 on success, 1 on failure */
-static inline uint32_t sc32(uint32_t *var, uint32_t neu, int mm)
-{
- uint32_t ret;
-
- if (mm == __ATOMIC_RELEASE)
- __asm__ volatile("stlxr %w0, %w1, [%2]"
- : "=&r" (ret)
- : "r" (neu), "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("stxr %w0, %w1, [%2]"
- : "=&r" (ret)
- : "r" (neu), "r" (var)
- : );
- else
- ODP_ABORT();
- return ret;
-}
-
-static inline uint64_t ll(uint64_t *var, int mm)
-{
- uint64_t old;
-
- if (mm == __ATOMIC_ACQUIRE)
- __asm__ volatile("ldaxr %0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("ldxr %0, [%1]"
- : "=&r" (old)
- : "r" (var)
- : );
- else
- ODP_ABORT();
- return old;
-}
-
-#define ll64(a, b) ll((a), (b))
-
-/* Return 0 on success, 1 on failure */
-static inline uint32_t sc(uint64_t *var, uint64_t neu, int mm)
-{
- uint32_t ret;
-
- if (mm == __ATOMIC_RELEASE)
- __asm__ volatile("stlxr %w0, %1, [%2]"
- : "=&r" (ret)
- : "r" (neu), "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("stxr %w0, %1, [%2]"
- : "=&r" (ret)
- : "r" (neu), "r" (var)
- : );
- else
- ODP_ABORT();
- return ret;
-}
-
-#define sc64(a, b, c) sc((a), (b), (c))
-
-union i128 {
- __int128 i128;
- int64_t i64[2];
-};
-
-static inline __int128 lld(__int128 *var, int mm)
-{
- union i128 old;
-
- if (mm == __ATOMIC_ACQUIRE)
- __asm__ volatile("ldaxp %0, %1, [%2]"
- : "=&r" (old.i64[0]), "=&r" (old.i64[1])
- : "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("ldxp %0, %1, [%2]"
- : "=&r" (old.i64[0]), "=&r" (old.i64[1])
- : "r" (var)
- : );
- else
- ODP_ABORT();
- return old.i128;
-}
-
-/* Return 0 on success, 1 on failure */
-static inline uint32_t scd(__int128 *var, __int128 neu, int mm)
-{
- uint32_t ret;
-
- if (mm == __ATOMIC_RELEASE)
- __asm__ volatile("stlxp %w0, %1, %2, [%3]"
- : "=&r" (ret)
- : "r" (((union i128)neu).i64[0]),
- "r" (((union i128)neu).i64[1]),
- "r" (var)
- : "memory");
- else if (mm == __ATOMIC_RELAXED)
- __asm__ volatile("stxp %w0, %1, %2, [%3]"
- : "=&r" (ret)
- : "r" (((union i128)neu).i64[0]),
- "r" (((union i128)neu).i64[1]),
- "r" (var)
- : );
- else
- ODP_ABORT();
- return ret;
-}
-
-#endif /* __aarch64__ */
-
#endif /* PLATFORM_LINUXGENERIC_ARCH_ARM_LLSC_H */
deleted file mode 100644
@@ -1,43 +0,0 @@
-/* Copyright (c) 2017, ARM Limited. All rights reserved.
- *
- * Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_MIPS64_CPU_H_
-#define ODP_MIPS64_CPU_H_
-
-/******************************************************************************
- * Atomics
- *****************************************************************************/
-
-#define atomic_store_release(loc, val, ro) \
- __atomic_store_n(loc, val, __ATOMIC_RELEASE)
-
-/******************************************************************************
- * Idle mgmt
- *****************************************************************************/
-
-static inline void sevl(void)
-{
- /* empty */
-}
-
-static inline int wfe(void)
-{
- return 1;
-}
-
-#define monitor128(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor64(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor32(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor8(addr, mo) __atomic_load_n((addr), (mo))
-
-static inline void doze(void)
-{
- odp_cpu_pause();
-}
-
-#endif
deleted file mode 100644
@@ -1,43 +0,0 @@
-/* Copyright (c) 2017, ARM Limited. All rights reserved.
- *
- * Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_POWERPC_CPU_H_
-#define ODP_POWERPC_CPU_H_
-
-/******************************************************************************
- * Atomics
- *****************************************************************************/
-
-#define atomic_store_release(loc, val, ro) \
- __atomic_store_n(loc, val, __ATOMIC_RELEASE)
-
-/******************************************************************************
- * Idle mgmt
- *****************************************************************************/
-
-static inline void sevl(void)
-{
- /* empty */
-}
-
-static inline int wfe(void)
-{
- return 1;
-}
-
-#define monitor128(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor64(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor32(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor8(addr, mo) __atomic_load_n((addr), (mo))
-
-static inline void doze(void)
-{
- odp_cpu_pause();
-}
-
-#endif
deleted file mode 100644
@@ -1,43 +0,0 @@
-/* Copyright (c) 2017, ARM Limited. All rights reserved.
- *
- * Copyright (c) 2017, Linaro Limited
- * All rights reserved.
- *
- * SPDX-License-Identifier: BSD-3-Clause
- */
-
-#ifndef ODP_X86_CPU_H_
-#define ODP_X86_CPU_H_
-
-/******************************************************************************
- * Atomics
- *****************************************************************************/
-
-#define atomic_store_release(loc, val, ro) \
- __atomic_store_n(loc, val, __ATOMIC_RELEASE)
-
-/******************************************************************************
- * Idle mgmt
- *****************************************************************************/
-
-static inline void sevl(void)
-{
- /* empty */
-}
-
-static inline int wfe(void)
-{
- return 1;
-}
-
-#define monitor128(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor64(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor32(addr, mo) __atomic_load_n((addr), (mo))
-#define monitor8(addr, mo) __atomic_load_n((addr), (mo))
-
-static inline void doze(void)
-{
- odp_cpu_pause();
-}
-
-#endif