@@ -35,6 +35,15 @@

#if defined(CONFIG_ATOMIC128)
# define HAVE_al16_fast true
+#elif defined(CONFIG_TCG_INTERPRETER)
+/*
+ * FIXME: host specific detection for this is in tcg/$host/,
+ * but we're using tcg/tci/ instead.
+ */
+# define HAVE_al16_fast false
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
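+/* have_atomic16 is determined at startup by the tcg/i386 host backend. */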
+# define HAVE_al16_fast likely(have_atomic16)
#else
# define HAVE_al16_fast false
#endif
@@ -162,6 +171,13 @@ load_atomic16(void *pv)

    r.u = qatomic_read__nocheck(p);
return r.s;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+ Int128Alias r;
+
+ /* Via HAVE_al16_fast, have_atomic16 is true. */
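+    /* Intel and AMD document aligned 16-byte vmovdqa as atomic with AVX. */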
+ asm("vmovdqa %1, %0" : "=x" (r.u) : "m" (*(Int128 *)pv));
+ return r.s;
#else
qemu_build_not_reached();
#endif
@@ -383,6 +399,27 @@ load_atom_extract_al16_or_al8(void *pv, int s)
r = qatomic_read__nocheck(p16);
}
return r >> shr;
+#elif defined(__x86_64__) && defined(CONFIG_INT128)
+ uintptr_t pi = (uintptr_t)pv;
+ int shr = (pi & 7) * 8;
+ uint64_t a, b;
+
+ /* Via HAVE_al16_fast, have_atomic16 is true. */
+ pv = (void *)(pi & ~7);
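+    /* If the object crosses 16 bytes, only 8-byte atomicity is required. */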
+ if (pi & 8) {
+ uint64_t *p8 = __builtin_assume_aligned(pv, 16, 8);
+ a = qatomic_read__nocheck(p8);
+ b = qatomic_read__nocheck(p8 + 1);
+ } else {
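+        /* One atomic 16-byte load; a gets the low half, vpextrq the high. */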
+ asm("vmovdqa %2, %0\n\tvpextrq $1, %0, %1"
+ : "=x"(a), "=r"(b) : "m" (*(__uint128_t *)pv));
+ }
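+    /* a = (b:a) >> shr; shrd takes the shift count in %cl (%b2). */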
+ asm("shrd %b2, %1, %0" : "+r"(a) : "r"(b), "c"(shr));
+
+ return a;
#else
qemu_build_not_reached();
#endif
@@ -699,23 +736,36 @@ static inline void ATTRIBUTE_ATOMIC128_OPT
store_atomic16(void *pv, Int128Alias val)
{
#if defined(CONFIG_ATOMIC128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- qatomic_set__nocheck(pu, val.u);
-#elif defined(CONFIG_CMPXCHG128)
- __uint128_t *pu = __builtin_assume_aligned(pv, 16);
- __uint128_t o;
-
- /*
- * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
- * defer to libatomic, so we must use __sync_val_compare_and_swap_16
- * and accept the sequential consistency that comes with it.
- */
- do {
- o = *pu;
- } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
-#else
- qemu_build_not_reached();
+ {
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ qatomic_set__nocheck(pu, val.u);
+ return;
+ }
#endif
+#if defined(__x86_64__) && defined(CONFIG_INT128)
+ if (HAVE_al16_fast) {
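+        /* Aligned 16-byte vmovdqa stores are atomic when have_atomic16. */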
+ asm("vmovdqa %1, %0" : "=m"(*(__uint128_t *)pv) : "x" (val.u));
+ return;
+ }
+#endif
+#if defined(CONFIG_CMPXCHG128)
+ {
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ __uint128_t o;
+
+ /*
+ * Without CONFIG_ATOMIC128, __atomic_compare_exchange_n will always
+ * defer to libatomic, so we must use __sync_val_compare_and_swap_16
+ * and accept the sequential consistency that comes with it.
+ */
+ do {
+ o = *pu;
+ } while (!__sync_bool_compare_and_swap_16(pu, o, val.u));
+ return;
+ }
+#endif
+ qemu_build_not_reached();
}

/**
Hosts using Intel and AMD AVX CPUs are quite common. Add fast paths
through ldst_atomicity using AVX.

Only enable with CONFIG_INT128; some older clang versions do not
support __int128_t, and the inline assembly won't work on structures.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 82 +++++++++++++++++++++++++++-------
 1 file changed, 66 insertions(+), 16 deletions(-)