@@ -41,6 +41,8 @@
* but we're using tcg/tci/ instead.
*/
# define HAVE_al16_fast false
+#elif defined(__aarch64__)
+# define HAVE_al16_fast likely(have_lse2)
#elif defined(__x86_64__)
# define HAVE_al16_fast likely(have_atomic16)
#else
@@ -48,6 +50,8 @@
#endif
#if defined(CONFIG_ATOMIC128) || defined(CONFIG_CMPXCHG128)
# define HAVE_al16 true
+#elif defined(__aarch64__)
+# define HAVE_al16 true
#else
# define HAVE_al16 false
#endif
@@ -168,6 +172,12 @@ load_atomic16(void *pv)
r.u = qatomic_read__nocheck(p);
return r.s;
+#elif defined(__aarch64__)
+ /* Via HAVE_al16_fast, FEAT_LSE2 is present: LDP becomes atomic. */
+ Int128Alias r;
+
+ asm("ldp %0, %R0, %1" : "=r"(r.u) : "m"(*(__uint128_t *)pv));
+ return r.s;
#elif defined(__x86_64__)
Int128Alias r;
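[Editorial note, not part of the patch: a self-contained sketch of the LDP
form used above.  With FEAT_LSE2 a 16-byte aligned LDP is single-copy
atomic, so the whole load is one instruction into a register pair; %0 names
the low half of the 128-bit value and %R0 the high half.

#include <stdint.h>

/* Assumes pv is 16-byte aligned and the host has FEAT_LSE2;
 * otherwise the LDP may tear. */
static inline __uint128_t load16_ldp(const void *pv)
{
    __uint128_t r;

    asm("ldp %0, %R0, %1" : "=r"(r) : "m"(*(const __uint128_t *)pv));
    return r;
}
]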
@@ -246,7 +256,20 @@ static Int128 load_atomic16_or_exit(CPUArchState *env, uintptr_t ra, void *pv)
* In system mode all guest pages are writable, and for user-only
* we have just checked writability. Try cmpxchg.
*/
-#if defined(CONFIG_CMPXCHG128)
+#if defined(__aarch64__)
+ /* We can do better than cmpxchg for AArch64. */
+ {
+ Int128Alias r;
+ uint32_t fail;
+
+ /* The load must be paired with the store to guarantee not tearing. */
+ asm("0: ldxp %0, %R0, %2\n\t"
+ "stxp %w1, %0, %R0, %2\n\t"
+ "cbnz %w1, 0b"
+ : "=&r"(r.u), "=&r"(fail) : "Q"(*p));
+ return r.s;
+ }
+#elif defined(CONFIG_CMPXCHG128)
/* Swap 0 with 0, with the side-effect of returning the old value. */
{
Int128Alias r;
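[Editorial note, not part of the patch: a standalone rendering of the same
LDXP/STXP idea.  The store-exclusive of the just-loaded value only succeeds
if the exclusive monitor was not lost, so a successful pair guarantees the
load was not torn; on failure the loop retries.  In this sketch the memory
operand is written as "+Q" to be conservative about the store side; the
value stored back is identical, so the memory contents never change.

#include <stdint.h>

static inline __uint128_t load16_exclusive(__uint128_t *pv)
{
    __uint128_t r;
    uint32_t fail;

    asm("0: ldxp %0, %R0, %2\n\t"
        "stxp %w1, %0, %R0, %2\n\t"
        "cbnz %w1, 0b"
        : "=&r"(r), "=&r"(fail), "+Q"(*pv));
    return r;
}
]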
@@ -393,6 +416,18 @@ load_atom_extract_al16_or_al8(void *pv, int s)
r = qatomic_read__nocheck(p16);
}
return r >> shr;
+#elif defined(__aarch64__)
+ /*
+ * Via HAVE_al16_fast, FEAT_LSE2 is present.
+ * LDP becomes single-copy atomic if 16-byte aligned, and
+ * single-copy atomic on the parts if 8-byte aligned.
+ */
+ uintptr_t pi = (uintptr_t)pv;
+ int shr = (pi & 7) * 8;
+ uint64_t l, h;
+
+ asm("ldp %0, %1, %2" : "=r"(l), "=r"(h) : "m"(*(__uint128_t *)(pi & ~7)));
+ return (l >> shr) | (h << (-shr & 63));
#elif defined(__x86_64__)
uintptr_t pi = (uintptr_t)pv;
int shr = (pi & 7) * 8;
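[Editorial note, not part of the patch: to make the shift arithmetic above
concrete, for a 4-byte value at an address with (pi & 7) == 6, shr is 48,
so l >> 48 supplies bytes 6-7 in the low bits and h << 16 places bytes 8-9
above them.  A small hypothetical harness (names invented here) exercising
that case; the LDP is atomic only with FEAT_LSE2, but the arithmetic is the
same either way:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same aligned-down LDP + shift as the patch; only the low s bytes of the
 * return value are meaningful. */
static inline uint64_t extract_al16_or_al8(void *pv, int s)
{
    uintptr_t pi = (uintptr_t)pv;
    int shr = (pi & 7) * 8;
    uint64_t l, h;

    (void)s;
    asm("ldp %0, %1, %2"
        : "=r"(l), "=r"(h) : "m"(*(__uint128_t *)(pi & ~7)));
    return (l >> shr) | (h << (-shr & 63));
}

int main(void)
{
    _Alignas(16) uint8_t buf[16] = { 0 };
    uint32_t v = 0x11223344;

    memcpy(buf + 6, &v, sizeof(v));   /* bytes 6..9 straddle the 8-byte split */
    uint64_t r = extract_al16_or_al8(buf + 6, 4);
    printf("0x%08x\n", (uint32_t)r);  /* prints 0x11223344 */
    return 0;
}
]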
@@ -739,7 +774,23 @@ store_atomic16(void *pv, Int128Alias val)
return;
}
#endif
-#if defined(CONFIG_CMPXCHG128)
+#if defined(__aarch64__)
+ /* We can do better than cmpxchg for AArch64. */
+ __uint128_t *pu = __builtin_assume_aligned(pv, 16);
+ __uint128_t old;
+ uint32_t fail;
+
+ if (HAVE_al16_fast) {
+ /* Via HAVE_al16_fast, FEAT_LSE2 is present: STP becomes atomic. */
+ asm("stp %1, %R1, %0" : "=Q"(*pu) : "r"(val.u));
+ } else {
+ asm("0: ldxp %0, %R0, %1\n\t"
+ "stxp %w2, %3, %R3, %1\n\t"
+ "cbnz %w2, 0b"
+ : "=&r"(old), "=Q"(*pu), "=&r"(fail) : "r"(val.u));
+ }
+ return;
+#elif defined(CONFIG_CMPXCHG128)
{
__uint128_t *pu = __builtin_assume_aligned(pv, 16);
__uint128_t o;
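[Editorial note, not part of the patch: for contrast with the STP/STXP
paths above, this is roughly what the generic 16-byte compare-and-swap
fallback mentioned in the commit message boils down to.  A sketch using the
GCC built-in, not the exact CONFIG_CMPXCHG128 code; it may require linking
against libatomic on some toolchains.

#include <stdbool.h>

static inline void store16_cas_loop(__uint128_t *pu, __uint128_t val)
{
    __uint128_t old = *pu;

    /* Retry until the 16-byte CAS succeeds; a failed exchange refreshes
     * 'old' with the current memory contents. */
    while (!__atomic_compare_exchange_n(pu, &old, val, false,
                                        __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST)) {
        /* retry */
    }
}
]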
@@ -839,7 +890,25 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
static void ATTRIBUTE_ATOMIC128_OPT
store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
{
-#if defined(CONFIG_ATOMIC128)
+#if defined(__aarch64__)
+ /*
+ * GCC only implements __sync* primitives for int128 on aarch64.
+ * We can do better without the barriers, and integrating the
+ * arithmetic into the load-exclusive/store-conditional pair.
+ */
+ __uint128_t tmp, *pu = __builtin_assume_aligned(ps, 16);
+ uint32_t fail;
+
+ asm("0: ldxp %[t], %R[t], %[mem]\n\t"
+ "bic %[t], %[t], %[m]\n\t"
+ "bic %R[t], %R[t], %R[m]\n\t"
+ "orr %[t], %[t], %[v]\n\t"
+ "orr %R[t], %R[t], %R[v]\n\t"
+ "stxp %w[f], %[t], %R[t], %[mem]\n\t"
+ "cbnz %w[f], 0b\n"
+ : [mem] "+Q"(*pu), [f] "=&r"(fail), [t] "=&r"(tmp)
+ : [v] "r"(val.u), [m] "r"(msk.u));
+#elif defined(CONFIG_ATOMIC128)
__uint128_t *pu, old, new;
/* With CONFIG_ATOMIC128, we can avoid the memory barriers. */
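[Editorial note, not part of the patch: in plain C, the LDXP/BIC/ORR/STXP
sequence added above computes the following read-modify-write, shown
non-atomically purely to spell out the bit manipulation:

/* BIC clears the bits selected by msk in each 64-bit half, ORR merges in
 * the new field; the exclusive pair makes the whole update atomic. */
static inline void insert_al16_ref(__uint128_t *pu,
                                   __uint128_t val, __uint128_t msk)
{
    *pu = (*pu & ~msk) | val;
}
]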
We have code in atomic128.h noting that through GCC 8, there
was no support for atomic operations on __uint128.  This has
been fixed in GCC 10.  But we can still improve over any
basic compare-and-swap loop using the ldxp/stxp instructions.

Add fast paths for FEAT_LSE2, using the detection in tcg.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/ldst_atomicity.c.inc | 75 ++++++++++++++++++++++++++++++++--
 1 file changed, 72 insertions(+), 3 deletions(-)