[6/8] tcg/aarch64: Support TCG_TARGET_SIGNED_ADDR32

Message ID 20211010174401.141339-7-richard.henderson@linaro.org
State New
Series tcg: support 32-bit guest addresses as signed

Commit Message

Richard Henderson Oct. 10, 2021, 5:43 p.m. UTC
AArch64 has both sign and zero-extending addressing modes, which
means that either treatment of guest addresses is equally efficient.
Enabling this for AArch64 gives us testing of the feature in CI.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

---
 tcg/aarch64/tcg-target-sa32.h |  8 ++++-
 tcg/aarch64/tcg-target.c.inc  | 68 ++++++++++++++++++++++-------------
 2 files changed, 51 insertions(+), 25 deletions(-)

-- 
2.25.1

Comments

Alex Bennée Oct. 11, 2021, 10:28 a.m. UTC | #1
Richard Henderson <richard.henderson@linaro.org> writes:

> AArch64 has both sign and zero-extending addressing modes, which
> means that either treatment of guest addresses is equally efficient.
> Enabling this for AArch64 gives us testing of the feature in CI.

So which guest front ends will exercise this backend? I realise you
never mentioned it in the cover letter. Is this something we can
exercise in 32-bit user mode tests?

> +static int guest_ext_option(void)
> +{
> +#ifdef CONFIG_USER_ONLY
> +    bool signed_addr32 = guest_base_signed_addr32;
> +#else
> +    bool signed_addr32 = TCG_TARGET_SIGNED_ADDR32;
> +#endif
> +
> +    if (TARGET_LONG_BITS == 64) {
> +        return 3; /* LSL #0 */
> +    } else if (signed_addr32) {
> +        return 6; /* SXTW */
> +    } else {
> +        return 2; /* UXTW */
> +    }
> +}

If this is going to be a magic number we pass into our code generation
we could at least wrap it in a confined enum rather than a bare int we
chuck around.
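
As an illustration, here is a minimal sketch of the kind of confined enum
being suggested (the type and constant names are hypothetical and not part
of the patch; the values are the <option> field encodings the patch already
uses):

/* Hypothetical sketch only -- not part of the patch.  These are the
 * AArch64 <option> encodings for register-offset loads/stores, i.e.
 * how the offset register is extended before being added to the base.
 */
typedef enum {
    TCG_EXT_UXTW = 2,  /* zero-extend a 32-bit offset register */
    TCG_EXT_LSL  = 3,  /* use the 64-bit offset register as-is (LSL #0) */
    TCG_EXT_SXTW = 6,  /* sign-extend a 32-bit offset register */
} AArch64ExtOption;

With something like this, guest_ext_option() could return AArch64ExtOption
rather than a bare int.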

-- 
Alex Bennée
Richard Henderson Oct. 11, 2021, 3:24 p.m. UTC | #2
On 10/11/21 3:28 AM, Alex Bennée wrote:
> Richard Henderson <richard.henderson@linaro.org> writes:
>
>> AArch64 has both sign and zero-extending addressing modes, which
>> means that either treatment of guest addresses is equally efficient.
>> Enabling this for AArch64 gives us testing of the feature in CI.
>
> So which guest front ends will exercise this backend?

All 32-bit guests.

> Is this something we can exercise in 32-bit user mode tests?

Yes.

Which is why I enabled this for aarch64, so that we'd have a major
platform testing it daily.


r~
Richard Henderson Oct. 13, 2021, 9:05 p.m. UTC | #3
On 10/11/21 3:28 AM, Alex Bennée wrote:
>> +    if (TARGET_LONG_BITS == 64) {
>> +        return 3; /* LSL #0 */
>> +    } else if (signed_addr32) {
>> +        return 6; /* SXTW */
>> +    } else {
>> +        return 2; /* UXTW */
>> +    }
>> +}
>
> If this is going to be a magic number we pass into our code generation
> we could at least wrap it in a confined enum rather than a bare int we
> chuck around.


Given that it's used exactly once, and commented, and matches the ARM, do we
really need an enum?
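
For reference, a standalone sketch (illustrative only; the helper name is
made up, and the real tcg_out_insn_3310() additionally ORs in the
I3312_TO_I3310 adjustment) of where that bare option value ends up in the
load/store register-offset encoding:

#include <stdint.h>

/*
 * Illustrative only -- not the QEMU code.  The offset register sits at
 * bits [20:16], the <option> extension at [15:13], the base register
 * at [9:5] and the data register at [4:0], matching what the patch's
 * tcg_out_insn_3310() emits.
 */
uint32_t encode_ldst_regoff(uint32_t insn, unsigned rd, unsigned rn,
                            unsigned option, unsigned rm)
{
    return insn | rm << 16 | option << 13 | rn << 5 | (rd & 0x1f);
}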


r~

Patch

diff --git a/tcg/aarch64/tcg-target-sa32.h b/tcg/aarch64/tcg-target-sa32.h
index cb185b1526..c99e502e4c 100644
--- a/tcg/aarch64/tcg-target-sa32.h
+++ b/tcg/aarch64/tcg-target-sa32.h
@@ -1 +1,7 @@ 
-#define TCG_TARGET_SIGNED_ADDR32 0
+/*
+ * AArch64 has both SXTW and UXTW addressing modes, which means that
+ * it is agnostic to how guest addresses should be represented.
+ * Because aarch64 is more common than the other hosts that will
+ * want to use this feature, enable it for continuous testing.
+ */
+#define TCG_TARGET_SIGNED_ADDR32 1
diff --git a/tcg/aarch64/tcg-target.c.inc b/tcg/aarch64/tcg-target.c.inc
index 5edca8d44d..88b2963f9d 100644
--- a/tcg/aarch64/tcg-target.c.inc
+++ b/tcg/aarch64/tcg-target.c.inc
@@ -12,6 +12,7 @@ 
 
 #include "../tcg-pool.c.inc"
 #include "qemu/bitops.h"
+#include "tcg-target-sa32.h"
 
 /* We're going to re-use TCGType in setting of the SF bit, which controls
    the size of the operation performed.  If we know the values match, it
@@ -804,12 +805,12 @@  static void tcg_out_insn_3617(TCGContext *s, AArch64Insn insn, bool q,
 }
 
 static void tcg_out_insn_3310(TCGContext *s, AArch64Insn insn,
-                              TCGReg rd, TCGReg base, TCGType ext,
+                              TCGReg rd, TCGReg base, int option,
                               TCGReg regoff)
 {
     /* Note the AArch64Insn constants above are for C3.3.12.  Adjust.  */
     tcg_out32(s, insn | I3312_TO_I3310 | regoff << 16 |
-              0x4000 | ext << 13 | base << 5 | (rd & 0x1f));
+              option << 13 | base << 5 | (rd & 0x1f));
 }
 
 static void tcg_out_insn_3312(TCGContext *s, AArch64Insn insn,
@@ -1124,7 +1125,7 @@  static void tcg_out_ldst(TCGContext *s, AArch64Insn insn, TCGReg rd,
 
     /* Worst-case scenario, move offset to temp register, use reg offset.  */
     tcg_out_movi(s, TCG_TYPE_I64, TCG_REG_TMP, offset);
-    tcg_out_ldst_r(s, insn, rd, rn, TCG_TYPE_I64, TCG_REG_TMP);
+    tcg_out_ldst_r(s, insn, rd, rn, 3 /* LSL #0 */, TCG_REG_TMP);
 }
 
 static bool tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
@@ -1718,34 +1719,34 @@  static void tcg_out_tlb_read(TCGContext *s, TCGReg addr_reg, MemOp opc,
 
 static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
                                    TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   int option, TCGReg off_r)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
 
     switch (memop & MO_SSIZE) {
     case MO_UB:
-        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRB, data_r, addr_r, option, off_r);
         break;
     case MO_SB:
         tcg_out_ldst_r(s, ext ? I3312_LDRSBX : I3312_LDRSBW,
-                       data_r, addr_r, otype, off_r);
+                       data_r, addr_r, option, off_r);
         break;
     case MO_UW:
-        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRH, data_r, addr_r, option, off_r);
         break;
     case MO_SW:
         tcg_out_ldst_r(s, (ext ? I3312_LDRSHX : I3312_LDRSHW),
-                       data_r, addr_r, otype, off_r);
+                       data_r, addr_r, option, off_r);
         break;
     case MO_UL:
-        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRW, data_r, addr_r, option, off_r);
         break;
     case MO_SL:
-        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRSWX, data_r, addr_r, option, off_r);
         break;
     case MO_Q:
-        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_LDRX, data_r, addr_r, option, off_r);
         break;
     default:
         tcg_abort();
@@ -1754,50 +1755,68 @@  static void tcg_out_qemu_ld_direct(TCGContext *s, MemOp memop, TCGType ext,
 
 static void tcg_out_qemu_st_direct(TCGContext *s, MemOp memop,
                                    TCGReg data_r, TCGReg addr_r,
-                                   TCGType otype, TCGReg off_r)
+                                   int option, TCGReg off_r)
 {
     /* Byte swapping is left to middle-end expansion. */
     tcg_debug_assert((memop & MO_BSWAP) == 0);
 
     switch (memop & MO_SIZE) {
     case MO_8:
-        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRB, data_r, addr_r, option, off_r);
         break;
     case MO_16:
-        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRH, data_r, addr_r, option, off_r);
         break;
     case MO_32:
-        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRW, data_r, addr_r, option, off_r);
         break;
     case MO_64:
-        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, otype, off_r);
+        tcg_out_ldst_r(s, I3312_STRX, data_r, addr_r, option, off_r);
         break;
     default:
         tcg_abort();
     }
 }
 
+static int guest_ext_option(void)
+{
+#ifdef CONFIG_USER_ONLY
+    bool signed_addr32 = guest_base_signed_addr32;
+#else
+    bool signed_addr32 = TCG_TARGET_SIGNED_ADDR32;
+#endif
+
+    if (TARGET_LONG_BITS == 64) {
+        return 3; /* LSL #0 */
+    } else if (signed_addr32) {
+        return 6; /* SXTW */
+    } else {
+        return 2; /* UXTW */
+    }
+}
+
 static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi, TCGType ext)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    int option = guest_ext_option();
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 1);
     tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
+                           TCG_REG_X1, option, addr_reg);
     add_qemu_ldst_label(s, true, oi, ext, data_reg, addr_reg,
                         s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (USE_GUEST_BASE) {
         tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+                               TCG_REG_GUEST_BASE, option, addr_reg);
     } else {
         tcg_out_qemu_ld_direct(s, memop, ext, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+                               addr_reg, option, TCG_REG_XZR);
     }
 #endif /* CONFIG_SOFTMMU */
 }
@@ -1806,23 +1825,24 @@  static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
                             MemOpIdx oi)
 {
     MemOp memop = get_memop(oi);
-    const TCGType otype = TARGET_LONG_BITS == 64 ? TCG_TYPE_I64 : TCG_TYPE_I32;
+    int option = guest_ext_option();
+
 #ifdef CONFIG_SOFTMMU
     unsigned mem_index = get_mmuidx(oi);
     tcg_insn_unit *label_ptr;
 
     tcg_out_tlb_read(s, addr_reg, memop, &label_ptr, mem_index, 0);
     tcg_out_qemu_st_direct(s, memop, data_reg,
-                           TCG_REG_X1, otype, addr_reg);
+                           TCG_REG_X1, option, addr_reg);
     add_qemu_ldst_label(s, false, oi, (memop & MO_SIZE)== MO_64,
                         data_reg, addr_reg, s->code_ptr, label_ptr);
 #else /* !CONFIG_SOFTMMU */
     if (USE_GUEST_BASE) {
         tcg_out_qemu_st_direct(s, memop, data_reg,
-                               TCG_REG_GUEST_BASE, otype, addr_reg);
+                               TCG_REG_GUEST_BASE, option, addr_reg);
     } else {
         tcg_out_qemu_st_direct(s, memop, data_reg,
-                               addr_reg, TCG_TYPE_I64, TCG_REG_XZR);
+                               addr_reg, option, TCG_REG_XZR);
     }
 #endif /* CONFIG_SOFTMMU */
 }