
[v2,09/12] target/arm: Implement the SETG* instructions

Message ID 20230912140434.1333369-10-peter.maydell@linaro.org
State Superseded
Series target/arm: Implement FEAT_MOPS

Commit Message

Peter Maydell Sept. 12, 2023, 2:04 p.m. UTC
The FEAT_MOPS SETG* instructions are very similar to the SET*
instructions, but as well as setting memory contents they also
set the MTE tags. They are architecturally required to operate
on tag-granule aligned regions only.
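
As a toy model of the tag-setting semantics (a sketch, not QEMU code:
the allocation tag comes from bits [59:56] of the destination pointer,
and one tag is written per 16-byte granule):

    #include <stdint.h>

    #define TAG_GRANULE 16

    /*
     * Toy model: SETG* takes the allocation tag from bits [59:56] of
     * the destination pointer and writes it for every granule covered
     * by the set; ptr and size are both granule aligned.
     */
    static void setg_tags_model(uint64_t ptr, uint64_t size,
                                uint8_t *tags /* one entry per granule */)
    {
        uint8_t tag = (ptr >> 56) & 0xf;
        for (uint64_t i = 0; i < size / TAG_GRANULE; i++) {
            tags[i] = tag;
        }
    }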

Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
---
v2: - separate helper functions calling do_setp/setm/sete
    - use cpu_st16_mmu()
---
 target/arm/internals.h         | 10 ++++
 target/arm/tcg/helper-a64.h    |  3 ++
 target/arm/tcg/a64.decode      |  5 ++
 target/arm/tcg/helper-a64.c    | 86 ++++++++++++++++++++++++++++++++--
 target/arm/tcg/mte_helper.c    | 40 ++++++++++++++++
 target/arm/tcg/translate-a64.c | 20 +++++---
 6 files changed, 155 insertions(+), 9 deletions(-)

Comments

Philippe Mathieu-Daudé Sept. 24, 2024, 7:14 p.m. UTC | #1
Hi Peter,

(patch merged as commit 6087df574400659226861fa5ba47970f1fbd277b).

On 12/9/23 16:04, Peter Maydell wrote:
> The FEAT_MOPS SETG* instructions are very similar to the SET*
> instructions, but as well as setting memory contents they also
> set the MTE tags. They are architecturally required to operate
> on tag-granule aligned regions only.
> 
> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
> ---
> v2: - separate helper functions calling do_setp/setm/sete
>      - use cpu_st16_mmu()

So you replaced the pair of cpu_stq_mmuidx_ra() calls from v1 with
cpu_st16_mmu().

> ---
>   target/arm/internals.h         | 10 ++++
>   target/arm/tcg/helper-a64.h    |  3 ++
>   target/arm/tcg/a64.decode      |  5 ++
>   target/arm/tcg/helper-a64.c    | 86 ++++++++++++++++++++++++++++++++--
>   target/arm/tcg/mte_helper.c    | 40 ++++++++++++++++
>   target/arm/tcg/translate-a64.c | 20 +++++---
>   6 files changed, 155 insertions(+), 9 deletions(-)


> +/*
> + * Similar, but setting tags. The architecture requires us to do this
> + * in 16-byte chunks. SETP accesses are not tag checked; they set
> + * the tags.
> + */
> +static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
> +                              uint64_t setsize, uint32_t data, int memidx,
> +                              uint32_t *mtedesc, uintptr_t ra)
> +{
> +    void *mem;
> +    uint64_t cleanaddr;
> +
> +    setsize = MIN(setsize, page_limit(toaddr));
> +
> +    cleanaddr = useronly_clean_ptr(toaddr);
> +    /*
> +     * Trapless lookup: returns NULL for invalid page, I/O,
> +     * watchpoints, clean pages, etc.
> +     */
> +    mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);
> +
> +#ifndef CONFIG_USER_ONLY
> +    if (unlikely(!mem)) {
> +        /*
> +         * Slow-path: just do one write. This will handle the
> +         * watchpoint, invalid page, etc handling correctly.
> +         * The architecture requires that we do 16 bytes at a time,
> +         * and we know both ptr and size are 16 byte aligned.
> +         * For clean code pages, the next iteration will see
> +         * the page dirty and will use the fast path.
> +         */
> +        uint64_t repldata = data * 0x0101010101010101ULL;
> +        MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);

I'm trying to understand the MO_TE use, but I'm not seeing it in
https://developer.arm.com/documentation/ddi0602/2024-06/Base-Instructions/SETGP--SETGM--SETGE--Memory-set-with-tag-setting-
pseudo code. I also checked
https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/aarch64-functions-mops?lang=en#impl-aarch64.MemSetBytes.4
and 
https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/aarch64-functions-memory?lang=en#AArch64.MemSingleWrite.5

Is it the following part in MemSingleWrite()?

     if !atomic && aligned && accdesc.ispair then

         bits(halfsize*8) lowhalf, highhalf;
         <highhalf, lowhalf> = value;

         memstatus = PhysMemWrite(memaddrdesc, halfsize, accdesc, lowhalf);

         memaddrdesc.paddress.address = memaddrdesc.paddress.address + halfsize;
         memstatus = PhysMemWrite(memaddrdesc, halfsize, accdesc, highhalf);

> +        cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra);
> +        mte_mops_set_tags(env, toaddr, 16, *mtedesc);
> +        return 16;
> +    }
> +#endif
> +    /* Easy case: just memset the host memory */
> +    memset(mem, data, setsize);
> +    mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
> +    return setsize;
> +}

If we need to endian swap, could we use the cached hflags instead of MO_TE?

The BE_DATA bit is set in rebuild_hflags_a64() when
arm_cpu_data_is_big_endian_a64() is true. The following diff snippet
works for me but I'm out of my comfort zone here :)

-- >8 --
          uint64_t repldata = data * 0x0101010101010101ULL;
-        MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);
+        MemOp be_data = EX_TBFLAG_ANY(env->hflags, BE_DATA) ? MO_BE : MO_LE;
+        MemOpIdx oi16 = make_memop_idx(be_data | MO_128, memidx);
         cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra);
          mte_mops_set_tags(env, toaddr, 16, *mtedesc);
          return 16;
---

Thanks,

Phil.
Richard Henderson Oct. 3, 2024, 6:10 p.m. UTC | #2
On 9/24/24 12:14, Philippe Mathieu-Daudé wrote:
> Hi Peter,
> 
> (patch merged as commit 6087df574400659226861fa5ba47970f1fbd277b).
> 
> On 12/9/23 16:04, Peter Maydell wrote:
>> The FEAT_MOPS SETG* instructions are very similar to the SET*
>> instructions, but as well as setting memory contents they also
>> set the MTE tags. They are architecturally required to operate
>> on tag-granule aligned regions only.
>>
>> Signed-off-by: Peter Maydell <peter.maydell@linaro.org>
>> Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
>> ---
>> v2: - separate helper functions calling do_setp/setm/sete
>>      - use cpu_st16_mmu()
> 
> So you replaced the pair of cpu_stq_mmuidx_ra() calls from v1 with
> cpu_st16_mmu().
> 
>> ---
>>   target/arm/internals.h         | 10 ++++
>>   target/arm/tcg/helper-a64.h    |  3 ++
>>   target/arm/tcg/a64.decode      |  5 ++
>>   target/arm/tcg/helper-a64.c    | 86 ++++++++++++++++++++++++++++++++--
>>   target/arm/tcg/mte_helper.c    | 40 ++++++++++++++++
>>   target/arm/tcg/translate-a64.c | 20 +++++---
>>   6 files changed, 155 insertions(+), 9 deletions(-)
> 
> 
>> +/*
>> + * Similar, but setting tags. The architecture requires us to do this
>> + * in 16-byte chunks. SETP accesses are not tag checked; they set
>> + * the tags.
>> + */
>> +static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
>> +                              uint64_t setsize, uint32_t data, int memidx,
>> +                              uint32_t *mtedesc, uintptr_t ra)
>> +{
>> +    void *mem;
>> +    uint64_t cleanaddr;
>> +
>> +    setsize = MIN(setsize, page_limit(toaddr));
>> +
>> +    cleanaddr = useronly_clean_ptr(toaddr);
>> +    /*
>> +     * Trapless lookup: returns NULL for invalid page, I/O,
>> +     * watchpoints, clean pages, etc.
>> +     */
>> +    mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);
>> +
>> +#ifndef CONFIG_USER_ONLY
>> +    if (unlikely(!mem)) {
>> +        /*
>> +         * Slow-path: just do one write. This will handle the
>> +         * watchpoint, invalid page, etc handling correctly.
>> +         * The architecture requires that we do 16 bytes at a time,
>> +         * and we know both ptr and size are 16 byte aligned.
>> +         * For clean code pages, the next iteration will see
>> +         * the page dirty and will use the fast path.
>> +         */
>> +        uint64_t repldata = data * 0x0101010101010101ULL;
>> +        MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);
> 
> I'm trying to understand the MO_TE use, but I'm not seeing it in
> https://developer.arm.com/documentation/ddi0602/2024-06/Base-Instructions/SETGP--SETGM--SETGE--Memory-set-with-tag-setting-
> pseudo code. I also checked
> https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/aarch64-functions-mops?lang=en#impl-aarch64.MemSetBytes.4
> and https://developer.arm.com/documentation/ddi0602/2024-06/Shared-Pseudocode/aarch64-functions-memory?lang=en#AArch64.MemSingleWrite.5

It's not actually needed.  All of the bytes stored are identical (see the construction of repldata).

Removing MO_TE will store the bytes in host byte order, which will avoid an unnecessary bswap on big-endian hosts.
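
To make that concrete, a standalone sketch (not QEMU code;
__builtin_bswap64 is the GCC/Clang builtin, standing in for the byte
swap a big-endian host would otherwise perform):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t data = 0x5a;
        /* Same construction as repldata above: all eight bytes equal */
        uint64_t repldata = data * 0x0101010101010101ULL;

        /* Swapping a byte-replicated value is a no-op, so the stored
         * memory image is the same in either byte order. */
        printf("%d\n", repldata == __builtin_bswap64(repldata)); /* prints 1 */
        return 0;
    }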

The stores here are all from

     while tagstep > 0 do
         tagaddr = memset.toaddress + memset.setsize + (tagstep - 1) * 16;
         AArch64.MemTag[tagaddr, accdesc] = tag;
         tagstep = tagstep - 1;


r~

Patch

diff --git a/target/arm/internals.h b/target/arm/internals.h
index a70a7fd50f6..642f77df29b 100644
--- a/target/arm/internals.h
+++ b/target/arm/internals.h
@@ -1300,6 +1300,16 @@  uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
 void mte_check_fail(CPUARMState *env, uint32_t desc,
                     uint64_t dirty_ptr, uintptr_t ra);
 
+/**
+ * mte_mops_set_tags: Set MTE tags for a portion of a FEAT_MOPS operation
+ * @env: CPU env
+ * @dirty_ptr: Start address of memory region (dirty pointer)
+ * @size: length of region (guaranteed not to cross page boundary)
+ * @desc: MTEDESC descriptor word
+ */
+void mte_mops_set_tags(CPUARMState *env, uint64_t dirty_ptr, uint64_t size,
+                       uint32_t desc);
+
 static inline int allocation_tag_from_addr(uint64_t ptr)
 {
     return extract64(ptr, 56, 4);
diff --git a/target/arm/tcg/helper-a64.h b/target/arm/tcg/helper-a64.h
index 7ce5d2105ad..10a99107124 100644
--- a/target/arm/tcg/helper-a64.h
+++ b/target/arm/tcg/helper-a64.h
@@ -121,3 +121,6 @@  DEF_HELPER_FLAGS_4(unaligned_access, TCG_CALL_NO_WG,
 DEF_HELPER_3(setp, void, env, i32, i32)
 DEF_HELPER_3(setm, void, env, i32, i32)
 DEF_HELPER_3(sete, void, env, i32, i32)
+DEF_HELPER_3(setgp, void, env, i32, i32)
+DEF_HELPER_3(setgm, void, env, i32, i32)
+DEF_HELPER_3(setge, void, env, i32, i32)
diff --git a/target/arm/tcg/a64.decode b/target/arm/tcg/a64.decode
index c2a97328eeb..a202faa17bc 100644
--- a/target/arm/tcg/a64.decode
+++ b/target/arm/tcg/a64.decode
@@ -570,3 +570,8 @@  STZ2G           11011001 11 1 ......... 11 ..... ..... @ldst_tag p=0 w=1
 SETP            00 011001110 ..... 00 . . 01 ..... ..... @set
 SETM            00 011001110 ..... 01 . . 01 ..... ..... @set
 SETE            00 011001110 ..... 10 . . 01 ..... ..... @set
+
+# Like SET, but also setting MTE tags
+SETGP           00 011101110 ..... 00 . . 01 ..... ..... @set
+SETGM           00 011101110 ..... 01 . . 01 ..... ..... @set
+SETGE           00 011101110 ..... 10 . . 01 ..... ..... @set
diff --git a/target/arm/tcg/helper-a64.c b/target/arm/tcg/helper-a64.c
index 24ae5ecf32e..2cf89184d77 100644
--- a/target/arm/tcg/helper-a64.c
+++ b/target/arm/tcg/helper-a64.c
@@ -1103,6 +1103,50 @@  static uint64_t set_step(CPUARMState *env, uint64_t toaddr,
     return setsize;
 }
 
+/*
+ * Similar, but setting tags. The architecture requires us to do this
+ * in 16-byte chunks. SETP accesses are not tag checked; they set
+ * the tags.
+ */
+static uint64_t set_step_tags(CPUARMState *env, uint64_t toaddr,
+                              uint64_t setsize, uint32_t data, int memidx,
+                              uint32_t *mtedesc, uintptr_t ra)
+{
+    void *mem;
+    uint64_t cleanaddr;
+
+    setsize = MIN(setsize, page_limit(toaddr));
+
+    cleanaddr = useronly_clean_ptr(toaddr);
+    /*
+     * Trapless lookup: returns NULL for invalid page, I/O,
+     * watchpoints, clean pages, etc.
+     */
+    mem = tlb_vaddr_to_host(env, cleanaddr, MMU_DATA_STORE, memidx);
+
+#ifndef CONFIG_USER_ONLY
+    if (unlikely(!mem)) {
+        /*
+         * Slow-path: just do one write. This will handle the
+         * watchpoint, invalid page, etc handling correctly.
+         * The architecture requires that we do 16 bytes at a time,
+         * and we know both ptr and size are 16 byte aligned.
+         * For clean code pages, the next iteration will see
+         * the page dirty and will use the fast path.
+         */
+        uint64_t repldata = data * 0x0101010101010101ULL;
+        MemOpIdx oi16 = make_memop_idx(MO_TE | MO_128, memidx);
+        cpu_st16_mmu(env, toaddr, int128_make128(repldata, repldata), oi16, ra);
+        mte_mops_set_tags(env, toaddr, 16, *mtedesc);
+        return 16;
+    }
+#endif
+    /* Easy case: just memset the host memory */
+    memset(mem, data, setsize);
+    mte_mops_set_tags(env, toaddr, setsize, *mtedesc);
+    return setsize;
+}
+
 typedef uint64_t StepFn(CPUARMState *env, uint64_t toaddr,
                         uint64_t setsize, uint32_t data,
                         int memidx, uint32_t *mtedesc, uintptr_t ra);
@@ -1141,6 +1185,18 @@  static bool mte_checks_needed(uint64_t ptr, uint32_t desc)
     return !tcma_check(desc, bit55, allocation_tag_from_addr(ptr));
 }
 
+/* Take an exception if the SETG addr/size are not granule aligned */
+static void check_setg_alignment(CPUARMState *env, uint64_t ptr, uint64_t size,
+                                 uint32_t memidx, uintptr_t ra)
+{
+    if ((size != 0 && !QEMU_IS_ALIGNED(ptr, TAG_GRANULE)) ||
+        !QEMU_IS_ALIGNED(size, TAG_GRANULE)) {
+        arm_cpu_do_unaligned_access(env_cpu(env), ptr, MMU_DATA_STORE,
+                                    memidx, ra);
+
+    }
+}
+
 /*
  * For the Memory Set operation, our implementation chooses
  * always to use "option A", where we update Xd to the final
@@ -1171,9 +1227,14 @@  static void do_setp(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
 
     if (setsize > INT64_MAX) {
         setsize = INT64_MAX;
+        if (is_setg) {
+            setsize &= ~0xf;
+        }
     }
 
-    if (!mte_checks_needed(toaddr, mtedesc)) {
+    if (unlikely(is_setg)) {
+        check_setg_alignment(env, toaddr, setsize, memidx, ra);
+    } else if (!mte_checks_needed(toaddr, mtedesc)) {
         mtedesc = 0;
     }
 
@@ -1203,6 +1264,11 @@  void HELPER(setp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
     do_setp(env, syndrome, mtedesc, set_step, false, GETPC());
 }
 
+void HELPER(setgp)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
+{
+    do_setp(env, syndrome, mtedesc, set_step_tags, true, GETPC());
+}
+
 static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                     StepFn *stepfn, bool is_setg, uintptr_t ra)
 {
@@ -1237,7 +1303,9 @@  static void do_setm(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
      * have an IMPDEF check for alignment here.
      */
 
-    if (!mte_checks_needed(toaddr, mtedesc)) {
+    if (unlikely(is_setg)) {
+        check_setg_alignment(env, toaddr, setsize, memidx, ra);
+    } else if (!mte_checks_needed(toaddr, mtedesc)) {
         mtedesc = 0;
     }
 
@@ -1260,6 +1328,11 @@  void HELPER(setm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
     do_setm(env, syndrome, mtedesc, set_step, false, GETPC());
 }
 
+void HELPER(setgm)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
+{
+    do_setm(env, syndrome, mtedesc, set_step_tags, true, GETPC());
+}
+
 static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                     StepFn *stepfn, bool is_setg, uintptr_t ra)
 {
@@ -1295,7 +1368,9 @@  static void do_sete(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc,
                            mops_mismatch_exception_target_el(env), ra);
     }
 
-    if (!mte_checks_needed(toaddr, mtedesc)) {
+    if (unlikely(is_setg)) {
+        check_setg_alignment(env, toaddr, setsize, memidx, ra);
+    } else if (!mte_checks_needed(toaddr, mtedesc)) {
         mtedesc = 0;
     }
 
@@ -1312,3 +1387,8 @@  void HELPER(sete)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
 {
     do_sete(env, syndrome, mtedesc, set_step, false, GETPC());
 }
+
+void HELPER(setge)(CPUARMState *env, uint32_t syndrome, uint32_t mtedesc)
+{
+    do_sete(env, syndrome, mtedesc, set_step_tags, true, GETPC());
+}
diff --git a/target/arm/tcg/mte_helper.c b/target/arm/tcg/mte_helper.c
index 1cb61cea7af..66a80eeb950 100644
--- a/target/arm/tcg/mte_helper.c
+++ b/target/arm/tcg/mte_helper.c
@@ -1041,3 +1041,43 @@  uint64_t mte_mops_probe(CPUARMState *env, uint64_t ptr, uint64_t size,
         return n * TAG_GRANULE - (ptr - tag_first);
     }
 }
+
+void mte_mops_set_tags(CPUARMState *env, uint64_t ptr, uint64_t size,
+                       uint32_t desc)
+{
+    int mmu_idx, tag_count;
+    uint64_t ptr_tag;
+    void *mem;
+
+    if (!desc) {
+        /* Tags not actually enabled */
+        return;
+    }
+
+    mmu_idx = FIELD_EX32(desc, MTEDESC, MIDX);
+    /* True probe: this will never fault */
+    mem = allocation_tag_mem_probe(env, mmu_idx, ptr, MMU_DATA_STORE, size,
+                                   MMU_DATA_STORE, true, 0);
+    if (!mem) {
+        return;
+    }
+
+    /*
+     * We know that ptr and size are both TAG_GRANULE aligned; store
+     * the tag from the pointer value into the tag memory.
+     */
+    ptr_tag = allocation_tag_from_addr(ptr);
+    tag_count = size / TAG_GRANULE;
+    if (ptr & TAG_GRANULE) {
+        /* Not 2*TAG_GRANULE-aligned: store tag to first nibble */
+        store_tag1_parallel(TAG_GRANULE, mem, ptr_tag);
+        mem++;
+        tag_count--;
+    }
+    memset(mem, ptr_tag | (ptr_tag << 4), tag_count / 2);
+    if (tag_count & 1) {
+        /* Final trailing unaligned nibble */
+        mem += tag_count / 2;
+        store_tag1_parallel(0, mem, ptr_tag);
+    }
+}
diff --git a/target/arm/tcg/translate-a64.c b/target/arm/tcg/translate-a64.c
index da4aabbaf4e..27bb3039b4d 100644
--- a/target/arm/tcg/translate-a64.c
+++ b/target/arm/tcg/translate-a64.c
@@ -3964,11 +3964,16 @@  TRANS_FEAT(STZ2G, aa64_mte_insn_reg, do_STG, a, true, true)
 
 typedef void SetFn(TCGv_env, TCGv_i32, TCGv_i32);
 
-static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
+static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue,
+                   bool is_setg, SetFn fn)
 {
     int memidx;
     uint32_t syndrome, desc = 0;
 
+    if (is_setg && !dc_isar_feature(aa64_mte, s)) {
+        return false;
+    }
+
     /*
      * UNPREDICTABLE cases: we choose to UNDEF, which allows
      * us to pull this check before the CheckMOPSEnabled() test
@@ -3985,10 +3990,10 @@  static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
      * We pass option_a == true, matching our implementation;
      * we pass wrong_option == false: helper function may set that bit.
      */
-    syndrome = syn_mop(true, false, (a->nontemp << 1) | a->unpriv,
+    syndrome = syn_mop(true, is_setg, (a->nontemp << 1) | a->unpriv,
                        is_epilogue, false, true, a->rd, a->rs, a->rn);
 
-    if (s->mte_active[a->unpriv]) {
+    if (is_setg ? s->ata[a->unpriv] : s->mte_active[a->unpriv]) {
         /* We may need to do MTE tag checking, so assemble the descriptor */
         desc = FIELD_DP32(desc, MTEDESC, TBI, s->tbid);
         desc = FIELD_DP32(desc, MTEDESC, TCMA, s->tcma);
@@ -4007,9 +4012,12 @@  static bool do_SET(DisasContext *s, arg_set *a, bool is_epilogue, SetFn fn)
     return true;
 }
 
-TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, gen_helper_setp)
-TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, gen_helper_setm)
-TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, gen_helper_sete)
+TRANS_FEAT(SETP, aa64_mops, do_SET, a, false, false, gen_helper_setp)
+TRANS_FEAT(SETM, aa64_mops, do_SET, a, false, false, gen_helper_setm)
+TRANS_FEAT(SETE, aa64_mops, do_SET, a, true, false, gen_helper_sete)
+TRANS_FEAT(SETGP, aa64_mops, do_SET, a, false, true, gen_helper_setgp)
+TRANS_FEAT(SETGM, aa64_mops, do_SET, a, false, true, gen_helper_setgm)
+TRANS_FEAT(SETGE, aa64_mops, do_SET, a, true, true, gen_helper_setge)
 
 typedef void ArithTwoOp(TCGv_i64, TCGv_i64, TCGv_i64);