[for-4.2,01/24] cputlb: Add tlb_set_asid_for_mmuidx

Message ID 20190719210326.15466-2-richard.henderson@linaro.org
State Superseded
Series target/arm: Implement ARMv8.1-VHE

Commit Message

Richard Henderson July 19, 2019, 9:03 p.m. UTC
Although we can't do much with ASIDs except remember them, this
will allow cleanups within target/ that should make things clearer.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>


---
v2: Assert cpu_is_self; only flush idx w/ asid mismatch.
---
 include/exec/cpu-all.h  | 11 +++++++++++
 include/exec/cpu-defs.h |  2 ++
 include/exec/exec-all.h | 19 +++++++++++++++++++
 accel/tcg/cputlb.c      | 26 ++++++++++++++++++++++++++
 4 files changed, 58 insertions(+)

-- 
2.17.1

Comments

Alex Bennée July 22, 2019, 9:53 a.m. UTC | #1
Richard Henderson <richard.henderson@linaro.org> writes:

> Although we can't do much with ASIDs except remember them, this
> will allow cleanups within target/ that should make things clearer.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
>
> ---
> v2: Assert cpu_is_self; only flush idx w/ asid mismatch.
> ---
>  include/exec/cpu-all.h  | 11 +++++++++++
>  include/exec/cpu-defs.h |  2 ++
>  include/exec/exec-all.h | 19 +++++++++++++++++++
>  accel/tcg/cputlb.c      | 26 ++++++++++++++++++++++++++
>  4 files changed, 58 insertions(+)
>
> diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
> index 536ea58f81..40b140cbba 100644
> --- a/include/exec/cpu-all.h
> +++ b/include/exec/cpu-all.h
> @@ -439,4 +439,15 @@ static inline CPUTLB *env_tlb(CPUArchState *env)
>      return &env_neg(env)->tlb;
>  }
>
> +/**
> + * cpu_tlb(cpu)
> + * @cpu: The generic CPUState
> + *
> + * Return the CPUTLB state associated with the cpu.
> + */
> +static inline CPUTLB *cpu_tlb(CPUState *cpu)
> +{
> +    return &cpu_neg(cpu)->tlb;
> +}
> +
>  #endif /* CPU_ALL_H */
> diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
> index 9bc713a70b..73584841c0 100644
> --- a/include/exec/cpu-defs.h
> +++ b/include/exec/cpu-defs.h
> @@ -169,6 +169,8 @@ typedef struct CPUTLBDesc {
>      size_t n_used_entries;
>      /* The next index to use in the tlb victim table.  */
>      size_t vindex;
> +    /* The current ASID for this tlb.  */
> +    uint32_t asid;

is it worth adding an "if used" to the comment? I assume there are arches
that will never set it and therefore never care about ASID.

Otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>


>      /* The tlb victim table, in two parts.  */
>      CPUTLBEntry vtable[CPU_VTLB_SIZE];
>      CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
> diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
> index 16034ee651..9c77aa5bf9 100644
> --- a/include/exec/exec-all.h
> +++ b/include/exec/exec-all.h
> @@ -225,6 +225,21 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
>   * depend on when the guests translation ends the TB.
>   */
>  void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
> +/**
> + * tlb_set_asid_for_mmuidx:
> + * @cpu: Originating cpu
> + * @asid: Address Space Identifier
> + * @idxmap: bitmap of MMU indexes to set to @asid
> + * @depmap: bitmap of dependent MMU indexes
> + *
> + * Set an ASID for all of @idxmap.  If any previous ASID was different,
> + * then we will flush the mmu idx.  If a flush is required, then also flush
> + * all dependent mmu indices in @depmap.  The latter is typically used
> + * for secondary page resolution, for implementing virtualization within
> + * the guest.
> + */
> +void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
> +                             uint16_t idxmap, uint16_t depmap);
>  /**
>   * tlb_set_page_with_attrs:
>   * @cpu: CPU to add this TLB entry for
> @@ -310,6 +325,10 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
>                                                         uint16_t idxmap)
>  {
>  }
> +static inline void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
> +                                           uint16_t idxmap, uint16_t depmap)
> +{
> +}
>  #endif
>
>  #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index bb9897b25a..c68f57755b 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -540,6 +540,32 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
>      tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
>  }
>
> +void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap,
> +                             uint16_t depmap)
> +{
> +    CPUTLB *tlb = cpu_tlb(cpu);
> +    uint16_t work, to_flush = 0;
> +
> +    /* It doesn't make sense to set context across cpus.  */
> +    assert_cpu_is_self(cpu);
> +
> +    /*
> +     * We only support ASIDs trivially: if there is any change,
> +     * then we must flush the TLB for that mmu index.
> +     */
> +    for (work = idxmap; work != 0; work &= work - 1) {
> +        int mmu_idx = ctz32(work);
> +        if (tlb->d[mmu_idx].asid != asid) {
> +            tlb->d[mmu_idx].asid = asid;
> +            to_flush |= 1 << mmu_idx;
> +        }
> +    }
> +    if (to_flush) {
> +        to_flush |= depmap;
> +        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(to_flush));
> +    }
> +}
> +
>  /* update the TLBs so that writes to code in the virtual page 'addr'
>     can be detected */
>  void tlb_protect_code(ram_addr_t ram_addr)

--
Alex Bennée

Patch

diff --git a/include/exec/cpu-all.h b/include/exec/cpu-all.h
index 536ea58f81..40b140cbba 100644
--- a/include/exec/cpu-all.h
+++ b/include/exec/cpu-all.h
@@ -439,4 +439,15 @@ static inline CPUTLB *env_tlb(CPUArchState *env)
     return &env_neg(env)->tlb;
 }
 
+/**
+ * cpu_tlb(cpu)
+ * @cpu: The generic CPUState
+ *
+ * Return the CPUTLB state associated with the cpu.
+ */
+static inline CPUTLB *cpu_tlb(CPUState *cpu)
+{
+    return &cpu_neg(cpu)->tlb;
+}
+
 #endif /* CPU_ALL_H */
diff --git a/include/exec/cpu-defs.h b/include/exec/cpu-defs.h
index 9bc713a70b..73584841c0 100644
--- a/include/exec/cpu-defs.h
+++ b/include/exec/cpu-defs.h
@@ -169,6 +169,8 @@ typedef struct CPUTLBDesc {
     size_t n_used_entries;
     /* The next index to use in the tlb victim table.  */
     size_t vindex;
+    /* The current ASID for this tlb.  */
+    uint32_t asid;
     /* The tlb victim table, in two parts.  */
     CPUTLBEntry vtable[CPU_VTLB_SIZE];
     CPUIOTLBEntry viotlb[CPU_VTLB_SIZE];
diff --git a/include/exec/exec-all.h b/include/exec/exec-all.h
index 16034ee651..9c77aa5bf9 100644
--- a/include/exec/exec-all.h
+++ b/include/exec/exec-all.h
@@ -225,6 +225,21 @@ void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
  * depend on when the guests translation ends the TB.
  */
 void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
+/**
+ * tlb_set_asid_for_mmuidx:
+ * @cpu: Originating cpu
+ * @asid: Address Space Identifier
+ * @idxmap: bitmap of MMU indexes to set to @asid
+ * @depmap: bitmap of dependent MMU indexes
+ *
+ * Set an ASID for all of @idxmap.  If any previous ASID was different,
+ * then we will flush the mmu idx.  If a flush is required, then also flush
+ * all dependent mmu indices in @depmap.  The latter is typically used
+ * for secondary page resolution, for implementing virtualization within
+ * the guest.
+ */
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+                             uint16_t idxmap, uint16_t depmap);
 /**
  * tlb_set_page_with_attrs:
  * @cpu: CPU to add this TLB entry for
@@ -310,6 +325,10 @@ static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                        uint16_t idxmap)
 {
 }
+static inline void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid,
+                                           uint16_t idxmap, uint16_t depmap)
+{
+}
 #endif
 
 #define CODE_GEN_ALIGN           16 /* must be >= of the size of a icache line */
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index bb9897b25a..c68f57755b 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -540,6 +540,32 @@ void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
     tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
 }
 
+void tlb_set_asid_for_mmuidx(CPUState *cpu, uint32_t asid, uint16_t idxmap,
+                             uint16_t depmap)
+{
+    CPUTLB *tlb = cpu_tlb(cpu);
+    uint16_t work, to_flush = 0;
+
+    /* It doesn't make sense to set context across cpus.  */
+    assert_cpu_is_self(cpu);
+
+    /*
+     * We only support ASIDs trivially: if there is any change,
+     * then we must flush the TLB for that mmu index.
+     */
+    for (work = idxmap; work != 0; work &= work - 1) {
+        int mmu_idx = ctz32(work);
+        if (tlb->d[mmu_idx].asid != asid) {
+            tlb->d[mmu_idx].asid = asid;
+            to_flush |= 1 << mmu_idx;
+        }
+    }
+    if (to_flush) {
+        to_flush |= depmap;
+        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(to_flush));
+    }
+}
+
 /* update the TLBs so that writes to code in the virtual page 'addr'
    can be detected */
 void tlb_protect_code(ram_addr_t ram_addr)
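
To see how a target would consume this API, here is a minimal sketch of a
context-switch hook. It is an illustration only, not part of this patch or
the series: the hook and the mmu index names (MMU_USER_IDX, MMU_KERNEL_IDX,
MMU_STAGE2_IDX) are made up.

#include "qemu/osdep.h"
#include "exec/exec-all.h"

/*
 * Hypothetical context-switch hook: tag the user and kernel mmu
 * indexes with the new ASID.  If it differs from the ASID set
 * previously, those indexes are flushed, and the dependent stage-2
 * index is flushed along with them.
 */
static void example_switch_asid(CPUState *cs, uint32_t new_asid)
{
    tlb_set_asid_for_mmuidx(cs, new_asid,
                            (1 << MMU_USER_IDX) | (1 << MMU_KERNEL_IDX),
                            1 << MMU_STAGE2_IDX);
}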
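
The implementation walks the index bitmap with the clear-lowest-set-bit
idiom: "work &= work - 1" drops the lowest set bit on each iteration, and
ctz32() (QEMU's count-trailing-zeros helper) yields that bit's position.
A standalone sketch of the loop, with the GCC/Clang builtin standing in
for ctz32():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint16_t idxmap = 0x52;              /* bits 1, 4 and 6 set */
    uint16_t work;

    /* Visits exactly the set bits of idxmap: prints 1, 4, 6. */
    for (work = idxmap; work != 0; work &= work - 1) {
        int mmu_idx = __builtin_ctz(work);   /* lowest set bit */
        printf("mmu_idx %d\n", mmu_idx);
    }
    return 0;
}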