| Message ID | 20190719210326.15466-14-richard.henderson@linaro.org |
|---|---|
| State | Superseded |
| Series | target/arm: Implement ARMv8.1-VHE |
Richard Henderson <richard.henderson@linaro.org> writes:

> No functional change, but unify code sequences.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

> ---
>  target/arm/helper.c | 118 ++++++++++++++------------------------
>  1 file changed, 37 insertions(+), 81 deletions(-)
> [...]

--
Alex Bennée
No functional change, but unify code sequences.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 target/arm/helper.c | 118 ++++++++++++++------------------------
 1 file changed, 37 insertions(+), 81 deletions(-)

diff --git a/target/arm/helper.c b/target/arm/helper.c
index 9a9809ff4f..7adbf51479 100644
--- a/target/arm/helper.c
+++ b/target/arm/helper.c
@@ -3901,70 +3901,61 @@ static CPAccessResult aa64_cacheop_access(CPUARMState *env,
  * Page D4-1736 (DDI0487A.b)
  */
 
+static int vae1_tlbmask(CPUARMState *env)
+{
+    if (arm_is_secure_below_el3(env)) {
+        return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
+    } else {
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
+    }
+}
+
 static void tlbi_aa64_vmalle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                       uint64_t value)
 {
     CPUState *cs = env_cpu(env);
-    bool sec = arm_is_secure_below_el3(env);
+    int mask = vae1_tlbmask(env);
 
-    if (sec) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S1SE1 |
-                                            ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S12NSE1 |
-                                            ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
 }
 
 static void tlbi_aa64_vmalle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
     CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
 
     if (tlb_force_broadcast(env)) {
         tlbi_aa64_vmalle1is_write(env, NULL, value);
         return;
     }
 
+    tlb_flush_by_mmuidx(cs, mask);
+}
+
+static int vmalle1_tlbmask(CPUARMState *env)
+{
+    /*
+     * Note that the 'ALL' scope must invalidate both stage 1 and
+     * stage 2 translations, whereas most other scopes only invalidate
+     * stage 1 translations.
+     */
     if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S1SE1 |
-                            ARMMMUIdxBit_S1SE0);
+        return ARMMMUIdxBit_S1SE1 | ARMMMUIdxBit_S1SE0;
+    } else if (arm_feature(env, ARM_FEATURE_EL2)) {
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0 | ARMMMUIdxBit_S2NS;
     } else {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S12NSE1 |
-                            ARMMMUIdxBit_S12NSE0);
+        return ARMMMUIdxBit_S12NSE1 | ARMMMUIdxBit_S12NSE0;
     }
 }
 
 static void tlbi_aa64_alle1_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                   uint64_t value)
 {
-    /* Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
+    CPUState *cs = env_cpu(env);
+    int mask = vmalle1_tlbmask(env);
 
-    if (arm_is_secure_below_el3(env)) {
-        tlb_flush_by_mmuidx(cs,
-                            ARMMMUIdxBit_S1SE1 |
-                            ARMMMUIdxBit_S1SE0);
-    } else {
-        if (arm_feature(env, ARM_FEATURE_EL2)) {
-            tlb_flush_by_mmuidx(cs,
-                                ARMMMUIdxBit_S12NSE1 |
-                                ARMMMUIdxBit_S12NSE0 |
-                                ARMMMUIdxBit_S2NS);
-        } else {
-            tlb_flush_by_mmuidx(cs,
-                                ARMMMUIdxBit_S12NSE1 |
-                                ARMMMUIdxBit_S12NSE0);
-        }
-    }
+    tlb_flush_by_mmuidx(cs, mask);
 }
 
 static void tlbi_aa64_alle2_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -3988,28 +3979,10 @@ static void tlbi_aa64_alle3_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_alle1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                     uint64_t value)
 {
-    /* Note that the 'ALL' scope must invalidate both stage 1 and
-     * stage 2 translations, whereas most other scopes only invalidate
-     * stage 1 translations.
-     */
     CPUState *cs = env_cpu(env);
-    bool sec = arm_is_secure_below_el3(env);
-    bool has_el2 = arm_feature(env, ARM_FEATURE_EL2);
+    int mask = vmalle1_tlbmask(env);
 
-    if (sec) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S1SE1 |
-                                            ARMMMUIdxBit_S1SE0);
-    } else if (has_el2) {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S12NSE1 |
-                                            ARMMMUIdxBit_S12NSE0 |
-                                            ARMMMUIdxBit_S2NS);
-    } else {
-        tlb_flush_by_mmuidx_all_cpus_synced(cs,
-                                            ARMMMUIdxBit_S12NSE1 |
-                                            ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_by_mmuidx_all_cpus_synced(cs, mask);
 }
 
 static void tlbi_aa64_alle2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4059,20 +4032,11 @@ static void tlbi_aa64_vae3_write(CPUARMState *env, const ARMCPRegInfo *ri,
 static void tlbi_aa64_vae1is_write(CPUARMState *env, const ARMCPRegInfo *ri,
                                    uint64_t value)
 {
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
-    bool sec = arm_is_secure_below_el3(env);
+    CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
-    if (sec) {
-        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                 ARMMMUIdxBit_S1SE1 |
-                                                 ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr,
-                                                 ARMMMUIdxBit_S12NSE1 |
-                                                 ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_page_by_mmuidx_all_cpus_synced(cs, pageaddr, mask);
 }
 
 static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
@@ -4083,8 +4047,8 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
      * since we don't support flush-for-specific-ASID-only or
      * flush-last-level-only.
      */
-    ARMCPU *cpu = env_archcpu(env);
-    CPUState *cs = CPU(cpu);
+    CPUState *cs = env_cpu(env);
+    int mask = vae1_tlbmask(env);
     uint64_t pageaddr = sextract64(value << 12, 0, 56);
 
     if (tlb_force_broadcast(env)) {
@@ -4092,15 +4056,7 @@ static void tlbi_aa64_vae1_write(CPUARMState *env, const ARMCPRegInfo *ri,
         return;
     }
 
-    if (arm_is_secure_below_el3(env)) {
-        tlb_flush_page_by_mmuidx(cs, pageaddr,
-                                 ARMMMUIdxBit_S1SE1 |
-                                 ARMMMUIdxBit_S1SE0);
-    } else {
-        tlb_flush_page_by_mmuidx(cs, pageaddr,
-                                 ARMMMUIdxBit_S12NSE1 |
-                                 ARMMMUIdxBit_S12NSE0);
-    }
+    tlb_flush_page_by_mmuidx(cs, pageaddr, mask);
 }
 
 static void tlbi_aa64_vae2is_write(CPUARMState *env, const ARMCPRegInfo *ri,
-- 
2.17.1
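[Editor's note] The pattern the patch applies is easy to study outside the QEMU tree: hoist the if/else ladder that picks MMU index bits into a small pure function returning a bitmask, then have every TLBI write hook compute the mask once and issue a single flush call. Below is a minimal, self-contained sketch of that pattern; all demo_* names and the DemoCPUState type are invented stand-ins for QEMU's types and flush primitives, and only the mask-selection logic mirrors the patch's vae1_tlbmask().

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for CPUARMState: just enough state to pick a mask. */
typedef struct { bool secure; } DemoCPUState;

/* Invented stand-ins for the ARMMMUIdxBit_* values used by the patch. */
enum {
    DEMO_IDX_S1SE0   = 1 << 0,
    DEMO_IDX_S1SE1   = 1 << 1,
    DEMO_IDX_S12NSE0 = 1 << 2,
    DEMO_IDX_S12NSE1 = 1 << 3,
};

/* Mirrors vae1_tlbmask(): compute the set of MMU indexes to flush once. */
static int demo_vae1_tlbmask(const DemoCPUState *env)
{
    if (env->secure) {
        return DEMO_IDX_S1SE1 | DEMO_IDX_S1SE0;
    } else {
        return DEMO_IDX_S12NSE1 | DEMO_IDX_S12NSE0;
    }
}

/* Stub for tlb_flush_by_mmuidx(): just report which indexes we'd flush. */
static void demo_flush_by_mmuidx(int mask)
{
    printf("flushing mmuidx mask 0x%x\n", mask);
}

int main(void)
{
    DemoCPUState secure = { .secure = true };
    DemoCPUState nonsecure = { .secure = false };

    /* Each caller collapses to: compute mask, then one flush call. */
    demo_flush_by_mmuidx(demo_vae1_tlbmask(&secure));     /* prints 0x3 */
    demo_flush_by_mmuidx(demo_vae1_tlbmask(&nonsecure));  /* prints 0xc */
    return 0;
}

One property worth noting: because the helper returns a plain int mask, the same function feeds both the local-CPU primitive (tlb_flush_by_mmuidx) and the broadcast variant (tlb_flush_by_mmuidx_all_cpus_synced), which is what lets the *_is_write hooks and their non-inner-shareable counterparts share code in the patch.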