| Message ID | 20190923230004.9231-6-richard.henderson@linaro.org |
| --- | --- |
| State | Superseded |
| Series | Move rom and notdirty handling to cputlb |
On 24.09.19 00:59, Richard Henderson wrote:
> We will shortly be using these more than once.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
> ---
>  accel/tcg/cputlb.c | 110 +++++++++++++++++++++++----------------------
>  1 file changed, 57 insertions(+), 53 deletions(-)
>
> diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
> index e529af6d09..430ba4a69d 100644
> --- a/accel/tcg/cputlb.c
> +++ b/accel/tcg/cputlb.c
> @@ -1281,6 +1281,29 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
>  typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
>                                  TCGMemOpIdx oi, uintptr_t retaddr);
>
> +static inline uint64_t QEMU_ALWAYS_INLINE
> +load_memop(const void *haddr, MemOp op)
> +{
> +    switch (op) {
> +    case MO_UB:
> +        return ldub_p(haddr);
> +    case MO_BEUW:
> +        return lduw_be_p(haddr);
> +    case MO_LEUW:
> +        return lduw_le_p(haddr);
> +    case MO_BEUL:
> +        return (uint32_t)ldl_be_p(haddr);
> +    case MO_LEUL:
> +        return (uint32_t)ldl_le_p(haddr);
> +    case MO_BEQ:
> +        return ldq_be_p(haddr);
> +    case MO_LEQ:
> +        return ldq_le_p(haddr);
> +    default:
> +        optimize_away();
> +    }
> +}
> +
>  static inline uint64_t QEMU_ALWAYS_INLINE
>  load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
>              uintptr_t retaddr, MemOp op, bool code_read,
> @@ -1373,33 +1396,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
>
>   do_aligned_access:
>      haddr = (void *)((uintptr_t)addr + entry->addend);
> -    switch (op) {
> -    case MO_UB:
> -        res = ldub_p(haddr);
> -        break;
> -    case MO_BEUW:
> -        res = lduw_be_p(haddr);
> -        break;
> -    case MO_LEUW:
> -        res = lduw_le_p(haddr);
> -        break;
> -    case MO_BEUL:
> -        res = (uint32_t)ldl_be_p(haddr);
> -        break;
> -    case MO_LEUL:
> -        res = (uint32_t)ldl_le_p(haddr);
> -        break;
> -    case MO_BEQ:
> -        res = ldq_be_p(haddr);
> -        break;
> -    case MO_LEQ:
> -        res = ldq_le_p(haddr);
> -        break;
> -    default:
> -        optimize_away();
> -    }
> -
> -    return res;
> +    return load_memop(haddr, op);
>  }
>
>  /*
> @@ -1415,7 +1412,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
>  static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
>                                TCGMemOpIdx oi, uintptr_t retaddr)
>  {
> -    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
> +    return load_helper(env, addr, oi, retaddr, MO_UB, false,
> +                       full_ldub_mmu);

Unnecessary change.

Reviewed-by: David Hildenbrand <david@redhat.com>

--
Thanks,

David / dhildenb
Richard Henderson <richard.henderson@linaro.org> writes:

> We will shortly be using these more than once.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

<snip>

>  }
>
>  /*
> @@ -1415,7 +1412,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
>  static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
>                                TCGMemOpIdx oi, uintptr_t retaddr)
>  {
> -    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
> +    return load_helper(env, addr, oi, retaddr, MO_UB, false,
> +                       full_ldub_mmu);

This is an unrelated change, otherwise:

Reviewed-by: Alex Bennée <alex.bennee@linaro.org>

>  }
>
>  tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
> @@ -1530,6 +1528,36 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
>   * Store Helpers
>   */
>
> +static inline void QEMU_ALWAYS_INLINE
> +store_memop(void *haddr, uint64_t val, MemOp op)
> +{
> +    switch (op) {
> +    case MO_UB:
> +        stb_p(haddr, val);
> +        break;
> +    case MO_BEUW:
> +        stw_be_p(haddr, val);
> +        break;
> +    case MO_LEUW:
> +        stw_le_p(haddr, val);
> +        break;
> +    case MO_BEUL:
> +        stl_be_p(haddr, val);
> +        break;
> +    case MO_LEUL:
> +        stl_le_p(haddr, val);
> +        break;
> +    case MO_BEQ:
> +        stq_be_p(haddr, val);
> +        break;
> +    case MO_LEQ:
> +        stq_le_p(haddr, val);
> +        break;
> +    default:
> +        optimize_away();
> +    }
> +}
> +
>  static inline void QEMU_ALWAYS_INLINE
>  store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
>               TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
> @@ -1657,31 +1685,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
>
>   do_aligned_access:
>      haddr = (void *)((uintptr_t)addr + entry->addend);
> -    switch (op) {
> -    case MO_UB:
> -        stb_p(haddr, val);
> -        break;
> -    case MO_BEUW:
> -        stw_be_p(haddr, val);
> -        break;
> -    case MO_LEUW:
> -        stw_le_p(haddr, val);
> -        break;
> -    case MO_BEUL:
> -        stl_be_p(haddr, val);
> -        break;
> -    case MO_LEUL:
> -        stl_le_p(haddr, val);
> -        break;
> -    case MO_BEQ:
> -        stq_be_p(haddr, val);
> -        break;
> -    case MO_LEQ:
> -        stq_le_p(haddr, val);
> -        break;
> -    default:
> -        optimize_away();
> -    }
> +    store_memop(haddr, val, op);
>  }
>
>  void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,

--
Alex Bennée
```diff
diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index e529af6d09..430ba4a69d 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -1281,6 +1281,29 @@ static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
 typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
                                 TCGMemOpIdx oi, uintptr_t retaddr);

+static inline uint64_t QEMU_ALWAYS_INLINE
+load_memop(const void *haddr, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        return ldub_p(haddr);
+    case MO_BEUW:
+        return lduw_be_p(haddr);
+    case MO_LEUW:
+        return lduw_le_p(haddr);
+    case MO_BEUL:
+        return (uint32_t)ldl_be_p(haddr);
+    case MO_LEUL:
+        return (uint32_t)ldl_le_p(haddr);
+    case MO_BEQ:
+        return ldq_be_p(haddr);
+    case MO_LEQ:
+        return ldq_le_p(haddr);
+    default:
+        optimize_away();
+    }
+}
+
 static inline uint64_t QEMU_ALWAYS_INLINE
 load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
             uintptr_t retaddr, MemOp op, bool code_read,
@@ -1373,33 +1396,7 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,

  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        res = ldub_p(haddr);
-        break;
-    case MO_BEUW:
-        res = lduw_be_p(haddr);
-        break;
-    case MO_LEUW:
-        res = lduw_le_p(haddr);
-        break;
-    case MO_BEUL:
-        res = (uint32_t)ldl_be_p(haddr);
-        break;
-    case MO_LEUL:
-        res = (uint32_t)ldl_le_p(haddr);
-        break;
-    case MO_BEQ:
-        res = ldq_be_p(haddr);
-        break;
-    case MO_LEQ:
-        res = ldq_le_p(haddr);
-        break;
-    default:
-        optimize_away();
-    }
-
-    return res;
+    return load_memop(haddr, op);
 }

 /*
@@ -1415,7 +1412,8 @@ load_helper(CPUArchState *env, target_ulong addr, TCGMemOpIdx oi,
 static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr)
 {
-    return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
+    return load_helper(env, addr, oi, retaddr, MO_UB, false,
+                       full_ldub_mmu);
 }

 tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
@@ -1530,6 +1528,36 @@ tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
  * Store Helpers
  */

+static inline void QEMU_ALWAYS_INLINE
+store_memop(void *haddr, uint64_t val, MemOp op)
+{
+    switch (op) {
+    case MO_UB:
+        stb_p(haddr, val);
+        break;
+    case MO_BEUW:
+        stw_be_p(haddr, val);
+        break;
+    case MO_LEUW:
+        stw_le_p(haddr, val);
+        break;
+    case MO_BEUL:
+        stl_be_p(haddr, val);
+        break;
+    case MO_LEUL:
+        stl_le_p(haddr, val);
+        break;
+    case MO_BEQ:
+        stq_be_p(haddr, val);
+        break;
+    case MO_LEQ:
+        stq_le_p(haddr, val);
+        break;
+    default:
+        optimize_away();
+    }
+}
+
 static inline void QEMU_ALWAYS_INLINE
 store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
              TCGMemOpIdx oi, uintptr_t retaddr, MemOp op)
@@ -1657,31 +1685,7 @@ store_helper(CPUArchState *env, target_ulong addr, uint64_t val,

  do_aligned_access:
     haddr = (void *)((uintptr_t)addr + entry->addend);
-    switch (op) {
-    case MO_UB:
-        stb_p(haddr, val);
-        break;
-    case MO_BEUW:
-        stw_be_p(haddr, val);
-        break;
-    case MO_LEUW:
-        stw_le_p(haddr, val);
-        break;
-    case MO_BEUL:
-        stl_be_p(haddr, val);
-        break;
-    case MO_LEUL:
-        stl_le_p(haddr, val);
-        break;
-    case MO_BEQ:
-        stq_be_p(haddr, val);
-        break;
-    case MO_LEQ:
-        stq_le_p(haddr, val);
-        break;
-    default:
-        optimize_away();
-    }
+    store_memop(haddr, val, op);
 }

 void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
```
We will shortly be using these more than once.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 accel/tcg/cputlb.c | 110 +++++++++++++++++++++++----------------------
 1 file changed, 57 insertions(+), 53 deletions(-)

--
2.17.1
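For readers less familiar with this corner of the softmmu code: the patch simply centralises the per-MemOp switch that the aligned fast paths used to open-code, so the same dispatch can be reused later in the series, which is the commit's stated motivation. The program below is a minimal standalone sketch of that pattern, not QEMU code; every name in it (mem_op, load_memop_sketch, OP_*) is made up for illustration. It only performs host-endian accesses, whereas the real load_memop()/store_memop() select explicit big- and little-endian variants and use a build-time assertion in the default branch.

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for QEMU's MemOp values; illustration only. */
typedef enum { OP_UB, OP_UW, OP_UL, OP_UQ } mem_op;

/*
 * Dispatch-on-MemOp helper in the style of load_memop() from the patch.
 * Forced inlining plus a compile-time-constant 'op' lets the compiler
 * fold the switch down to a single memory access and discard the
 * unreachable default branch.
 */
static inline uint64_t __attribute__((always_inline))
load_memop_sketch(const void *haddr, mem_op op)
{
    uint16_t w;
    uint32_t l;
    uint64_t q;

    switch (op) {
    case OP_UB:
        return *(const uint8_t *)haddr;
    case OP_UW:
        memcpy(&w, haddr, sizeof(w));   /* unaligned-safe, host endian */
        return w;
    case OP_UL:
        memcpy(&l, haddr, sizeof(l));
        return l;
    case OP_UQ:
        memcpy(&q, haddr, sizeof(q));
        return q;
    default:
        /* The real helper asserts at build time that this is unreachable. */
        abort();
    }
}

int main(void)
{
    uint64_t buf = 0x1122334455667788ULL;

    /* 'op' is a literal at each call site, so each call folds to one load. */
    printf("byte: %#llx\n",
           (unsigned long long)load_memop_sketch(&buf, OP_UB));
    printf("quad: %#llx\n",
           (unsigned long long)load_memop_sketch(&buf, OP_UQ));
    return 0;
}
```

Because the helper is always inlined and every caller passes a constant op, the switch costs nothing at run time; compiling a sketch like this with optimization enabled typically reduces each call to a single load instruction, which is why factoring the switch out of load_helper()/store_helper() does not pessimise the fast path.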