Message ID | 20210614083800.1166166-29-richard.henderson@linaro.org |
---|---|
State | Superseded |
Series | tcg: bswap improvements |
On Mon, Jun 14, 2021 at 6:54 PM Richard Henderson <richard.henderson@linaro.org> wrote:
>
> TCG_TARGET_HAS_MEMORY_BSWAP is already unset for this backend,
> which means that MO_BSWAP will be handled by the middle-end and
> will never be seen by the backend.  Thus the indexes used with
> qemu_{ld,st}_helpers will always be zero.
>
> Tidy the comments and asserts in tcg_out_qemu_{ld,st}_direct.
> It is not that we do not handle bswap "yet", but never will.
>
> Signed-off-by: Richard Henderson <richard.henderson@linaro.org>

Acked-by: Alistair Francis <alistair.francis@wdc.com>

Alistair

> ---
>  tcg/riscv/tcg-target.c.inc | 64 ++++++++++++++++++++------------------
>  1 file changed, 33 insertions(+), 31 deletions(-)
>
> diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
> index da7eecafc5..c16f96b401 100644
> --- a/tcg/riscv/tcg-target.c.inc
> +++ b/tcg/riscv/tcg-target.c.inc
> @@ -852,37 +852,43 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
>  /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
>   *                                     TCGMemOpIdx oi, uintptr_t ra)
>   */
> -static void * const qemu_ld_helpers[16] = {
> -    [MO_UB] = helper_ret_ldub_mmu,
> -    [MO_SB] = helper_ret_ldsb_mmu,
> -    [MO_LEUW] = helper_le_lduw_mmu,
> -    [MO_LESW] = helper_le_ldsw_mmu,
> -    [MO_LEUL] = helper_le_ldul_mmu,
> +static void * const qemu_ld_helpers[8] = {
> +    [MO_UB] = helper_ret_ldub_mmu,
> +    [MO_SB] = helper_ret_ldsb_mmu,
> +#ifdef HOST_WORDS_BIGENDIAN
> +    [MO_UW] = helper_be_lduw_mmu,
> +    [MO_SW] = helper_be_ldsw_mmu,
> +    [MO_UL] = helper_be_ldul_mmu,
>  #if TCG_TARGET_REG_BITS == 64
> -    [MO_LESL] = helper_le_ldsl_mmu,
> +    [MO_SL] = helper_be_ldsl_mmu,
>  #endif
> -    [MO_LEQ] = helper_le_ldq_mmu,
> -    [MO_BEUW] = helper_be_lduw_mmu,
> -    [MO_BESW] = helper_be_ldsw_mmu,
> -    [MO_BEUL] = helper_be_ldul_mmu,
> +    [MO_Q] = helper_be_ldq_mmu,
> +#else
> +    [MO_UW] = helper_le_lduw_mmu,
> +    [MO_SW] = helper_le_ldsw_mmu,
> +    [MO_UL] = helper_le_ldul_mmu,
>  #if TCG_TARGET_REG_BITS == 64
> -    [MO_BESL] = helper_be_ldsl_mmu,
> +    [MO_SL] = helper_le_ldsl_mmu,
> +#endif
> +    [MO_Q] = helper_le_ldq_mmu,
>  #endif
> -    [MO_BEQ] = helper_be_ldq_mmu,
>  };
>
>  /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
>   *                                     uintxx_t val, TCGMemOpIdx oi,
>   *                                     uintptr_t ra)
>   */
> -static void * const qemu_st_helpers[16] = {
> -    [MO_UB] = helper_ret_stb_mmu,
> -    [MO_LEUW] = helper_le_stw_mmu,
> -    [MO_LEUL] = helper_le_stl_mmu,
> -    [MO_LEQ] = helper_le_stq_mmu,
> -    [MO_BEUW] = helper_be_stw_mmu,
> -    [MO_BEUL] = helper_be_stl_mmu,
> -    [MO_BEQ] = helper_be_stq_mmu,
> +static void * const qemu_st_helpers[4] = {
> +    [MO_8] = helper_ret_stb_mmu,
> +#ifdef HOST_WORDS_BIGENDIAN
> +    [MO_16] = helper_be_stw_mmu,
> +    [MO_32] = helper_be_stl_mmu,
> +    [MO_64] = helper_be_stq_mmu,
> +#else
> +    [MO_16] = helper_le_stw_mmu,
> +    [MO_32] = helper_le_stl_mmu,
> +    [MO_64] = helper_le_stq_mmu,
> +#endif
>  };
>
>  /* We don't support oversize guests */
> @@ -997,7 +1003,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
>      tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
>      tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);
>
> -    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
> +    tcg_out_call(s, qemu_ld_helpers[opc & MO_SSIZE]);
>      tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);
>
>      tcg_out_goto(s, l->raddr);
> @@ -1042,7 +1048,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
>      tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
>      tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);
>
> -    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
> +    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);
>
>      tcg_out_goto(s, l->raddr);
>      return true;
> @@ -1052,10 +1058,8 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
>  static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
>                                     TCGReg base, MemOp opc, bool is_64)
>  {
> -    const MemOp bswap = opc & MO_BSWAP;
> -
> -    /* We don't yet handle byteswapping, assert */
> -    g_assert(!bswap);
> +    /* Byte swapping is left to middle-end expansion. */
> +    tcg_debug_assert((opc & MO_BSWAP) == 0);
>
>      switch (opc & (MO_SSIZE)) {
>      case MO_UB:
> @@ -1139,10 +1143,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
>  static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
>                                     TCGReg base, MemOp opc)
>  {
> -    const MemOp bswap = opc & MO_BSWAP;
> -
> -    /* We don't yet handle byteswapping, assert */
> -    g_assert(!bswap);
> +    /* Byte swapping is left to middle-end expansion. */
> +    tcg_debug_assert((opc & MO_BSWAP) == 0);
>
>      switch (opc & (MO_SSIZE)) {
>      case MO_8:
> --
> 2.25.1
>
>
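The smaller table sizes in the patch below fall straight out of the MemOp bit layout. As a standalone illustration (the constants are assumed to mirror QEMU's include/exec/memop.h of that era, not copied from it), masking with MO_SSIZE or MO_SIZE alone bounds the helper index at 8 load slots and 4 store slots once MO_BSWAP can never be set:

```c
#include <assert.h>
#include <stdio.h>

/*
 * Constants assumed to mirror QEMU's MemOp encoding (include/exec/memop.h
 * at the time of this patch); treat them as an illustration, not a copy.
 */
enum {
    MO_8     = 0,
    MO_16    = 1,
    MO_32    = 2,
    MO_64    = 3,
    MO_SIZE  = 0x03,                 /* mask for the size field */
    MO_SIGN  = 0x04,                 /* sign-extending load */
    MO_BSWAP = 0x08,                 /* byte-swapped access */
    MO_SSIZE = MO_SIZE | MO_SIGN,
};

int main(void)
{
    /*
     * The old tables needed 16 slots because the index
     * opc & (MO_BSWAP | MO_SSIZE) can be as large as 15.  With
     * TCG_TARGET_HAS_MEMORY_BSWAP unset, the backend never sees MO_BSWAP,
     * so the index is bounded by MO_SSIZE (loads) or MO_SIZE (stores).
     */
    for (int opc = 0; opc <= MO_SSIZE; opc++) {
        int ld_index = opc & MO_SSIZE;  /* fits qemu_ld_helpers[8] */
        int st_index = opc & MO_SIZE;   /* fits qemu_st_helpers[4] */
        assert(ld_index < 8 && st_index < 4);
        printf("opc %#x -> ld helper slot %d, st helper slot %d\n",
               opc, ld_index, st_index);
    }
    return 0;
}
```

Running this prints eight load slots and four store slots, matching qemu_ld_helpers[8] and qemu_st_helpers[4] in the diff.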
diff --git a/tcg/riscv/tcg-target.c.inc b/tcg/riscv/tcg-target.c.inc
index da7eecafc5..c16f96b401 100644
--- a/tcg/riscv/tcg-target.c.inc
+++ b/tcg/riscv/tcg-target.c.inc
@@ -852,37 +852,43 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
 /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr,
  *                                     TCGMemOpIdx oi, uintptr_t ra)
  */
-static void * const qemu_ld_helpers[16] = {
-    [MO_UB] = helper_ret_ldub_mmu,
-    [MO_SB] = helper_ret_ldsb_mmu,
-    [MO_LEUW] = helper_le_lduw_mmu,
-    [MO_LESW] = helper_le_ldsw_mmu,
-    [MO_LEUL] = helper_le_ldul_mmu,
+static void * const qemu_ld_helpers[8] = {
+    [MO_UB] = helper_ret_ldub_mmu,
+    [MO_SB] = helper_ret_ldsb_mmu,
+#ifdef HOST_WORDS_BIGENDIAN
+    [MO_UW] = helper_be_lduw_mmu,
+    [MO_SW] = helper_be_ldsw_mmu,
+    [MO_UL] = helper_be_ldul_mmu,
 #if TCG_TARGET_REG_BITS == 64
-    [MO_LESL] = helper_le_ldsl_mmu,
+    [MO_SL] = helper_be_ldsl_mmu,
 #endif
-    [MO_LEQ] = helper_le_ldq_mmu,
-    [MO_BEUW] = helper_be_lduw_mmu,
-    [MO_BESW] = helper_be_ldsw_mmu,
-    [MO_BEUL] = helper_be_ldul_mmu,
+    [MO_Q] = helper_be_ldq_mmu,
+#else
+    [MO_UW] = helper_le_lduw_mmu,
+    [MO_SW] = helper_le_ldsw_mmu,
+    [MO_UL] = helper_le_ldul_mmu,
 #if TCG_TARGET_REG_BITS == 64
-    [MO_BESL] = helper_be_ldsl_mmu,
+    [MO_SL] = helper_le_ldsl_mmu,
+#endif
+    [MO_Q] = helper_le_ldq_mmu,
 #endif
-    [MO_BEQ] = helper_be_ldq_mmu,
 };
 
 /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr,
  *                                     uintxx_t val, TCGMemOpIdx oi,
  *                                     uintptr_t ra)
  */
-static void * const qemu_st_helpers[16] = {
-    [MO_UB] = helper_ret_stb_mmu,
-    [MO_LEUW] = helper_le_stw_mmu,
-    [MO_LEUL] = helper_le_stl_mmu,
-    [MO_LEQ] = helper_le_stq_mmu,
-    [MO_BEUW] = helper_be_stw_mmu,
-    [MO_BEUL] = helper_be_stl_mmu,
-    [MO_BEQ] = helper_be_stq_mmu,
+static void * const qemu_st_helpers[4] = {
+    [MO_8] = helper_ret_stb_mmu,
+#ifdef HOST_WORDS_BIGENDIAN
+    [MO_16] = helper_be_stw_mmu,
+    [MO_32] = helper_be_stl_mmu,
+    [MO_64] = helper_be_stq_mmu,
+#else
+    [MO_16] = helper_le_stw_mmu,
+    [MO_32] = helper_le_stl_mmu,
+    [MO_64] = helper_le_stq_mmu,
+#endif
 };
 
 /* We don't support oversize guests */
@@ -997,7 +1003,7 @@ static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
     tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);
 
-    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
+    tcg_out_call(s, qemu_ld_helpers[opc & MO_SSIZE]);
     tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);
 
     tcg_out_goto(s, l->raddr);
@@ -1042,7 +1048,7 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
     tcg_out_movi(s, TCG_TYPE_PTR, a3, oi);
     tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr);
 
-    tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
+    tcg_out_call(s, qemu_st_helpers[opc & MO_SIZE]);
 
     tcg_out_goto(s, l->raddr);
     return true;
@@ -1052,10 +1058,8 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
 static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                    TCGReg base, MemOp opc, bool is_64)
 {
-    const MemOp bswap = opc & MO_BSWAP;
-
-    /* We don't yet handle byteswapping, assert */
-    g_assert(!bswap);
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & (MO_SSIZE)) {
     case MO_UB:
@@ -1139,10 +1143,8 @@ static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
 static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                    TCGReg base, MemOp opc)
 {
-    const MemOp bswap = opc & MO_BSWAP;
-
-    /* We don't yet handle byteswapping, assert */
-    g_assert(!bswap);
+    /* Byte swapping is left to middle-end expansion. */
+    tcg_debug_assert((opc & MO_BSWAP) == 0);
 
     switch (opc & (MO_SSIZE)) {
     case MO_8:
TCG_TARGET_HAS_MEMORY_BSWAP is already unset for this backend, which means that MO_BSWAP will be handled by the middle-end and will never be seen by the backend.  Thus the indexes used with qemu_{ld,st}_helpers will always be zero.

Tidy the comments and asserts in tcg_out_qemu_{ld,st}_direct.  It is not that we do not handle bswap "yet", but never will.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 64 ++++++++++++++++++++------------------
 1 file changed, 33 insertions(+), 31 deletions(-)
--
2.25.1
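For context on why MO_BSWAP can never reach this backend: with TCG_TARGET_HAS_MEMORY_BSWAP unset, the middle-end expands the byte swap itself around a swap-free memory op. The sketch below is not QEMU code; expand_qemu_ld(), emit_load() and emit_bswap() are hypothetical stand-ins that only show the shape of that expansion.

```c
#include <stdio.h>

/* MemOp bits assumed to mirror include/exec/memop.h at the time. */
typedef unsigned MemOp;
enum { MO_8 = 0, MO_16 = 1, MO_32 = 2, MO_64 = 3,
       MO_SIZE = 0x03, MO_BSWAP = 0x08 };

#define TCG_TARGET_HAS_MEMORY_BSWAP 0   /* as on the riscv backend */

/* printf stubs standing in for real op emission -- names are made up. */
static void emit_load(int dst, int addr, MemOp opc)
{
    printf("load  r%d <- [r%d], opc=%#x (MO_BSWAP %s)\n",
           dst, addr, opc, (opc & MO_BSWAP) ? "set" : "clear");
}

static void emit_bswap(int reg, MemOp size)
{
    printf("bswap r%d, %u-byte value\n", reg, 1u << size);
}

/*
 * Shape of the middle-end expansion: when the backend cannot swap during
 * the memory access itself, drop MO_BSWAP before emitting the load and
 * swap in a register afterwards (re-sign-extension of swapped signed
 * loads is omitted for brevity).  The backend therefore never sees an
 * opc with MO_BSWAP set, which is what the patch relies on.
 */
static void expand_qemu_ld(int dst, int addr, MemOp opc)
{
    if (!TCG_TARGET_HAS_MEMORY_BSWAP && (opc & MO_BSWAP)) {
        emit_load(dst, addr, opc & ~MO_BSWAP);
        emit_bswap(dst, opc & MO_SIZE);
    } else {
        emit_load(dst, addr, opc);
    }
}

int main(void)
{
    expand_qemu_ld(10, 11, MO_32 | MO_BSWAP);   /* byte-swapped 32-bit load */
    return 0;
}
```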