@@ -846,7 +846,6 @@ static void tcg_out_mb(TCGContext *s, TCGArg a0)
* Load/store and TLB
*/
-#if defined(CONFIG_SOFTMMU)
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
@@ -893,34 +892,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
tcg_out_goto(s, l->raddr);
return true;
}
-#else
-static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
-{
- /* resolve label address */
- if (!reloc_sbimm12(l->label_ptr[0], tcg_splitwx_to_rx(s->code_ptr))) {
- return false;
- }
-
- tcg_out_mov(s, TCG_TYPE_TL, TCG_REG_A1, l->addrlo_reg);
- tcg_out_mov(s, TCG_TYPE_PTR, TCG_REG_A0, TCG_AREG0);
-
- /* tail call, with the return address back inline. */
- tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_RA, (uintptr_t)l->raddr);
- tcg_out_call_int(s, (const void *)(l->is_ld ? helper_unaligned_ld
- : helper_unaligned_st), true);
- return true;
-}
-
-static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- return tcg_out_fail_alignment(s, l);
-}
-
-static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
-{
- return tcg_out_fail_alignment(s, l);
-}
-#endif /* CONFIG_SOFTMMU */
/*
* For softmmu, perform the TLB load and compare.

Instead of using helper_unaligned_{ld,st}, use the full load/store
helpers.  This will allow the fast path to increase alignment to
implement atomicity while not immediately raising an alignment
exception.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/riscv/tcg-target.c.inc | 29 -----------------------------
 1 file changed, 29 deletions(-)
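
As a rough, self-contained C sketch of the behavioural difference only
(nothing below is QEMU API: guest_ram, full_load_helper,
unaligned_fault_helper and load32 are invented placeholder names, and a
real TCG backend emits host instructions rather than calling C
directly), the removed user-only slow path could only report an
alignment fault, whereas the unified slow path hands the access to a
helper that can complete it:

    /*
     * Hypothetical illustration of the two slow-path strategies.
     * All names are placeholders invented for this sketch.
     */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>
    #include <inttypes.h>

    static uint8_t guest_ram[64];

    /* Stand-in for a full load helper: completes the access whatever
       the alignment (memcpy copies byte by byte, so it cannot fault). */
    static uint32_t full_load_helper(uintptr_t addr)
    {
        uint32_t val;
        memcpy(&val, &guest_ram[addr], sizeof(val));
        return val;
    }

    /* Stand-in for an alignment-fault helper in the spirit of
       helper_unaligned_ld: here it just prints a diagnostic and returns
       zero, where the real helper would raise the guest's alignment
       exception instead. */
    static uint32_t unaligned_fault_helper(uintptr_t addr)
    {
        fprintf(stderr, "alignment fault at %#lx\n", (unsigned long)addr);
        return 0;
    }

    /* "Fast path": aligned accesses are handled directly; anything the
       fast path refuses is routed to one of the two strategies above.
       In a real backend the aligned case is inline generated code. */
    static uint32_t load32(uintptr_t addr, int use_full_helper)
    {
        if ((addr & 3) == 0) {
            return full_load_helper(addr);
        }
        return use_full_helper ? full_load_helper(addr)
                               : unaligned_fault_helper(addr);
    }

    int main(void)
    {
        memcpy(guest_ram, "\x01\x02\x03\x04\x05\x06\x07\x08", 8);
        printf("fault-only slow path:  0x%" PRIx32 "\n", load32(1, 0));
        printf("full-helper slow path: 0x%" PRIx32 "\n", load32(1, 1));
        return 0;
    }

The point of the unified version is simply that an access rejected by
the fast path is no longer automatically fatal, which is what leaves
room for the fast path to demand stricter alignment (e.g. for
atomicity) without changing guest-visible behaviour.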