@@ -899,10 +899,6 @@ static void * const qemu_st_helpers[MO_SIZE + 1] = {
#endif
};
 
-/* We expect to use a 12-bit negative offset from ENV. */
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
-QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
-
static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
{
tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
@@ -910,76 +906,6 @@ static void tcg_out_goto(TCGContext *s, const tcg_insn_unit *target)
tcg_debug_assert(ok);
}
 
-static TCGReg tcg_out_tlb_load(TCGContext *s, TCGReg addr, MemOpIdx oi,
- tcg_insn_unit **label_ptr, bool is_load)
-{
- MemOp opc = get_memop(oi);
- unsigned s_bits = opc & MO_SIZE;
- unsigned a_bits = get_alignment_bits(opc);
- tcg_target_long compare_mask;
- int mem_index = get_mmuidx(oi);
- int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
- int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
- int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
- TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
-
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
-
- tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr,
- TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
-
- /* Load the tlb comparator and the addend. */
- tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
- is_load ? offsetof(CPUTLBEntry, addr_read)
- : offsetof(CPUTLBEntry, addr_write));
- tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
- offsetof(CPUTLBEntry, addend));
-
- /* We don't support unaligned accesses. */
- if (a_bits < s_bits) {
- a_bits = s_bits;
- }
- /* Clear the non-page, non-alignment bits from the address. */
- compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
- if (compare_mask == sextreg(compare_mask, 0, 12)) {
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr, compare_mask);
- } else {
- tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
- tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr);
- }
-
- /* Compare masked address with the TLB entry. */
- label_ptr[0] = s->code_ptr;
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
-
- /* TLB Hit - translate address using addend. */
- if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_REG_TMP0, addr);
- addr = TCG_REG_TMP0;
- }
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr);
- return TCG_REG_TMP0;
-}
-
-static void add_qemu_ldst_label(TCGContext *s, int is_ld, MemOpIdx oi,
- TCGType data_type, TCGReg data_reg,
- TCGReg addr_reg, void *raddr,
- tcg_insn_unit **label_ptr)
-{
- TCGLabelQemuLdst *label = new_ldst_label(s);
-
- label->is_ld = is_ld;
- label->oi = oi;
- label->type = data_type;
- label->datalo_reg = data_reg;
- label->addrlo_reg = addr_reg;
- label->raddr = tcg_splitwx_to_rx(raddr);
- label->label_ptr[0] = label_ptr[0];
-}
-
static bool tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
MemOpIdx oi = l->oi;
@@ -1037,26 +963,6 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
return true;
}
#else
-
-static void tcg_out_test_alignment(TCGContext *s, bool is_ld, TCGReg addr_reg,
- unsigned a_bits)
-{
- unsigned a_mask = (1 << a_bits) - 1;
- TCGLabelQemuLdst *l = new_ldst_label(s);
-
- l->is_ld = is_ld;
- l->addrlo_reg = addr_reg;
-
- /* We are expecting a_bits to max out at 7, so we can always use andi. */
- tcg_debug_assert(a_bits < 12);
- tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
-
- l->label_ptr[0] = s->code_ptr;
- tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
-
- l->raddr = tcg_splitwx_to_rx(s->code_ptr);
-}
-
static bool tcg_out_fail_alignment(TCGContext *s, TCGLabelQemuLdst *l)
{
/* resolve label address */
@@ -1083,9 +989,108 @@ static bool tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
return tcg_out_fail_alignment(s, l);
}
-
#endif /* CONFIG_SOFTMMU */
 
+/*
+ * For softmmu, perform the TLB load and compare.
+ * For user-only, perform any required alignment tests.
+ * In both cases, return a TCGLabelQemuLdst structure if the slow path
+ * is required, and fill in @pbase with the host address for the fast path.
+ */
+static TCGLabelQemuLdst *prepare_host_addr(TCGContext *s, TCGReg *pbase,
+ TCGReg addr_reg, MemOpIdx oi,
+ bool is_ld)
+{
+ TCGLabelQemuLdst *ldst = NULL;
+ MemOp opc = get_memop(oi);
+ unsigned a_bits = get_alignment_bits(opc);
+ unsigned a_mask = (1u << a_bits) - 1;
+
+#ifdef CONFIG_SOFTMMU
+ unsigned s_bits = opc & MO_SIZE;
+ int mem_index = get_mmuidx(oi);
+ int fast_ofs = TLB_MASK_TABLE_OFS(mem_index);
+ int mask_ofs = fast_ofs + offsetof(CPUTLBDescFast, mask);
+ int table_ofs = fast_ofs + offsetof(CPUTLBDescFast, table);
+ TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;
+ tcg_target_long compare_mask;
+
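+    /* A softmmu access always needs a slow path for the TLB miss. */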
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+
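+    /* We expect to use a 12-bit negative offset from ENV. */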
+ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) > 0);
+ QEMU_BUILD_BUG_ON(TLB_MASK_TABLE_OFS(0) < -(1 << 11));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_ofs);
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_ofs);
+
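+    /*
+     * Index the TLB: shift the page number down to CPUTLBEntry
+     * granularity, mask with CPUTLBDescFast.mask (pre-scaled by
+     * CPU_TLB_ENTRY_BITS), then add the table base to address the entry.
+     */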
+ tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addr_reg,
+ TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);
+
+ /* Load the tlb comparator and the addend. */
+ tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
+ is_ld ? offsetof(CPUTLBEntry, addr_read)
+ : offsetof(CPUTLBEntry, addr_write));
+ tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
+ offsetof(CPUTLBEntry, addend));
+
+    /*
+     * Unaligned accesses are not handled inline; widen the alignment
+     * check to the access size so that they take the slow path.
+     */
+ if (a_bits < s_bits) {
+ a_bits = s_bits;
+ }
+ /* Clear the non-page, non-alignment bits from the address. */
+ compare_mask = (tcg_target_long)TARGET_PAGE_MASK | a_mask;
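+    /* ANDI sign-extends its 12-bit immediate; use it if the mask fits. */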
+ if (compare_mask == sextreg(compare_mask, 0, 12)) {
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, compare_mask);
+ } else {
+ tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
+ tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addr_reg);
+ }
+
+ /* Compare masked address with the TLB entry. */
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
+
+ /* TLB Hit - translate address using addend. */
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_ext32u(s, TCG_REG_TMP0, addr_reg);
+ addr_reg = TCG_REG_TMP0;
+ }
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addr_reg);
+ *pbase = TCG_REG_TMP0;
+#else
+ if (a_mask) {
+ ldst = new_ldst_label(s);
+ ldst->is_ld = is_ld;
+ ldst->oi = oi;
+ ldst->addrlo_reg = addr_reg;
+
+        /* We expect a_bits to max out at 7, so we can always use andi. */
+ tcg_debug_assert(a_bits < 12);
+ tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addr_reg, a_mask);
+
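+        /*
+         * Branch to the slow path if any alignment bits are set; the
+         * branch target is patched when the slow path is emitted.
+         */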
+ ldst->label_ptr[0] = s->code_ptr;
+ tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP1, TCG_REG_ZERO, 0);
+ }
+
+ TCGReg base = addr_reg;
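+    /* For a 32-bit guest, zero-extend the address to host register width. */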
+ if (TARGET_LONG_BITS == 32) {
+ tcg_out_ext32u(s, TCG_REG_TMP0, base);
+ base = TCG_REG_TMP0;
+ }
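+    /* Add guest_base, if any, to form the host address. */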
+ if (guest_base != 0) {
+ tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
+ base = TCG_REG_TMP0;
+ }
+ *pbase = base;
+#endif
+
+ return ldst;
+}
+
static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
TCGReg base, MemOp opc, TCGType type)
{
@@ -1125,32 +1130,17 @@ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg val,
static void tcg_out_qemu_ld(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
- MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
TCGReg base;
 
-#if defined(CONFIG_SOFTMMU)
- tcg_insn_unit *label_ptr[1];
+ ldst = prepare_host_addr(s, &base, addr_reg, oi, true);
+ tcg_out_qemu_ld_direct(s, data_reg, base, get_memop(oi), data_type);
 
- base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 1);
- tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
- add_qemu_ldst_label(s, true, oi, data_type, data_reg, addr_reg,
- s->code_ptr, label_ptr);
-#else
- unsigned a_bits = get_alignment_bits(opc);
- if (a_bits) {
- tcg_out_test_alignment(s, true, addr_reg, a_bits);
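+    /* Finish the slow-path descriptor; the slow path returns to raddr. */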
+ if (ldst) {
+ ldst->type = data_type;
+ ldst->datalo_reg = data_reg;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
- base = addr_reg;
- if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_REG_TMP0, base);
- base = TCG_REG_TMP0;
- }
- if (guest_base != 0) {
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
- base = TCG_REG_TMP0;
- }
- tcg_out_qemu_ld_direct(s, data_reg, base, opc, data_type);
-#endif
}
 
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
@@ -1180,32 +1170,17 @@ static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg val,
static void tcg_out_qemu_st(TCGContext *s, TCGReg data_reg, TCGReg addr_reg,
MemOpIdx oi, TCGType data_type)
{
- MemOp opc = get_memop(oi);
+ TCGLabelQemuLdst *ldst;
TCGReg base;
 
-#if defined(CONFIG_SOFTMMU)
- tcg_insn_unit *label_ptr[1];
+ ldst = prepare_host_addr(s, &base, addr_reg, oi, false);
+ tcg_out_qemu_st_direct(s, data_reg, base, get_memop(oi));
 
- base = tcg_out_tlb_load(s, addr_reg, oi, label_ptr, 0);
- tcg_out_qemu_st_direct(s, data_reg, base, opc);
- add_qemu_ldst_label(s, false, oi, data_type, data_reg, addr_reg,
- s->code_ptr, label_ptr);
-#else
- unsigned a_bits = get_alignment_bits(opc);
- if (a_bits) {
- tcg_out_test_alignment(s, false, addr_reg, a_bits);
+ if (ldst) {
+ ldst->type = data_type;
+ ldst->datalo_reg = data_reg;
+ ldst->raddr = tcg_splitwx_to_rx(s->code_ptr);
}
- base = addr_reg;
- if (TARGET_LONG_BITS == 32) {
- tcg_out_ext32u(s, TCG_REG_TMP0, base);
- base = TCG_REG_TMP0;
- }
- if (guest_base != 0) {
- tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_GUEST_BASE_REG, base);
- base = TCG_REG_TMP0;
- }
- tcg_out_qemu_st_direct(s, data_reg, base, opc);
-#endif
}
 
static const tcg_insn_unit *tb_ret_addr;