@@ -220,6 +220,11 @@ static void * const qemu_st_helpers[MO_SIZE + 1] __attribute__((unused)) = {
#endif
};
+static MemOp atom_and_align_for_opc(TCGContext *s, MemOp *p_atom_a,
+ MemOp *p_atom_u, MemOp opc,
+ MemOp host_atom, bool allow_two_ops)
+ __attribute__((unused));
+
TCGContext tcg_init_ctx;
__thread TCGContext *tcg_ctx;
@@ -5123,6 +5128,70 @@ static void tcg_reg_alloc_call(TCGContext *s, TCGOp *op)
}
}
+/*
+ * Return the alignment and atomicity to use for the inline fast path
+ * for the given memory operation. The alignment may be larger than
+ * that specified in @opc, and the correct alignment will be diagnosed
+ * by the slow path helper.
+ */
+static MemOp atom_and_align_for_opc(TCGContext *s, MemOp *p_atom_a,
+ MemOp *p_atom_u, MemOp opc,
+ MemOp host_atom, bool allow_two_ops)
+{
+ MemOp align = get_alignment_bits(opc);
+ MemOp atom, atmax, atmin, size = opc & MO_SIZE;
+
+ /* When serialized, no further atomicity required. */
+ if (s->gen_tb->cflags & CF_PARALLEL) {
+ atom = opc & MO_ATOM_MASK;
+ } else {
+ atom = MO_ATOM_NONE;
+ }
+
+ atmax = opc & MO_ATMAX_MASK;
+ if (atmax == MO_ATMAX_SIZE) {
+ atmax = size;
+ } else {
+ atmax = atmax >> MO_ATMAX_SHIFT;
+ }
+
+ switch (atom) {
+ case MO_ATOM_NONE:
+ /* The operation requires no specific atomicity. */
+ atmax = atmin = MO_8;
+ break;
+ case MO_ATOM_IFALIGN:
+ /* If unaligned, the subobjects are bytes. */
+ atmin = MO_8;
+ break;
+ case MO_ATOM_WITHIN16:
+ /* If unaligned, there are subobjects if atmax < size. */
+ atmin = (atmax < size ? atmax : MO_8);
+ atmax = size;
+ break;
+ case MO_ATOM_SUBALIGN:
+ /* If unaligned but not odd, there are subobjects up to atmax - 1. */
+ atmin = (atmax == MO_8 ? MO_8 : atmax - 1);
+ break;
+ default:
+ g_assert_not_reached();
+ }
+
+ /*
+ * If there are subobjects, and the host model does not match, then we
+ * need to raise the initial alignment check. If the backend is prepared
+ * to double-check alignment and issue two half-size ops, we need not
+ * raise the initial alignment beyond that half size.
+ */
+ if (atmin > MO_8 && host_atom != atom) {
+ align = MAX(align, size - allow_two_ops);
+ }
+
+ *p_atom_a = atmax;
+ *p_atom_u = atmin;
+ return align;
+}
+
/*
* Similarly for qemu_ld/st slow path helpers.
* We must re-implement tcg_gen_callN and tcg_reg_alloc_call simultaneously,
Examine MemOp for atomicity and alignment, adjusting alignment
as required to implement atomicity on the host.

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
---
 tcg/tcg.c | 69 +++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 69 insertions(+)
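
As a sketch of how a backend's qemu_ld/st fast path might consume the
new helper (the host-model choice of MO_ATOM_IFALIGN and the
tcg_out_test_align routine are illustrative assumptions, not part of
this patch):

    static void example_prepare_ldst(TCGContext *s, MemOp opc)
    {
        MemOp atom_a, atom_u;
        MemOp a_bits;

        /* Assume an x86-like host: atomic for aligned accesses. */
        a_bits = atom_and_align_for_opc(s, &atom_a, &atom_u, opc,
                                        MO_ATOM_IFALIGN, false);

        if (a_bits) {
            /* Branch to the slow path unless addr is 2**a_bits aligned. */
            tcg_out_test_align(s, a_bits);      /* hypothetical helper */
        }

        /*
         * Emit the fast-path access, honoring atom_a (atomicity when
         * aligned) and atom_u (atomicity when unaligned).
         */
    }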
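
A worked example of the alignment adjustment, assuming CF_PARALLEL is
set and the MO_ATMAX_* encoding from earlier in this series: consider
a 4-byte load that must be atomic in 2-byte halves so long as it does
not cross a 16-byte boundary,

    MemOp opc = MO_32 | MO_ATOM_WITHIN16 | MO_ATMAX_2;

Here size = MO_32 and atmax = MO_16; the MO_ATOM_WITHIN16 case then
sets atmin = MO_16 (since atmax < size) and widens atmax to MO_32.
On a host whose model is MO_ATOM_IFALIGN, atmin > MO_8 triggers the
raise: with allow_two_ops the inline check is raised only to 2-byte
(MO_16) alignment, the backend issuing two half-size ops when not
fully aligned; without it, the full 4-byte (MO_32) alignment is
required inline.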