===================================================================
@@ -59,6 +59,7 @@ typedef const struct rtx_def *const_rtx;
class scalar_int_mode;
class scalar_float_mode;
template<typename> class opt_mode;
+typedef opt_mode<scalar_mode> opt_scalar_mode;
typedef opt_mode<scalar_int_mode> opt_scalar_int_mode;
typedef opt_mode<scalar_float_mode> opt_scalar_float_mode;
template<typename> class pod_mode;
===================================================================
@@ -543,7 +543,8 @@ def build_pretty_printer():
pp.add_printer_for_regex(r'opt_mode<(\S+)>',
'opt_mode', OptMachineModePrinter)
pp.add_printer_for_types(['opt_scalar_int_mode',
- 'opt_scalar_float_mode'],
+ 'opt_scalar_float_mode',
+ 'opt_scalar_mode'],
'opt_mode', OptMachineModePrinter)
pp.add_printer_for_regex(r'pod_mode<(\S+)>',
'pod_mode', MachineModePrinter)
===================================================================
@@ -836,6 +836,13 @@ is_float_mode (machine_mode mode, T *flo
/* Set mode iterator *ITER to the mode that is two times wider than the
current one, if such a mode exists. */
+ template<typename T>
+ inline void
+ get_2xwider (opt_mode<T> *iter)
+ {
+ *iter = GET_MODE_2XWIDER_MODE (**iter);
+ }
+
inline void
get_2xwider (machine_mode *iter)
{
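
For orientation, a minimal sketch (not part of the patch) of how the new opt_mode overload above gets exercised: an opt_scalar_mode iterator driven by FOR_EACH_2XWIDER_MODE, as the tree-vect-stmts.c hunk further down does. The function name walk_wider_modes and the size cut-off are placeholders.

/* Sketch only: visit successively wider scalar modes above START,
   stopping when no wider mode exists or the modes get too big.  */
void
walk_wider_modes (scalar_mode start)
{
  opt_scalar_mode iter;
  FOR_EACH_2XWIDER_MODE (iter, start)
    {
      scalar_mode wider = *iter;	/* extract the payload */
      if (GET_MODE_SIZE (wider) > 16)	/* placeholder cut-off */
	break;
      /* ... use WIDER here ...  */
    }
}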
===================================================================
@@ -5891,6 +5891,7 @@ init_emit_once (void)
int i;
machine_mode mode;
scalar_float_mode double_mode;
+  opt_scalar_mode smode_iter;

  /* Initialize the CONST_INT, CONST_WIDE_INT, CONST_DOUBLE,
CONST_FIXED, and memory attribute hash tables. */
@@ -6005,62 +6006,66 @@ init_emit_once (void)
const_tiny_rtx[1][(int) mode] = gen_const_vector (mode, 1);
}

-  FOR_EACH_MODE_IN_CLASS (mode, MODE_FRACT)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_FRACT)
{
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
- }
-
- FOR_EACH_MODE_IN_CLASS (mode, MODE_UFRACT)
- {
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
- }
-
- FOR_EACH_MODE_IN_CLASS (mode, MODE_ACCUM)
- {
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
+ }
+
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UFRACT)
+ {
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
+ }
+
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_ACCUM)
+ {
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}

-  FOR_EACH_MODE_IN_CLASS (mode, MODE_UACCUM)
- {
- FCONST0 (mode).data.high = 0;
- FCONST0 (mode).data.low = 0;
- FCONST0 (mode).mode = mode;
- const_tiny_rtx[0][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST0 (mode), mode);
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_UACCUM)
+ {
+ scalar_mode smode = *smode_iter;
+ FCONST0 (smode).data.high = 0;
+ FCONST0 (smode).data.low = 0;
+ FCONST0 (smode).mode = smode;
+ const_tiny_rtx[0][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST0 (smode), smode);
/* We store the value 1. */
- FCONST1 (mode).data.high = 0;
- FCONST1 (mode).data.low = 0;
- FCONST1 (mode).mode = mode;
- FCONST1 (mode).data
- = double_int_one.lshift (GET_MODE_FBIT (mode),
+ FCONST1 (smode).data.high = 0;
+ FCONST1 (smode).data.low = 0;
+ FCONST1 (smode).mode = smode;
+ FCONST1 (smode).data
+ = double_int_one.lshift (GET_MODE_FBIT (smode),
HOST_BITS_PER_DOUBLE_INT,
- SIGNED_FIXED_POINT_MODE_P (mode));
- const_tiny_rtx[1][(int) mode] = CONST_FIXED_FROM_FIXED_VALUE (
- FCONST1 (mode), mode);
+ SIGNED_FIXED_POINT_MODE_P (smode));
+ const_tiny_rtx[1][(int) smode]
+ = CONST_FIXED_FROM_FIXED_VALUE (FCONST1 (smode), smode);
}

FOR_EACH_MODE_IN_CLASS (mode, MODE_VECTOR_FRACT)
@@ -6093,10 +6098,11 @@ init_emit_once (void)
if (STORE_FLAG_VALUE == 1)
const_tiny_rtx[1][(int) BImode] = const1_rtx;

-  FOR_EACH_MODE_IN_CLASS (mode, MODE_POINTER_BOUNDS)
+ FOR_EACH_MODE_IN_CLASS (smode_iter, MODE_POINTER_BOUNDS)
{
- wide_int wi_zero = wi::zero (GET_MODE_PRECISION (mode));
- const_tiny_rtx[0][mode] = immed_wide_int_const (wi_zero, mode);
+ scalar_mode smode = *smode_iter;
+ wide_int wi_zero = wi::zero (GET_MODE_PRECISION (smode));
+ const_tiny_rtx[0][smode] = immed_wide_int_const (wi_zero, smode);
}

pc_rtx = gen_rtx_fmt_ (PC, VOIDmode);
===================================================================
@@ -559,23 +559,28 @@ convert_mode_scalar (rtx to, rtx from, i
}
else
{
- machine_mode intermediate;
+ scalar_mode intermediate;
rtx tmp;
int shift_amount;
/* Search for a mode to convert via. */
- FOR_EACH_MODE_FROM (intermediate, from_mode)
- if (((can_extend_p (to_mode, intermediate, unsignedp)
- != CODE_FOR_nothing)
- || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
- && TRULY_NOOP_TRUNCATION_MODES_P (to_mode, intermediate)))
- && (can_extend_p (intermediate, from_mode, unsignedp)
- != CODE_FOR_nothing))
- {
- convert_move (to, convert_to_mode (intermediate, from,
- unsignedp), unsignedp);
- return;
- }
+ opt_scalar_mode intermediate_iter;
+ FOR_EACH_MODE_FROM (intermediate_iter, from_mode)
+ {
+ scalar_mode intermediate = *intermediate_iter;
+ if (((can_extend_p (to_mode, intermediate, unsignedp)
+ != CODE_FOR_nothing)
+ || (GET_MODE_SIZE (to_mode) < GET_MODE_SIZE (intermediate)
+ && TRULY_NOOP_TRUNCATION_MODES_P (to_mode,
+ intermediate)))
+ && (can_extend_p (intermediate, from_mode, unsignedp)
+ != CODE_FOR_nothing))
+ {
+ convert_move (to, convert_to_mode (intermediate, from,
+ unsignedp), unsignedp);
+ return;
+ }
+ }
/* No suitable intermediate mode.
Generate what we need with shifts. */
===================================================================
@@ -3438,16 +3438,18 @@ omp_clause_aligned_alignment (tree claus
/* Otherwise return implementation defined alignment. */
unsigned int al = 1;
- machine_mode mode, vmode;
+ opt_scalar_mode mode_iter;
int vs = targetm.vectorize.autovectorize_vector_sizes ();
if (vs)
vs = 1 << floor_log2 (vs);
static enum mode_class classes[]
= { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
for (int i = 0; i < 4; i += 2)
- FOR_EACH_MODE_IN_CLASS (mode, classes[i])
+ /* The for loop above dictates that we only walk through scalar classes. */
+ FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
{
- vmode = targetm.vectorize.preferred_simd_mode (mode);
+ scalar_mode mode = *mode_iter;
+ machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
if (GET_MODE_CLASS (vmode) != classes[i + 1])
continue;
while (vs
===================================================================
@@ -4690,6 +4690,7 @@ expand_float (rtx to, rtx from, int unsi
&& is_a <scalar_mode> (GET_MODE (to), &to_mode)
&& is_a <scalar_mode> (GET_MODE (from), &from_mode))
{
+ opt_scalar_mode fmode_iter;
rtx_code_label *label = gen_label_rtx ();
rtx temp;
REAL_VALUE_TYPE offset;
@@ -4698,12 +4699,14 @@ expand_float (rtx to, rtx from, int unsi
least as wide as the target. Using FMODE will avoid rounding woes
with unsigned values greater than the signed maximum value.  */

-      FOR_EACH_MODE_FROM (fmode, to_mode)
- if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (fmode)
- && can_float_p (fmode, from_mode, 0) != CODE_FOR_nothing)
+ FOR_EACH_MODE_FROM (fmode_iter, to_mode)
+ if (GET_MODE_PRECISION (from_mode) < GET_MODE_BITSIZE (*fmode_iter)
+ && can_float_p (*fmode_iter, from_mode, 0) != CODE_FOR_nothing)
break;

-      if (fmode == VOIDmode)
+ if (fmode_iter.exists ())
+ fmode = *fmode_iter;
+ else
{
/* There is no such mode. Pretend the target is wide enough. */
fmode = to_mode;
@@ -4838,6 +4841,7 @@ expand_fix (rtx to, rtx from, int unsign
enum insn_code icode;
rtx target = to;
machine_mode fmode, imode;
+ opt_scalar_mode fmode_iter;
bool must_trunc = false;
/* We first try to find a pair of modes, one real and one integer, at
@@ -4909,66 +4913,70 @@ expand_fix (rtx to, rtx from, int unsign
if (unsignedp
&& is_a <scalar_int_mode> (GET_MODE (to), &to_mode)
&& HWI_COMPUTABLE_MODE_P (to_mode))
- FOR_EACH_MODE_FROM (fmode, GET_MODE (from))
- if (CODE_FOR_nothing != can_fix_p (to_mode, fmode, 0, &must_trunc)
- && (!DECIMAL_FLOAT_MODE_P (fmode)
- || GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode)))
- {
- int bitsize;
- REAL_VALUE_TYPE offset;
- rtx limit;
- rtx_code_label *lab1, *lab2;
- rtx_insn *insn;
-
- bitsize = GET_MODE_PRECISION (to_mode);
- real_2expN (&offset, bitsize - 1, fmode);
- limit = const_double_from_real_value (offset, fmode);
- lab1 = gen_label_rtx ();
- lab2 = gen_label_rtx ();
-
- if (fmode != GET_MODE (from))
- from = convert_to_mode (fmode, from, 0);
-
- /* See if we need to do the subtraction. */
- do_pending_stack_adjust ();
- emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX, GET_MODE (from),
- 0, lab1);
-
- /* If not, do the signed "fix" and branch around fixup code. */
- expand_fix (to, from, 0);
- emit_jump_insn (targetm.gen_jump (lab2));
- emit_barrier ();
-
- /* Otherwise, subtract 2**(N-1), convert to signed number,
- then add 2**(N-1). Do the addition using XOR since this
- will often generate better code. */
- emit_label (lab1);
- target = expand_binop (GET_MODE (from), sub_optab, from, limit,
- NULL_RTX, 0, OPTAB_LIB_WIDEN);
- expand_fix (to, target, 0);
- target = expand_binop (to_mode, xor_optab, to,
- gen_int_mode
- (HOST_WIDE_INT_1 << (bitsize - 1),
- to_mode),
- to, 1, OPTAB_LIB_WIDEN);
+ FOR_EACH_MODE_FROM (fmode_iter, as_a <scalar_mode> (GET_MODE (from)))
+ {
+ scalar_mode fmode = *fmode_iter;
+ if (CODE_FOR_nothing != can_fix_p (to_mode, fmode,
+ 0, &must_trunc)
+ && (!DECIMAL_FLOAT_MODE_P (fmode)
+ || (GET_MODE_BITSIZE (fmode) > GET_MODE_PRECISION (to_mode))))
+ {
+ int bitsize;
+ REAL_VALUE_TYPE offset;
+ rtx limit;
+ rtx_code_label *lab1, *lab2;
+ rtx_insn *insn;
+
+ bitsize = GET_MODE_PRECISION (to_mode);
+ real_2expN (&offset, bitsize - 1, fmode);
+ limit = const_double_from_real_value (offset, fmode);
+ lab1 = gen_label_rtx ();
+	  lab2 = gen_label_rtx ();

-	  if (target != to)
- emit_move_insn (to, target);
+ if (fmode != GET_MODE (from))
+	    from = convert_to_mode (fmode, from, 0);

-	  emit_label (lab2);
+ /* See if we need to do the subtraction. */
+ do_pending_stack_adjust ();
+ emit_cmp_and_jump_insns (from, limit, GE, NULL_RTX,
+ GET_MODE (from), 0, lab1);
+
+ /* If not, do the signed "fix" and branch around fixup code. */
+ expand_fix (to, from, 0);
+ emit_jump_insn (targetm.gen_jump (lab2));
+ emit_barrier ();
+
+ /* Otherwise, subtract 2**(N-1), convert to signed number,
+ then add 2**(N-1). Do the addition using XOR since this
+ will often generate better code. */
+ emit_label (lab1);
+ target = expand_binop (GET_MODE (from), sub_optab, from, limit,
+ NULL_RTX, 0, OPTAB_LIB_WIDEN);
+ expand_fix (to, target, 0);
+ target = expand_binop (to_mode, xor_optab, to,
+ gen_int_mode
+ (HOST_WIDE_INT_1 << (bitsize - 1),
+ to_mode),
+				 to, 1, OPTAB_LIB_WIDEN);

-	  if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
- {
- /* Make a place for a REG_NOTE and add it. */
- insn = emit_move_insn (to, to);
- set_dst_reg_note (insn, REG_EQUAL,
- gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
- copy_rtx (from)),
- to);
- }
+ if (target != to)
+	    emit_move_insn (to, target);

-	  return;
- }
+ emit_label (lab2);
+
+ if (optab_handler (mov_optab, to_mode) != CODE_FOR_nothing)
+ {
+ /* Make a place for a REG_NOTE and add it. */
+ insn = emit_move_insn (to, to);
+ set_dst_reg_note (insn, REG_EQUAL,
+ gen_rtx_fmt_e (UNSIGNED_FIX, to_mode,
+ copy_rtx (from)),
+ to);
+ }
+
+ return;
+ }
+ }
/* We can't do it with an insn, so use a library call. But first ensure
that the mode of TO is at least as wide as SImode, since those are the
===================================================================
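The expand_float and expand_fix hunks above show the other half of the idiom: search upwards with FOR_EACH_MODE_FROM, break on a hit, and then ask the iterator whether anything was found instead of comparing the mode against VOIDmode. Distilled into a stand-alone sketch (usable_p and the FALLBACK argument are hypothetical, not taken from the patch):

extern bool usable_p (scalar_mode);	/* hypothetical predicate */

/* Sketch only: return the first mode from START onwards that satisfies
   usable_p, or FALLBACK if the search runs out of modes.  */
scalar_mode
find_mode_from (scalar_mode start, scalar_mode fallback)
{
  opt_scalar_mode iter;
  FOR_EACH_MODE_FROM (iter, start)
    if (usable_p (*iter))
      break;

  /* When the walk runs to completion, the iterator holds no mode.  */
  return iter.exists () ? *iter : fallback;
}
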
@@ -4195,8 +4195,10 @@ vectorizable_conversion (gimple *stmt, g
needs to be generated. */
gcc_assert (ncopies >= 1);

-  machine_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
- machine_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
+ bool found_mode = false;
+ scalar_mode lhs_mode = SCALAR_TYPE_MODE (lhs_type);
+ scalar_mode rhs_mode = SCALAR_TYPE_MODE (rhs_type);
+ opt_scalar_mode rhs_mode_iter;
/* Supportable by target? */
switch (modifier)
@@ -4230,8 +4232,9 @@ vectorizable_conversion (gimple *stmt, g
goto unsupported;
fltsz = GET_MODE_SIZE (lhs_mode);
- FOR_EACH_2XWIDER_MODE (rhs_mode, rhs_mode)
+ FOR_EACH_2XWIDER_MODE (rhs_mode_iter, rhs_mode)
{
+ rhs_mode = *rhs_mode_iter;
if (GET_MODE_SIZE (rhs_mode) > fltsz)
break;
@@ -4258,10 +4261,13 @@ vectorizable_conversion (gimple *stmt, g
if (supportable_widening_operation (NOP_EXPR, stmt, cvt_type,
vectype_in, &code1, &code2,
&multi_step_cvt, &interm_types))
- break;
+ {
+ found_mode = true;
+ break;
+ }
}

-  if (rhs_mode == VOIDmode || GET_MODE_SIZE (rhs_mode) > fltsz)
+ if (!found_mode)
goto unsupported;
if (GET_MODE_SIZE (rhs_mode) == fltsz)
===================================================================
@@ -2155,12 +2155,12 @@ c_common_fixed_point_type_for_size (unsi
else
mclass = unsignedp ? MODE_UACCUM : MODE_ACCUM;
- machine_mode mode;
+ opt_scalar_mode mode;
FOR_EACH_MODE_IN_CLASS (mode, mclass)
- if (GET_MODE_IBIT (mode) >= ibit && GET_MODE_FBIT (mode) >= fbit)
+ if (GET_MODE_IBIT (*mode) >= ibit && GET_MODE_FBIT (*mode) >= fbit)
break;

-  if (mode == VOIDmode || !targetm.scalar_mode_supported_p (mode))
+ if (!mode.exists () || !targetm.scalar_mode_supported_p (*mode))
{
sorry ("GCC cannot support operators with integer types and "
"fixed-point types that have too many integral and "
@@ -2168,7 +2168,7 @@ c_common_fixed_point_type_for_size (unsi
return NULL_TREE;
}

-  return c_common_type_for_mode (mode, satp);
+ return c_common_type_for_mode (*mode, satp);
}
/* Used for communication between c_common_type_for_mode and
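
Taken together, the hunks all follow the pattern that is most compact in the c_common_fixed_point_type_for_size change above: code that used to rely on a machine_mode loop variable being left as VOIDmode after an exhausted walk now keeps an opt_mode iterator, dereferences it with * to get the scalar_mode payload, and queries exists () afterwards. Schematically (fits_p is a hypothetical predicate, not something in the patch):

extern bool fits_p (machine_mode);	/* hypothetical predicate */

/* Pre-patch shape: VOIDmode doubles as the "nothing found" marker.  */
bool
old_style_search (machine_mode *result)
{
  machine_mode mode;
  FOR_EACH_MODE_IN_CLASS (mode, MODE_INT)
    if (fits_p (mode))
      break;
  if (mode == VOIDmode)
    return false;
  *result = mode;
  return true;
}

/* New shape: the iterator itself carries the found/not-found state and
   the payload is extracted with an explicit dereference.  */
bool
new_style_search (scalar_mode *result)
{
  opt_scalar_mode iter;
  FOR_EACH_MODE_IN_CLASS (iter, MODE_INT)
    if (fits_p (*iter))
      break;
  if (!iter.exists ())
    return false;
  *result = *iter;
  return true;
}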