===================================================================
@@ -241,6 +241,7 @@ #define POINTER_BOUNDS_MODE_P(MODE)
ALWAYS_INLINE opt_mode (from_int m) : m_mode (machine_mode (m)) {}
machine_mode else_void () const;
+ machine_mode else_blk () const;
T operator * () const;
bool exists () const;
@@ -260,6 +261,15 @@ opt_mode<T>::else_void () const
return m_mode;
}
+/* If the T exists, return its enum value, otherwise return E_BLKmode. */
+
+template<typename T>
+inline machine_mode
+opt_mode<T>::else_blk () const
+{
+ return m_mode == E_VOIDmode ? E_BLKmode : m_mode;
+}
+
/* Assert that the object contains a T and return it. */
template<typename T>
@@ -583,10 +593,9 @@ extern machine_mode smallest_mode_for_si
enum mode_class);
-/* Return an integer mode of the exact same size as the input mode,
- or BLKmode on failure. */
+/* Return an integer mode of exactly the same size as the input mode, if one exists. */
-extern machine_mode int_mode_for_mode (machine_mode);
+extern opt_scalar_int_mode int_mode_for_mode (machine_mode);
extern machine_mode bitwise_mode_for_mode (machine_mode);
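
For reference, the opt_scalar_int_mode result is typically consumed in one of three ways; a minimal sketch, not part of the patch, with do_something as a hypothetical placeholder and GCC's internal headers assumed:

    /* Sketch of the three caller idioms for the new return type.  */
    static void
    example_uses (machine_mode mode)
    {
      /* Dereference when the conversion is known to succeed; the
         dereference asserts internally if it does not.  */
      do_something (*int_mode_for_mode (mode));

      /* Test and extract when failure is possible.  */
      scalar_int_mode imode;
      if (int_mode_for_mode (mode).exists (&imode))
        do_something (imode);

      /* Keep the old BLKmode-on-failure behaviour where a plain
         machine_mode is still wanted.  */
      do_something (int_mode_for_mode (mode).else_blk ());
    }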
===================================================================
@@ -364,16 +364,16 @@ smallest_mode_for_size (unsigned int siz
return mode;
}
-/* Find an integer mode of the exact same size, or BLKmode on failure. */
+/* Return an integer mode of exactly the same size as MODE, if one exists. */
-machine_mode
+opt_scalar_int_mode
int_mode_for_mode (machine_mode mode)
{
switch (GET_MODE_CLASS (mode))
{
case MODE_INT:
case MODE_PARTIAL_INT:
- break;
+ return as_a <scalar_int_mode> (mode);
case MODE_COMPLEX_INT:
case MODE_COMPLEX_FLOAT:
@@ -390,12 +390,11 @@ int_mode_for_mode (machine_mode mode)
case MODE_VECTOR_UFRACT:
case MODE_VECTOR_UACCUM:
case MODE_POINTER_BOUNDS:
- mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
- break;
+ return int_mode_for_size (GET_MODE_BITSIZE (mode), 0);
case MODE_RANDOM:
if (mode == BLKmode)
- break;
+ return opt_scalar_int_mode ();
/* fall through */
@@ -403,8 +402,6 @@ int_mode_for_mode (machine_mode mode)
default:
gcc_unreachable ();
}
-
- return mode;
}
/* Find a mode that can be used for efficient bitwise operations on MODE.
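
To make the new contract concrete, a sketch of expected results on a typical target where SImode and SFmode are both 32 bits wide (illustrative checks only, not part of the patch; exact modes are target-dependent):

    /* MODE_INT inputs come back unchanged.  */
    gcc_checking_assert (*int_mode_for_mode (SImode) == SImode);
    /* MODE_FLOAT inputs map to the same-sized integer mode.  */
    gcc_checking_assert (*int_mode_for_mode (SFmode) == SImode);
    /* BLKmode has no integer equivalent, so the result is empty.  */
    gcc_checking_assert (!int_mode_for_mode (BLKmode).exists ());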
===================================================================
@@ -5413,8 +5413,7 @@ expand_builtin_signbit (tree exp, rtx ta
if (GET_MODE_SIZE (fmode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (fmode);
- gcc_assert (imode != BLKmode);
+ imode = *int_mode_for_mode (fmode);
temp = gen_lowpart (imode, temp);
}
else
===================================================================
@@ -4836,10 +4836,11 @@ expand_debug_expr (tree exp)
}
else
{
- machine_mode ifmode = int_mode_for_mode (mode);
- machine_mode ihmode = int_mode_for_mode (imode);
+ scalar_int_mode ifmode;
+ scalar_int_mode ihmode;
rtx halfsize;
- if (ifmode == BLKmode || ihmode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&ifmode)
+ || !int_mode_for_mode (imode).exists (&ihmode))
return NULL;
halfsize = GEN_INT (GET_MODE_BITSIZE (ihmode));
re = op0;
===================================================================
@@ -8444,8 +8444,8 @@ gen_lowpart_or_truncate (machine_mode mo
{
/* Bit-cast X into an integer mode. */
if (!SCALAR_INT_MODE_P (GET_MODE (x)))
- x = gen_lowpart (int_mode_for_mode (GET_MODE (x)), x);
- x = simplify_gen_unary (TRUNCATE, int_mode_for_mode (mode),
+ x = gen_lowpart (*int_mode_for_mode (GET_MODE (x)), x);
+ x = simplify_gen_unary (TRUNCATE, *int_mode_for_mode (mode),
x, GET_MODE (x));
}
@@ -11519,7 +11519,7 @@ gen_lowpart_for_combine (machine_mode om
if (imode == VOIDmode)
{
- imode = int_mode_for_mode (omode);
+ imode = *int_mode_for_mode (omode);
x = gen_lowpart_common (imode, x);
if (x == NULL)
goto fail;
===================================================================
@@ -8148,7 +8148,7 @@ aarch64_emit_approx_sqrt (rtx dst, rtx s
}
machine_mode mmsk
- = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (mode)),
+ = mode_for_vector (*int_mode_for_mode (GET_MODE_INNER (mode)),
GET_MODE_NUNITS (mode));
if (!recp)
{
===================================================================
@@ -283,7 +283,7 @@ avr_to_int_mode (rtx x)
return VOIDmode == mode
? x
- : simplify_gen_subreg (int_mode_for_mode (mode), x, mode, 0);
+ : simplify_gen_subreg (*int_mode_for_mode (mode), x, mode, 0);
}
namespace {
@@ -7737,7 +7737,7 @@ avr_out_plus_1 (rtx *xop, int *plen, enu
machine_mode mode = GET_MODE (xop[0]);
/* INT_MODE of the same size. */
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
/* Number of bytes to operate on. */
int n_bytes = GET_MODE_SIZE (mode);
@@ -8240,7 +8240,7 @@ avr_out_plus (rtx insn, rtx *xop, int *p
rtx xpattern = INSN_P (insn) ? single_set (as_a <rtx_insn *> (insn)) : insn;
rtx xdest = SET_DEST (xpattern);
machine_mode mode = GET_MODE (xdest);
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
int n_bytes = GET_MODE_SIZE (mode);
enum rtx_code code_sat = GET_CODE (SET_SRC (xpattern));
enum rtx_code code
@@ -9175,7 +9175,7 @@ #define MAY_CLOBBER(RR)
avr_out_round (rtx_insn *insn ATTRIBUTE_UNUSED, rtx *xop, int *plen)
{
machine_mode mode = GET_MODE (xop[0]);
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
// The smallest fractional bit not cleared by the rounding is 2^(-RP).
int fbit = (int) GET_MODE_FBIT (mode);
double_int i_add = double_int_zero.set_bit (fbit-1 - INTVAL (xop[2]));
===================================================================
@@ -26215,7 +26215,7 @@ ix86_split_to_parts (rtx operand, rtx *p
if (GET_CODE (operand) == CONST_VECTOR)
{
- machine_mode imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
/* Caution: if we looked through a constant pool memory above,
the operand may actually have a different mode now. That's
ok, since we want to pun this all the way back to an integer. */
===================================================================
@@ -38666,10 +38666,8 @@ rs6000_do_expand_vec_perm (rtx target, r
imode = vmode;
if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
- {
- imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
- imode = mode_for_vector (imode, nelt);
- }
+ imode = mode_for_vector (*int_mode_for_mode (GET_MODE_INNER (vmode)),
+ nelt);
x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
x = expand_vec_perm (vmode, op0, op1, x, target);
===================================================================
@@ -35748,10 +35748,8 @@ rs6000_do_expand_vec_perm (rtx target, r
imode = vmode;
if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
- {
- imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
- imode = mode_for_vector (imode, nelt);
- }
+ imode = mode_for_vector (*int_mode_for_mode (GET_MODE_INNER (vmode)),
+ nelt);
x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
x = expand_vec_perm (vmode, op0, op1, x, target);
===================================================================
@@ -6464,7 +6464,7 @@ s390_expand_vec_compare_cc (rtx target,
default: gcc_unreachable ();
}
scratch_mode = mode_for_vector (
- int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
+ *int_mode_for_mode (GET_MODE_INNER (GET_MODE (cmp1))),
GET_MODE_NUNITS (GET_MODE (cmp1)));
gcc_assert (scratch_mode != BLKmode);
@@ -6572,8 +6572,9 @@ s390_expand_vcond (rtx target, rtx then,
/* We always use an integral type vector to hold the comparison
result. */
- result_mode = mode_for_vector (int_mode_for_mode (GET_MODE_INNER (cmp_mode)),
- GET_MODE_NUNITS (cmp_mode));
+ result_mode
+ = mode_for_vector (*int_mode_for_mode (GET_MODE_INNER (cmp_mode)),
+ GET_MODE_NUNITS (cmp_mode));
result_target = gen_reg_rtx (result_mode);
/* We allow vector immediates as comparison operands that
===================================================================
@@ -1492,10 +1492,9 @@ spu_split_immediate (rtx * ops)
unsigned char arrlo[16];
rtx to, temp, hi, lo;
int i;
- machine_mode imode = mode;
/* We need to do reals as ints because the constant used in the
IOR might not be a legitimate real constant. */
- imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
constant_to_array (mode, ops[1], arrhi);
if (imode != mode)
to = simplify_gen_subreg (imode, ops[0], mode, 0);
@@ -1521,10 +1520,9 @@ spu_split_immediate (rtx * ops)
unsigned char arr_andbi[16];
rtx to, reg_fsmbi, reg_and;
int i;
- machine_mode imode = mode;
/* We need to do reals as ints because the constant used in the
* AND might not be a legitimate real constant. */
- imode = int_mode_for_mode (mode);
+ scalar_int_mode imode = *int_mode_for_mode (mode);
constant_to_array (mode, ops[1], arr_fsmbi);
if (imode != mode)
to = simplify_gen_subreg(imode, ops[0], GET_MODE (ops[0]), 0);
@@ -4429,7 +4427,7 @@ spu_expand_mov (rtx * ops, machine_mode
if (GET_CODE (ops[1]) == SUBREG && !valid_subreg (ops[1]))
{
rtx from = SUBREG_REG (ops[1]);
- machine_mode imode = int_mode_for_mode (GET_MODE (from));
+ scalar_int_mode imode = *int_mode_for_mode (GET_MODE (from));
gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
&& GET_MODE_CLASS (imode) == MODE_INT
===================================================================
@@ -1734,12 +1734,12 @@ get_stored_val (store_info *store_info,
{
/* The store is a memset (addr, const_val, const_size). */
gcc_assert (CONST_INT_P (store_info->rhs));
- store_mode = int_mode_for_mode (read_mode);
- if (store_mode == BLKmode)
+ scalar_int_mode int_store_mode;
+ if (!int_mode_for_mode (read_mode).exists (&int_store_mode))
read_reg = NULL_RTX;
else if (store_info->rhs == const0_rtx)
- read_reg = extract_low_bits (read_mode, store_mode, const0_rtx);
- else if (GET_MODE_BITSIZE (store_mode) > HOST_BITS_PER_WIDE_INT
+ read_reg = extract_low_bits (read_mode, int_store_mode, const0_rtx);
+ else if (GET_MODE_BITSIZE (int_store_mode) > HOST_BITS_PER_WIDE_INT
|| BITS_PER_UNIT >= HOST_BITS_PER_WIDE_INT)
read_reg = NULL_RTX;
else
@@ -1753,8 +1753,8 @@ get_stored_val (store_info *store_info,
c |= (c << shift);
shift <<= 1;
}
- read_reg = gen_int_mode (c, store_mode);
- read_reg = extract_low_bits (read_mode, store_mode, read_reg);
+ read_reg = gen_int_mode (c, int_store_mode);
+ read_reg = extract_low_bits (read_mode, int_store_mode, read_reg);
}
}
else if (store_info->const_rhs
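
The memset case above builds the read value by repeatedly doubling the byte pattern until it fills a HOST_WIDE_INT; a standalone sketch of that step using plain 64-bit arithmetic (hypothetical helper, not GCC code):

    #include <stdint.h>
    #include <assert.h>

    /* Replicate the low byte of VAL across all bytes of a 64-bit word,
       mirroring the shift-and-OR loop in get_stored_val.  */
    static uint64_t
    replicate_byte (uint64_t val)
    {
      uint64_t c = val & 0xff;
      for (int shift = 8; shift < 64; shift <<= 1)
        c |= c << shift;
      return c;
    }

    int
    main (void)
    {
      assert (replicate_byte (0xab) == 0xababababababababULL);
      return 0;
    }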
===================================================================
@@ -828,19 +828,15 @@ store_bit_field_1 (rtx str_rtx, unsigned
if we aren't. This must come after the entire register case above,
since that case is valid for any mode. The following cases are only
valid for integral modes. */
- {
- machine_mode imode = int_mode_for_mode (GET_MODE (op0));
- if (imode != GET_MODE (op0))
- {
- if (MEM_P (op0))
- op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
- else
- {
- gcc_assert (imode != BLKmode);
- op0 = gen_lowpart (imode, op0);
- }
- }
- }
+ opt_scalar_int_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (!imode.exists () || *imode != GET_MODE (op0))
+ {
+ if (MEM_P (op0))
+ op0 = adjust_bitfield_address_size (op0, imode.else_blk (),
+ 0, MEM_SIZE (op0));
+ else
+ op0 = gen_lowpart (*imode, op0);
+ }
/* Storing an lsb-aligned field in a register
can be done with a movstrict instruction. */
@@ -955,7 +951,7 @@ store_bit_field_1 (rtx str_rtx, unsigned
&& GET_MODE_CLASS (GET_MODE (value)) != MODE_INT
&& GET_MODE_CLASS (GET_MODE (value)) != MODE_PARTIAL_INT)
{
- value = gen_reg_rtx (int_mode_for_mode (GET_MODE (value)));
+ value = gen_reg_rtx (*int_mode_for_mode (GET_MODE (value)));
emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
}
@@ -1425,8 +1421,7 @@ convert_extracted_bit_field (rtx x, mach
value via a SUBREG. */
if (!SCALAR_INT_MODE_P (tmode))
{
- scalar_int_mode int_mode
- = *int_mode_for_size (GET_MODE_BITSIZE (tmode), 0);
+ scalar_int_mode int_mode = *int_mode_for_mode (tmode);
x = convert_to_mode (int_mode, x, unsignedp);
x = force_reg (int_mode, x);
return gen_lowpart (tmode, x);
@@ -1531,7 +1526,6 @@ extract_bit_field_1 (rtx str_rtx, unsign
bool reverse, bool fallback_p, rtx *alt_rtl)
{
rtx op0 = str_rtx;
- machine_mode int_mode;
machine_mode mode1;
if (tmode == VOIDmode)
@@ -1620,30 +1614,29 @@ extract_bit_field_1 (rtx str_rtx, unsign
/* Make sure we are playing with integral modes. Pun with subregs
if we aren't. */
- {
- machine_mode imode = int_mode_for_mode (GET_MODE (op0));
- if (imode != GET_MODE (op0))
- {
- if (MEM_P (op0))
- op0 = adjust_bitfield_address_size (op0, imode, 0, MEM_SIZE (op0));
- else if (imode != BLKmode)
- {
- op0 = gen_lowpart (imode, op0);
-
- /* If we got a SUBREG, force it into a register since we
- aren't going to be able to do another SUBREG on it. */
- if (GET_CODE (op0) == SUBREG)
- op0 = force_reg (imode, op0);
- }
- else
- {
- HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
- rtx mem = assign_stack_temp (GET_MODE (op0), size);
- emit_move_insn (mem, op0);
- op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
- }
- }
- }
+ opt_scalar_int_mode imode = int_mode_for_mode (GET_MODE (op0));
+ if (!imode.exists () || *imode != GET_MODE (op0))
+ {
+ if (MEM_P (op0))
+ op0 = adjust_bitfield_address_size (op0, imode.else_blk (),
+ 0, MEM_SIZE (op0));
+ else if (imode.exists ())
+ {
+ op0 = gen_lowpart (*imode, op0);
+
+ /* If we got a SUBREG, force it into a register since we
+ aren't going to be able to do another SUBREG on it. */
+ if (GET_CODE (op0) == SUBREG)
+ op0 = force_reg (*imode, op0);
+ }
+ else
+ {
+ HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (op0));
+ rtx mem = assign_stack_temp (GET_MODE (op0), size);
+ emit_move_insn (mem, op0);
+ op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
+ }
+ }
/* ??? We currently assume TARGET is at least as big as BITSIZE.
If that's wrong, the solution is to test for it and set TARGET to 0
@@ -1847,11 +1840,11 @@ extract_bit_field_1 (rtx str_rtx, unsign
/* Find a correspondingly-sized integer field, so we can apply
shifts and masks to it. */
- int_mode = int_mode_for_mode (tmode);
- if (int_mode == BLKmode)
- int_mode = int_mode_for_mode (mode);
- /* Should probably push op0 out to memory and then do a load. */
- gcc_assert (int_mode != BLKmode);
+ scalar_int_mode int_mode;
+ if (!int_mode_for_mode (tmode).exists (&int_mode))
+ /* If this fails, we should probably push op0 out to memory and then
+ do a load. */
+ int_mode = *int_mode_for_mode (mode);
target = extract_fixed_bit_field (int_mode, op0, bitsize, bitnum, target,
unsignedp, reverse);
@@ -2206,9 +2199,8 @@ extract_low_bits (machine_mode mode, mac
return x;
}
- src_int_mode = int_mode_for_mode (src_mode);
- int_mode = int_mode_for_mode (mode);
- if (src_int_mode == BLKmode || int_mode == BLKmode)
+ if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
+ || !int_mode_for_mode (mode).exists (&int_mode))
return NULL_RTX;
if (!MODES_TIEABLE_P (src_int_mode, src_mode))
===================================================================
@@ -2094,17 +2094,17 @@ emit_group_load_1 (rtx *tmps, rtx dst, r
&& !MEM_P (orig_src)
&& GET_CODE (orig_src) != CONCAT)
{
- machine_mode imode = int_mode_for_mode (GET_MODE (orig_src));
- if (imode == BLKmode)
- src = assign_stack_temp (GET_MODE (orig_src), ssize);
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (orig_src)).exists (&imode))
+ {
+ src = gen_reg_rtx (imode);
+ emit_move_insn (gen_lowpart (GET_MODE (orig_src), src), orig_src);
+ }
else
- src = gen_reg_rtx (imode);
- if (imode != BLKmode)
- src = gen_lowpart (GET_MODE (orig_src), src);
- emit_move_insn (src, orig_src);
- /* ...and back again. */
- if (imode != BLKmode)
- src = gen_lowpart (imode, src);
+ {
+ src = assign_stack_temp (GET_MODE (orig_src), ssize);
+ emit_move_insn (src, orig_src);
+ }
emit_group_load_1 (tmps, dst, src, type, ssize);
return;
}
@@ -2368,14 +2368,18 @@ emit_group_store (rtx orig_dst, rtx src,
if (!SCALAR_INT_MODE_P (m)
&& !MEM_P (orig_dst) && GET_CODE (orig_dst) != CONCAT)
{
- machine_mode imode = int_mode_for_mode (GET_MODE (orig_dst));
- if (imode == BLKmode)
- dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (orig_dst)).exists (&imode))
+ {
+ dst = gen_reg_rtx (imode);
+ emit_group_store (dst, src, type, ssize);
+ dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ }
else
- dst = gen_reg_rtx (imode);
- emit_group_store (dst, src, type, ssize);
- if (imode != BLKmode)
- dst = gen_lowpart (GET_MODE (orig_dst), dst);
+ {
+ dst = assign_stack_temp (GET_MODE (orig_dst), ssize);
+ emit_group_store (dst, src, type, ssize);
+ }
emit_move_insn (orig_dst, dst);
return;
}
@@ -3283,12 +3287,11 @@ emit_move_change_mode (machine_mode new_
static rtx_insn *
emit_move_via_integer (machine_mode mode, rtx x, rtx y, bool force)
{
- machine_mode imode;
+ scalar_int_mode imode;
enum insn_code code;
/* There must exist a mode of the exact size we require. */
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL;
/* The target must support moves in this mode. */
===================================================================
@@ -2575,8 +2575,7 @@ expand_absneg_bit (enum rtx_code code, s
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
word = 0;
nwords = 1;
@@ -3269,8 +3268,7 @@ expand_copysign_absneg (scalar_float_mod
{
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
op1 = gen_lowpart (imode, op1);
}
@@ -3332,15 +3330,14 @@ expand_copysign_absneg (scalar_float_mod
expand_copysign_bit (scalar_float_mode mode, rtx op0, rtx op1, rtx target,
int bitpos, bool op0_is_abs)
{
- machine_mode imode;
+ scalar_int_mode imode;
int word, nwords, i;
rtx temp;
rtx_insn *insns;
if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD)
{
- imode = int_mode_for_mode (mode);
- if (imode == BLKmode)
+ if (!int_mode_for_mode (mode).exists (&imode))
return NULL_RTX;
word = 0;
nwords = 1;
===================================================================
@@ -933,8 +933,7 @@ ifcvt_can_use_mask_load_store (gimple *s
/* Mask should be integer mode of the same size as the load/store
mode. */
mode = TYPE_MODE (TREE_TYPE (lhs));
- if (int_mode_for_mode (mode) == BLKmode
- || VECTOR_MODE_P (mode))
+ if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
return false;
if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
===================================================================
@@ -3430,7 +3430,7 @@ vect_transform_slp_perm_load (slp_tree n
/* The generic VEC_PERM_EXPR code always uses an integral type of the
same size as the vector element being permuted. */
mask_element_type = lang_hooks.types.type_for_mode
- (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ (*int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
mask_type = get_vectype_for_scalar_type (mask_element_type);
nunits = TYPE_VECTOR_SUBPARTS (vectype);
mask = XALLOCAVEC (unsigned char, nunits);
===================================================================
@@ -6432,7 +6432,7 @@ vect_gen_perm_mask_any (tree vectype, co
nunits = TYPE_VECTOR_SUBPARTS (vectype);
mask_elt_type = lang_hooks.types.type_for_mode
- (int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
+ (*int_mode_for_mode (TYPE_MODE (TREE_TYPE (vectype))), 1);
mask_type = get_vectype_for_scalar_type (mask_elt_type);
mask_elts = XALLOCAVEC (tree, nunits);
===================================================================
@@ -6348,8 +6348,9 @@ prepare_call_arguments (basic_block bb,
{
/* For non-integer stack argument see also if they weren't
initialized by integers. */
- machine_mode imode = int_mode_for_mode (GET_MODE (mem));
- if (imode != GET_MODE (mem) && imode != BLKmode)
+ scalar_int_mode imode;
+ if (int_mode_for_mode (GET_MODE (mem)).exists (&imode)
+ && imode != GET_MODE (mem))
{
val = cselib_lookup (adjust_address_nv (mem, imode, 0),
imode, 0, VOIDmode);