GCC Middle and Back End API Reference
expmed.cc File Reference
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "predict.h"
#include "memmodel.h"
#include "tm_p.h"
#include "optabs.h"
#include "expmed.h"
#include "regs.h"
#include "emit-rtl.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "dojump.h"
#include "explow.h"
#include "expr.h"
#include "langhooks.h"
#include "tree-vector-builder.h"
#include "recog.h"
Include dependency graph for expmed.cc:

Data Structures

struct  init_expmed_rtl
 

Macros

#define EXACT_POWER_OF_2_OR_ZERO_P(x)
 

Functions

static bool store_integral_bit_field (rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, poly_uint64, poly_uint64, machine_mode, rtx, bool, bool)
 
static void store_fixed_bit_field (rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, poly_uint64, poly_uint64, rtx, scalar_int_mode, bool)
 
static void store_fixed_bit_field_1 (rtx, scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, scalar_int_mode, bool)
 
static void store_split_bit_field (rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, poly_uint64, poly_uint64, rtx, scalar_int_mode, bool)
 
static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int, rtx, machine_mode, machine_mode, bool, bool)
 
static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, int, bool)
 
static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, rtx, int, bool)
 
static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int)
 
static rtx extract_split_bit_field (rtx, opt_scalar_int_mode, unsigned HOST_WIDE_INT, unsigned HOST_WIDE_INT, int, bool)
 
static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *)
 
static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT)
 
static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT)
 
static rtx mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
 
static void init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode, scalar_int_mode from_mode, bool speed)
 
static void init_expmed_one_mode (struct init_expmed_rtl *all, machine_mode mode, int speed)
 
void init_expmed (void)
 
rtx negate_rtx (machine_mode mode, rtx x)
 
static void check_reverse_storage_order_support (void)
 
static void check_reverse_float_storage_order_support (void)
 
rtx flip_storage_order (machine_mode mode, rtx x)
 
static rtx narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, unsigned HOST_WIDE_INT *new_bitnum)
 
static rtx adjust_bit_field_mem_for_reg (enum extraction_pattern pattern, rtx op0, HOST_WIDE_INT bitsize, HOST_WIDE_INT bitnum, poly_uint64 bitregion_start, poly_uint64 bitregion_end, machine_mode fieldmode, unsigned HOST_WIDE_INT *new_bitnum)
 
static bool lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize, machine_mode struct_mode)
 
static bool strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, scalar_int_mode fieldmode, poly_uint64 bitregion_start, poly_uint64 bitregion_end)
 
static bool simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum, machine_mode mode, poly_uint64 *bytenum)
 
static bool store_bit_field_using_insv (const extraction_insn *insv, rtx op0, opt_scalar_int_mode op0_mode, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, rtx value, scalar_int_mode value_mode)
 
static bool store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, poly_uint64 bitregion_start, poly_uint64 bitregion_end, machine_mode fieldmode, rtx value, bool reverse, bool fallback_p, bool undefined_p)
 
void store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, poly_uint64 bitregion_start, poly_uint64 bitregion_end, machine_mode fieldmode, rtx value, bool reverse, bool undefined_p)
 
static rtx convert_extracted_bit_field (rtx x, machine_mode mode, machine_mode tmode, bool unsignedp)
 
static rtx extract_bit_field_using_extv (const extraction_insn *extv, rtx op0, opt_scalar_int_mode op0_mode, unsigned HOST_WIDE_INT bitsize, unsigned HOST_WIDE_INT bitnum, int unsignedp, rtx target, machine_mode mode, machine_mode tmode)
 
static rtx extract_bit_field_as_subreg (machine_mode mode, rtx op0, machine_mode op0_mode, poly_uint64 bitsize, poly_uint64 bitnum)
 
static rtx extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, int unsignedp, rtx target, machine_mode mode, machine_mode tmode, bool reverse, bool fallback_p, rtx *alt_rtl)
 
rtx extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum, int unsignedp, rtx target, machine_mode mode, machine_mode tmode, bool reverse, rtx *alt_rtl)
 
rtx extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
 
void expand_inc (rtx target, rtx inc)
 
void expand_dec (rtx target, rtx dec)
 
static rtx expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted, rtx amount, rtx target, int unsignedp, bool may_fail=false)
 
rtx expand_shift (enum tree_code code, machine_mode mode, rtx shifted, poly_int64 amount, rtx target, int unsignedp)
 
rtx maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted, int amount, rtx target, int unsignedp)
 
rtx expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted, tree amount, rtx target, int unsignedp)
 
static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT, const struct mult_cost *, machine_mode mode)
 
static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx, const struct algorithm *, enum mult_variant)
 
static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int)
 
static rtx extract_high_half (scalar_int_mode, rtx)
 
static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int)
 
bool choose_mult_variant (machine_mode mode, HOST_WIDE_INT val, struct algorithm *alg, enum mult_variant *variant, int mult_cost)
 
rtx expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp, bool no_libcall)
 
int mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
 
rtx expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp, optab this_optab)
 
unsigned HOST_WIDE_INT choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision, unsigned HOST_WIDE_INT *multiplier_ptr, int *post_shift_ptr)
 
rtx expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0, rtx op1, rtx target, int unsignedp)
 
rtx expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1, rtx target, int unsignedp, int max_cost)
 
rtx expand_divmod (int rem_flag, enum tree_code code, machine_mode mode, rtx op0, rtx op1, rtx target, int unsignedp, enum optab_methods methods)
 
tree make_tree (tree type, rtx x)
 
rtx expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
 
rtx emit_cstore (rtx target, enum insn_code icode, enum rtx_code code, machine_mode mode, machine_mode compare_mode, int unsignedp, rtx x, rtx y, int normalizep, machine_mode target_mode)
 
static rtx emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1, machine_mode mode, int unsignedp, int normalizep, machine_mode target_mode)
 
rtx emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0, rtx op1, scalar_int_mode mode, int unsignedp, int normalizep, rtx trueval)
 
rtx emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1, machine_mode mode, int unsignedp, int normalizep)
 
rtx emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1, machine_mode mode, int unsignedp, int normalizep)
 
rtx expand_rotate_as_vec_perm (machine_mode mode, rtx dst, rtx x, rtx amt)
 
static enum rtx_code equivalent_cmp_code (enum rtx_code code)
 
void canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
 

Variables

struct target_expmed default_target_expmed
 
static int reverse_storage_order_supported = -1
 
static int reverse_float_storage_order_supported = -1
 

Macro Definition Documentation

◆ EXACT_POWER_OF_2_OR_ZERO_P

#define EXACT_POWER_OF_2_OR_ZERO_P ( x)
Value:
(((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
#define HOST_WIDE_INT_1U
Definition hwint.h:70
Test whether a value is zero or a power of two.   

Referenced by expand_divmod(), expand_mult(), and expand_widening_mult().

Function Documentation

◆ adjust_bit_field_mem_for_reg()

static rtx adjust_bit_field_mem_for_reg ( enum extraction_pattern pattern,
rtx op0,
HOST_WIDE_INT bitsize,
HOST_WIDE_INT bitnum,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end,
machine_mode fieldmode,
unsigned HOST_WIDE_INT * new_bitnum )
static
The caller wants to perform insertion or extraction PATTERN on a
bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
BITREGION_START and BITREGION_END are as for store_bit_field
and FIELDMODE is the natural mode of the field.

Search for a mode that is compatible with the memory access
restrictions and (where applicable) with a register insertion or
extraction.  Return the new memory on success, storing the adjusted
bit position in *NEW_BITNUM.  Return null otherwise.   

References extraction_insn::field_mode, get_best_reg_extraction_insn(), GET_MODE_BITSIZE(), GET_MODE_SIZE(), MEM_ALIGN, MEM_VOLATILE_P, narrow_bit_field_mem(), bit_field_mode_iterator::next_mode(), NULL_RTX, bit_field_mode_iterator::prefer_smaller_modes(), and word_mode.

Referenced by extract_integral_bit_field(), and store_integral_bit_field().

◆ canonicalize_comparison()

void canonicalize_comparison ( machine_mode mode,
enum rtx_code * code,
rtx * imm )
Choose the more appropriate immediate in scalar integer comparisons.  The
purpose of this is to end up with an immediate which can be loaded into a
register in fewer moves, if possible.

For each integer comparison there exists an equivalent choice:
  i)   a >  b or a >= b + 1
  ii)  a <= b or a <  b + 1
  iii) a >= b or a >  b - 1
  iv)  a <  b or a <= b - 1

MODE is the mode of the first operand.
CODE points to the comparison code.
IMM points to the rtx containing the immediate.  *IMM must satisfy
CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
on exit.   

References wi::add(), can_create_pseudo_p, equivalent_cmp_code(), gen_move_insn(), gen_rtx_REG(), immed_wide_int_const(), insn_cost(), LAST_VIRTUAL_REGISTER, expand_operand::mode, wi::OVF_NONE, SCALAR_INT_MODE_P, SIGNED, wi::sub(), UNSIGNED, and unsigned_condition_p().

Referenced by emit_store_flag_1(), and prepare_cmp_insn().

◆ check_reverse_float_storage_order_support()

static void check_reverse_float_storage_order_support ( void )
static
Check whether reverse FP storage order is supported on the target.   

References FLOAT_WORDS_BIG_ENDIAN, reverse_float_storage_order_supported, and sorry().

Referenced by flip_storage_order().

◆ check_reverse_storage_order_support()

static void check_reverse_storage_order_support ( void )
static
Check whether reverse storage order is supported on the target.   

References reverse_storage_order_supported, and sorry().

Referenced by flip_storage_order().

◆ choose_mult_variant()

bool choose_mult_variant ( machine_mode mode,
HOST_WIDE_INT val,
struct algorithm * alg,
enum mult_variant * variant,
int mult_cost )
Find the cheapest way of multiplying a value of mode MODE by VAL.
Try three variations:

    - a shift/add sequence based on VAL itself
    - a shift/add sequence based on -VAL, followed by a negation
    - a shift/add sequence based on VAL - 1, followed by an addition.

Return true if the cheapest of these cost less than MULT_COST,
describing the algorithm in *ALG and final fixup in *VARIANT.   

References add_cost(), add_variant, basic_variant, CHEAPER_MULT_COST, algorithm::cost, mult_cost::cost, GET_MODE_UNIT_BITSIZE, GET_MODE_UNIT_PRECISION, HOST_BITS_PER_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, HOST_WIDE_INT_MIN, mult_cost::latency, MULT_COST_LESS, neg_cost(), negate_variant, optimize_insn_for_speed_p(), and synth_mult().

Referenced by expand_expr_real_2(), expand_mult(), expand_widening_mult(), expmed_mult_highpart(), mult_by_coeff_cost(), and vect_synth_mult_by_constant().

◆ choose_multiplier()

unsigned HOST_WIDE_INT choose_multiplier ( unsigned HOST_WIDE_INT d,
int n,
int precision,
unsigned HOST_WIDE_INT * multiplier_ptr,
int * post_shift_ptr )
Choose a minimal N + 1 bit approximation to 2**K / D that can be used to
replace division by D, put the least significant N bits of the result in
*MULTIPLIER_PTR, the value K - N in *POST_SHIFT_PTR, and return the most
significant bit.

The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
needed precision is PRECISION (should be <= N).

PRECISION should be as small as possible so this function can choose the
multiplier more freely.  If PRECISION is <= N - 1, the most significant
bit returned by the function will be zero.

Using this function, x / D is equal to (x*m) / 2**N >> (*POST_SHIFT_PTR),
where m is the full N + 1 bit multiplier.   

References ceil_log2(), wi::extract_uhwi(), gcc_assert, HOST_BITS_PER_DOUBLE_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, wi::set_bit_in_zero(), generic_wide_int< storage >::to_uhwi(), wi::udiv_trunc(), and wi::uhwi().

Referenced by expand_divmod(), expand_vector_divmod(), and vect_recog_divmod_pattern().

◆ convert_extracted_bit_field()

static rtx convert_extracted_bit_field ( rtx x,
machine_mode mode,
machine_mode tmode,
bool unsignedp )
static
A subroutine of extract_bit_field_1 that converts return value X
to either MODE or TMODE.  MODE, TMODE and UNSIGNEDP are arguments
to extract_bit_field.   

References convert_to_mode(), force_reg(), gen_lowpart, GET_MODE, int_mode_for_mode(), expand_operand::mode, opt_mode< T >::require(), and SCALAR_INT_MODE_P.

Referenced by extract_bit_field(), extract_bit_field_1(), extract_bit_field_using_extv(), and extract_integral_bit_field().

◆ do_cmp_and_jump()

static void do_cmp_and_jump ( rtx arg1,
rtx arg2,
enum rtx_code op,
machine_mode mode,
rtx_code_label * label )
static
Perform possibly multi-word comparison and conditional jump to LABEL
if ARG1 OP ARG2 is true where ARG1 and ARG2 are of mode MODE.  This is
now a thin wrapper around do_compare_rtx_and_jump.   

References do_compare_rtx_and_jump(), expand_operand::mode, NULL, NULL_RTX, and profile_probability::uninitialized().

Referenced by expand_divmod(), expand_sdiv_pow2(), and expand_smod_pow2().

◆ emit_cstore()

rtx emit_cstore ( rtx target,
enum insn_code icode,
enum rtx_code code,
machine_mode mode,
machine_mode compare_mode,
int unsignedp,
rtx x,
rtx y,
int normalizep,
machine_mode target_mode )

◆ emit_store_flag()

rtx emit_store_flag ( rtx target,
enum rtx_code code,
rtx op0,
rtx op1,
machine_mode mode,
int unsignedp,
int normalizep )
Emit a store-flags instruction for comparison CODE on OP0 and OP1
and storing in TARGET.  Normally return TARGET.
Return 0 if that cannot be done.

MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
it is VOIDmode, they cannot both be CONST_INT.

UNSIGNEDP is for the case where we have to widen the operands
to perform the operation.  It says to use zero-extension.

NORMALIZEP is 1 if we should convert the result to be either zero
or one.  NORMALIZEP is -1 if we should convert the result to be
either zero or -1.  If NORMALIZEP is zero, the result will be left
"raw" out of the scc insn.   

References can_compare_p(), ccp_store_flag, const0_rtx, CONSTANT_P, delete_insns_since(), emit_conditional_move(), emit_store_flag_1(), emit_store_flag_int(), expand_binop(), gcc_assert, GEN_INT, gen_int_mode(), get_last_insn(), GET_MODE, GET_MODE_CLASS, HONOR_NANS(), HONOR_SNANS(), INTVAL, is_int_mode(), last, expand_operand::mode, NULL_RTX, OPTAB_WIDEN, optimize_insn_for_speed_p(), reverse_condition_maybe_unordered(), rtx_cost(), split_comparison(), STORE_FLAG_VALUE, expand_operand::target, and val_signbit_p().

Referenced by emit_store_flag_1(), emit_store_flag_force(), emit_store_flag_int(), expand_divmod(), expand_POPCOUNT(), expand_sdiv_pow2(), expand_smod_pow2(), noce_emit_store_flag(), and noce_try_sign_mask().

◆ emit_store_flag_1()

◆ emit_store_flag_force()

◆ emit_store_flag_int()

rtx emit_store_flag_int ( rtx target,
rtx subtarget,
enum rtx_code code,
rtx op0,
rtx op1,
scalar_int_mode mode,
int unsignedp,
int normalizep,
rtx trueval )
Subroutine of emit_store_flag that handles cases in which the operands
are scalar integers.  SUBTARGET is the target to use for temporary
operations and TRUEVAL is the value to store when the condition is
true.  All other arguments are as for emit_store_flag.   

References can_compare_p(), ccp_store_flag, const0_rtx, const1_rtx, convert_modes(), convert_move(), delete_insns_since(), emit_move_insn(), emit_store_flag(), emit_store_flag_1(), expand_binop(), expand_unop(), GEN_INT, gen_int_mode(), get_last_insn(), GET_MODE, GET_MODE_BITSIZE(), GET_MODE_SIZE(), INTVAL, last, maybe_expand_shift(), expand_operand::mode, optab_handler(), OPTAB_WIDEN, optimize_insn_for_speed_p(), reverse_condition(), rtx_cost(), rtx_equal_p(), STORE_FLAG_VALUE, expand_operand::target, and word_mode.

Referenced by emit_store_flag().

◆ equivalent_cmp_code()

static enum rtx_code equivalent_cmp_code ( enum rtx_code code)
static
Helper function for canonicalize_cmp_for_target.  Swap between inclusive
and exclusive ranges in order to create an equivalent comparison.  See
canonicalize_cmp_for_target for the possible cases.   

Referenced by canonicalize_comparison().

◆ expand_and()

rtx expand_and ( machine_mode mode,
rtx op0,
rtx op1,
rtx target )
Compute the logical-and of OP0 and OP1, storing it in TARGET
and returning TARGET.

If TARGET is 0, a pseudo-register or constant is returned.   

References emit_move_insn(), expand_binop(), GET_MODE, OPTAB_LIB_WIDEN, and simplify_binary_operation().

Referenced by emit_cstore(), expand_builtin_extract_return_addr(), expand_expr_real_2(), expand_mult_highpart_adjust(), optimize_bitfield_assignment_op(), and reduce_to_bit_field_precision().

◆ expand_dec()

void expand_dec ( rtx target,
rtx dec )

◆ expand_divmod()

rtx expand_divmod ( int rem_flag,
enum tree_code code,
machine_mode mode,
rtx op0,
rtx op1,
rtx target,
int unsignedp,
enum optab_methods methods )
Emit the code to divide OP0 by OP1, putting the result in TARGET
if that is convenient, and returning where the result is.
You may request either the quotient or the remainder as the result;
specify REM_FLAG nonzero to get the remainder.

CODE is the expression code for which kind of division this is;
it controls how rounding is done.  MODE is the machine mode to use.
UNSIGNEDP nonzero means do unsigned division.   
??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
and then correct it by or'ing in missing high bits
if result of ANDI is nonzero.
For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
This could optimize to a bfexts instruction.
But C doesn't use these operations, so their optimizations are
left for later.   
??? For modulo, we don't actually need the highpart of the first product,
the low part will do nicely.  And for small divisors, the second multiply
can also be a low-part only multiply or even be completely left out.
E.g. to calculate the remainder of a division by 3 with a 32 bit
multiply, multiply with 0x55555556 and extract the upper two bits;
the result is exact for inputs up to 0x1fffffff.
The input range can be reduced by using cross-sum rules.
For odd divisors >= 3, the following table gives right shift counts
so that if a number is shifted by an integer multiple of the given
amount, the remainder stays the same:
2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12

Cross-sum rules for even numbers can be derived by leaving as many bits
to the right alone as the divisor has zeros to the right.
E.g. if x is an unsigned 32 bit number:
(x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28

References add_cost(), as_a(), BITS_PER_WORD, choose_multiplier(), const0_rtx, const1_rtx, CONST_INT_P, constm1_rtx, convert_modes(), copy_to_mode_reg(), ctz_or_zero(), delete_insns_since(), do_cmp_and_jump(), emit_barrier(), emit_jump_insn(), emit_label(), emit_move_insn(), emit_store_flag(), emit_store_flag_force(), EXACT_POWER_OF_2_OR_ZERO_P, expand_abs(), expand_binop(), expand_dec(), expand_divmod(), expand_inc(), expand_mult(), expand_sdiv_pow2(), expand_shift(), expand_smod_pow2(), expand_twoval_binop(), expand_twoval_binop_libfunc(), expand_unop(), expmed_mult_highpart(), floor_log2(), FOR_EACH_MODE_FROM, force_operand(), force_reg(), gcc_assert, gcc_unreachable, gen_int_mode(), gen_label_rtx(), gen_lowpart, gen_reg_rtx(), get_last_insn(), GET_MODE, GET_MODE_BITSIZE(), GET_MODE_CLASS, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, HOST_WIDE_INT_M1U, HOST_WIDE_INT_MIN, HWI_COMPUTABLE_MODE_P(), INTVAL, invert_mod2n(), last, MEM_P, MEM_VOLATILE_P, mul_cost(), wi::neg(), NULL_RTX, OPTAB_DIRECT, optab_handler(), OPTAB_LIB, OPTAB_LIB_WIDEN, optab_libfunc(), OPTAB_WIDEN, optimize_insn_for_speed_p(), plus_constant(), wi::popcount(), REG_FUNCTION_VALUE_P, reg_mentioned_p(), REG_P, sdiv_cost(), sdiv_pow2_cheap(), set_dst_reg_note(), shift_cost(), sign_expand_binop(), smod_pow2_cheap(), targetm, generic_wide_int< storage >::to_uhwi(), and udiv_cost().

Referenced by align_dynamic_address(), expand_divmod(), expand_doubleword_divmod(), expand_doubleword_mod(), expand_expr_divmod(), force_operand(), and round_push().

◆ expand_inc()

void expand_inc ( rtx target,
rtx inc )

◆ expand_mult()

rtx expand_mult ( machine_mode mode,
rtx op0,
rtx op1,
rtx target,
int unsignedp,
bool no_libcall )

◆ expand_mult_const()

static rtx expand_mult_const ( machine_mode mode,
rtx op0,
HOST_WIDE_INT val,
rtx target,
const struct algorithm * alg,
enum mult_variant variant )
static

◆ expand_mult_highpart_adjust()

rtx expand_mult_highpart_adjust ( scalar_int_mode mode,
rtx adj_operand,
rtx op0,
rtx op1,
rtx target,
int unsignedp )
Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
flavor of OP0 and OP1.  ADJ_OPERAND is already the high half of the
product OP0 x OP1.  If UNSIGNEDP is nonzero, adjust the signed product
to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
become signed.

The result is put in TARGET if that is convenient.

MODE is the mode of operation.   

References expand_and(), expand_shift(), force_operand(), GET_MODE_BITSIZE(), and NULL_RTX.

Referenced by expand_expr_real_2(), and expmed_mult_highpart_optab().

◆ expand_rotate_as_vec_perm()

rtx expand_rotate_as_vec_perm ( machine_mode mode,
rtx dst,
rtx x,
rtx amt )
Expand a vector (left) rotate of MODE of X by an immediate AMT as a vector
permute operation.  Emit code to put the result in DST if successful and
return it.  Otherwise return NULL.  This is intended to implement vector
rotates by byte amounts using vector permutes when the target does not offer
native vector rotate operations.   

References CONST_INT_P, emit_move_insn(), expand_vec_perm_const(), GET_MODE_INNER, GET_MODE_SIZE(), i, INTVAL, lowpart_subreg(), expand_operand::mode, int_vector_builder< T >::new_vector(), NULL_RTX, qimode_for_vec_perm(), and unwrap_const_vec_duplicate().

◆ expand_sdiv_pow2()

static rtx expand_sdiv_pow2 ( scalar_int_mode mode,
rtx op0,
HOST_WIDE_INT d )
static

◆ expand_shift()

◆ expand_shift_1()

static rtx expand_shift_1 ( enum tree_code code,
machine_mode mode,
rtx shifted,
rtx amount,
rtx target,
int unsignedp,
bool may_fail = false )
static
Output a shift instruction for expression code CODE,
with SHIFTED being the rtx for the value to shift,
and AMOUNT the rtx for the amount to shift by.
Store the result in the rtx TARGET, if that is convenient.
If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
Return the rtx for where the value is.
If that cannot be done, abort the compilation unless MAY_FAIL is true,
in which case 0 is returned.   

References add_cost(), const0_rtx, CONST_INT_P, expand_binop(), expand_shift_1(), expand_unop(), force_reg(), gcc_assert, gen_int_mode(), gen_int_shift_amount(), GET_CODE, GET_MODE, GET_MODE_BITSIZE(), GET_MODE_INNER, GET_MODE_PRECISION(), GET_MODE_SIZE(), i, INTVAL, MAX_BITS_PER_WORD, MAX_COST, expand_operand::mode, NULL_RTX, OPTAB_DIRECT, optab_handler(), OPTAB_LIB_WIDEN, OPTAB_MUST_WIDEN, OPTAB_WIDEN, optimize_insn_for_speed_p(), reverse_rotate_by_imm_p(), SCALAR_INT_MODE_P, shift_cost(), SHIFT_COUNT_TRUNCATED, simplify_gen_binary(), simplify_gen_unary(), subreg_lowpart_p(), SUBREG_REG, expand_operand::target, and VECTOR_MODE_P.

Referenced by expand_shift(), expand_shift_1(), expand_variable_shift(), and maybe_expand_shift().

◆ expand_smod_pow2()

◆ expand_variable_shift()

rtx expand_variable_shift ( enum tree_code code,
machine_mode mode,
rtx shifted,
tree amount,
rtx target,
int unsignedp )
Output a shift instruction for expression code CODE,
with SHIFTED being the rtx for the value to shift,
and AMOUNT the tree for the amount to shift by.
Store the result in the rtx TARGET, if that is convenient.
If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
Return the rtx for where the value is.   

References expand_normal(), expand_shift_1(), expand_operand::mode, and expand_operand::target.

Referenced by expand_expr_real_2().

◆ expand_widening_mult()

rtx expand_widening_mult ( machine_mode mode,
rtx op0,
rtx op1,
rtx target,
int unsignedp,
optab this_optab )
Perform a widening multiplication and return an rtx for the result.
MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
TARGET is a suggestion for where to store the result (an rtx).
THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
or smul_widen_optab.

We check specially for a constant integer as OP1, comparing the
cost of a widening multiply against the cost of a sequence of shifts
and adds.   

References choose_mult_variant(), CONST0_RTX, CONST_INT_P, convert_modes(), convert_to_mode(), EXACT_POWER_OF_2_OR_ZERO_P, expand_binop(), expand_mult_const(), expand_shift(), floor_log2(), GET_MODE, HWI_COMPUTABLE_MODE_P(), INTVAL, mul_widen_cost(), OPTAB_LIB_WIDEN, and optimize_insn_for_speed_p().

Referenced by expand_expr_real_2().

◆ expmed_mult_highpart()

static rtx expmed_mult_highpart ( scalar_int_mode mode,
rtx op0,
rtx op1,
rtx target,
int unsignedp,
int max_cost )
static
Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
putting the high half of the result in TARGET if that is convenient,
and return where the result is.  If the operation cannot be performed,
0 is returned.

MODE is the mode of operation and result.

UNSIGNEDP nonzero means unsigned multiply.

MAX_COST is the total allowed cost for the expanded RTL.   

References add_cost(), BITS_PER_WORD, choose_mult_variant(), convert_to_mode(), algorithm::cost, mult_cost::cost, expand_mult_const(), expmed_mult_highpart_optab(), extract_high_half(), force_operand(), gcc_assert, gen_int_mode(), GET_MODE_BITSIZE(), GET_MODE_MASK, GET_MODE_WIDER_MODE(), HWI_COMPUTABLE_MODE_P(), INTVAL, optimize_insn_for_speed_p(), and shift_cost().

Referenced by expand_divmod().

◆ expmed_mult_highpart_optab()

◆ extract_bit_field()

rtx extract_bit_field ( rtx str_rtx,
poly_uint64 bitsize,
poly_uint64 bitnum,
int unsignedp,
rtx target,
machine_mode mode,
machine_mode tmode,
bool reverse,
rtx * alt_rtl )
Generate code to extract a byte-field from STR_RTX
containing BITSIZE bits, starting at BITNUM,
and put it in TARGET if possible (if TARGET is nonzero).
Regardless of TARGET, we return the rtx for where the value is placed.

STR_RTX is the structure containing the byte (a REG or MEM).
UNSIGNEDP is nonzero if this is an unsigned bit field.
MODE is the natural mode of the field value once extracted.
TMODE is the mode the caller would like the value to have;
but the value may be returned with type MODE instead.

If REVERSE is true, the extraction is to be done in reverse order.

If a TARGET is specified and we can store in it at no extra cost,
we do so, and return TARGET.
Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
if they are equally easy.

If the result can be stored at TARGET, and ALT_RTL is non-NULL,
then *ALT_RTL is set to TARGET (before legitimization).   

References adjust_bitfield_address, convert_extracted_bit_field(), copy_to_reg(), extract_bit_field_1(), flip_storage_order(), gcc_assert, GET_MODE, GET_MODE_BITSIZE(), is_a(), poly_int< N, C >::is_constant(), expand_operand::mode, narrow_bit_field_mem(), strict_volatile_bitfield_p(), and expand_operand::target.

Referenced by assign_parm_setup_reg(), copy_blkmode_from_reg(), copy_blkmode_to_reg(), emit_group_load_1(), expand_expr_real_1(), expand_misaligned_mem_ref(), expand_single_bit_test(), load_register_parameters(), read_complex_part(), store_field(), store_integral_bit_field(), and store_unaligned_arguments_into_pseudos().

◆ extract_bit_field_1()

static rtx extract_bit_field_1 ( rtx str_rtx,
poly_uint64 bitsize,
poly_uint64 bitnum,
int unsignedp,
rtx target,
machine_mode mode,
machine_mode tmode,
bool reverse,
bool fallback_p,
rtx * alt_rtl )
static

◆ extract_bit_field_as_subreg()

static rtx extract_bit_field_as_subreg ( machine_mode mode,
rtx op0,
machine_mode op0_mode,
poly_uint64 bitsize,
poly_uint64 bitnum )
static
See whether it would be valid to extract the part of OP0 with
mode OP0_MODE described by BITNUM and BITSIZE into a value of
mode MODE using a subreg operation.
Return the subreg if so, otherwise return null.   

References force_subreg(), GET_MODE_BITSIZE(), known_eq, lowpart_bit_field_p(), expand_operand::mode, NULL_RTX, and TRULY_NOOP_TRUNCATION_MODES_P.

Referenced by extract_bit_field_1().

◆ extract_bit_field_using_extv()

static rtx extract_bit_field_using_extv ( const extraction_insn * extv,
rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
int unsignedp,
rtx target,
machine_mode mode,
machine_mode tmode )
static
Try to use an ext(z)v pattern to extract a field from OP0.
Return the extracted value on success, otherwise return null.
EXTV describes the extraction instruction to use.  If OP0_MODE
is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
The other arguments are as for extract_bit_field.   

References convert_extracted_bit_field(), create_fixed_operand(), create_integer_operand(), create_output_operand(), extraction_insn::field_mode, gen_lowpart_if_possible(), gen_lowpart_SUBREG(), gen_reg_rtx(), GET_CODE, GET_MODE, GET_MODE_BITSIZE(), extraction_insn::icode, maybe_expand_insn(), MEM_P, expand_operand::mode, narrow_bit_field_mem(), NULL_RTX, partial_subreg_p(), REG_P, opt_mode< T >::require(), extraction_insn::struct_mode, expand_operand::target, TRULY_NOOP_TRUNCATION_MODES_P, and expand_operand::value.

Referenced by extract_integral_bit_field().

◆ extract_fixed_bit_field()

static rtx extract_fixed_bit_field ( machine_mode tmode,
rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
rtx target,
int unsignedp,
bool reverse )
static
Use shifts and boolean operations to extract a field of BITSIZE bits
from bit BITNUM of OP0.  If OP0_MODE is defined, it is the mode of OP0,
otherwise OP0 is a BLKmode MEM.

UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
If REVERSE is true, the extraction is to be done in reverse order.

If TARGET is nonzero, attempts to store the value there
and return TARGET, but this is not guaranteed.
If TARGET is not used, create a pseudo-reg of mode TMODE for the value.   

References BITS_PER_WORD, extract_fixed_bit_field_1(), extract_split_bit_field(), get_best_mode(), MEM_ALIGN, MEM_P, MEM_VOLATILE_P, expand_operand::mode, narrow_bit_field_mem(), opt_mode< T >::require(), and expand_operand::target.

Referenced by extract_integral_bit_field(), extract_split_bit_field(), and store_split_bit_field().

◆ extract_fixed_bit_field_1()

static rtx extract_fixed_bit_field_1 ( machine_mode tmode,
rtx op0,
scalar_int_mode mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
rtx target,
int unsignedp,
bool reverse )
static
Helper function for extract_fixed_bit_field, extracts
the bit field always using MODE, which is the mode of OP0.
If UNSIGNEDP is -1, the result need not be sign or zero extended.
The other arguments are as for extract_fixed_bit_field.   

References as_a(), convert_to_mode(), expand_binop(), expand_shift(), flip_storage_order(), FOR_EACH_MODE_IN_CLASS, force_reg(), GET_MODE_BITSIZE(), mask_rtx(), expand_operand::mode, new_mode(), OPTAB_LIB_WIDEN, REG_P, opt_mode< T >::require(), and expand_operand::target.

Referenced by extract_fixed_bit_field().

◆ extract_high_half()

static rtx extract_high_half ( scalar_int_mode mode,
rtx op )
static
Subroutine of expmed_mult_highpart.  Return the MODE high part of OP.   

References convert_modes(), expand_shift(), gen_highpart(), GET_MODE_BITSIZE(), GET_MODE_WIDER_MODE(), algorithm::op, and word_mode.

Referenced by expmed_mult_highpart(), and expmed_mult_highpart_optab().

◆ extract_integral_bit_field()

static rtx extract_integral_bit_field ( rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
int unsignedp,
rtx target,
machine_mode mode,
machine_mode tmode,
bool reverse,
bool fallback_p )
static

◆ extract_low_bits()

rtx extract_low_bits ( machine_mode mode,
machine_mode src_mode,
rtx src )
Try to read the low bits of SRC as an rvalue of mode MODE, preserving
the bit pattern.  SRC_MODE is the mode of SRC; if this is smaller than
MODE, fill the upper bits with zeros.  Fail if the layout of either
mode is unknown (as for CC modes) or if the extraction would involve
unprofitable mode punning.  Return the value on success, otherwise
return null.

This is different from gen_lowpart* in these respects:

  - the returned value must always be considered an rvalue

  - when MODE is wider than SRC_MODE, the extraction involves
    a zero extension

  - when MODE is smaller than SRC_MODE, the extraction involves
    a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).

In other words, this routine performs a computation, whereas the
gen_lowpart* routines are conceptually lvalue or rvalue subreg
operations.   

References CONSTANT_P, convert_modes(), force_reg(), gen_lowpart, gen_lowpart_common(), gen_rtx_SUBREG(), GET_MODE, GET_MODE_BITSIZE(), GET_MODE_CLASS, int_mode_for_mode(), known_eq, expand_operand::mode, NULL_RTX, simplify_subreg(), subreg_lowpart_offset(), targetm, and validate_subreg().

Referenced by find_shift_sequence(), and get_stored_val().

◆ extract_split_bit_field()

static rtx extract_split_bit_field ( rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitpos,
int unsignedp,
bool reverse )
static
Extract a bit field that is split across two words
and return an RTX for the result.

OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
BITSIZE is the field width; BITPOS is the position of its first bit within the word.
UNSIGNEDP is 1 if the contents should be zero-extended; otherwise they are sign-extended.
If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
a BLKmode MEM.

If REVERSE is true, the extraction is to be done in reverse order.   

References BITS_PER_WORD, expand_binop(), expand_shift(), extract_fixed_bit_field(), GET_CODE, MEM_ALIGN, MIN, NULL_RTX, offset, operand_subword_force(), OPTAB_LIB_WIDEN, REG_P, opt_mode< T >::require(), SUBREG_P, and word_mode.

Referenced by extract_fixed_bit_field(), and extract_integral_bit_field().

◆ flip_storage_order()

◆ init_expmed()

◆ init_expmed_one_conv()

◆ init_expmed_one_mode()

◆ invert_mod2n()

static unsigned HOST_WIDE_INT invert_mod2n ( unsigned HOST_WIDE_INT x,
int n )
static
Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is congruent
to 1 modulo 2**N, assuming that X is odd.  Bézout's lemma guarantees that Y
exists for any given positive N.   

References gcc_assert, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, HOST_WIDE_INT_M1U, and y.

Referenced by expand_divmod().

◆ lowpart_bit_field_p()

static bool lowpart_bit_field_p ( poly_uint64 bitnum,
poly_uint64 bitsize,
machine_mode struct_mode )
static
Return true if a bitfield of size BITSIZE at bit number BITNUM within
a structure of mode STRUCT_MODE represents a lowpart subreg.   The subreg
offset is then BITNUM / BITS_PER_UNIT.   

References GET_MODE_BITSIZE(), known_eq, and REGMODE_NATURAL_SIZE.

Referenced by extract_bit_field_as_subreg(), and store_integral_bit_field().

◆ lshift_value()

static rtx lshift_value ( machine_mode mode,
unsigned HOST_WIDE_INT value,
int bitpos )
static
Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
VALUE << BITPOS.   

References immed_wide_int_const(), wi::lshift(), expand_operand::mode, and expand_operand::value.

Referenced by store_fixed_bit_field_1().

◆ make_tree()

tree make_tree ( tree type,
rtx x )
Return a tree node with data type TYPE, describing the value of X.
Usually this is a VAR_DECL, if there is no obvious better choice.
X may be an expression, however we only support those expressions
generated by loop.c.   

References tree_vector_builder::build(), build2(), build_decl(), build_fold_addr_expr, build_real(), CONST_DOUBLE_LOW, CONST_DOUBLE_REAL_VALUE, CONST_POLY_INT_P, const_poly_int_value(), CONST_VECTOR_ELT, CONST_VECTOR_NELTS_PER_PATTERN, CONST_VECTOR_NPATTERNS, convert_memory_address_addr_space(), count, vector_builder< T, Shape, Derived >::encoded_nelts(), fold_build1, fold_build2, fold_convert, wide_int_storage::from_array(), GET_CODE, GET_MODE, HOST_BITS_PER_WIDE_INT, i, make_tree(), NULL_TREE, POINTER_TYPE_P, RTL_LOCATION, SCALAR_INT_TYPE_MODE, signed_type_for(), STATIC_ASSERT, SYMBOL_REF_DECL, TARGET_SUPPORTS_WIDE_INT, TREE_CODE, TREE_TYPE, type(), TYPE_ADDR_SPACE, lang_hooks_for_types::type_for_mode, TYPE_MODE, lang_hooks::types, unsigned_type_for(), wide_int_to_tree(), and XEXP.

Referenced by assign_parm_setup_reg(), emit_block_op_via_libcall(), expand_asm_stmt(), expand_builtin_cexpi(), expand_call(), expand_DIVMOD(), expand_doubleword_mod(), expand_ifn_atomic_bit_test_and(), expand_ifn_atomic_compare_exchange_into_call(), expand_mul_overflow(), expand_sjlj_dispatch_table(), expand_vector_ubsan_overflow(), initialize_argument_information(), make_tree(), maybe_optimize_mod_cmp(), maybe_optimize_pow2p_mod_cmp(), set_storage_via_libcall(), sjlj_emit_function_enter(), and store_expr().

◆ mask_rtx()

static rtx mask_rtx ( scalar_int_mode mode,
int bitpos,
int bitsize,
bool complement )
inlinestatic
Return a constant integer mask value of mode MODE with BITSIZE ones
followed by BITPOS zeros, or the complement of that if COMPLEMENT.
The mask is truncated if necessary to the width of mode MODE.  The
mask is zero-extended if BITSIZE+BITPOS is too small for MODE.   

References GET_MODE_PRECISION(), immed_wide_int_const(), and wi::shifted_mask().

Referenced by add_mask_and_len_args(), extract_fixed_bit_field_1(), simplify_shift_const_1(), and store_fixed_bit_field_1().

◆ maybe_expand_shift()

rtx maybe_expand_shift ( enum tree_code code,
machine_mode mode,
rtx shifted,
int amount,
rtx target,
int unsignedp )
Likewise, but return 0 if that cannot be done.   

References expand_shift_1(), GEN_INT, expand_operand::mode, and expand_operand::target.

Referenced by convert_mode_scalar(), and emit_store_flag_int().

◆ mult_by_coeff_cost()

int mult_by_coeff_cost ( HOST_WIDE_INT coeff,
machine_mode mode,
bool speed )
Return a cost estimate for multiplying a register by the given
COEFFicient in the given MODE and SPEED.   

References choose_mult_variant(), algorithm::cost, mult_cost::cost, gen_raw_REG(), LAST_VIRTUAL_REGISTER, and set_src_cost().

Referenced by analyze_increments(), force_expr_to_var_cost(), get_address_cost(), get_computation_cost(), most_expensive_mult_to_index(), and stmt_cost().

◆ narrow_bit_field_mem()

static rtx narrow_bit_field_mem ( rtx mem,
opt_scalar_int_mode mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
unsigned HOST_WIDE_INT * new_bitnum )
static
If MODE is set, adjust bitfield memory MEM so that it points to the
first unit of mode MODE that contains a bitfield of size BITSIZE at
bit position BITNUM.  If MODE is not set, return a BLKmode reference
to every byte in the bitfield.  Set *NEW_BITNUM to the bit position
of the field within the new memory.   

References adjust_bitfield_address, adjust_bitfield_address_size, GET_MODE_BITSIZE(), alg_hash_entry::mode, and offset.

Referenced by adjust_bit_field_mem_for_reg(), extract_bit_field(), extract_bit_field_using_extv(), extract_fixed_bit_field(), store_bit_field(), store_bit_field_using_insv(), and store_fixed_bit_field().

◆ negate_rtx()

rtx negate_rtx ( machine_mode mode,
rtx x )
Return an rtx representing minus the value of X.
MODE is the intended mode of the result,
useful if X is a CONST_INT.   

References expand_unop(), alg_hash_entry::mode, NULL_RTX, and simplify_unary_operation().

Referenced by expand_binop(), expand_builtin_apply(), expand_expr_real_2(), fill_slots_from_thread(), force_operand(), and push_block().

◆ simple_mem_bitfield_p()

static bool simple_mem_bitfield_p ( rtx op0,
poly_uint64 bitsize,
poly_uint64 bitnum,
machine_mode mode,
poly_uint64 * bytenum )
static
Return true if OP is a memory and if a bitfield of size BITSIZE at
bit number BITNUM can be treated as a simple value of mode MODE.
Store the byte offset in *BYTENUM if so.   

References GET_MODE_ALIGNMENT, GET_MODE_BITSIZE(), known_eq, MEM_ALIGN, MEM_P, alg_hash_entry::mode, and targetm.

Referenced by extract_bit_field_1(), and store_bit_field_1().

◆ store_bit_field()

void store_bit_field ( rtx str_rtx,
poly_uint64 bitsize,
poly_uint64 bitnum,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end,
machine_mode fieldmode,
rtx value,
bool reverse,
bool undefined_p )
Generate code to store value from rtx VALUE
into a bit-field within structure STR_RTX
containing BITSIZE bits starting at bit BITNUM.

BITREGION_START is bitpos of the first bitfield in this region.
BITREGION_END is the bitpos of the ending bitfield in this region.
These two fields are 0, if the C++ memory model does not apply,
or we are not interested in keeping track of bitfield regions.

FIELDMODE is the machine-mode of the FIELD_DECL node for this field.

If REVERSE is true, the store is to be done in reverse order.

If UNDEFINED_P is true then STR_RTX is currently undefined.   

References adjust_bitfield_address, adjust_bitfield_address_size, bits_to_bytes_round_up, copy_to_reg(), emit_move_insn(), flip_storage_order(), gcc_assert, gcc_unreachable, get_best_mode(), GET_MODE_BITSIZE(), INT_MAX, is_a(), poly_int< N, C >::is_constant(), MEM_ALIGN, MEM_P, MEM_VOLATILE_P, narrow_bit_field_mem(), offset, store_bit_field_1(), strict_volatile_bitfield_p(), and expand_operand::value.

Referenced by copy_blkmode_from_reg(), copy_blkmode_to_reg(), emit_group_store(), expand_assignment(), expand_expr_real_2(), noce_emit_move_insn(), store_expr(), store_field(), store_unaligned_arguments_into_pseudos(), and write_complex_part().

◆ store_bit_field_1()

static bool store_bit_field_1 ( rtx str_rtx,
poly_uint64 bitsize,
poly_uint64 bitnum,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end,
machine_mode fieldmode,
rtx value,
bool reverse,
bool fallback_p,
bool undefined_p )
static

◆ store_bit_field_using_insv()

static bool store_bit_field_using_insv ( const extraction_insn * insv,
rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
rtx value,
scalar_int_mode value_mode )
static

◆ store_fixed_bit_field()

static void store_fixed_bit_field ( rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end,
rtx value,
scalar_int_mode value_mode,
bool reverse )
static
Use shifts and boolean operations to store VALUE into a bit field of
width BITSIZE in OP0, starting at bit BITNUM.  If OP0_MODE is defined,
it is the mode of OP0, otherwise OP0 is a BLKmode MEM.  VALUE_MODE is
the mode of VALUE.

If REVERSE is true, the store is to be done in reverse order.   

References BITS_PER_WORD, opt_mode< T >::exists(), get_best_mode(), GET_MODE_BITSIZE(), MEM_ALIGN, MEM_P, MEM_VOLATILE_P, narrow_bit_field_mem(), opt_mode< T >::require(), store_fixed_bit_field_1(), store_split_bit_field(), and expand_operand::value.

Referenced by store_integral_bit_field(), and store_split_bit_field().

◆ store_fixed_bit_field_1()

static void store_fixed_bit_field_1 ( rtx op0,
scalar_int_mode mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
rtx value,
scalar_int_mode value_mode,
bool reverse )
static
Helper function for store_fixed_bit_field, stores
the bit field always using MODE, which is the mode of OP0.  The other
arguments are as for store_fixed_bit_field.   

References CONST_INT_P, convert_to_mode(), copy_rtx(), emit_move_insn(), expand_binop(), expand_shift(), flip_storage_order(), force_reg(), GET_MODE_BITSIZE(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, HOST_WIDE_INT_M1U, lshift_value(), mask_rtx(), expand_operand::mode, NULL_RTX, OPTAB_LIB_WIDEN, UINTVAL, and expand_operand::value.

Referenced by store_fixed_bit_field().

◆ store_integral_bit_field()

◆ store_split_bit_field()

static void store_split_bit_field ( rtx op0,
opt_scalar_int_mode op0_mode,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitpos,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end,
rtx value,
scalar_int_mode value_mode,
bool reverse )
static
Store a bit field that is split across multiple accessible memory objects.

OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
BITSIZE is the field width; BITPOS the position of its first bit
(within the word).
VALUE is the value to store, which has mode VALUE_MODE.
If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
a BLKmode MEM.

If REVERSE is true, the store is to be done in reverse order.

This does not yet handle fields wider than BITS_PER_WORD.   

References BITS_PER_WORD, const0_rtx, CONST_INT_P, CONSTANT_P, opt_mode< T >::exists(), extract_fixed_bit_field(), force_reg(), GEN_INT, gen_lowpart_common(), GET_CODE, GET_MODE, GET_MODE_BITSIZE(), GET_MODE_SIZE(), HOST_WIDE_INT_1, INTVAL, maybe_gt, MEM_ALIGN, MEM_P, MIN, NULL_RTX, offset, operand_subword_force(), REG_P, opt_mode< T >::require(), store_fixed_bit_field(), SUBREG_P, SUBREG_REG, expand_operand::value, and word_mode.

Referenced by store_fixed_bit_field(), and store_integral_bit_field().

◆ strict_volatile_bitfield_p()

static bool strict_volatile_bitfield_p ( rtx op0,
unsigned HOST_WIDE_INT bitsize,
unsigned HOST_WIDE_INT bitnum,
scalar_int_mode fieldmode,
poly_uint64 bitregion_start,
poly_uint64 bitregion_end )
static
Return true if -fstrict-volatile-bitfields applies to an access of OP0
containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
Return false if the access would touch memory outside the range
BITREGION_START to BITREGION_END for conformance to the C++ memory
model.   

References BITS_PER_WORD, GET_MODE_BITSIZE(), maybe_gt, MEM_ALIGN, MEM_P, and MEM_VOLATILE_P.

Referenced by extract_bit_field(), and store_bit_field().

◆ synth_mult()

static void synth_mult ( struct algorithm * alg_out,
unsigned HOST_WIDE_INT t,
const struct mult_cost * cost_limit,
machine_mode mode )
static

Variable Documentation

◆ default_target_expmed

struct target_expmed default_target_expmed
Medium-level subroutines: convert bit-field store and extract
   and shifts, multiplies and divides to rtl instructions.
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.   
Work around tree-optimization/91825.   

◆ reverse_float_storage_order_supported

int reverse_float_storage_order_supported = -1
static
Whether reverse FP storage order is supported on the target.   

Referenced by check_reverse_float_storage_order_support(), and flip_storage_order().

◆ reverse_storage_order_supported

int reverse_storage_order_supported = -1
static
Whether reverse storage order is supported on the target.   

Referenced by check_reverse_storage_order_support(), and flip_storage_order().