GCC Middle and Back End API Reference
|
Typedefs | |
typedef poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< wide_int_ref_storage< false, false > > > | rtx_to_poly_wide_ref |
typedef extended_tree< WIDEST_INT_MAX_PRECISION > | widest_extended_tree |
typedef extended_tree< ADDR_MAX_PRECISION > | offset_extended_tree |
typedef const generic_wide_int< widest_extended_tree > | tree_to_widest_ref |
typedef const generic_wide_int< offset_extended_tree > | tree_to_offset_ref |
typedef const generic_wide_int< wide_int_ref_storage< false, false > > | tree_to_wide_ref |
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< widest_extended_tree > > | tree_to_poly_widest_ref |
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< offset_extended_tree > > | tree_to_poly_offset_ref |
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< unextended_tree > > | tree_to_poly_wide_ref |
Enumerations | |
enum | overflow_type { OVF_NONE = 0 , OVF_UNDERFLOW = -1 , OVF_OVERFLOW = 1 , OVF_UNKNOWN = 2 } |
enum | precision_type { FLEXIBLE_PRECISION , VAR_PRECISION , INL_CONST_PRECISION , CONST_PRECISION } |
Functions | |
template<unsigned int N> | |
poly_int< N, hwi_with_prec > | shwi (const poly_int< N, HOST_WIDE_INT > &a, unsigned int precision) |
template<unsigned int N> | |
poly_int< N, hwi_with_prec > | uhwi (const poly_int< N, unsigned HOST_WIDE_INT > &a, unsigned int precision) |
template<unsigned int N, typename Ca > | |
POLY_POLY_RESULT (N, Ca, Ca) | sext (const poly_int< N, Ca > &a, unsigned int precision) |
[Note: the generator garbled this entry — the declaration leaked part of its inline body ("for (unsigned int i = 0; i < N; i++) POLY_SET_COEFF (C, r, i, sext (a.coeffs[i], precision))") into the listing. The reconstructed signature above follows the poly_int overload pattern of shwi/uhwi.]
hwi_with_prec | shwi (HOST_WIDE_INT, machine_mode mode) |
wide_int | min_value (machine_mode, signop) |
wide_int | max_value (machine_mode, signop) |
rtx_to_poly_wide_ref | to_poly_wide (const_rtx, machine_mode) |
tree_to_widest_ref | to_widest (const_tree) |
tree_to_offset_ref | to_offset (const_tree) |
tree_to_wide_ref | to_wide (const_tree) |
wide_int | to_wide (const_tree, unsigned int) |
tree_to_poly_widest_ref | to_poly_widest (const_tree) |
tree_to_poly_offset_ref | to_poly_offset (const_tree) |
tree_to_poly_wide_ref | to_poly_wide (const_tree) |
template<typename T > | |
bool | fits_to_boolean_p (const T &x, const_tree) |
template<typename T > | |
bool | fits_to_tree_p (const T &x, const_tree) |
wide_int | min_value (const_tree) |
wide_int | max_value (const_tree) |
wide_int | from_mpz (const_tree, mpz_t, bool) |
template<typename T > | |
unsigned int | get_precision (const T &) |
template<typename T1 , typename T2 > | |
unsigned int | get_binary_precision (const T1 &, const T2 &) |
template<typename T1 , typename T2 > | |
void | copy (T1 &, const T2 &) |
UNARY_PREDICATE | fits_shwi_p (const T &) |
UNARY_PREDICATE | fits_uhwi_p (const T &) |
UNARY_PREDICATE | neg_p (const T &, signop=SIGNED) |
template<typename T > | |
HOST_WIDE_INT | sign_mask (const T &) |
BINARY_PREDICATE | eq_p (const T1 &, const T2 &) |
BINARY_PREDICATE | ne_p (const T1 &, const T2 &) |
BINARY_PREDICATE | lt_p (const T1 &, const T2 &, signop) |
BINARY_PREDICATE | lts_p (const T1 &, const T2 &) |
BINARY_PREDICATE | ltu_p (const T1 &, const T2 &) |
BINARY_PREDICATE | le_p (const T1 &, const T2 &, signop) |
BINARY_PREDICATE | les_p (const T1 &, const T2 &) |
BINARY_PREDICATE | leu_p (const T1 &, const T2 &) |
BINARY_PREDICATE | gt_p (const T1 &, const T2 &, signop) |
BINARY_PREDICATE | gts_p (const T1 &, const T2 &) |
BINARY_PREDICATE | gtu_p (const T1 &, const T2 &) |
BINARY_PREDICATE | ge_p (const T1 &, const T2 &, signop) |
BINARY_PREDICATE | ges_p (const T1 &, const T2 &) |
BINARY_PREDICATE | geu_p (const T1 &, const T2 &) |
template<typename T1 , typename T2 > | |
int | cmp (const T1 &, const T2 &, signop) |
template<typename T1 , typename T2 > | |
int | cmps (const T1 &, const T2 &) |
template<typename T1 , typename T2 > | |
int | cmpu (const T1 &, const T2 &) |
UNARY_FUNCTION | bit_not (const T &) |
UNARY_FUNCTION | neg (const T &) |
UNARY_FUNCTION | neg (const T &, overflow_type *) |
UNARY_FUNCTION | abs (const T &) |
UNARY_FUNCTION | ext (const T &, unsigned int, signop) |
UNARY_FUNCTION | sext (const T &, unsigned int) |
UNARY_FUNCTION | zext (const T &, unsigned int) |
UNARY_FUNCTION | set_bit (const T &, unsigned int) |
UNARY_FUNCTION | bswap (const T &) |
UNARY_FUNCTION | bitreverse (const T &) |
BINARY_FUNCTION | min (const T1 &, const T2 &, signop) |
BINARY_FUNCTION | smin (const T1 &, const T2 &) |
BINARY_FUNCTION | umin (const T1 &, const T2 &) |
BINARY_FUNCTION | max (const T1 &, const T2 &, signop) |
BINARY_FUNCTION | smax (const T1 &, const T2 &) |
BINARY_FUNCTION | umax (const T1 &, const T2 &) |
BINARY_FUNCTION | bit_and (const T1 &, const T2 &) |
BINARY_FUNCTION | bit_and_not (const T1 &, const T2 &) |
BINARY_FUNCTION | bit_or (const T1 &, const T2 &) |
BINARY_FUNCTION | bit_or_not (const T1 &, const T2 &) |
BINARY_FUNCTION | bit_xor (const T1 &, const T2 &) |
BINARY_FUNCTION | add (const T1 &, const T2 &) |
BINARY_FUNCTION | add (const T1 &, const T2 &, signop, overflow_type *) |
BINARY_FUNCTION | sub (const T1 &, const T2 &) |
BINARY_FUNCTION | sub (const T1 &, const T2 &, signop, overflow_type *) |
BINARY_FUNCTION | mul (const T1 &, const T2 &) |
BINARY_FUNCTION | mul (const T1 &, const T2 &, signop, overflow_type *) |
BINARY_FUNCTION | smul (const T1 &, const T2 &, overflow_type *) |
BINARY_FUNCTION | umul (const T1 &, const T2 &, overflow_type *) |
BINARY_FUNCTION | mul_high (const T1 &, const T2 &, signop) |
BINARY_FUNCTION | div_trunc (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | sdiv_trunc (const T1 &, const T2 &) |
BINARY_FUNCTION | udiv_trunc (const T1 &, const T2 &) |
BINARY_FUNCTION | div_floor (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | udiv_floor (const T1 &, const T2 &) |
BINARY_FUNCTION | sdiv_floor (const T1 &, const T2 &) |
BINARY_FUNCTION | div_ceil (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | udiv_ceil (const T1 &, const T2 &) |
BINARY_FUNCTION | div_round (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | divmod_trunc (const T1 &, const T2 &, signop, WI_BINARY_RESULT(T1, T2) *) |
BINARY_FUNCTION | gcd (const T1 &, const T2 &, signop=UNSIGNED) |
BINARY_FUNCTION | mod_trunc (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | smod_trunc (const T1 &, const T2 &) |
BINARY_FUNCTION | umod_trunc (const T1 &, const T2 &) |
BINARY_FUNCTION | mod_floor (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | umod_floor (const T1 &, const T2 &) |
BINARY_FUNCTION | mod_ceil (const T1 &, const T2 &, signop, overflow_type *=0) |
BINARY_FUNCTION | mod_round (const T1 &, const T2 &, signop, overflow_type *=0) |
template<typename T1 , typename T2 > | |
bool | multiple_of_p (const T1 &, const T2 &, signop) |
template<typename T1 , typename T2 > | |
bool | multiple_of_p (const T1 &, const T2 &, signop, WI_BINARY_RESULT(T1, T2) *) |
SHIFT_FUNCTION | lshift (const T1 &, const T2 &) |
SHIFT_FUNCTION | lrshift (const T1 &, const T2 &) |
SHIFT_FUNCTION | arshift (const T1 &, const T2 &) |
SHIFT_FUNCTION | rshift (const T1 &, const T2 &, signop sgn) |
SHIFT_FUNCTION | lrotate (const T1 &, const T2 &, unsigned int=0) |
SHIFT_FUNCTION | rrotate (const T1 &, const T2 &, unsigned int=0) |
bool | only_sign_bit_p (const wide_int_ref &, unsigned int) |
bool | only_sign_bit_p (const wide_int_ref &) |
int | clz (const wide_int_ref &) |
int | clrsb (const wide_int_ref &) |
int | ctz (const wide_int_ref &) |
int | exact_log2 (const wide_int_ref &) |
int | floor_log2 (const wide_int_ref &) |
int | ffs (const wide_int_ref &) |
int | popcount (const wide_int_ref &) |
int | parity (const wide_int_ref &) |
template<typename T > | |
unsigned HOST_WIDE_INT | extract_uhwi (const T &, unsigned int, unsigned int) |
template<typename T > | |
unsigned int | min_precision (const T &, signop) |
static void | accumulate_overflow (overflow_type &, overflow_type) |
unsigned int | force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, signop sgn) |
unsigned int | from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, bool=true) |
hwi_with_prec | shwi (HOST_WIDE_INT, unsigned int) |
hwi_with_prec | uhwi (unsigned HOST_WIDE_INT, unsigned int) |
hwi_with_prec | minus_one (unsigned int) |
hwi_with_prec | zero (unsigned int) |
hwi_with_prec | one (unsigned int) |
hwi_with_prec | two (unsigned int) |
bool | eq_p_large (const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
bool | lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int) |
bool | ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int) |
int | cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int) |
int | cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int) |
unsigned int | sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int) |
unsigned int | zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int) |
unsigned int | set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int) |
unsigned int | bswap_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | bitreverse_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int) |
unsigned int | lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, unsigned int) |
unsigned int | arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, unsigned int) |
unsigned int | and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int) |
unsigned int | add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *) |
unsigned int | sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *) |
unsigned int | mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *, bool) |
unsigned int | divmod_internal (HOST_WIDE_INT *, unsigned int *, HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *) |
wide_int | min_value (unsigned int, signop) |
wide_int | min_value (never_used1 *) |
wide_int | min_value (never_used2 *) |
wide_int | max_value (unsigned int, signop) |
wide_int | max_value (never_used1 *) |
wide_int | max_value (never_used2 *) |
wide_int | from_buffer (const unsigned char *, unsigned int) |
void | to_mpz (const wide_int_ref &, mpz_t, signop) |
wide_int | mask (unsigned int, bool, unsigned int) |
wide_int | shifted_mask (unsigned int, unsigned int, bool, unsigned int) |
wide_int | set_bit_in_zero (unsigned int, unsigned int) |
wide_int | insert (const wide_int &x, const wide_int &y, unsigned int, unsigned int) |
wide_int | round_down_for_mask (const wide_int &, const wide_int &) |
wide_int | round_up_for_mask (const wide_int &, const wide_int &) |
wide_int | mod_inv (const wide_int &a, const wide_int &b) |
template<typename T > | |
T | mask (unsigned int, bool) |
template<typename T > | |
T | shifted_mask (unsigned int, unsigned int, bool) |
template<typename T > | |
T | set_bit_in_zero (unsigned int) |
unsigned int | mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int) |
unsigned int | shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int, bool, unsigned int) |
Variables | |
Ca & | a |
Ca unsigned int | precision |
poly_int< N, C > | r |
i | |
The tree and const_tree overload templates.
Public functions for querying and operating on integers.
trailing_wide_int behaves like a wide_int.
Allow primitive C types to be used in wi:: routines.
Private functions for handling large cases out of line. They take individual length and array parameters because that is cheaper for the inline caller than constructing an object on the stack and passing a reference to it. (Although many callers use wide_int_refs, we generally want those to be removed by SRA.)
typedef poly_int<NUM_POLY_INT_COEFFS, generic_wide_int <wide_int_ref_storage <false, false> > > wi::rtx_to_poly_wide_ref |
typedef const generic_wide_int<offset_extended_tree> wi::tree_to_offset_ref |
typedef const poly_int<NUM_POLY_INT_COEFFS, generic_wide_int <offset_extended_tree> > wi::tree_to_poly_offset_ref |
typedef const poly_int<NUM_POLY_INT_COEFFS, generic_wide_int <unextended_tree> > wi::tree_to_poly_wide_ref |
typedef const poly_int<NUM_POLY_INT_COEFFS, generic_wide_int <widest_extended_tree> > wi::tree_to_poly_widest_ref |
typedef const generic_wide_int<wide_int_ref_storage<false, false> > wi::tree_to_wide_ref |
typedef const generic_wide_int<widest_extended_tree> wi::tree_to_widest_ref |
enum wi::overflow_type |
enum wi::precision_type |
UNARY_FUNCTION wi::abs | ( | const T & | ) |
|
inline static
Accumulate a set of overflows into OVERFLOW.
References OVF_UNKNOWN.
BINARY_FUNCTION wi::add | ( | const T1 & | , |
const T2 & | ) |
Referenced by add_one(), arith_overflowed_p(), bitmask_inv_cst_vector_p(), build_gt(), build_gt(), build_lt(), canon_condition(), canonicalize_comparison(), check_for_binary_op_overflow(), const_vector_int_elt(), tree_switch_conversion::switch_conversion::exp_index_transform(), expr_to_aff_combination(), fold_div_compare(), fuse_memset_builtins(), irange::invert(), prange::invert(), is_nonwrapping_integer_induction(), iv_can_overflow_p(), operator_plus::lhs_op1_relation(), maybe_canonicalize_mem_ref_addr(), minmax_replacement(), operator_plus::overflow_free_p(), plus_constant(), pointer_may_wrap_p(), poly_int_binop(), irange::set(), should_interchange_loops(), simplify_const_binary_operation(), subtract_one(), ubsan_type_descriptor(), vect_recog_divmod_pattern(), operator_plus::wi_fold(), operator_widen_plus_signed::wi_fold(), operator_widen_plus_unsigned::wi_fold(), and wide_int_binop().
BINARY_FUNCTION wi::add | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | ) |
unsigned int wi::add_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec, | ||
signop | sgn, | ||
wi::overflow_type * | overflow ) |
Set VAL to OP0 + OP1. If OVERFLOW is nonnull, record in *OVERFLOW whether the result overflows when OP0 and OP1 are treated as having signedness SGN. Return the number of blocks in VAL.
References canonize(), HOST_BITS_PER_WIDE_INT, i, MAX, OVF_NONE, OVF_OVERFLOW, OVF_UNDERFLOW, shift, SIGNED, top_bit_of(), and UNSIGNED.
unsigned int wi::and_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Set VAL to OP0 & OP1. Return the number of blocks used.
References canonize(), MAX, and top_bit_of().
unsigned int wi::and_not_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Set VAL to OP0 & ~OP1. Return the number of blocks used.
References canonize(), MAX, and top_bit_of().
SHIFT_FUNCTION wi::arshift | ( | const T1 & | , |
const T2 & | ) |
Referenced by optimize_bit_field_compare(), and simplify_const_binary_operation().
unsigned int wi::arshift_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | xprecision, | ||
unsigned int | precision, | ||
unsigned int | shift ) |
Arithmetically right shift XVAL by SHIFT and store the result in VAL. Return the number of blocks in VAL. XVAL has XPRECISION bits and VAL has PRECISION bits.
References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, MIN, precision, rshift_large_common(), sext_hwi(), and shift.
BINARY_FUNCTION wi::bit_and | ( | const T1 & | , |
const T2 & | ) |
Referenced by irange::contains_p(), get_default_value(), handle_builtin_alloca(), irange_bitmask::intersect(), is_widening_mult_rhs_p(), ipcp_bits_lattice::known_nonzero_p(), maybe_optimize_mod_cmp(), ipcp_bits_lattice::set_to_constant(), simplify_const_binary_operation(), tree_nonzero_bits(), irange_bitmask::verify_mask(), wi_optimize_and_or(), and wide_int_binop().
BINARY_FUNCTION wi::bit_and_not | ( | const T1 & | , |
const T2 & | ) |
Referenced by bit_value_binop(), bit_value_binop(), bit_value_mult_const(), dump_lattice_value(), evaluate_stmt(), expr_not_equal_to(), fold_binary_loc(), get_value_from_alignment(), insert(), ipa_compute_jump_functions_for_edge(), masked_increment(), maybe_set_nonzero_bits(), round_down_for_mask(), round_up_for_mask(), set_lattice_value(), simplify_using_ranges::simplify_bit_ops_using_ranges(), valid_lattice_transition(), value_mask_to_min_max(), operator_bitwise_xor::wi_fold(), and wi_set_zero_nonzero_bits().
UNARY_FUNCTION wi::bit_not | ( | const T & | ) |
Referenced by analyze_and_compute_bitwise_induction_effect(), bit_value_binop(), bitmask_inv_cst_vector_p(), ipcp_bits_lattice::known_nonzero_p(), maybe_set_nonzero_bits(), operator_bitwise_and::op1_range(), ipcp_bits_lattice::set_to_constant(), simplify_const_unary_operation(), and vn_walk_cb_data::vn_walk_cb_data().
BINARY_FUNCTION wi::bit_or | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::bit_or_not | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::bit_xor | ( | const T1 & | , |
const T2 & | ) |
Referenced by simplify_const_binary_operation(), and wide_int_binop().
UNARY_FUNCTION wi::bitreverse | ( | const T & | ) |
Referenced by simplify_const_unary_operation().
unsigned int wi::bitreverse_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | len, | ||
unsigned int | precision ) |
Bitreverse the integer represented by XVAL and LEN into VAL. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References canonize(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, offset, precision, and safe_uhwi().
UNARY_FUNCTION wi::bswap | ( | const T & | ) |
Referenced by evaluate_stmt(), fold_const_call_ss(), and simplify_const_unary_operation().
unsigned int wi::bswap_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | precision ) |
Byte swap the integer represented by XVAL and XLEN into VAL. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References BLOCKS_NEEDED, canonize(), gcc_assert, HOST_BITS_PER_WIDE_INT, offset, precision, and safe_uhwi().
int wi::clrsb | ( | const wide_int_ref & | x | ) |
Return the number of redundant sign bits in X. (That is, the number of bits immediately below the sign bit that have the same value as the sign bit.)
References clz_hwi(), count, HOST_BITS_PER_WIDE_INT, mask(), and generic_wide_int< storage >::uhigh().
Referenced by bit_value_binop(), fold_const_call_ss(), min_precision(), simplify_const_binary_operation(), simplify_const_unary_operation(), and wi_optimize_signed_bitwise_op().
int wi::clz | ( | const wide_int_ref & | x | ) |
Return the number of leading (upper) zeros in X.
References clz_hwi(), count, HOST_BITS_PER_WIDE_INT, generic_wide_int< storage >::sign_mask(), and generic_wide_int< storage >::uhigh().
Referenced by bit_value_binop(), floor_log2(), fold_const_call_ss(), fold_const_call_sss(), get_bitmask_from_range(), min_precision(), optimize_range_tests_cmp_bitwise(), optimize_range_tests_to_bit_test(), real_from_integer(), round_down_for_mask(), round_up_for_mask(), simplify_const_binary_operation(), and simplify_const_unary_operation().
|
inline |
Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Signedness of X and Y indicated by SGN.
References cmps(), cmpu(), SIGNED, and y.
Referenced by bit_value_binop(), compare_nonzero_chars(), compare_values_warnv(), get_array_ctor_element_at_index(), simplify_conversion_using_ranges(), value_range_from_overflowed_bounds(), and irange::verify_range().
|
inline |
Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y as signed values.
References cmps_large(), fits_shwi_p(), get_binary_precision(), neg_p(), precision, STATIC_CONSTANT_P, WIDE_INT_REF_FOR, and y.
Referenced by cmp(), fold_nonarray_ctor_reference(), order_drefs(), predict_iv_comparison(), tree_int_cst_compare(), and operator_bitwise_and::wi_fold().
int wi::cmps_large | ( | const HOST_WIDE_INT * | op0, |
unsigned int | op0len, | ||
unsigned int | precision, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len ) |
Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using signed compares.
References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and SIGNED.
Referenced by cmps().
|
inline |
Return -1 if X < Y, 0 if X == Y and 1 if X > Y. Treat both X and Y as unsigned values.
References cmpu_large(), get_binary_precision(), LIKELY, precision, STATIC_CONSTANT_P, WIDE_INT_REF_FOR, and y.
Referenced by cmp(), do_warn_aggressive_loop_optimizations(), fold_array_ctor_reference(), set_strlen_range(), and wide_int_cmp().
int wi::cmpu_large | ( | const HOST_WIDE_INT * | op0, |
unsigned int | op0len, | ||
unsigned int | precision, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len ) |
Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using unsigned compares.
References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and UNSIGNED.
Referenced by cmpu().
|
inline |
Copy the contents of Y to X, but keeping X's current precision.
Referenced by fixed_wide_int_storage< N >::fixed_wide_int_storage(), trailing_wide_int_storage::operator=(), wide_int_storage::operator=(), widest_int_storage< N >::operator=(), WI_BINARY_RESULT(), wide_int_storage::wide_int_storage(), and widest_int_storage< N >::widest_int_storage().
int wi::ctz | ( | const wide_int_ref & | x | ) |
Return the number of trailing (lower) zeros in X.
References ctz_hwi(), HOST_BITS_PER_WIDE_INT, i, and generic_wide_int< storage >::ulow().
Referenced by irange_bitmask::adjust_range(), bit_value_binop(), bit_value_mult_const(), emit_block_cmp_via_loop(), emit_block_move_via_oriented_loop(), emit_block_move_via_sized_loop(), exact_int_to_float_conversion_p(), ffs(), fold_const_call_ss(), fold_const_call_sss(), get_individual_bits(), mask_with_tz(), maybe_optimize_mod_cmp(), maybe_set_nonzero_bits(), num_ending_zeros(), number_of_iterations_ne_max(), only_sign_bit_p(), simplify_const_unary_operation(), tree_ctz(), vn_walk_cb_data::vn_walk_cb_data(), and wi_optimize_and_or().
BINARY_FUNCTION wi::div_ceil | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by operator_div::wi_op_overflows(), and wide_int_binop().
BINARY_FUNCTION wi::div_floor | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by idx_within_array_bound(), operator_div::wi_op_overflows(), and wide_int_binop().
BINARY_FUNCTION wi::div_round | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by operator_div::wi_op_overflows(), and wide_int_binop().
BINARY_FUNCTION wi::div_trunc | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
unsigned int wi::divmod_internal | ( | HOST_WIDE_INT * | quotient, |
unsigned int * | remainder_len, | ||
HOST_WIDE_INT * | remainder, | ||
const HOST_WIDE_INT * | dividend_val, | ||
unsigned int | dividend_len, | ||
unsigned int | dividend_prec, | ||
const HOST_WIDE_INT * | divisor_val, | ||
unsigned int | divisor_len, | ||
unsigned int | divisor_prec, | ||
signop | sgn, | ||
wi::overflow_type * | oflow ) |
Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate the result. If QUOTIENT is nonnull, store the value of the quotient there and return the number of blocks in it. The return value is not defined otherwise. If REMAINDER is nonnull, store the value of the remainder there and store the number of blocks in *REMAINDER_LEN. If OFLOW is not null, store in *OFLOW whether the division overflowed.
References BLOCKS_NEEDED, canonize_uhwi(), divmod_internal_2(), fits_shwi_p(), fits_uhwi_p(), gcc_checking_assert, HOST_BITS_PER_HALF_WIDE_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_MIN, i, MIN, neg_p(), only_sign_bit_p(), OVF_NONE, OVF_OVERFLOW, SIGNED, sub_large(), generic_wide_int< storage >::to_shwi(), generic_wide_int< storage >::to_uhwi(), UNLIKELY, UNSIGNED, wi_pack(), wi_unpack(), WIDE_INT_MAX_INL_PRECISION, and zeros.
BINARY_FUNCTION wi::divmod_trunc | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
WI_BINARY_RESULT(T1, T2) * | ) |
Referenced by maybe_optimize_mod_cmp(), mod_inv(), multiple_of_p(), and print_decu().
BINARY_PREDICATE wi::eq_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by arith_cast_equal_p(), create_component_ref_by_pieces_1(), tree_switch_conversion::switch_conversion::exp_index_transform(), ffs(), operator_equal::fold_range(), operator_equal::fold_range(), operator_logical_and::fold_range(), operator_not_equal::fold_range(), operator_not_equal::fold_range(), get_size_range(), gimplify_scan_omp_clauses(), integer_onep(), if_chain::is_beneficial(), operator_bitwise_and::lhs_op1_relation(), lower_omp_ordered_clauses(), minmax_replacement(), mod_inv(), operator_abs::op1_range(), operator_equal::op1_range(), operator_equal::op1_range(), operator_not_equal::op1_range(), operator_not_equal::op1_range(), simplify_const_binary_operation(), simplify_const_relational_operation(), ssa_name_has_boolean_range(), try_unroll_loop_completely(), cross_product_operator::wi_cross_product(), operator_abs::wi_fold(), operator_bitwise_and::wi_fold(), operator_lshift::wi_fold(), wi_optimize_and_or(), wi_set_zero_nonzero_bits(), and wi_zero_p().
bool wi::eq_p_large | ( | const HOST_WIDE_INT * | op0, |
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Return true if OP0 == OP1.
References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, and zext_hwi().
int wi::exact_log2 | ( | const wide_int_ref & | x | ) |
If X is an exact power of 2, return the base-2 logarithm, otherwise return -1.
References exact_log2(), HOST_BITS_PER_WIDE_INT, i, generic_wide_int< storage >::sign_mask(), and zext_hwi().
Referenced by bit_value_binop(), do_store_flag(), expand_mult(), fold_binary_loc(), get_debug_computation_at(), tree_switch_conversion::switch_conversion::is_exp_index_transform_viable(), simplify_context::simplify_binary_operation_1(), tree_log2(), validate_test_and_branch(), vect_can_peel_nonlinear_iv_p(), vect_recog_divmod_pattern(), and operator_mult::wi_fold().
UNARY_FUNCTION wi::ext | ( | const T & | , |
unsigned int | , | ||
signop | ) |
|
inline |
Extract WIDTH bits from X, starting at BITPOS.
References get_precision(), HOST_BITS_PER_WIDE_INT, precision, shift, WIDE_INT_REF_FOR, and zext_hwi().
Referenced by choose_multiplier(), native_encode_int(), output_constructor_bitfield(), print_hex(), tree_int_cst_sign_bit(), and unextend().
int wi::ffs | ( | const wide_int_ref & | x | ) |
Return the index of the first (lowest) set bit in X, counting from 1. Return 0 if X is 0.
Referenced by fold_const_call_ss(), and simplify_const_unary_operation().
UNARY_PREDICATE wi::fits_shwi_p | ( | const T & | ) |
Referenced by analyze_increments(), cmps(), divmod_internal(), estimated_loop_iterations_int(), find_constructor_constant_at_offset(), get_computation_cost(), get_constraint_for_ptr_offset(), get_estimated_loop_iterations_int(), get_likely_max_loop_iterations_int(), get_max_loop_iterations_int(), get_maxbound(), access_ref::inform_access(), ipa_odr_read_section(), ipa_polymorphic_call_context::ipa_polymorphic_call_context(), likely_max_loop_iterations_int(), max_loop_iterations_int(), most_expensive_mult_to_index(), native_encode_initializer(), poly_int_rtx_p(), split_loop(), poly_int< N, C >::to_shwi(), tree_fits_poly_int64_p(), tree_fits_shwi_p(), and warn_dealloc_offset().
bool wi::fits_to_boolean_p | ( | const T & | x, |
const_tree | type ) |
References known_eq, and TYPE_UNSIGNED.
Referenced by fits_to_tree_p(), and int_fits_type_p().
bool wi::fits_to_tree_p | ( | const T & | x, |
const_tree | type ) |
References fits_to_boolean_p(), known_eq, sext(), TREE_CODE, TYPE_PRECISION, TYPE_UNSIGNED, and zext().
Referenced by create_intersect_range_checks_index(), create_mul_imm_cand(), force_fit_type(), induction_variable_may_overflow_p(), int_fits_type_p(), ipa_range_contains_p(), loop_exits_before_overflow(), max_stmt_executions_tree(), multiple_of_p(), n_of_executions_at_most(), and tree_fold_binomial().
UNARY_PREDICATE wi::fits_uhwi_p | ( | const T & | ) |
int wi::floor_log2 | ( | const wide_int_ref & | x | ) |
Return the base-2 logarithm of X, rounding down. Return -1 if X is 0.
References clz().
Referenced by cfn_clz::fold_range(), cfn_ctz::fold_range(), cfn_ffs::fold_range(), get_debug_computation_at(), tree_floor_log2(), and wi_set_zero_nonzero_bits().
wi::for | ( | ) | [spurious index entry — an artifact of the leaked loop body in the sext(poly_int) declaration above; not a real function]
unsigned int wi::force_to_size | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | xprecision, | ||
unsigned int | precision, | ||
signop | sgn ) |
Convert the number represented by XVAL, XLEN and XPRECISION, which has signedness SGN, to an integer that has PRECISION bits. Store the blocks in VAL and return the number of blocks used. This function can handle both extension (PRECISION > XPRECISION) and truncation (PRECISION < XPRECISION).
References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, i, precision, sext_hwi(), UNSIGNED, and zext_hwi().
Referenced by FIXED_WIDE_INT(), wide_int_storage::from(), and WIDEST_INT().
unsigned int wi::from_array | ( | HOST_WIDE_INT * | , |
const HOST_WIDE_INT * | , | ||
unsigned int | , | ||
unsigned int | , | ||
bool | = true ) |
Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the result for an integer with precision PRECISION. Return the length of VAL (after any canonization).
References canonize(), i, and precision.
Referenced by wide_int_storage::from_array().
wide_int wi::from_buffer | ( | const unsigned char * | buffer, |
unsigned int | buffer_len ) |
Construct a wide int from a buffer of length BUFFER_LEN. BUFFER will be read according to byte endianness and word endianness of the target. Only the lower BUFFER_LEN bytes of the result are set; the remaining high bytes are cleared.
References BLOCKS_NEEDED, canonize(), wide_int_storage::create(), HOST_BITS_PER_WIDE_INT, i, offset, precision, wide_int_storage::set_len(), and wide_int_storage::write_val().
Referenced by expand_DEFERRED_INIT(), and native_interpret_int().
wide_int wi::from_mpz | ( | const_tree | type, |
mpz_t | x, | ||
bool | wrap ) |
Returns X converted to TYPE. If WRAP is true, then out-of-range values of VAL will be wrapped; otherwise, they will be set to the appropriate minimum or maximum TYPE bound.
References BLOCKS_NEEDED, canonize(), CEIL, CHAR_BIT, count, wide_int_storage::create(), free(), get_type_static_bounds(), max(), MIN, min(), wide_int_storage::set_len(), TYPE_PRECISION, WIDE_INT_MAX_INL_ELTS, and wide_int_storage::write_val().
Referenced by number_of_iterations_lt(), number_of_iterations_ne(), and vect_peel_nonlinear_iv_init().
BINARY_FUNCTION wi::gcd | ( | const T1 & | , |
const T2 & | , | ||
signop | = UNSIGNED ) |
Referenced by lower_omp_ordered_clauses(), and mod_inv().
BINARY_PREDICATE wi::ge_p | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by prange::contains_p(), dr_step_indicator(), operator_ge::fold_range(), operator_ge::fold_range(), operator_cast::inside_domain_p(), irange::intersect(), irange::intersect(), intersect_range_with_nonzero_bits(), irange::irange_contains_p(), irange::irange_single_pair_union(), tree_switch_conversion::switch_conversion::is_exp_index_transform_viable(), iv_can_overflow_p(), operator_plus::lhs_op1_relation(), operator_rshift::lhs_op1_relation(), irange::nonnegative_p(), operator_bitwise_or::op1_range(), operator_lshift::op1_range(), operator_rshift::op1_range(), operator_abs::wi_fold(), operator_bitwise_and::wi_fold(), operator_bitwise_or::wi_fold(), operator_bitwise_xor::wi_fold(), wi_includes_zero_p(), and wi_set_zero_nonzero_bits().
BINARY_PREDICATE wi::ges_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by dr_step_indicator(), fold_const_builtin_load_exponent(), and operator_absu::wi_fold().
|
inline |
Return the number of bits that the result of a binary operation can hold when the input operands are X and Y.
References WI_BINARY_RESULT, and y.
Return the number of bits that integer X can hold.
Referenced by bit_value_binop(), wi::int_traits< rtx_mode_t >::decompose(), extract_uhwi(), wi::int_traits< wide_int_storage >::get_binary_precision(), wi::int_traits< wide_int_storage >::get_binary_result(), get_ref_base_and_extent(), handle_builtin_alloca(), irange::intersect(), min_precision(), WI_BINARY_RESULT(), operator_widen_mult_signed::wi_fold(), operator_widen_mult_unsigned::wi_fold(), operator_widen_plus_signed::wi_fold(), operator_widen_plus_unsigned::wi_fold(), WI_UNARY_RESULT(), and WI_UNARY_RESULT().
BINARY_PREDICATE wi::geu_p | ( | const T1 & | , |
const T2 & | ) |
BINARY_PREDICATE wi::gt_p | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by create_possibly_reversed_range(), determine_value_range(), tree_switch_conversion::bit_test_cluster::emit(), cfn_clz::fold_range(), operator_gt::fold_range(), operator_gt::fold_range(), get_array_ctor_element_at_index(), irange::intersect(), prange::intersect(), intersect_range_with_nonzero_bits(), operator_plus::lhs_op1_relation(), mod_inv(), operator_trunc_mod::op1_range(), operator_trunc_mod::op2_range(), optimize_range_tests_to_bit_test(), range_from_loop_direction(), irange::union_(), value_range_with_overflow(), cross_product_operator::wi_cross_product(), operator_abs::wi_fold(), operator_bitwise_and::wi_fold(), operator_bitwise_or::wi_fold(), and operator_div::wi_fold().
BINARY_PREDICATE wi::gts_p | ( | const T1 & | , |
const T2 & | ) |
BINARY_PREDICATE wi::gtu_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by estimated_stmt_executions(), expand_builtin_strnlen(), extract_muldiv_1(), field_byte_offset(), gimple_call_alloc_size(), strlen_pass::handle_builtin_strlen(), iv_can_overflow_p(), likely_max_stmt_executions(), masked_increment(), max_stmt_executions(), may_eliminate_iv(), maybe_diag_stxncpy_trunc(), maybe_flat_loop_profile(), maybe_optimize_mod_cmp(), number_of_iterations_cond(), predict_loops(), preprocess_case_label_vec_for_gimple(), print_decu(), should_interchange_loops(), simplify_conversion_using_ranges(), and operator_absu::wi_fold().
wide_int wi::insert | ( | const wide_int & | x, |
const wide_int & | y, | ||
unsigned int | start, | ||
unsigned int | width ) |
Insert WIDTH bits from Y into X starting at START.
References bit_and_not(), wide_int_storage::from(), gcc_checking_assert, wide_int_storage::get_precision(), lshift(), mask(), precision, shifted_mask(), UNSIGNED, and y.
Referenced by try_combine().
BINARY_PREDICATE wi::le_p | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by irange::contains_p(), prange::contains_p(), determine_value_range(), operator_le::fold_range(), operator_le::fold_range(), get_array_ctor_element_at_index(), operator_cast::inside_domain_p(), irange::intersect(), irange::intersect(), intersect_range_with_nonzero_bits(), irange::irange_contains_p(), irange::irange_single_pair_union(), iv_can_overflow_p(), operator_plus::lhs_op1_relation(), irange::nonpositive_p(), refine_value_range_using_guard(), set_switch_stmt_execution_predicate(), WI_BINARY_RESULT(), operator_abs::wi_fold(), and wi_includes_zero_p().
BINARY_PREDICATE wi::les_p | ( | const T1 & | , |
const T2 & | ) |
BINARY_PREDICATE wi::leu_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by adjust_loop_info_after_peeling(), check_nul_terminated_array(), derive_constant_upper_bound_ops(), determine_block_size(), pcom_worker::determine_roots_comp(), doloop_modify(), doloop_optimize(), tree_switch_conversion::bit_test_cluster::emit(), get_size_range(), is_inv_store_elimination_chain(), known_lower(), maybe_diag_stxncpy_trunc(), strlen_pass::maybe_warn_overflow(), optimize_range_tests_to_bit_test(), remove_exits_and_undefined_stmts(), simplify_const_binary_operation(), try_peel_loop(), try_unroll_loop_completely(), unroll_loop_constant_iterations(), and vect_joust_widened_integer().
SHIFT_FUNCTION wi::lrotate | ( | const T1 & | , |
const T2 & | , | ||
unsigned int | = 0 ) |
Referenced by bit_value_binop(), simplify_const_binary_operation(), and wide_int_binop().
SHIFT_FUNCTION wi::lrshift | ( | const T1 & | , |
const T2 & | ) |
Referenced by bit_value_binop(), tree_switch_conversion::bit_test_cluster::emit(), expand_doubleword_mod(), expand_single_bit_test(), field_byte_offset(), fold_const_aggregate_ref_1(), maybe_optimize_mod_cmp(), native_interpret_aggregate(), optimize_bit_field_compare(), optimize_range_tests_to_bit_test(), real_from_integer(), real_to_integer(), simplify_const_binary_operation(), vect_emulate_mixed_dot_prod(), and vn_walk_cb_data::vn_walk_cb_data().
unsigned int wi::lrshift_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | xprecision, | ||
unsigned int | precision, | ||
unsigned int | shift ) |
Logically right shift XVAL by SHIFT and store the result in VAL. Return the number of blocks in VAL. XVAL has XPRECISION bits and VAL has PRECISION bits.
References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, precision, rshift_large_common(), shift, and zext_hwi().
SHIFT_FUNCTION wi::lshift | ( | const T1 & | , |
const T2 & | ) |
Referenced by bit_value_binop(), bit_value_mult_const(), tree_switch_conversion::bit_test_cluster::emit(), extract_bit_test_mask(), find_constructor_constant_at_offset(), fold_ternary_loc(), get_individual_bits(), insert(), lshift_value(), maybe_optimize_mod_cmp(), native_encode_initializer(), optimize_range_tests_to_bit_test(), reduction_var_overflows_first(), simplify_const_binary_operation(), size_must_be_zero_p(), tree_nonzero_bits(), value_mask_to_min_max(), vect_recog_divmod_pattern(), operator_lshift::wi_op_overflows(), operator_rshift::wi_op_overflows(), and wide_int_binop().
unsigned int wi::lshift_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | precision, | ||
unsigned int | shift ) |
Left shift XVAL by SHIFT and store the result in VAL. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, i, MIN, precision, safe_uhwi(), and shift.
Referenced by WI_UNARY_RESULT().
BINARY_PREDICATE wi::lt_p | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by check_nul_terminated_array(), irange::contains_p(), dr_step_indicator(), tree_switch_conversion::bit_test_cluster::emit(), operator_lt::fold_range(), operator_lt::fold_range(), irange::intersect(), irange::intersect(), irange::irange_contains_p(), operator_plus::lhs_op1_relation(), mod_inv(), operator_lshift::op1_range(), operator_trunc_mod::op1_range(), operator_trunc_mod::op2_range(), optimize_range_tests_to_bit_test(), set_switch_stmt_execution_predicate(), operator_bitwise_and::wi_fold(), operator_bitwise_or::wi_fold(), operator_bitwise_xor::wi_fold(), and wi_set_zero_nonzero_bits().
BINARY_PREDICATE wi::lts_p | ( | const T1 & | , |
const T2 & | ) |
bool wi::lts_p_large | ( | const HOST_WIDE_INT * | op0, |
unsigned int | op0len, | ||
unsigned int | precision, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len ) |
Return true if OP0 < OP1 using signed comparisons.
References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and SIGNED.
BINARY_PREDICATE wi::ltu_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by bit_value_binop(), bound_index(), cache_integer_cst(), compute_doloop_base_on_mode(), decide_unroll_constant_iterations(), decide_unroll_runtime_iterations(), decide_unroll_stupid(), derive_constant_upper_bound_ops(), discover_iteration_bound_by_body_walk(), doloop_simplify_count(), expand_single_bit_test(), extract_bit_test_mask(), get_size_range(), known_lower(), maybe_diag_stxncpy_trunc(), maybe_flat_loop_profile(), maybe_lower_iteration_bound(), maybe_warn_nonstring_arg(), movement_possibility_1(), optimize_range_tests_to_bit_test(), predict_loops(), record_estimate(), record_niter_bound(), reduction_var_overflows_first(), remove_exits_and_undefined_stmts(), remove_redundant_iv_tests(), set_strlen_range(), simplify_const_relational_operation(), tree_fold_binomial(), vect_determine_precisions_from_users(), warn_string_no_nul(), operator_lshift::wi_fold(), and operator_mult::wi_fold().
bool wi::ltu_p_large | ( | const HOST_WIDE_INT * | op0, |
unsigned int | op0len, | ||
unsigned int | precision, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len ) |
Return true if OP0 < OP1 using unsigned comparisons.
References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and UNSIGNED.
unsigned int wi::mask | ( | HOST_WIDE_INT * | val, |
unsigned int | width, | ||
bool | negate, | ||
unsigned int | prec ) |
Fill VAL with a mask where the lower WIDTH bits are ones and the bits above that up to PREC are zeros. The result is inverted if NEGATE is true. Return the number of blocks in VAL.
References HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, last, and shift.
Return an integer of type T in which the low WIDTH bits are set and the other bits are clear, or the inverse if NEGATE_P.
References HOST_BITS_PER_WIDE_INT, mask(), and STATIC_ASSERT.
Return a PRECISION-bit integer in which the low WIDTH bits are set and the other bits are clear, or the inverse if NEGATE_P.
References wide_int_storage::create(), mask(), precision, wide_int_storage::set_len(), and wide_int_storage::write_val().
Referenced by all_ones_mask_p(), bit_value_binop(), build_low_bits_mask(), build_range_check(), change_zero_ext(), clear_padding_type(), clrsb(), compute_distributive_range(), expand_doubleword_mod(), expand_expr_real_2(), expand_smod_pow2(), fold_binary_loc(), get_bitmask_from_range(), get_value_from_alignment(), insert(), ipa_compute_jump_functions_for_edge(), is_widening_mult_rhs_p(), operator_bitwise_and::lhs_op1_relation(), lower_bound_in_type(), mask(), mask(), mask_with_tz(), max_value(), maybe_optimize_mod_cmp(), mul_internal(), number_of_iterations_ne_max(), operator_cast::op1_range(), operator_lshift::op1_range(), operator_rshift::op1_range(), optimize_range_tests_cmp_bitwise(), reduce_to_bit_field_precision(), round_down_for_mask(), round_up_for_mask(), operator_bitwise_and::simple_op1_range_solver(), simplify_context::simplify_binary_operation_1(), simplify_conversion_using_ranges(), store_constructor(), upper_bound_in_type(), vect_recog_bitfield_ref_pattern(), visit_nary_op(), operator_mult::wi_fold(), wi_optimize_and_or(), wi_optimize_signed_bitwise_op(), and wi_set_zero_nonzero_bits().
BINARY_FUNCTION wi::max | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
|
inline |
Produce the largest number that is represented in TYPE. The precision and sign are taken from TYPE.
References max_value(), TYPE_PRECISION, and TYPE_SIGN.
Produce the largest number that is represented in MODE. The precision is taken from MODE and the sign from SGN.
References as_a(), GET_MODE_PRECISION(), and max_value().
Referenced by canon_condition(), compute_doloop_base_on_mode(), find_var_cmp_const(), fold_bit_and_mask(), get_legacy_range(), get_type_static_bounds(), gimple_fold_builtin_strlen(), integer_all_onesp(), intersect_range_with_nonzero_bits(), irange::invert(), prange::invert(), irange_val_max(), iv_can_overflow_p(), simplify_using_ranges::legacy_fold_cond_overflow(), loop_niters_no_overflow(), match_arith_overflow(), max_limit(), max_value(), max_value(), minmax_replacement(), minus_op1_op2_relation_effect(), number_of_iterations_until_wrap(), omp_reduction_init_op(), operator_bitwise_or::op1_range(), operator_cast::op1_range(), operator_trunc_mod::op1_range(), operator_trunc_mod::op2_range(), optimize_range_tests_diff(), optimize_range_tests_xor(), overflow_comparison_p_1(), print_int_bound(), range_positives(), refine_value_range_using_guard(), irange::set(), set_min_and_max_values_for_integral_type(), prange::set_nonnegative(), prange::set_nonzero(), irange::set_varying(), prange::set_varying(), simple_iv_with_niters(), simplify_const_binary_operation(), simplify_const_unary_operation(), value_range_with_overflow(), irange::varying_compatible_p(), vect_determine_precisions_from_range(), vect_gen_vector_loop_niters(), operator_abs::wi_fold(), operator_mult::wi_fold(), operator_div::wi_op_overflows(), operator_mult::wi_op_overflows(), and wide_int_to_tree_1().
wide_int wi::max_value | ( | never_used1 * | ) |
wide_int wi::max_value | ( | never_used2 * | ) |
Return the largest SGNed number that is representable in PRECISION bits. TODO: There is still code from the double_int era that tries to make up for the fact that double_ints could not represent the min and max values of all types. This code should be removed because the min and max values can always be represented in wide_ints and int-csts.
References gcc_checking_assert, mask(), precision, shwi(), and UNSIGNED.
BINARY_FUNCTION wi::min | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by determine_value_range(), from_mpz(), prange::intersect(), simplify_const_unary_operation(), prange::union_(), vect_determine_precisions_from_range(), cross_product_operator::wi_cross_product(), operator_bitwise_and::wi_fold(), operator_min::wi_fold(), operator_trunc_mod::wi_fold(), and wide_int_binop().
Return the minimum precision needed to store X with sign SGN.
References clrsb(), clz(), get_precision(), and SIGNED.
Referenced by analyze_subscript_affine_affine(), arith_overflowed_p(), bitint_min_cst_precision(), estimate_numbers_of_iterations(), exact_int_to_float_conversion_p(), get_min_precision(), get_unwidened(), is_nonwrapping_integer_induction(), iv_can_overflow_p(), record_estimate(), record_niter_bound(), supportable_indirect_convert_operation(), vect_determine_precisions_from_range(), vect_joust_widened_integer(), vect_min_prec_for_max_niters(), vect_rgroup_iv_might_wrap_p(), vect_truncate_gather_scatter_offset(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vectorizable_conversion().
|
inline |
Produce the smallest number that is represented in TYPE. The precision and sign are taken from TYPE.
References min_value(), TYPE_PRECISION, and TYPE_SIGN.
Produce the smallest number that is represented in MODE. The precision is taken from MODE and the sign from SGN.
References as_a(), GET_MODE_PRECISION(), and min_value().
Referenced by canon_condition(), expand_addsub_overflow(), expand_doubleword_mod(), expand_mul_overflow(), find_var_cmp_const(), fold_plusminus_mult_expr(), get_legacy_range(), get_type_static_bounds(), intersect_range_with_nonzero_bits(), irange::invert(), irange_val_min(), iv_can_overflow_p(), min_limit(), min_value(), min_value(), minmax_replacement(), minus_op1_op2_relation_effect(), number_of_iterations_until_wrap(), omp_reduction_init_op(), operator_abs::op1_range(), operator_cast::op1_range(), operator_trunc_mod::op1_range(), operator_trunc_mod::op2_range(), optimize_range_tests_diff(), optimize_range_tests_xor(), print_int_bound(), range_negatives(), refine_value_range_using_guard(), irange::set(), set_min_and_max_values_for_integral_type(), irange::set_varying(), simple_iv_with_niters(), simplify_const_binary_operation(), simplify_const_unary_operation(), value_range_with_overflow(), irange::varying_compatible_p(), vect_determine_precisions_from_range(), operator_abs::wi_fold(), operator_bitwise_and::wi_fold(), and operator_mult::wi_op_overflows().
wide_int wi::min_value | ( | never_used1 * | ) |
wide_int wi::min_value | ( | never_used2 * | ) |
Return the smallest SGNed number that is representable in PRECISION bits.
References gcc_checking_assert, precision, set_bit_in_zero(), uhwi(), and UNSIGNED.
|
inline |
Return a wide int of -1 with precision PRECISION.
References precision, and shwi().
Referenced by assert_loop_rolls_lt(), bound_difference_of_offsetted_base(), bounds_add(), operator_bitwise_not::fold_range(), irange_bitmask::intersect(), minus_op1_op2_relation_effect(), number_of_iterations_ne_max(), optimize_spaceship(), range_negatives(), irange::set_nonzero(), irange_bitmask::set_unknown(), simplify_using_ranges::simplify(), simplify_context::simplify_binary_operation_1(), vr_set_zero_nonzero_bits(), operator_div::wi_fold(), and wi_set_zero_nonzero_bits().
BINARY_FUNCTION wi::mod_ceil | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by wide_int_binop().
BINARY_FUNCTION wi::mod_floor | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by wide_int_binop().
Compute the modular multiplicative inverse of A modulo B using extended Euclid's algorithm. Assumes A and B are coprime, and that A and B have the same precision.
References a, b, divmod_trunc(), eq_p(), wide_int_storage::from(), gcc_checking_assert, gcd(), gt_p(), lt_p(), mul(), SIGNED, sub(), and UNSIGNED.
Referenced by expand_doubleword_divmod(), and maybe_optimize_mod_cmp().
BINARY_FUNCTION wi::mod_round | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
Referenced by wide_int_binop().
BINARY_FUNCTION wi::mod_trunc | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | = 0 ) |
BINARY_FUNCTION wi::mul | ( | const T1 & | , |
const T2 & | ) |
Referenced by arith_overflowed_p(), check_for_binary_op_overflow(), chrec_fold_multiply(), expr_to_aff_combination(), extract_muldiv_1(), fold_div_compare(), induction_variable_may_overflow_p(), is_nonwrapping_integer_induction(), iv_can_overflow_p(), maybe_canonicalize_mem_ref_addr(), mod_inv(), operator_mult::overflow_free_p(), poly_int_binop(), should_interchange_loops(), simplify_const_binary_operation(), vect_create_nonlinear_iv_step(), vect_truncate_gather_scatter_offset(), vectorizable_simd_clone_call(), operator_mult::wi_op_overflows(), and wide_int_binop().
BINARY_FUNCTION wi::mul | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | ) |
BINARY_FUNCTION wi::mul_high | ( | const T1 & | , |
const T2 & | , | ||
signop | ) |
Referenced by simplify_const_binary_operation(), and wide_int_binop().
unsigned int wi::mul_internal | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op1val, | ||
unsigned int | op1len, | ||
const HOST_WIDE_INT * | op2val, | ||
unsigned int | op2len, | ||
unsigned int | prec, | ||
signop | sgn, | ||
wi::overflow_type * | overflow, | ||
bool | high ) |
Multiply Op1 by Op2. If HIGH is set, only the upper half of the result is returned. If HIGH is not set, throw away the upper half after the check is made to see if it overflows. Unfortunately there is no better way to check for overflow than to do this. If OVERFLOW is nonnull, record in *OVERFLOW whether the result overflowed. SGN controls the signedness and is used to check overflow or if HIGH is set. NOTE: Overflow type for signed overflow is not yet implemented.
References b, BLOCKS_NEEDED, CHAR_BIT, end(), fits_uhwi_p(), HALF_INT_MASK, HOST_BITS_PER_HALF_WIDE_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1, i, mask(), MIN, neg_p(), OVF_NONE, OVF_OVERFLOW, OVF_UNKNOWN, r, sext_hwi(), shift, SIGN_MASK, SIGNED, generic_wide_int< storage >::to_shwi(), generic_wide_int< storage >::to_uhwi(), generic_wide_int< storage >::ulow(), UNLIKELY, UNSIGNED, wi_pack(), wi_unpack(), and WIDE_INT_MAX_INL_PRECISION.
|
inline |
Return true if X is a multiple of Y. Treat X and Y as having the signedness given by SGN.
References mod_trunc(), and y.
Referenced by create_add_imm_cand(), div_if_zero_remainder(), extract_muldiv_1(), lower_omp_ordered_clauses(), multiple_of_p(), omp_apply_tile(), pcom_worker::suitable_component_p(), and vect_truncate_gather_scatter_offset().
|
inline |
Return true if X is a multiple of Y, storing X / Y in *RES if so. Treat X and Y as having the signedness given by SGN.
References divmod_trunc(), WI_BINARY_RESULT, and y.
BINARY_PREDICATE wi::ne_p | ( | const T1 & | , |
const T2 & | ) |
Referenced by canon_condition(), expand_doubleword_mod(), expr_not_equal_to(), fold_const_call_ss(), fold_const_call_sss(), fold_convert_const(), operator_not_equal::fold_range(), operator_not_equal::fold_range(), fuse_memset_builtins(), handle_builtin_alloca(), ipcp_update_vr(), ipcp_bits_lattice::known_nonzero_p(), match_arith_overflow(), set_strlen_range(), simplify_const_unary_operation(), spaceship_replacement(), value_range_with_overflow(), value_sat_pred_p(), operator_bitwise_xor::wi_fold(), and wi_optimize_and_or().
UNARY_FUNCTION wi::neg | ( | const T & | ) |
Referenced by expand_divmod(), extract_bit_test_mask(), fold_abs_const(), fold_negate_const(), get_debug_computation_at(), iv_can_overflow_p(), omp_adjust_for_condition(), operator_trunc_mod::op2_range(), plus_minus_ranges(), simplify_context::simplify_binary_operation_1(), simplify_builtin_call(), and simplify_const_unary_operation().
UNARY_FUNCTION wi::neg | ( | const T & | , |
overflow_type * | ) |
UNARY_PREDICATE wi::neg_p | ( | const T & | , |
signop | = SIGNED ) |
Referenced by add_elt_to_tree(), all_phi_incrs_profitable_1(), arith_overflowed_p(), bit_value_binop(), bit_value_unop(), cache_integer_cst(), cand_abs_increment(), cmps(), create_add_on_incoming_edge(), derive_constant_upper_bound_ops(), divmod_internal(), dump_generic_node(), expand_doubleword_mod(), find_constructor_constant_at_offset(), fold_abs_const(), fold_plusminus_mult_expr(), operator_lt::fold_range(), operator_lt::fold_range(), get_debug_computation_at(), get_int_cst_ext_nunits(), get_min_precision(), tree_switch_conversion::cluster::get_range(), get_range_pos_neg(), has_dominating_ubsan_ptr_check(), insert_into_preds_of_block(), int_fits_type_p(), iv_can_overflow_p(), lower_omp_ordered_clauses(), maybe_diag_stxncpy_trunc(), maybe_optimize_ubsan_ptr_ifn(), mul_internal(), optimize_range_tests_var_bound(), plus_minus_ranges(), predict_iv_comparison(), print_dec_buf_size(), print_decs(), print_decu(), print_hex_buf_size(), real_from_integer(), record_increment(), record_ubsan_ptr_check_stmt(), replace_mult_candidate(), operator_bitwise_and::simple_op1_range_solver(), simplify_const_binary_operation(), simplify_immed_subreg(), to_mpz(), tree_int_cst_sgn(), tree_nonzero_bits(), ubsan_expand_ptr_ifn(), value_mask_to_min_max(), vect_determine_precisions_from_range(), vect_recog_divmod_pattern(), operator_div::wi_fold(), operator_trunc_mod::wi_fold(), operator_lshift::wi_op_overflows(), operator_rshift::wi_op_overflows(), and wide_int_binop().
|
inline |
Return a wide int of 1 with precision PRECISION.
References precision, and shwi().
Referenced by irange_bitmask::adjust_range(), assume_query::assume_query(), tree_switch_conversion::bit_test_cluster::emit(), fold_const_call_ss(), cfn_constant_float_p::fold_range(), cfn_constant_p::fold_range(), cfn_isfinite::fold_range(), cfn_isinf::fold_range(), cfn_isnormal::fold_range(), if_chain::is_beneficial(), masked_increment(), minus_op1_op2_relation_effect(), multiple_of_p(), optimize_spaceship(), range_true(), range_true_and_false(), irange::set_nonzero(), prange::set_nonzero(), simplify_context::simplify_binary_operation_1(), simplify_using_ranges::simplify_truth_ops_using_ranges(), size_must_be_zero_p(), vect_gen_vector_loop_niters(), and operator_div::wi_fold().
bool wi::only_sign_bit_p | ( | const wide_int_ref & | x | ) |
Return true if X represents the minimum signed value.
References only_sign_bit_p().
bool wi::only_sign_bit_p | ( | const wide_int_ref & | x, |
unsigned int | precision ) |
Return true if sign-extending X to have precision PRECISION would give the minimum signed value at that precision.
References ctz(), and precision.
Referenced by divmod_internal(), may_negate_without_overflow_p(), only_sign_bit_p(), sign_bit_p(), and simplify_const_unary_operation().
unsigned int wi::or_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Set VAL to OP0 | OP1. Return the number of blocks used.
References canonize(), MAX, and top_bit_of().
unsigned int wi::or_not_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Set VAL to OP0 | ~OP1. Return the number of blocks used.
References canonize(), MAX, and top_bit_of().
|
inline |
Return 0 if the number of 1s in X is even and 1 if the number of 1s is odd.
References popcount().
Referenced by fold_const_call_ss(), and simplify_const_unary_operation().
|
inline |
Poly version of wi::sext, with the same interface.
Poly version of wi::zext, with the same interface.
int wi::popcount | ( | const wide_int_ref & | x | ) |
Compute the population count of X.
References count, HOST_BITS_PER_WIDE_INT, i, popcount_hwi(), generic_wide_int< storage >::sign_mask(), and generic_wide_int< storage >::uhigh().
Referenced by bit_value_binop(), bitmask_inv_cst_vector_p(), compute_trims(), do_store_flag(), expand_divmod(), extract_bit_test_mask(), fold_binary_loc(), fold_const_call_ss(), cfn_popcount::fold_range(), integer_pow2p(), negate_expr_p(), parity(), irange::set_range_from_bitmask(), and simplify_const_unary_operation().
Return VAL if VAL has no bits set outside MASK. Otherwise round VAL down to the previous value that has no bits set outside MASK. This rounding wraps for signed values if VAL is negative and the top bit of MASK is clear. For example, round_down_for_mask (6, 0xf1) would give 1 and round_down_for_mask (24, 0xf1) would give 17.
References bit_and_not(), clz(), wide_int_storage::get_precision(), mask(), and precision.
Referenced by intersect_range_with_nonzero_bits().
Return VAL if VAL has no bits set outside MASK. Otherwise round VAL up to the next value that has no bits set outside MASK. The rounding wraps if there are no suitable values greater than VAL. For example, round_up_for_mask (6, 0xf1) would give 16 and round_up_for_mask (24, 0xf1) would give 32.
References bit_and_not(), clz(), wide_int_storage::get_precision(), mask(), and precision.
Referenced by intersect_range_with_nonzero_bits().
SHIFT_FUNCTION wi::rrotate | ( | const T1 & | , |
const T2 & | , | ||
unsigned int | = 0 ) |
Referenced by bit_value_binop(), simplify_const_binary_operation(), and wide_int_binop().
SHIFT_FUNCTION wi::rshift | ( | const T1 & | , |
const T2 & | , | ||
signop | sgn ) |
BINARY_FUNCTION wi::sdiv_floor | ( | const T1 & | , |
const T2 & | ) |
Referenced by analyze_subscript_affine_affine().
BINARY_FUNCTION wi::sdiv_trunc | ( | const T1 & | , |
const T2 & | ) |
Referenced by array_bounds_checker::check_addr_expr(), and multiple_of_p().
UNARY_FUNCTION wi::set_bit | ( | const T & | , |
unsigned int | ) |
Referenced by analyze_and_compute_bitwise_induction_effect(), expand_smod_pow2(), and operator_lshift::op1_range().
Return an integer of type T in which bit BIT is set and all the others are clear.
References shifted_mask().
|
inline |
Return a PRECISION-bit integer in which bit BIT is set and all the others are clear.
References precision, and shifted_mask().
Referenced by choose_multiplier(), clz_loc_descriptor(), doloop_modify(), expand_absneg_bit(), expand_builtin_signbit(), expand_copysign_absneg(), expand_copysign_bit(), expand_vector_condition(), operator_lshift::fold_range(), min_value(), operator_cast::op1_range(), prefer_and_bit_test(), real_to_integer(), sign_mask_for(), operator_bitwise_and::simple_op1_range_solver(), simplify_context::simplify_binary_operation_1(), visit_nary_op(), rt_bb_visited::vwordidx(), operator_bitwise_and::wi_fold(), and operator_lshift::wi_fold().
unsigned int wi::set_bit_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | precision, | ||
unsigned int | bit ) |
Copy the number represented by XVAL and XLEN into VAL, setting bit BIT. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References canonize(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, precision, and safe_uhwi().
UNARY_FUNCTION wi::sext | ( | const T & | , |
unsigned int | ) |
Referenced by tree_switch_conversion::switch_conversion::array_value_type(), assert_loop_rolls_lt(), bit_value_assume_aligned(), bit_value_binop(), bit_value_binop(), bit_value_unop(), bit_value_unop(), ccp_lattice_meet(), derive_constant_upper_bound_ops(), evaluate_stmt(), extract_bit_test_mask(), find_constructor_constant_at_offset(), fits_to_tree_p(), fold_comparison(), fold_const_aggregate_ref_1(), get_addr_base_and_unit_offset_1(), get_inner_reference(), get_range_pos_neg(), get_ref_base_and_extent(), get_value_from_alignment(), has_dominating_ubsan_ptr_check(), loc_list_from_tree_1(), maybe_optimize_ubsan_ptr_ifn(), ipcp_bits_lattice::meet_with(), ipcp_bits_lattice::meet_with(), ipcp_bits_lattice::meet_with_1(), most_expensive_mult_to_index(), native_encode_initializer(), output_constructor_array_range(), output_constructor_regular_field(), prepare_iteration_over_array_elts(), split_address_to_core_and_offset(), wide_int_ext_for_comb(), and wide_int_ext_for_comb().
unsigned int wi::sext_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | precision, | ||
unsigned int | offset ) |
Sign-extend the number represented by XVAL and XLEN into VAL, starting at OFFSET. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References canonize(), HOST_BITS_PER_WIDE_INT, i, offset, precision, and sext_hwi().
unsigned int wi::shifted_mask | ( | HOST_WIDE_INT * | val, |
unsigned int | start, | ||
unsigned int | width, | ||
bool | negate, | ||
unsigned int | prec ) |
Fill VAL with a mask where the lower START bits are zeros, the next WIDTH bits are ones, and the bits above that up to PREC are zeros. The result is inverted if NEGATE is true. Return the number of blocks in VAL.
References end(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, and shift.
|
inline |
Return an integer of type T in which the low START bits are clear, the next WIDTH bits are set, and the other bits are clear, or the inverse if NEGATE_P.
References HOST_BITS_PER_WIDE_INT, shifted_mask(), and STATIC_ASSERT.
|
inline |
Return a PRECISION-bit integer in which the low START bits are clear, the next WIDTH bits are set, and the other bits are clear, or the inverse if NEGATE_P.
References wide_int_storage::create(), precision, wide_int_storage::set_len(), shifted_mask(), and wide_int_storage::write_val().
Referenced by change_zero_ext(), expand_arith_overflow_result_store(), expand_doubleword_divmod(), expand_doubleword_mod(), expand_ubsan_result_store(), extract_bit_test_mask(), fold_ternary_loc(), insert(), mask_rtx(), maybe_optimize_mod_cmp(), maybe_optimize_pow2p_mod_cmp(), omp_context_compute_score(), set_bit_in_zero(), set_bit_in_zero(), shifted_mask(), shifted_mask(), simplify_conversion_using_ranges(), spaceship_replacement(), vect_recog_bit_insert_pattern(), and vect_recog_bitfield_ref_pattern().
|
inline |
Poly version of wi::shwi, with the same interface.
References a, i, N, POLY_SET_COEFF, precision, r, and shwi().
Referenced by alloca_call_type(), build_int_cst(), build_int_cst_type(), expand_doubleword_shift(), expand_expr_real_2(), expand_subword_shift(), fold_const_call_ss(), fold_const_call_ss(), fold_const_call_sss(), cfn_clrsb::fold_range(), cfn_clz::fold_range(), cfn_ctz::fold_range(), cfn_ffs::fold_range(), cfn_goacc_dim::fold_range(), cfn_popcount::fold_range(), cfn_toupper_tolower::get_letter_range(), get_nonzero_bits(), get_shift_range(), get_stridx(), gimple_fold_builtin_strlen(), ipcp_update_vr(), max_value(), maybe_diag_stxncpy_trunc(), minus_one(), one(), pointer_may_wrap_p(), shwi(), shwi(), simplify_const_unary_operation(), tree_nonzero_bits(), two(), vect_recog_divmod_pattern(), and zero().
|
inline |
References as_a(), GET_MODE_PRECISION(), and shwi().
|
inline |
Return -1 if the top bit of X is set and 0 if the top bit is clear.
References WIDE_INT_REF_FOR.
Referenced by dw_wide_int::elt(), and pointer_plus_operator::fold_range().
BINARY_FUNCTION wi::smax | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::smin | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::smod_trunc | ( | const T1 & | , |
const T2 & | ) |
Referenced by expand_doubleword_mod().
BINARY_FUNCTION wi::smul | ( | const T1 & | , |
const T2 & | , | ||
overflow_type * | ) |
Referenced by tree_fold_binomial().
BINARY_FUNCTION wi::sub | ( | const T1 & | , |
const T2 & | ) |
Referenced by add_one(), analyze_subscript_affine_affine(), arith_overflowed_p(), build_gt(), build_lt(), canon_condition(), canonicalize_comparison(), check_for_binary_op_overflow(), const_vector_int_elt(), expr_to_aff_combination(), operator_cast::fold_pair(), get_range_strlen_tree(), get_up_bounds_for_array_ref(), prange::invert(), maybe_canonicalize_mem_ref_addr(), minmax_replacement(), mod_inv(), operator_minus::op1_op2_relation_effect(), operator_minus::overflow_free_p(), plus_minus_ranges(), poly_int_binop(), predict_iv_comparison(), irange::set(), simple_iv_with_niters(), simplify_const_binary_operation(), subtract_one(), operator_minus::wi_fold(), range_operator::wi_fold_in_parts(), range_operator::wi_fold_in_parts_equiv(), and wide_int_binop().
BINARY_FUNCTION wi::sub | ( | const T1 & | , |
const T2 & | , | ||
signop | , | ||
overflow_type * | ) |
unsigned int wi::sub_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec, | ||
signop | sgn, | ||
wi::overflow_type * | overflow ) |
Set VAL to OP0 - OP1. If OVERFLOW is nonnull, record in *OVERFLOW whether the result overflows when OP0 and OP1 are treated as having signedness SGN. Return the number of blocks in VAL.
References canonize(), HOST_BITS_PER_WIDE_INT, i, MAX, OVF_NONE, OVF_OVERFLOW, OVF_UNDERFLOW, shift, SIGNED, top_bit_of(), and UNSIGNED.
Referenced by divmod_internal().
void wi::to_mpz | ( | const wide_int_ref & | x, |
mpz_t | result, | ||
signop | sgn ) |
Sets RESULT from X; the sign is taken according to SGN.
References CEIL, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, and neg_p().
Referenced by assert_loop_rolls_lt(), bound_difference_of_offsetted_base(), bounds_add(), determine_value_range(), get_type_static_bounds(), number_of_iterations_lt(), number_of_iterations_lt_to_ne(), number_of_iterations_ne_max(), refine_value_range_using_guard(), split_to_var_and_offset(), and vect_peel_nonlinear_iv_init().
|
inline |
Refer to INTEGER_CST T as though it were an offset_int. This function is an optimisation of wi::to_widest for cases in which T is known to be a bit or byte count in the range (-(2 ^ (N + BITS_PER_UNIT)), 2 ^ (N + BITS_PER_UNIT)), where N is the target's address size in bits. This is the right choice when operating on bit or byte counts as untyped numbers rather than M-bit values. The wi::to_widest comments about addition, subtraction and multiplication apply here: sequences of 1 << 31 additions and subtractions do not induce overflow, but multiplying the largest sizes might. Again, if it is useful for readability reasons to reduce the number of calls, it is more efficient to use: wi::tree_to_offset_ref wt = wi::to_offset (t); instead of: offset_int wt = wi::to_offset (t).
Referenced by access_ref::add_max_offset(), access_ref::add_offset(), adjust_offset_for_component_ref(), alloca_call_type(), ao_ref_init_from_vn_reference(), array_ref_flexible_size_p(), array_size_for_constructor(), backtrace_base_for_ref(), array_bounds_checker::check_addr_expr(), array_bounds_checker::check_mem_ref(), component_ref_sam_type(), compute_objsize_r(), copy_reference_ops_from_ref(), create_add_imm_cand(), create_add_on_incoming_edge(), create_add_ssa_cand(), create_component_ref_by_pieces_1(), create_intersect_range_checks_index(), create_mul_imm_cand(), create_mul_ssa_cand(), access_ref::dump(), pass_walloca::execute(), field_byte_offset(), find_constructor_constant_at_offset(), fold(), fold_array_ctor_reference(), fold_nonarray_ctor_reference(), get_addr_base_and_unit_offset_1(), get_array_ctor_element_at_index(), get_maxbound(), get_offset_range(), get_range_strlen_tree(), access_ref::get_ref(), get_ref_base_and_extent(), get_size_range(), handle_array_ref(), handle_component_ref(), handle_decl(), handle_mem_ref(), handle_ssa_name(), has_dominating_ubsan_ptr_check(), int_bit_position(), loc_list_from_tree_1(), maybe_optimize_ubsan_ptr_ifn(), maybe_rewrite_mem_ref_base(), maybe_warn_for_bound(), maybe_warn_nonstring_arg(), strlen_pass::maybe_warn_overflow(), access_ref::merge_ref(), native_encode_initializer(), non_rewritable_mem_ref_base(), access_ref::offset_bounded(), access_ref::offset_in_range(), offset_int_type_size_in_bits(), output_constructor_array_range(), output_constructor_regular_field(), prepare_iteration_over_array_elts(), replace_conditional_candidate(), replace_uncond_cands_and_profitable_phis(), replace_unconditional_candidate(), restructure_reference(), access_data::set_bound(), set_component_ref_size(), access_ref::set_max_size_range(), access_ref::size_remaining(), slsr_process_add(), pcom_worker::suitable_component_p(), and valueize_refs_1().
|
inline |
Access INTEGER_CST or POLY_INT_CST tree T as if it were a poly_offset_int. See wi::to_offset for more details.
References poly_int< N, C >::coeffs, i, NUM_POLY_INT_COEFFS, POLY_INT_CST_COEFF, and POLY_INT_CST_P.
Referenced by adjust_offset_for_component_ref(), ao_ref_from_mem(), ao_ref_init_from_vn_reference(), const_binop(), copy_reference_ops_from_ref(), ipa_icf::sem_variable::equals(), execute_update_addresses_taken(), expand_expr_real_1(), extract_base_bit_offset(), fold_comparison(), fold_const_aggregate_ref_1(), fold_indirect_ref_1(), gather_mem_refs_stmt(), get_addr_base_and_unit_offset_1(), get_inner_reference(), get_ref_base_and_extent(), maybe_rewrite_mem_ref_base(), non_rewritable_lvalue_p(), non_rewritable_mem_ref_base(), optimize_memcpy_to_memset(), same_addr_size_stores_p(), split_address_to_core_and_offset(), store_field(), tree_could_trap_p(), valueize_refs_1(), vect_compile_time_alias(), vect_compute_data_ref_alignment(), vect_dr_aligned_if_related_peeled_dr_is(), verify_gimple_assign_ternary(), vn_reference_lookup_3(), vn_reference_maybe_forwprop_address(), and widen_memory_access().
|
inline |
Access X (which satisfies poly_int_rtx_p) as a poly_wide_int. MODE is the mode of X.
References CONST_POLY_INT_P, and const_poly_int_value().
Referenced by addr_for_mem_ref(), rtx_vector_builder::apply_step(), can_min_p(), const_vector_from_tree(), create_intersect_range_checks_index(), drop_tree_overflow(), expand_call_mem_ref(), fold_negate_const(), get_nonzero_bits(), maybe_canonicalize_mem_ref_addr(), mem_ref_offset(), neg_poly_int_rtx(), pointer_may_wrap_p(), poly_int_binop(), fold_using_range::range_of_address(), reduce_to_bit_field_precision(), simplify_const_binary_operation(), rtx_vector_builder::step(), vect_compile_time_alias(), vector_cst_int_elt(), vn_reference_fold_indirect(), vn_reference_lookup_3(), and vn_reference_maybe_forwprop_address().
|
inline |
Access INTEGER_CST or POLY_INT_CST tree T as if it were a poly_wide_int. See wi::to_wide for more details.
References POLY_INT_CST_P, and poly_int_cst_value().
|
inline |
Access INTEGER_CST or POLY_INT_CST tree T as if it were a poly_widest_int. See wi::to_widest for more details.
References poly_int< N, C >::coeffs, i, NUM_POLY_INT_COEFFS, POLY_INT_CST_COEFF, and POLY_INT_CST_P.
Referenced by pcom_worker::aff_combination_dr_offset(), can_min_p(), compare_values_warnv(), data_ref_compare_tree(), decode_addr_const(), dr_may_alias_p(), fold_ctor_reference(), generate_memcpy_builtin(), gimple_fold_partial_load_store_mem_ref(), multiple_of_p(), and tree_to_aff_combination().
|
inline |
Refer to INTEGER_CST T as though it were a wide_int. In contrast to the approximation of infinite-precision numbers given by wi::to_widest and wi::to_offset, this function treats T as a signless collection of N bits, where N is the precision of T's type. As with machine registers, signedness is determined by the operation rather than the operands; for example, there is a distinction between signed and unsigned division. This is the right choice when operating on values with the same type using normal modulo arithmetic. The overflow-checking forms of things like wi::add check whether the result can be represented in T's type. Calling this function should have no overhead in release builds, so it is OK to call it several times for the same tree. If it is useful for readability reasons to reduce the number of calls, it is more efficient to use: wi::tree_to_wide_ref wt = wi::to_wide (t); instead of: wide_int wt = wi::to_wide (t).
References TREE_INT_CST_ELT, TREE_INT_CST_NUNITS, TREE_TYPE, and TYPE_PRECISION.
Referenced by addr_for_mem_ref(), adjust_imagpart_expr(), adjust_realpart_expr(), all_ones_mask_p(), alloca_call_type(), alloca_type_and_limit::alloca_type_and_limit(), tree_vector_builder::apply_step(), arith_cast_equal_p(), tree_switch_conversion::switch_conversion::array_value_type(), bitint_min_cst_precision(), bitmask_inv_cst_vector_p(), bitwise_equal_p(), bitwise_inverted_equal_p(), build_printable_array_type(), build_vec_series(), cache_integer_cst(), gimple_outgoing_range::calc_switch_ranges(), ccp_finalize(), check_nul_terminated_array(), chrec_fold_multiply(), tree_switch_conversion::switch_conversion::collect(), compare_values_warnv(), compute_avail(), compute_distributive_range(), cond_removal_in_builtin_zero_pattern(), const_binop(), tree_switch_conversion::switch_conversion::contains_linear_function_p(), irange::contains_p(), prange::contains_p(), copy_tree_body_r(), create_intersect_range_checks_index(), cgraph_node::create_thunk(), dequeue_and_dump(), determine_block_size(), do_store_flag(), dr_analyze_indices(), dr_step_indicator(), dump_generic_node(), tree_switch_conversion::bit_test_cluster::emit(), evaluate_stmt(), tree_switch_conversion::switch_conversion::exp_index_transform(), expand_builtin_strnlen(), expand_case(), expand_expr_real_1(), expand_omp_target(), expand_single_bit_test(), expr_not_equal_to(), expr_to_aff_combination(), extract_muldiv_1(), find_case_label_range(), find_unswitching_predicates_for_bb(), fold_abs_const(), fold_binary_loc(), fold_bit_and_mask(), fold_const_aggregate_ref_1(), fold_const_call_1(), fold_const_call_1(), fold_convert_const(), fold_convert_const_int_from_int(), fold_convert_const_int_from_real(), fold_div_compare(), fold_negate_expr_1(), fold_not_const(), fold_plusminus_mult_expr(), fold_ternary_loc(), fold_unary_loc(), fuse_memset_builtins(), get_array_ctor_element_at_index(), get_constraint_for_ptr_offset(), get_cst_init_from_scev(), get_min_precision(), get_nonzero_bits(), get_range(), 
tree_switch_conversion::cluster::get_range(), get_range_pos_neg(), get_range_strlen_tree(), get_size_range(), get_stridx(), get_type_static_bounds(), get_unwidened(), get_up_bounds_for_array_ref(), gimple_bitwise_equal_p(), gimple_bitwise_inverted_equal_p(), gimple_call_alloc_size(), gimple_fold_builtin_strlen(), gimple_fold_indirect_ref(), gimple_parm_array_size(), go_output_typedef(), group_case_labels_stmt(), handle_array_ref(), strlen_pass::handle_builtin_memset(), strlen_pass::handle_builtin_strlen(), operand_compare::hash_operand(), int_fits_type_p(), integer_all_onesp(), integer_nonzerop(), integer_pow2p(), integer_zerop(), ipa_odr_summary_write(), ipa_polymorphic_call_context::ipa_polymorphic_call_context(), ipa_range_contains_p(), if_chain::is_beneficial(), tree_switch_conversion::switch_conversion::is_exp_index_transform_viable(), is_inv_store_elimination_chain(), is_widening_mult_rhs_p(), layout_type(), simplify_using_ranges::legacy_fold_cond_overflow(), lower_coro_builtin(), lower_omp_ordered_clauses(), maskable_range_p(), match_arith_overflow(), may_negate_without_overflow_p(), maybe_diag_stxncpy_trunc(), maybe_optimize_mod_cmp(), maybe_optimize_pow2p_mod_cmp(), maybe_set_nonzero_bits(), maybe_set_strlen_range(), minmax_replacement(), ipa_param_adjustments::modify_call(), movement_possibility_1(), multiple_of_p(), native_encode_initializer(), native_interpret_aggregate(), negate_expr_p(), num_ending_zeros(), number_of_iterations_lt(), number_of_iterations_lt_to_ne(), number_of_iterations_ne_max(), number_of_iterations_until_wrap(), omp_apply_tile(), omp_context_compute_score(), optimize_bit_field_compare(), optimize_range_tests_cmp_bitwise(), optimize_range_tests_diff(), optimize_range_tests_to_bit_test(), optimize_range_tests_var_bound(), optimize_range_tests_xor(), optimize_spaceship(), output_constant(), overflow_comparison_p_1(), poly_int_binop(), preprocess_case_label_vec_for_gimple(), print_node(), print_node_brief(), phi_analyzer::process_phi(), 
real_value_from_int_cst(), record_nonwrapping_iv(), refine_value_range_using_guard(), remap_gimple_op_r(), round_up_loc(), scan_omp_1_op(), scev_var_range_cant_overflow(), irange::set(), prange::set(), irange::set_nonnegative(), set_strlen_range(), set_switch_stmt_execution_predicate(), sign_bit_p(), simple_iv_with_niters(), simplify_builtin_call(), spaceship_replacement(), split_at_bb_p(), split_constant_offset(), split_constant_offset_1(), split_to_var_and_offset(), tree_vector_builder::step(), to_wide(), tree_ctz(), tree_fits_poly_int64_p(), tree_floor_log2(), tree_int_cst_sgn(), tree_int_cst_sign_bit(), tree_log2(), tree_nonzero_bits(), ubsan_expand_ptr_ifn(), unextend(), unswitch_predicate::unswitch_predicate(), vect_can_peel_nonlinear_iv_p(), vect_create_nonlinear_iv_step(), vect_determine_precisions_from_range(), vect_do_peeling(), vect_emulate_mixed_dot_prod(), vect_get_range_info(), vect_peel_nonlinear_iv_init(), vect_recog_divmod_pattern(), vectorizable_load(), vn_walk_cb_data::vn_walk_cb_data(), warn_string_no_nul(), wide_int_to_tree_1(), and zero_one_minusone().
|
inline |
Convert INTEGER_CST T to a wide_int of precision PREC, extending or truncating as necessary. When extending, use sign extension if T's type is signed and zero extension if T's type is unsigned.
References wide_int_storage::from(), to_wide(), TREE_TYPE, and TYPE_SIGN.
|
inline |
Refer to INTEGER_CST T as though it were a widest_int. This function gives T's actual numerical value, influenced by the signedness of its type. For example, a signed byte with just the top bit set would be -128 while an unsigned byte with the same bit pattern would be 128. This is the right choice when operating on groups of INTEGER_CSTs that might have different signedness or precision. It is also the right choice in code that specifically needs an approximation of infinite-precision arithmetic instead of normal modulo arithmetic. The approximation of infinite precision is good enough for realistic numbers of additions and subtractions of INTEGER_CSTs (where "realistic" includes any number less than 1 << 31) but it cannot represent the result of multiplying the two largest supported INTEGER_CSTs. The overflow-checking form of wi::mul provides a way of multiplying two arbitrary INTEGER_CSTs and checking that the result is representable as a widest_int. Note that any overflow checking done on these values is relative to the range of widest_int rather than the range of a TREE_TYPE. Calling this function should have no overhead in release builds, so it is OK to call it several times for the same tree. If it is useful for readability reasons to reduce the number of calls, it is more efficient to use: wi::tree_to_widest_ref wt = wi::to_widest (t); instead of: widest_int wt = wi::to_widest (t).
Referenced by arith_overflow_check_p(), assert_loop_rolls_lt(), build_range_check(), canonicalize_loop_induction_variables(), ccp_lattice_meet(), compute_objsize_r(), convert_mult_to_fma(), convert_to_integer_1(), derive_constant_upper_bound_ops(), div_if_zero_remainder(), do_warn_aggressive_loop_optimizations(), dr_step_indicator(), dump_generic_node(), dump_lattice_value(), estimate_numbers_of_iterations(), expr_to_aff_combination(), extract_bit_test_mask(), fold_binary_loc(), fold_builtin_bit_query(), get_default_value(), get_min_precision(), ipcp_bits_lattice::get_value_and_mask(), gimple_fold_partial_load_store_mem_ref(), gimplify_scan_omp_clauses(), strlen_pass::handle_integral_assign(), hash_tree(), idx_within_array_bound(), integer_onep(), is_nonwrapping_integer_induction(), loop_niters_no_overflow(), may_eliminate_iv(), maybe_canonicalize_mem_ref_addr(), maybe_optimize_pow2p_mod_cmp(), minmax_from_comparison(), multiple_of_p(), native_encode_int(), number_of_iterations_cond(), number_of_iterations_exit_assumptions(), number_of_iterations_lt_to_ne(), number_of_iterations_until_wrap(), omp_adjust_for_condition(), optimize_range_tests_to_bit_test(), optimize_spaceship(), output_constructor_bitfield(), predict_iv_comparison(), record_estimate(), reduction_var_overflows_first(), remove_redundant_iv_tests(), set_lattice_value(), should_interchange_loops(), simple_cst_equal(), spaceship_replacement(), tree_fits_poly_int64_p(), tree_fits_poly_uint64_p(), tree_fits_shwi_p(), tree_fits_uhwi_p(), tree_fold_binomial(), tree_int_cst_compare(), tree_int_cst_equal(), tree_int_cst_le(), tree_int_cst_lt(), try_peel_loop(), try_transform_to_exit_first_loop_alt(), try_unroll_loop_completely(), ubsan_expand_objsize_ifn(), ubsan_type_descriptor(), valid_constant_size_p(), valid_lattice_transition(), value_sat_pred_p(), value_to_wide_int(), valueized_wider_op(), vect_analyze_loop_costing(), vect_convert_input(), vect_determine_precisions_from_users(), 
vect_get_loop_variant_data_ptr_increment(), vect_iv_limit_for_partial_vectors(), vect_joust_widened_integer(), vect_min_prec_for_max_niters(), vect_recog_mulhs_pattern(), vect_truncate_gather_scatter_offset(), and vectorizable_reduction().
|
inline |
Return a wide int of 2 with precision PRECISION.
References precision, and shwi().
Referenced by optimize_spaceship().
BINARY_FUNCTION wi::udiv_ceil | ( | const T1 & | , |
const T2 & | ) |
Referenced by vect_transform_loop().
BINARY_FUNCTION wi::udiv_floor | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::udiv_trunc | ( | const T1 & | , |
const T2 & | ) |
|
inline |
Poly version of wi::uhwi, with the same interface.
References a, i, N, POLY_SET_COEFF, precision, r, and uhwi().
Referenced by build_int_cstu(), choose_multiplier(), compare_nonzero_chars(), exact_int_to_float_conversion_p(), expand_mult_const(), operator_cast::fold_pair(), gimple_parm_array_size(), handle_builtin_alloca(), min_value(), operator_lshift::op1_range(), operator_rshift::op1_range(), ubsan_expand_vptr_ifn(), and uhwi().
|
inline |
BINARY_FUNCTION wi::umax | ( | const T1 & | , |
const T2 & | ) |
Referenced by fold_array_ctor_reference(), and simplify_const_binary_operation().
BINARY_FUNCTION wi::umin | ( | const T1 & | , |
const T2 & | ) |
Referenced by simplify_const_binary_operation(), and vect_transform_loop().
BINARY_FUNCTION wi::umod_floor | ( | const T1 & | , |
const T2 & | ) |
Referenced by restructure_reference().
BINARY_FUNCTION wi::umod_trunc | ( | const T1 & | , |
const T2 & | ) |
BINARY_FUNCTION wi::umul | ( | const T1 & | , |
const T2 & | , | ||
overflow_type * | ) |
Referenced by slow_safe_scale_64bit().
unsigned int wi::xor_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | op0, | ||
unsigned int | op0len, | ||
const HOST_WIDE_INT * | op1, | ||
unsigned int | op1len, | ||
unsigned int | prec ) |
Set VAL to OP0 ^ OP1. Return the number of blocks in VAL.
References canonize(), MAX, and top_bit_of().
|
inline |
Return a wide int of 0 with precision PRECISION.
References precision, and shwi().
Referenced by adjust_pointer_diff_expr(), irange_bitmask::adjust_range(), analyze_and_compute_bitwise_induction_effect(), contains_zero_p(), tree_switch_conversion::bit_test_cluster::emit(), fold_const_call_ss(), fold_convert_const_int_from_real(), cfn_clrsb::fold_range(), cfn_strlen::fold_range(), get_bitmask_from_range(), get_size_range(), get_stridx(), range_query::get_tree_range(), gimple_parm_array_size(), irange_bitmask::intersect(), prange::invert(), ipa_odr_read_section(), operator_plus::lhs_op1_relation(), maybe_diag_stxncpy_trunc(), maybe_set_strlen_range(), minus_op1_op2_relation_effect(), native_decode_rtx(), irange::nonzero_p(), cfn_signbit::op1_range(), operator_bitwise_or::op1_range(), operator_rshift::op1_range(), plus_minus_ranges(), pointer_may_wrap_p(), range_false(), range_is_either_true_or_false(), fold_using_range::range_of_address(), range_positives(), range_true_and_false(), real_to_integer(), irange::set_nonnegative(), prange::set_nonnegative(), irange::set_nonzero(), irange_bitmask::set_nonzero_bits(), vrange::set_nonzero_bits(), irange_bitmask::set_unknown(), prange::set_varying(), irange::set_zero(), prange::set_zero(), operator_bitwise_and::simple_op1_range_solver(), simplify_using_ranges::simplify(), simplify_using_ranges::simplify_truth_ops_using_ranges(), size_must_be_zero_p(), tree_single_nonzero_warnv_p(), vr_set_zero_nonzero_bits(), operator_abs::wi_fold(), operator_absu::wi_fold(), operator_bitwise_and::wi_fold(), operator_div::wi_fold(), operator_trunc_mod::wi_fold(), wi_set_zero_nonzero_bits(), and wi_zero_p().
UNARY_FUNCTION wi::zext | ( | const T & | , |
unsigned int | ) |
unsigned int wi::zext_large | ( | HOST_WIDE_INT * | val, |
const HOST_WIDE_INT * | xval, | ||
unsigned int | xlen, | ||
unsigned int | precision, | ||
unsigned int | offset ) |
Zero-extend the number represented by XVAL and XLEN into VAL, starting at OFFSET. Return the number of blocks in VAL. Both XVAL and VAL have PRECISION bits.
References canonize(), HOST_BITS_PER_WIDE_INT, i, offset, precision, and zext_hwi().
wi::i |
Referenced by copy(), poly_int_cst_value(), shwi(), to_poly_offset(), to_poly_widest(), uhwi(), and WI_UNARY_RESULT().
Ca unsigned int wi::precision |
Referenced by arshift_large(), bitreverse_large(), bswap_large(), cmps(), cmps_large(), cmpu(), cmpu_large(), wi::int_traits< double_int >::decompose(), wi::int_traits< generic_wide_int< storage > >::decompose(), wi::int_traits< rtx_mode_t >::decompose(), wi::int_traits< wi::hwi_with_prec >::decompose(), wi::primitive_int_traits< T, signed_p >::decompose(), extract_uhwi(), force_to_size(), from_array(), from_buffer(), wi::int_traits< double_int >::get_precision(), wi::storage_ref::get_precision(), wi::hwi_with_prec::hwi_with_prec(), insert(), lrshift_large(), lshift_large(), lts_p_large(), ltu_p_large(), mask(), max_value(), min_value(), minus_one(), one(), only_sign_bit_p(), round_down_for_mask(), round_up_for_mask(), set_bit_in_zero(), set_bit_large(), sext_large(), shifted_mask(), shwi(), shwi(), two(), uhwi(), uhwi(), WI_BINARY_RESULT(), WI_UNARY_RESULT(), zero(), and zext_large().