GCC Middle and Back End API Reference
wi Namespace Reference

Data Structures

struct  binary_traits
 
struct  binary_traits< T1, T2, CONST_PRECISION, CONST_PRECISION >
 
struct  binary_traits< T1, T2, CONST_PRECISION, FLEXIBLE_PRECISION >
 
struct  binary_traits< T1, T2, FLEXIBLE_PRECISION, CONST_PRECISION >
 
struct  binary_traits< T1, T2, FLEXIBLE_PRECISION, FLEXIBLE_PRECISION >
 
struct  binary_traits< T1, T2, FLEXIBLE_PRECISION, INL_CONST_PRECISION >
 
struct  binary_traits< T1, T2, FLEXIBLE_PRECISION, VAR_PRECISION >
 
struct  binary_traits< T1, T2, INL_CONST_PRECISION, FLEXIBLE_PRECISION >
 
struct  binary_traits< T1, T2, INL_CONST_PRECISION, INL_CONST_PRECISION >
 
struct  binary_traits< T1, T2, VAR_PRECISION, FLEXIBLE_PRECISION >
 
struct  binary_traits< T1, T2, VAR_PRECISION, VAR_PRECISION >
 
class  extended_tree
 
class  hwi_with_prec
 
struct  int_traits
 
struct  int_traits< double_int >
 
struct  int_traits< extended_tree< N > >
 
struct  int_traits< fixed_wide_int_storage< N > >
 
struct  int_traits< generic_wide_int< storage > >
 
struct  int_traits< int >
 
struct  int_traits< long >
 
struct  int_traits< rtx_mode_t >
 
struct  int_traits< trailing_wide_int_storage >
 
struct  int_traits< unextended_tree >
 
struct  int_traits< unsigned char >
 
struct  int_traits< unsigned int >
 
struct  int_traits< unsigned long >
 
struct  int_traits< unsigned short >
 
struct  int_traits< wi::hwi_with_prec >
 
struct  int_traits< wide_int_ref_storage< SE, HDP > >
 
struct  int_traits< wide_int_storage >
 
struct  int_traits< widest_int_storage< N > >
 
struct  ints_for
 
struct  ints_for< generic_wide_int< extended_tree< N > >, CONST_PRECISION >
 
struct  ints_for< generic_wide_int< extended_tree< N > >, INL_CONST_PRECISION >
 
struct  ints_for< generic_wide_int< unextended_tree >, VAR_PRECISION >
 
struct  ints_for< T, VAR_PRECISION >
 
struct  never_used1
 
struct  never_used2
 
struct  primitive_int_traits
 
class  storage_ref
 
class  unextended_tree
 

Typedefs

typedef poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< wide_int_ref_storage< false, false > > > rtx_to_poly_wide_ref
 
typedef extended_tree< WIDEST_INT_MAX_PRECISION > widest_extended_tree
 
typedef extended_tree< ADDR_MAX_PRECISION > offset_extended_tree
 
typedef const generic_wide_int< widest_extended_tree > tree_to_widest_ref
 
typedef const generic_wide_int< offset_extended_tree > tree_to_offset_ref
 
typedef const generic_wide_int< wide_int_ref_storage< false, false > > tree_to_wide_ref
 
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< widest_extended_tree > > tree_to_poly_widest_ref
 
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< offset_extended_tree > > tree_to_poly_offset_ref
 
typedef const poly_int< NUM_POLY_INT_COEFFS, generic_wide_int< unextended_tree > > tree_to_poly_wide_ref
 

Enumerations

enum  overflow_type { OVF_NONE = 0 , OVF_UNDERFLOW = -1 , OVF_OVERFLOW = 1 , OVF_UNKNOWN = 2 }
 
enum  precision_type { FLEXIBLE_PRECISION , VAR_PRECISION , INL_CONST_PRECISION , CONST_PRECISION }
 

Functions

template<unsigned int N>
poly_int< N, hwi_with_prec > shwi (const poly_int< N, HOST_WIDE_INT > &a, unsigned int precision)
 
template<unsigned int N>
poly_int< N, hwi_with_prec > uhwi (const poly_int< N, unsigned HOST_WIDE_INT > &a, unsigned int precision)
 
template<unsigned int N, typename Ca >
 POLY_POLY_RESULT (N, Ca, Ca) sext (const poly_int< N, Ca > &a, unsigned int precision)
 
template<unsigned int N, typename Ca >
 POLY_POLY_RESULT (N, Ca, Ca) zext (const poly_int< N, Ca > &a, unsigned int precision)
 
hwi_with_prec shwi (HOST_WIDE_INT, machine_mode mode)
 
wide_int min_value (machine_mode, signop)
 
wide_int max_value (machine_mode, signop)
 
rtx_to_poly_wide_ref to_poly_wide (const_rtx, machine_mode)
 
tree_to_widest_ref to_widest (const_tree)
 
tree_to_offset_ref to_offset (const_tree)
 
tree_to_wide_ref to_wide (const_tree)
 
wide_int to_wide (const_tree, unsigned int)
 
tree_to_poly_widest_ref to_poly_widest (const_tree)
 
tree_to_poly_offset_ref to_poly_offset (const_tree)
 
tree_to_poly_wide_ref to_poly_wide (const_tree)
 
template<typename T >
bool fits_to_boolean_p (const T &x, const_tree)
 
template<typename T >
bool fits_to_tree_p (const T &x, const_tree)
 
wide_int min_value (const_tree)
 
wide_int max_value (const_tree)
 
wide_int from_mpz (const_tree, mpz_t, bool)
 
template<typename T >
unsigned int get_precision (const T &)
 
template<typename T1 , typename T2 >
unsigned int get_binary_precision (const T1 &, const T2 &)
 
template<typename T1 , typename T2 >
void copy (T1 &, const T2 &)
 
UNARY_PREDICATE fits_shwi_p (const T &)
 
UNARY_PREDICATE fits_uhwi_p (const T &)
 
UNARY_PREDICATE neg_p (const T &, signop=SIGNED)
 
template<typename T >
HOST_WIDE_INT sign_mask (const T &)
 
BINARY_PREDICATE eq_p (const T1 &, const T2 &)
 
BINARY_PREDICATE ne_p (const T1 &, const T2 &)
 
BINARY_PREDICATE lt_p (const T1 &, const T2 &, signop)
 
BINARY_PREDICATE lts_p (const T1 &, const T2 &)
 
BINARY_PREDICATE ltu_p (const T1 &, const T2 &)
 
BINARY_PREDICATE le_p (const T1 &, const T2 &, signop)
 
BINARY_PREDICATE les_p (const T1 &, const T2 &)
 
BINARY_PREDICATE leu_p (const T1 &, const T2 &)
 
BINARY_PREDICATE gt_p (const T1 &, const T2 &, signop)
 
BINARY_PREDICATE gts_p (const T1 &, const T2 &)
 
BINARY_PREDICATE gtu_p (const T1 &, const T2 &)
 
BINARY_PREDICATE ge_p (const T1 &, const T2 &, signop)
 
BINARY_PREDICATE ges_p (const T1 &, const T2 &)
 
BINARY_PREDICATE geu_p (const T1 &, const T2 &)
 
template<typename T1 , typename T2 >
int cmp (const T1 &, const T2 &, signop)
 
template<typename T1 , typename T2 >
int cmps (const T1 &, const T2 &)
 
template<typename T1 , typename T2 >
int cmpu (const T1 &, const T2 &)
 
UNARY_FUNCTION bit_not (const T &)
 
UNARY_FUNCTION neg (const T &)
 
UNARY_FUNCTION neg (const T &, overflow_type *)
 
UNARY_FUNCTION abs (const T &)
 
UNARY_FUNCTION ext (const T &, unsigned int, signop)
 
UNARY_FUNCTION sext (const T &, unsigned int)
 
UNARY_FUNCTION zext (const T &, unsigned int)
 
UNARY_FUNCTION set_bit (const T &, unsigned int)
 
UNARY_FUNCTION bswap (const T &)
 
UNARY_FUNCTION bitreverse (const T &)
 
BINARY_FUNCTION min (const T1 &, const T2 &, signop)
 
BINARY_FUNCTION smin (const T1 &, const T2 &)
 
BINARY_FUNCTION umin (const T1 &, const T2 &)
 
BINARY_FUNCTION max (const T1 &, const T2 &, signop)
 
BINARY_FUNCTION smax (const T1 &, const T2 &)
 
BINARY_FUNCTION umax (const T1 &, const T2 &)
 
BINARY_FUNCTION bit_and (const T1 &, const T2 &)
 
BINARY_FUNCTION bit_and_not (const T1 &, const T2 &)
 
BINARY_FUNCTION bit_or (const T1 &, const T2 &)
 
BINARY_FUNCTION bit_or_not (const T1 &, const T2 &)
 
BINARY_FUNCTION bit_xor (const T1 &, const T2 &)
 
BINARY_FUNCTION add (const T1 &, const T2 &)
 
BINARY_FUNCTION add (const T1 &, const T2 &, signop, overflow_type *)
 
BINARY_FUNCTION sub (const T1 &, const T2 &)
 
BINARY_FUNCTION sub (const T1 &, const T2 &, signop, overflow_type *)
 
BINARY_FUNCTION mul (const T1 &, const T2 &)
 
BINARY_FUNCTION mul (const T1 &, const T2 &, signop, overflow_type *)
 
BINARY_FUNCTION smul (const T1 &, const T2 &, overflow_type *)
 
BINARY_FUNCTION umul (const T1 &, const T2 &, overflow_type *)
 
BINARY_FUNCTION mul_high (const T1 &, const T2 &, signop)
 
BINARY_FUNCTION div_trunc (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION sdiv_trunc (const T1 &, const T2 &)
 
BINARY_FUNCTION udiv_trunc (const T1 &, const T2 &)
 
BINARY_FUNCTION div_floor (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION udiv_floor (const T1 &, const T2 &)
 
BINARY_FUNCTION sdiv_floor (const T1 &, const T2 &)
 
BINARY_FUNCTION div_ceil (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION udiv_ceil (const T1 &, const T2 &)
 
BINARY_FUNCTION div_round (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION divmod_trunc (const T1 &, const T2 &, signop, WI_BINARY_RESULT(T1, T2) *)
 
BINARY_FUNCTION gcd (const T1 &, const T2 &, signop=UNSIGNED)
 
BINARY_FUNCTION mod_trunc (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION smod_trunc (const T1 &, const T2 &)
 
BINARY_FUNCTION umod_trunc (const T1 &, const T2 &)
 
BINARY_FUNCTION mod_floor (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION umod_floor (const T1 &, const T2 &)
 
BINARY_FUNCTION mod_ceil (const T1 &, const T2 &, signop, overflow_type *=0)
 
BINARY_FUNCTION mod_round (const T1 &, const T2 &, signop, overflow_type *=0)
 
template<typename T1 , typename T2 >
bool multiple_of_p (const T1 &, const T2 &, signop)
 
template<typename T1 , typename T2 >
bool multiple_of_p (const T1 &, const T2 &, signop, WI_BINARY_RESULT(T1, T2) *)
 
SHIFT_FUNCTION lshift (const T1 &, const T2 &)
 
SHIFT_FUNCTION lrshift (const T1 &, const T2 &)
 
SHIFT_FUNCTION arshift (const T1 &, const T2 &)
 
SHIFT_FUNCTION rshift (const T1 &, const T2 &, signop sgn)
 
SHIFT_FUNCTION lrotate (const T1 &, const T2 &, unsigned int=0)
 
SHIFT_FUNCTION rrotate (const T1 &, const T2 &, unsigned int=0)
 
bool only_sign_bit_p (const wide_int_ref &, unsigned int)
 
bool only_sign_bit_p (const wide_int_ref &)
 
int clz (const wide_int_ref &)
 
int clrsb (const wide_int_ref &)
 
int ctz (const wide_int_ref &)
 
int exact_log2 (const wide_int_ref &)
 
int floor_log2 (const wide_int_ref &)
 
int ffs (const wide_int_ref &)
 
int popcount (const wide_int_ref &)
 
int parity (const wide_int_ref &)
 
template<typename T >
unsigned HOST_WIDE_INT extract_uhwi (const T &, unsigned int, unsigned int)
 
template<typename T >
unsigned int min_precision (const T &, signop)
 
static void accumulate_overflow (overflow_type &, overflow_type)
 
unsigned int force_to_size (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, signop sgn)
 
unsigned int from_array (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, bool=true)
 
hwi_with_prec shwi (HOST_WIDE_INT, unsigned int)
 
hwi_with_prec uhwi (unsigned HOST_WIDE_INT, unsigned int)
 
hwi_with_prec minus_one (unsigned int)
 
hwi_with_prec zero (unsigned int)
 
hwi_with_prec one (unsigned int)
 
hwi_with_prec two (unsigned int)
 
bool eq_p_large (const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
bool lts_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int)
 
bool ltu_p_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int)
 
int cmps_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int)
 
int cmpu_large (const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int)
 
unsigned int sext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int)
 
unsigned int zext_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int)
 
unsigned int set_bit_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int)
 
unsigned int bswap_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int bitreverse_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int lshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int)
 
unsigned int lrshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, unsigned int)
 
unsigned int arshift_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, unsigned int, unsigned int)
 
unsigned int and_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int and_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int or_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int or_not_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int xor_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int)
 
unsigned int add_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *)
 
unsigned int sub_large (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *)
 
unsigned int mul_internal (HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *, bool)
 
unsigned int divmod_internal (HOST_WIDE_INT *, unsigned int *, HOST_WIDE_INT *, const HOST_WIDE_INT *, unsigned int, unsigned int, const HOST_WIDE_INT *, unsigned int, unsigned int, signop, overflow_type *)
 
wide_int min_value (unsigned int, signop)
 
wide_int min_value (never_used1 *)
 
wide_int min_value (never_used2 *)
 
wide_int max_value (unsigned int, signop)
 
wide_int max_value (never_used1 *)
 
wide_int max_value (never_used2 *)
 
wide_int from_buffer (const unsigned char *, unsigned int)
 
void to_mpz (const wide_int_ref &, mpz_t, signop)
 
wide_int mask (unsigned int, bool, unsigned int)
 
wide_int shifted_mask (unsigned int, unsigned int, bool, unsigned int)
 
wide_int set_bit_in_zero (unsigned int, unsigned int)
 
wide_int insert (const wide_int &x, const wide_int &y, unsigned int, unsigned int)
 
wide_int round_down_for_mask (const wide_int &, const wide_int &)
 
wide_int round_up_for_mask (const wide_int &, const wide_int &)
 
wide_int mod_inv (const wide_int &a, const wide_int &b)
 
template<typename T >
T mask (unsigned int, bool)
 
template<typename T >
T shifted_mask (unsigned int, unsigned int, bool)
 
template<typename T >
T set_bit_in_zero (unsigned int)
 
unsigned int mask (HOST_WIDE_INT *, unsigned int, bool, unsigned int)
 
unsigned int shifted_mask (HOST_WIDE_INT *, unsigned int, unsigned int, bool, unsigned int)
 


Detailed Description

The tree and const_tree overload templates.    
Public functions for querying and operating on integers.   
trailing_wide_int behaves like a wide_int.   
Allow primitive C types to be used in wi:: routines.   
Private functions for handling large cases out of line.  They take
individual length and array parameters because that is cheaper for
the inline caller than constructing an object on the stack and
passing a reference to it.  (Although many callers use wide_int_refs,
we generally want those to be removed by SRA.)   

Typedef Documentation

◆ offset_extended_tree

◆ rtx_to_poly_wide_ref

◆ tree_to_offset_ref

◆ tree_to_poly_offset_ref

◆ tree_to_poly_wide_ref

◆ tree_to_poly_widest_ref

◆ tree_to_wide_ref

◆ tree_to_widest_ref

◆ widest_extended_tree

Enumeration Type Documentation

◆ overflow_type

Enumerator
OVF_NONE 
OVF_UNDERFLOW 
OVF_OVERFLOW 
OVF_UNKNOWN 

◆ precision_type

Enumerator
FLEXIBLE_PRECISION 
VAR_PRECISION 
INL_CONST_PRECISION 
CONST_PRECISION 

Function Documentation

◆ abs()

◆ accumulate_overflow()

void wi::accumulate_overflow ( wi::overflow_type & overflow,
wi::overflow_type suboverflow )
inline static
Accumulate a set of overflows into OVERFLOW.   

References OVF_UNKNOWN.
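
A minimal usage sketch (not taken from the GCC sources; A, B and C are
assumed to be wide_ints of the same precision): OVERFLOW ends up as
OVF_NONE only if no step of the computation overflowed.

  wi::overflow_type overflow = wi::OVF_NONE;
  wi::overflow_type suboverflow;
  wide_int t1 = wi::add (a, b, SIGNED, &suboverflow);
  wi::accumulate_overflow (overflow, suboverflow);
  wide_int t2 = wi::add (t1, c, SIGNED, &suboverflow);
  wi::accumulate_overflow (overflow, suboverflow);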

◆ add() [1/2]

◆ add() [2/2]

BINARY_FUNCTION wi::add ( const T1 & ,
const T2 & ,
signop ,
overflow_type *  )
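
A hedged sketch of the overflow-checking form (X and Y are assumed to be
wide_ints of equal precision): the result wraps like a machine register,
and *OVERFLOW reports whether the true value was representable.

  wi::overflow_type overflow;
  wide_int sum = wi::add (x, y, UNSIGNED, &overflow);
  if (overflow != wi::OVF_NONE)
    ;  /* x + y wrapped at this precision.  */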

◆ add_large()

unsigned int wi::add_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec,
signop sgn,
wi::overflow_type * overflow )
Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
whether the result overflows when OP0 and OP1 are treated as having
signedness SGN.  Return the number of blocks in VAL.   

References canonize(), HOST_BITS_PER_WIDE_INT, i, MAX, OVF_NONE, OVF_OVERFLOW, OVF_UNDERFLOW, shift, SIGNED, top_bit_of(), and UNSIGNED.

◆ and_large()

unsigned int wi::and_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Set VAL to OP0 & OP1.  Return the number of blocks used.   

References canonize(), MAX, and top_bit_of().

◆ and_not_large()

unsigned int wi::and_not_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Set VAL to OP0 & ~OP1.  Return the number of blocks used.   

References canonize(), MAX, and top_bit_of().

◆ arshift()

SHIFT_FUNCTION wi::arshift ( const T1 & ,
const T2 &  )

◆ arshift_large()

unsigned int wi::arshift_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int xprecision,
unsigned int precision,
unsigned int shift )
Arithmetically right shift XVAL by SHIFT and store the result in VAL.
Return the number of blocks in VAL.  XVAL has XPRECISION bits and
VAL has PRECISION bits.   

References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, MIN, precision, rshift_large_common(), sext_hwi(), and shift.

◆ bit_and()

◆ bit_and_not()

◆ bit_not()

◆ bit_or()

◆ bit_or_not()

BINARY_FUNCTION wi::bit_or_not ( const T1 & ,
const T2 &  )

◆ bit_xor()

BINARY_FUNCTION wi::bit_xor ( const T1 & ,
const T2 &  )

◆ bitreverse()

UNARY_FUNCTION wi::bitreverse ( const T & )

◆ bitreverse_large()

unsigned int wi::bitreverse_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int len,
unsigned int precision )
Bitreverse the integer represented by XVAL and LEN into VAL.  Return
the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.   

References canonize(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, offset, precision, and safe_uhwi().

◆ bswap()

◆ bswap_large()

unsigned int wi::bswap_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int precision )
Byte swap the integer represented by XVAL and XLEN into VAL.  Return
the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.   

References BLOCKS_NEEDED, canonize(), gcc_assert, HOST_BITS_PER_WIDE_INT, offset, precision, and safe_uhwi().

◆ clrsb()

int wi::clrsb ( const wide_int_ref & x)
Return the number of redundant sign bits in X.  (That is, the number
of bits immediately below the sign bit that have the same value as
the sign bit.)   

References clz_hwi(), count, HOST_BITS_PER_WIDE_INT, mask(), and generic_wide_int< storage >::uhigh().

Referenced by bit_value_binop(), fold_const_call_ss(), min_precision(), simplify_const_binary_operation(), simplify_const_unary_operation(), and wi_optimize_signed_bitwise_op().

◆ clz()

◆ cmp()

template<typename T1 , typename T2 >
int wi::cmp ( const T1 & x,
const T2 & y,
signop sgn )
inline
Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Signedness of
X and Y indicated by SGN.   

References cmps(), cmpu(), SIGNED, and y.

Referenced by bit_value_binop(), compare_nonzero_chars(), compare_values_warnv(), get_array_ctor_element_at_index(), simplify_conversion_using_ranges(), value_range_from_overflowed_bounds(), and irange::verify_range().
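
A short sketch (assuming T1 and T2 are INTEGER_CSTs of the same type):
the signop argument selects between the cmps and cmpu behaviours.

  int order = wi::cmp (wi::to_wide (t1), wi::to_wide (t2),
                       TYPE_SIGN (TREE_TYPE (t1)));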

◆ cmps()

template<typename T1 , typename T2 >
int wi::cmps ( const T1 & x,
const T2 & y )
inline

◆ cmps_large()

int wi::cmps_large ( const HOST_WIDE_INT * op0,
unsigned int op0len,
unsigned int precision,
const HOST_WIDE_INT * op1,
unsigned int op1len )
Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
signed compares.   

References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and SIGNED.

Referenced by cmps().

◆ cmpu()

template<typename T1 , typename T2 >
int wi::cmpu ( const T1 & x,
const T2 & y )
inline
Return -1 if X < Y, 0 if X == Y and 1 if X > Y.  Treat both X and Y
as unsigned values.   

References cmpu_large(), get_binary_precision(), LIKELY, precision, STATIC_CONSTANT_P, WIDE_INT_REF_FOR, and y.

Referenced by cmp(), do_warn_aggressive_loop_optimizations(), fold_array_ctor_reference(), set_strlen_range(), and wide_int_cmp().

◆ cmpu_large()

int wi::cmpu_large ( const HOST_WIDE_INT * op0,
unsigned int op0len,
unsigned int precision,
const HOST_WIDE_INT * op1,
unsigned int op1len )
Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
unsigned compares.   

References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and UNSIGNED.

Referenced by cmpu().

◆ copy()

template<typename T1 , typename T2 >
void wi::copy ( T1 & x,
const T2 & y )
inline

◆ ctz()

◆ div_ceil()

BINARY_FUNCTION wi::div_ceil ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

◆ div_floor()

BINARY_FUNCTION wi::div_floor ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

◆ div_round()

BINARY_FUNCTION wi::div_round ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

◆ div_trunc()

◆ divmod_internal()

unsigned int wi::divmod_internal ( HOST_WIDE_INT * quotient,
unsigned int * remainder_len,
HOST_WIDE_INT * remainder,
const HOST_WIDE_INT * dividend_val,
unsigned int dividend_len,
unsigned int dividend_prec,
const HOST_WIDE_INT * divisor_val,
unsigned int divisor_len,
unsigned int divisor_prec,
signop sgn,
wi::overflow_type * oflow )
Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
the result.  If QUOTIENT is nonnull, store the value of the quotient
there and return the number of blocks in it.  The return value is
not defined otherwise.  If REMAINDER is nonnull, store the value
of the remainder there and store the number of blocks in
*REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
the division overflowed.   

References BLOCKS_NEEDED, canonize_uhwi(), divmod_internal_2(), fits_shwi_p(), fits_uhwi_p(), gcc_checking_assert, HOST_BITS_PER_HALF_WIDE_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_MIN, i, MIN, neg_p(), only_sign_bit_p(), OVF_NONE, OVF_OVERFLOW, SIGNED, sub_large(), generic_wide_int< storage >::to_shwi(), generic_wide_int< storage >::to_uhwi(), UNLIKELY, UNSIGNED, wi_pack(), wi_unpack(), WIDE_INT_MAX_INL_PRECISION, and zeros.

◆ divmod_trunc()

BINARY_FUNCTION wi::divmod_trunc ( const T1 & ,
const T2 & ,
signop ,
WI_BINARY_RESULT(T1, T2) *  )

◆ eq_p()

◆ eq_p_large()

bool wi::eq_p_large ( const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Return true if OP0 == OP1.   

References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, and zext_hwi().

◆ exact_log2()

◆ ext()

◆ extract_uhwi()

template<typename T >
unsigned HOST_WIDE_INT wi::extract_uhwi ( const T & x,
unsigned int bitpos,
unsigned int width )
inline

◆ ffs()

int wi::ffs ( const wide_int_ref & x)
Return the index of the first (lowest) set bit in X, counting from 1.
Return 0 if X is 0.   

References ctz(), and eq_p().

Referenced by fold_const_call_ss(), and simplify_const_unary_operation().

◆ fits_shwi_p()

◆ fits_to_boolean_p()

template<typename T >
bool wi::fits_to_boolean_p ( const T & x,
const_tree type )

References known_eq, and TYPE_UNSIGNED.

Referenced by fits_to_tree_p(), and int_fits_type_p().

◆ fits_to_tree_p()

◆ fits_uhwi_p()

◆ floor_log2()

int wi::floor_log2 ( const wide_int_ref & x)
Return the base-2 logarithm of X, rounding down.  Return -1 if X is 0.   

References clz().

Referenced by cfn_clz::fold_range(), cfn_ctz::fold_range(), cfn_ffs::fold_range(), get_debug_computation_at(), tree_floor_log2(), and wi_set_zero_nonzero_bits().


◆ force_to_size()

unsigned int wi::force_to_size ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int xprecision,
unsigned int precision,
signop sgn )
Convert the number represented by XVAL, XLEN and XPRECISION, which has
signedness SGN, to an integer that has PRECISION bits.  Store the blocks
in VAL and return the number of blocks used.

This function can handle both extension (PRECISION > XPRECISION)
and truncation (PRECISION < XPRECISION).   

References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, i, precision, sext_hwi(), UNSIGNED, and zext_hwi().

Referenced by FIXED_WIDE_INT(), wide_int_storage::from(), and WIDEST_INT().

◆ from_array()

unsigned int wi::from_array ( HOST_WIDE_INT * ,
const HOST_WIDE_INT * ,
unsigned int ,
unsigned int ,
bool = true )
Copy XLEN elements from XVAL to VAL.  If NEED_CANON, canonize the
result for an integer with precision PRECISION.  Return the length
of VAL (after any canonization).   

References canonize(), i, and precision.

Referenced by wide_int_storage::from_array().

◆ from_buffer()

wide_int wi::from_buffer ( const unsigned char * buffer,
unsigned int buffer_len )
Construct a wide int from a buffer of length BUFFER_LEN.  BUFFER will be
read according to byte endianness and word endianness of the target.
Only the lower BUFFER_LEN bytes of the result are set; the remaining
high bytes are cleared.   

References BLOCKS_NEEDED, canonize(), wide_int_storage::create(), HOST_BITS_PER_WIDE_INT, i, offset, precision, wide_int_storage::set_len(), and wide_int_storage::write_val().

Referenced by expand_DEFERRED_INIT(), and native_interpret_int().
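
A minimal sketch (hypothetical buffer contents; on a target with 8-bit
units the result has 32 bits of precision):

  unsigned char buf[4] = { 0x12, 0x34, 0x56, 0x78 };
  wide_int w = wi::from_buffer (buf, 4);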

◆ from_mpz()

wide_int wi::from_mpz ( const_tree type,
mpz_t x,
bool wrap )
Returns X converted to TYPE.  If WRAP is true, then out-of-range
values of X will be wrapped; otherwise, they will be set to the
appropriate minimum or maximum TYPE bound.   

References BLOCKS_NEEDED, canonize(), CEIL, CHAR_BIT, count, wide_int_storage::create(), free(), get_type_static_bounds(), max(), MIN, min(), wide_int_storage::set_len(), TYPE_PRECISION, WIDE_INT_MAX_INL_ELTS, and wide_int_storage::write_val().

Referenced by number_of_iterations_lt(), number_of_iterations_ne(), and vect_peel_nonlinear_iv_init().

◆ gcd()

BINARY_FUNCTION wi::gcd ( const T1 & ,
const T2 & ,
signop = UNSIGNED )

◆ ge_p()

◆ ges_p()

BINARY_PREDICATE wi::ges_p ( const T1 & ,
const T2 &  )

◆ get_binary_precision()

template<typename T1 , typename T2 >
unsigned int wi::get_binary_precision ( const T1 & x,
const T2 & y )
inline
Return the number of bits that the result of a binary operation can
hold when the input operands are X and Y.   

References WI_BINARY_RESULT, and y.

Referenced by cmps(), and cmpu().

◆ get_precision()

◆ geu_p()

◆ gt_p()

◆ gts_p()

◆ gtu_p()

◆ insert()

wide_int wi::insert ( const wide_int & x,
const wide_int & y,
unsigned int start,
unsigned int width )
Insert WIDTH bits from Y into X starting at START.   

References bit_and_not(), wide_int_storage::from(), gcc_checking_assert, wide_int_storage::get_precision(), lshift(), mask(), precision, shifted_mask(), UNSIGNED, and y.

Referenced by try_combine().
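
A small sketch (values chosen for illustration): splice the low 8 bits
of Y into bits 8..15 of X.

  wide_int x = wi::shwi (0, 32);
  wide_int y = wi::shwi (0xab, 32);
  wide_int z = wi::insert (x, y, 8, 8);   /* 0xab00 */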

◆ le_p()

◆ les_p()

◆ leu_p()

◆ lrotate()

SHIFT_FUNCTION wi::lrotate ( const T1 & ,
const T2 & ,
unsigned int = 0 )

◆ lrshift()

◆ lrshift_large()

unsigned int wi::lrshift_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int xprecision,
unsigned int precision,
unsigned int shift )
Logically right shift XVAL by SHIFT and store the result in VAL.
Return the number of blocks in VAL.  XVAL has XPRECISION bits and
VAL has PRECISION bits.   

References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, precision, rshift_large_common(), shift, and zext_hwi().

◆ lshift()

◆ lshift_large()

unsigned int wi::lshift_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int precision,
unsigned int shift )
Left shift XVAL by SHIFT and store the result in VAL.  Return the
number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.   

References BLOCKS_NEEDED, canonize(), HOST_BITS_PER_WIDE_INT, i, MIN, precision, safe_uhwi(), and shift.

Referenced by WI_UNARY_RESULT().

◆ lt_p()

◆ lts_p()

◆ lts_p_large()

bool wi::lts_p_large ( const HOST_WIDE_INT * op0,
unsigned int op0len,
unsigned int precision,
const HOST_WIDE_INT * op1,
unsigned int op1len )
Return true if OP0 < OP1 using signed comparisons.   

References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and SIGNED.

◆ ltu_p()

◆ ltu_p_large()

bool wi::ltu_p_large ( const HOST_WIDE_INT * op0,
unsigned int op0len,
unsigned int precision,
const HOST_WIDE_INT * op1,
unsigned int op1len )
Return true if OP0 < OP1 using unsigned comparisons.   

References BLOCKS_NEEDED, HOST_BITS_PER_WIDE_INT, MAX, precision, selt(), and UNSIGNED.

◆ mask() [1/3]

unsigned int wi::mask ( HOST_WIDE_INT * val,
unsigned int width,
bool negate,
unsigned int prec )
Fill VAL with a mask where the lower WIDTH bits are ones and the bits
above that up to PREC are zeros.  The result is inverted if NEGATE
is true.  Return the number of blocks in VAL.   

References HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, last, and shift.

◆ mask() [2/3]

template<typename T >
T wi::mask ( unsigned int width,
bool negate_p )
inline
Return an integer of type T in which the low WIDTH bits are set
and the other bits are clear, or the inverse if NEGATE_P.   

References HOST_BITS_PER_WIDE_INT, mask(), and STATIC_ASSERT.
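
A sketch of both flavours of mask (widths chosen for illustration):

  offset_int lo8 = wi::mask <offset_int> (8, false);   /* low 8 bits set */
  wide_int inv8 = wi::mask (8, true, 32);              /* 32-bit ~0xff */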

◆ mask() [3/3]

◆ max()

◆ max_value() [1/5]

wide_int wi::max_value ( const_tree type)
inline
Produce the largest number that is represented in TYPE.  The precision
and sign are taken from TYPE.   

References max_value(), TYPE_PRECISION, and TYPE_SIGN.
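
A minimal sketch (TYPE is assumed to be an integral type node); the
min_value overload documented below gives the other bound:

  wide_int lo = wi::min_value (type);
  wide_int hi = wi::max_value (type);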

◆ max_value() [2/5]

wide_int wi::max_value ( machine_mode mode,
signop sgn )
inline
Produce the largest number that is represented in MODE.  The precision
is taken from MODE and the sign from SGN.   

References as_a(), GET_MODE_PRECISION(), and max_value().

Referenced by canon_condition(), compute_doloop_base_on_mode(), find_var_cmp_const(), fold_bit_and_mask(), get_legacy_range(), get_type_static_bounds(), gimple_fold_builtin_strlen(), integer_all_onesp(), intersect_range_with_nonzero_bits(), irange::invert(), prange::invert(), irange_val_max(), iv_can_overflow_p(), simplify_using_ranges::legacy_fold_cond_overflow(), loop_niters_no_overflow(), match_arith_overflow(), max_limit(), max_value(), max_value(), minmax_replacement(), minus_op1_op2_relation_effect(), number_of_iterations_until_wrap(), omp_reduction_init_op(), operator_bitwise_or::op1_range(), operator_cast::op1_range(), operator_trunc_mod::op1_range(), operator_trunc_mod::op2_range(), optimize_range_tests_diff(), optimize_range_tests_xor(), overflow_comparison_p_1(), print_int_bound(), range_positives(), refine_value_range_using_guard(), irange::set(), set_min_and_max_values_for_integral_type(), prange::set_nonnegative(), prange::set_nonzero(), irange::set_varying(), prange::set_varying(), simple_iv_with_niters(), simplify_const_binary_operation(), simplify_const_unary_operation(), value_range_with_overflow(), irange::varying_compatible_p(), vect_determine_precisions_from_range(), vect_gen_vector_loop_niters(), operator_abs::wi_fold(), operator_mult::wi_fold(), operator_div::wi_op_overflows(), operator_mult::wi_op_overflows(), and wide_int_to_tree_1().

◆ max_value() [3/5]

wide_int wi::max_value ( never_used1 * )

◆ max_value() [4/5]

wide_int wi::max_value ( never_used2 * )

◆ max_value() [5/5]

wide_int wi::max_value ( unsigned int precision,
signop sgn )
Return the largest SGNed number that is representable in PRECISION bits.

TODO: There is still code from the double_int era that tries to
make up for the fact that double_ints could not represent the
min and max values of all types.  This code should be removed
because the min and max values can always be represented in
wide_ints and int-csts.   

References gcc_checking_assert, mask(), precision, shwi(), and UNSIGNED.

◆ min()

◆ min_precision()

◆ min_value() [1/5]

wide_int wi::min_value ( const_tree type)
inline
Produce the smallest number that is represented in TYPE.  The precision
and sign are taken from TYPE.   

References min_value(), TYPE_PRECISION, and TYPE_SIGN.

◆ min_value() [2/5]

◆ min_value() [3/5]

wide_int wi::min_value ( never_used1 * )

◆ min_value() [4/5]

wide_int wi::min_value ( never_used2 * )

◆ min_value() [5/5]

wide_int wi::min_value ( unsigned int precision,
signop sgn )
Return the smallest SGNed number that is representable in PRECISION bits.   

References gcc_checking_assert, precision, set_bit_in_zero(), uhwi(), and UNSIGNED.

◆ minus_one()

◆ mod_ceil()

BINARY_FUNCTION wi::mod_ceil ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

Referenced by wide_int_binop().

◆ mod_floor()

BINARY_FUNCTION wi::mod_floor ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

Referenced by wide_int_binop().

◆ mod_inv()

wide_int wi::mod_inv ( const wide_int & a,
const wide_int & b )
Compute the modular multiplicative inverse of A modulo B
using extended Euclid's algorithm.  Assumes A and B are coprime,
and that A and B have the same precision.   

References a, b, divmod_trunc(), eq_p(), wide_int_storage::from(), gcc_checking_assert, gcd(), gt_p(), lt_p(), mul(), SIGNED, sub(), and UNSIGNED.

Referenced by expand_doubleword_divmod(), and maybe_optimize_mod_cmp().
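
A worked sketch with small constants: 3 * 11 == 33 == 2 * 16 + 1, so 11
is the inverse of 3 modulo 16.

  wide_int a = wi::shwi (3, 8);
  wide_int b = wi::shwi (16, 8);
  wide_int inv = wi::mod_inv (a, b);   /* 11 */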

◆ mod_round()

BINARY_FUNCTION wi::mod_round ( const T1 & ,
const T2 & ,
signop ,
overflow_type * = 0 )

Referenced by wide_int_binop().

◆ mod_trunc()

◆ mul() [1/2]

◆ mul() [2/2]

BINARY_FUNCTION wi::mul ( const T1 & ,
const T2 & ,
signop ,
overflow_type *  )

◆ mul_high()

BINARY_FUNCTION wi::mul_high ( const T1 & ,
const T2 & ,
signop  )

◆ mul_internal()

unsigned int wi::mul_internal ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op1val,
unsigned int op1len,
const HOST_WIDE_INT * op2val,
unsigned int op2len,
unsigned int prec,
signop sgn,
wi::overflow_type * overflow,
bool high )
Multiply OP1 by OP2.  If HIGH is set, only the upper half of the
result is returned.

If HIGH is not set, throw away the upper half after the check is
made to see if it overflows.  Unfortunately there is no better way
to check for overflow than to do this.  If OVERFLOW is nonnull,
record in *OVERFLOW whether the result overflowed.  SGN controls
the signedness and is used to check overflow or if HIGH is set.

NOTE: Overflow type for signed overflow is not yet implemented.   

References b, BLOCKS_NEEDED, CHAR_BIT, end(), fits_uhwi_p(), HALF_INT_MASK, HOST_BITS_PER_HALF_WIDE_INT, HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1, i, mask(), MIN, neg_p(), OVF_NONE, OVF_OVERFLOW, OVF_UNKNOWN, r, sext_hwi(), shift, SIGN_MASK, SIGNED, generic_wide_int< storage >::to_shwi(), generic_wide_int< storage >::to_uhwi(), generic_wide_int< storage >::ulow(), UNLIKELY, UNSIGNED, wi_pack(), wi_unpack(), and WIDE_INT_MAX_INL_PRECISION.

◆ multiple_of_p() [1/2]

template<typename T1 , typename T2 >
bool wi::multiple_of_p ( const T1 & x,
const T2 & y,
signop sgn )
inline
Return true if X is a multiple of Y.  Treat X and Y as having the
signedness given by SGN.   

References mod_trunc(), and y.

Referenced by create_add_imm_cand(), div_if_zero_remainder(), extract_muldiv_1(), lower_omp_ordered_clauses(), multiple_of_p(), omp_apply_tile(), pcom_worker::suitable_component_p(), and vect_truncate_gather_scatter_offset().
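
A one-line sketch (T is assumed to be an INTEGER_CST): test divisibility
by 8 in the infinite-precision view.

  bool ok = wi::multiple_of_p (wi::to_widest (t), 8, SIGNED);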

◆ multiple_of_p() [2/2]

template<typename T1 , typename T2 >
bool wi::multiple_of_p ( const T1 & x,
const T2 & y,
signop sgn,
WI_BINARY_RESULT(T1, T2) * res )
inline
Return true if X is a multiple of Y, storing X / Y in *RES if so.
Treat X and Y as having the signedness given by SGN.   

References divmod_trunc(), WI_BINARY_RESULT, and y.

◆ ne_p()

◆ neg() [1/2]

◆ neg() [2/2]

UNARY_FUNCTION wi::neg ( const T & ,
overflow_type *  )

◆ neg_p()

UNARY_PREDICATE wi::neg_p ( const T & ,
signop = SIGNED )

◆ one()

◆ only_sign_bit_p() [1/2]

bool wi::only_sign_bit_p ( const wide_int_ref & x)
Return true if X represents the minimum signed value.   

References only_sign_bit_p().

◆ only_sign_bit_p() [2/2]

bool wi::only_sign_bit_p ( const wide_int_ref & x,
unsigned int precision )
Return true if sign-extending X to have precision PRECISION would give
the minimum signed value at that precision.   

References ctz(), and precision.

Referenced by divmod_internal(), may_negate_without_overflow_p(), only_sign_bit_p(), sign_bit_p(), and simplify_const_unary_operation().

◆ or_large()

unsigned int wi::or_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Set VAL to OP0 | OP1.  Return the number of blocks used.   

References canonize(), MAX, and top_bit_of().

◆ or_not_large()

unsigned int wi::or_not_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Set VAL to OP0 | ~OP1.  Return the number of blocks used.   

References canonize(), MAX, and top_bit_of().

◆ parity()

int wi::parity ( const wide_int_ref & x)
inline
Return 0 if the number of 1s in X is even and 1 if the number of 1s
is odd.   

References popcount().

Referenced by fold_const_call_ss(), and simplify_const_unary_operation().

◆ sext() [poly version]

template<unsigned int N, typename Ca >
POLY_POLY_RESULT (N, Ca, Ca) wi::sext ( const poly_int< N, Ca > & a,
unsigned int precision )
inline
Poly version of wi::sext, with the same interface.   

◆ zext() [poly version]

template<unsigned int N, typename Ca >
POLY_POLY_RESULT (N, Ca, Ca) wi::zext ( const poly_int< N, Ca > & a,
unsigned int precision )
inline
Poly version of wi::zext, with the same interface.   

◆ popcount()

◆ round_down_for_mask()

wide_int wi::round_down_for_mask ( const wide_int & val,
const wide_int & mask )
Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
down to the previous value that has no bits set outside MASK.
This rounding wraps for signed values if VAL is negative and
the top bit of MASK is clear.

For example, round_down_for_mask (6, 0xf1) would give 1 and
round_down_for_mask (24, 0xf1) would give 17.   

References bit_and_not(), clz(), wide_int_storage::get_precision(), mask(), and precision.

Referenced by intersect_range_with_nonzero_bits().

◆ round_up_for_mask()

wide_int wi::round_up_for_mask ( const wide_int & val,
const wide_int & mask )
Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
up to the next value that has no bits set outside MASK.  The rounding
wraps if there are no suitable values greater than VAL.

For example, round_up_for_mask (6, 0xf1) would give 16 and
round_up_for_mask (24, 0xf1) would give 32.   

References bit_and_not(), clz(), wide_int_storage::get_precision(), mask(), and precision.

Referenced by intersect_range_with_nonzero_bits().
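
A sketch reproducing the examples above with 32-bit values:

  wide_int m = wi::uhwi (0xf1, 32);
  wide_int v = wi::uhwi (24, 32);
  wide_int down = wi::round_down_for_mask (v, m);   /* 17 */
  wide_int up = wi::round_up_for_mask (v, m);       /* 32 */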

◆ rrotate()

SHIFT_FUNCTION wi::rrotate ( const T1 & ,
const T2 & ,
unsigned int = 0 )

◆ rshift()

◆ sdiv_floor()

BINARY_FUNCTION wi::sdiv_floor ( const T1 & ,
const T2 &  )

◆ sdiv_trunc()

BINARY_FUNCTION wi::sdiv_trunc ( const T1 & ,
const T2 &  )

◆ set_bit()

◆ set_bit_in_zero() [1/2]

template<typename T >
T wi::set_bit_in_zero ( unsigned int bit)
inline
Return an integer of type T in which bit BIT is set and all the
others are clear.   

References shifted_mask().

◆ set_bit_in_zero() [2/2]

◆ set_bit_large()

unsigned int wi::set_bit_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int precision,
unsigned int bit )
Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
bits.   

References canonize(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, precision, and safe_uhwi().

◆ sext()

◆ sext_large()

unsigned int wi::sext_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int precision,
unsigned int offset )
Sign-extend the number represented by XVAL and XLEN into VAL,
starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
and VAL have PRECISION bits.   

References canonize(), HOST_BITS_PER_WIDE_INT, i, offset, precision, and sext_hwi().

◆ shifted_mask() [1/3]

unsigned int wi::shifted_mask ( HOST_WIDE_INT * val,
unsigned int start,
unsigned int width,
bool negate,
unsigned int prec )
Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
bits are ones, and the bits above that up to PREC are zeros.  The result
is inverted if NEGATE is true.  Return the number of blocks in VAL.   

References end(), HOST_BITS_PER_WIDE_INT, HOST_WIDE_INT_1U, i, and shift.

◆ shifted_mask() [2/3]

template<typename T >
T wi::shifted_mask ( unsigned int start,
unsigned int width,
bool negate_p )
inline
Return an integer of type T in which the low START bits are clear,
the next WIDTH bits are set, and the other bits are clear, or the
inverse if NEGATE_P.   

References HOST_BITS_PER_WIDE_INT, shifted_mask(), and STATIC_ASSERT.

◆ shifted_mask() [3/3]

◆ shwi() [1/3]

◆ shwi() [2/3]

wi::hwi_with_prec wi::shwi ( HOST_WIDE_INT val,
machine_mode mode )
inline

References as_a(), GET_MODE_PRECISION(), and shwi().

◆ shwi() [3/3]

wi::hwi_with_prec wi::shwi ( HOST_WIDE_INT val,
unsigned int precision )
inline
Return a signed integer that has value VAL and precision PRECISION.   

References precision, and SIGNED.
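
A short sketch (X is assumed to be a wide_int of precision PREC):
hwi_with_prec attaches a precision to a HOST_WIDE_INT so that it can be
used directly as a wi:: operand.

  if (wi::eq_p (x, wi::shwi (-1, prec)))
    ;  /* x is all ones.  */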

◆ sign_mask()

template<typename T >
HOST_WIDE_INT wi::sign_mask ( const T & x)
inline
Return -1 if the top bit of X is set and 0 if the top bit is clear.   

References WIDE_INT_REF_FOR.

Referenced by dw_wide_int::elt(), and pointer_plus_operator::fold_range().

◆ smax()

◆ smin()

◆ smod_trunc()

BINARY_FUNCTION wi::smod_trunc ( const T1 & ,
const T2 &  )

Referenced by expand_doubleword_mod().

◆ smul()

BINARY_FUNCTION wi::smul ( const T1 & ,
const T2 & ,
overflow_type *  )

Referenced by tree_fold_binomial().

◆ sub() [1/2]

◆ sub() [2/2]

BINARY_FUNCTION wi::sub ( const T1 & ,
const T2 & ,
signop ,
overflow_type *  )

◆ sub_large()

unsigned int wi::sub_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec,
signop sgn,
wi::overflow_type * overflow )
Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
whether the result overflows when OP0 and OP1 are treated as having
signedness SGN.  Return the number of blocks in VAL.   

References canonize(), HOST_BITS_PER_WIDE_INT, i, MAX, OVF_NONE, OVF_OVERFLOW, OVF_UNDERFLOW, shift, SIGNED, top_bit_of(), and UNSIGNED.

Referenced by divmod_internal().

◆ to_mpz()

◆ to_offset()

wi::tree_to_offset_ref wi::to_offset ( const_tree t)
inline
Refer to INTEGER_CST T as though it were an offset_int.

This function is an optimisation of wi::to_widest for cases
in which T is known to be a bit or byte count in the range
(-(2 ^ (N + BITS_PER_UNIT)), 2 ^ (N + BITS_PER_UNIT)), where N is
the target's address size in bits.

This is the right choice when operating on bit or byte counts as
untyped numbers rather than M-bit values.  The wi::to_widest comments
about addition, subtraction and multiplication apply here: sequences
of 1 << 31 additions and subtractions do not induce overflow, but
multiplying the largest sizes might.  Again,

  wi::tree_to_offset_ref wt = wi::to_offset (t);

is more efficient than:

  offset_int wt = wi::to_offset (t).   

Referenced by access_ref::add_max_offset(), access_ref::add_offset(), adjust_offset_for_component_ref(), alloca_call_type(), ao_ref_init_from_vn_reference(), array_ref_flexible_size_p(), array_size_for_constructor(), backtrace_base_for_ref(), array_bounds_checker::check_addr_expr(), array_bounds_checker::check_mem_ref(), component_ref_sam_type(), compute_objsize_r(), copy_reference_ops_from_ref(), create_add_imm_cand(), create_add_on_incoming_edge(), create_add_ssa_cand(), create_component_ref_by_pieces_1(), create_intersect_range_checks_index(), create_mul_imm_cand(), create_mul_ssa_cand(), access_ref::dump(), pass_walloca::execute(), field_byte_offset(), find_constructor_constant_at_offset(), fold(), fold_array_ctor_reference(), fold_nonarray_ctor_reference(), get_addr_base_and_unit_offset_1(), get_array_ctor_element_at_index(), get_maxbound(), get_offset_range(), get_range_strlen_tree(), access_ref::get_ref(), get_ref_base_and_extent(), get_size_range(), handle_array_ref(), handle_component_ref(), handle_decl(), handle_mem_ref(), handle_ssa_name(), has_dominating_ubsan_ptr_check(), int_bit_position(), loc_list_from_tree_1(), maybe_optimize_ubsan_ptr_ifn(), maybe_rewrite_mem_ref_base(), maybe_warn_for_bound(), maybe_warn_nonstring_arg(), strlen_pass::maybe_warn_overflow(), access_ref::merge_ref(), native_encode_initializer(), non_rewritable_mem_ref_base(), access_ref::offset_bounded(), access_ref::offset_in_range(), offset_int_type_size_in_bits(), output_constructor_array_range(), output_constructor_regular_field(), prepare_iteration_over_array_elts(), replace_conditional_candidate(), replace_uncond_cands_and_profitable_phis(), replace_unconditional_candidate(), restructure_reference(), access_data::set_bound(), set_component_ref_size(), access_ref::set_max_size_range(), access_ref::size_remaining(), slsr_process_add(), pcom_worker::suitable_component_p(), and valueize_refs_1().

◆ to_poly_offset()

◆ to_poly_wide() [1/2]

◆ to_poly_wide() [2/2]

wi::tree_to_poly_wide_ref wi::to_poly_wide ( const_tree t)
inline
Access INTEGER_CST or POLY_INT_CST tree T as if it were a
poly_wide_int.  See wi::to_wide for more details.   

References POLY_INT_CST_P, and poly_int_cst_value().

◆ to_poly_widest()

◆ to_wide() [1/2]

wi::tree_to_wide_ref wi::to_wide ( const_tree t)
inline
Refer to INTEGER_CST T as though it were a wide_int.

In contrast to the approximation of infinite-precision numbers given
by wi::to_widest and wi::to_offset, this function treats T as a
signless collection of N bits, where N is the precision of T's type.
As with machine registers, signedness is determined by the operation
rather than the operands; for example, there is a distinction between
signed and unsigned division.

This is the right choice when operating on values with the same type
using normal modulo arithmetic.  The overflow-checking forms of things
like wi::add check whether the result can be represented in T's type.

Calling this function should have no overhead in release builds,
so it is OK to call it several times for the same tree.  If it is
useful for readability reasons to reduce the number of calls,
it is more efficient to use:

  wi::tree_to_wide_ref wt = wi::to_wide (t);

instead of:

  wide_int wt = wi::to_wide (t).   

References TREE_INT_CST_ELT, TREE_INT_CST_NUNITS, TREE_TYPE, and TYPE_PRECISION.

Referenced by addr_for_mem_ref(), adjust_imagpart_expr(), adjust_realpart_expr(), all_ones_mask_p(), alloca_call_type(), alloca_type_and_limit::alloca_type_and_limit(), tree_vector_builder::apply_step(), arith_cast_equal_p(), tree_switch_conversion::switch_conversion::array_value_type(), bitint_min_cst_precision(), bitmask_inv_cst_vector_p(), bitwise_equal_p(), bitwise_inverted_equal_p(), build_printable_array_type(), build_vec_series(), cache_integer_cst(), gimple_outgoing_range::calc_switch_ranges(), ccp_finalize(), check_nul_terminated_array(), chrec_fold_multiply(), tree_switch_conversion::switch_conversion::collect(), compare_values_warnv(), compute_avail(), compute_distributive_range(), cond_removal_in_builtin_zero_pattern(), const_binop(), tree_switch_conversion::switch_conversion::contains_linear_function_p(), irange::contains_p(), prange::contains_p(), copy_tree_body_r(), create_intersect_range_checks_index(), cgraph_node::create_thunk(), dequeue_and_dump(), determine_block_size(), do_store_flag(), dr_analyze_indices(), dr_step_indicator(), dump_generic_node(), tree_switch_conversion::bit_test_cluster::emit(), evaluate_stmt(), tree_switch_conversion::switch_conversion::exp_index_transform(), expand_builtin_strnlen(), expand_case(), expand_expr_real_1(), expand_omp_target(), expand_single_bit_test(), expr_not_equal_to(), expr_to_aff_combination(), extract_muldiv_1(), find_case_label_range(), find_unswitching_predicates_for_bb(), fold_abs_const(), fold_binary_loc(), fold_bit_and_mask(), fold_const_aggregate_ref_1(), fold_const_call_1(), fold_const_call_1(), fold_convert_const(), fold_convert_const_int_from_int(), fold_convert_const_int_from_real(), fold_div_compare(), fold_negate_expr_1(), fold_not_const(), fold_plusminus_mult_expr(), fold_ternary_loc(), fold_unary_loc(), fuse_memset_builtins(), get_array_ctor_element_at_index(), get_constraint_for_ptr_offset(), get_cst_init_from_scev(), get_min_precision(), get_nonzero_bits(), get_range(), tree_switch_conversion::cluster::get_range(), get_range_pos_neg(), get_range_strlen_tree(), get_size_range(), get_stridx(), get_type_static_bounds(), get_unwidened(), get_up_bounds_for_array_ref(), gimple_bitwise_equal_p(), gimple_bitwise_inverted_equal_p(), gimple_call_alloc_size(), gimple_fold_builtin_strlen(), gimple_fold_indirect_ref(), gimple_parm_array_size(), go_output_typedef(), group_case_labels_stmt(), handle_array_ref(), strlen_pass::handle_builtin_memset(), strlen_pass::handle_builtin_strlen(), operand_compare::hash_operand(), int_fits_type_p(), integer_all_onesp(), integer_nonzerop(), integer_pow2p(), integer_zerop(), ipa_odr_summary_write(), ipa_polymorphic_call_context::ipa_polymorphic_call_context(), ipa_range_contains_p(), if_chain::is_beneficial(), tree_switch_conversion::switch_conversion::is_exp_index_transform_viable(), is_inv_store_elimination_chain(), is_widening_mult_rhs_p(), layout_type(), simplify_using_ranges::legacy_fold_cond_overflow(), lower_coro_builtin(), lower_omp_ordered_clauses(), maskable_range_p(), match_arith_overflow(), may_negate_without_overflow_p(), maybe_diag_stxncpy_trunc(), maybe_optimize_mod_cmp(), maybe_optimize_pow2p_mod_cmp(), maybe_set_nonzero_bits(), maybe_set_strlen_range(), minmax_replacement(), ipa_param_adjustments::modify_call(), movement_possibility_1(), multiple_of_p(), native_encode_initializer(), native_interpret_aggregate(), negate_expr_p(), num_ending_zeros(), number_of_iterations_lt(), number_of_iterations_lt_to_ne(), number_of_iterations_ne_max(), number_of_iterations_until_wrap(), 
omp_apply_tile(), omp_context_compute_score(), optimize_bit_field_compare(), optimize_range_tests_cmp_bitwise(), optimize_range_tests_diff(), optimize_range_tests_to_bit_test(), optimize_range_tests_var_bound(), optimize_range_tests_xor(), optimize_spaceship(), output_constant(), overflow_comparison_p_1(), poly_int_binop(), preprocess_case_label_vec_for_gimple(), print_node(), print_node_brief(), phi_analyzer::process_phi(), real_value_from_int_cst(), record_nonwrapping_iv(), refine_value_range_using_guard(), remap_gimple_op_r(), round_up_loc(), scan_omp_1_op(), scev_var_range_cant_overflow(), irange::set(), prange::set(), irange::set_nonnegative(), set_strlen_range(), set_switch_stmt_execution_predicate(), sign_bit_p(), simple_iv_with_niters(), simplify_builtin_call(), spaceship_replacement(), split_at_bb_p(), split_constant_offset(), split_constant_offset_1(), split_to_var_and_offset(), tree_vector_builder::step(), to_wide(), tree_ctz(), tree_fits_poly_int64_p(), tree_floor_log2(), tree_int_cst_sgn(), tree_int_cst_sign_bit(), tree_log2(), tree_nonzero_bits(), ubsan_expand_ptr_ifn(), unextend(), unswitch_predicate::unswitch_predicate(), vect_can_peel_nonlinear_iv_p(), vect_create_nonlinear_iv_step(), vect_determine_precisions_from_range(), vect_do_peeling(), vect_emulate_mixed_dot_prod(), vect_get_range_info(), vect_peel_nonlinear_iv_init(), vect_recog_divmod_pattern(), vectorizable_load(), vn_walk_cb_data::vn_walk_cb_data(), warn_string_no_nul(), wide_int_to_tree_1(), and zero_one_minusone().
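
A minimal sketch of the modulo-arithmetic view (the helper name is made
up for illustration):

  /* Return true if INTEGER_CST T has exactly one bit set.  */
  static bool
  cst_pow2_p (const_tree t)
  {
    return wi::popcount (wi::to_wide (t)) == 1;
  }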

◆ to_wide() [2/2]

wide_int wi::to_wide ( const_tree t,
unsigned int prec )
inline
Convert INTEGER_CST T to a wide_int of precision PREC, extending or
truncating as necessary.  When extending, use sign extension if T's
type is signed and zero extension if T's type is unsigned.   

References wide_int_storage::from(), to_wide(), TREE_TYPE, and TYPE_SIGN.

◆ to_widest()

wi::tree_to_widest_ref wi::to_widest ( const_tree t)
inline
Refer to INTEGER_CST T as though it were a widest_int.

This function gives T's actual numerical value, influenced by the
signedness of its type.  For example, a signed byte with just the
top bit set would be -128 while an unsigned byte with the same
bit pattern would be 128.

This is the right choice when operating on groups of INTEGER_CSTs
that might have different signedness or precision.  It is also the
right choice in code that specifically needs an approximation of
infinite-precision arithmetic instead of normal modulo arithmetic.

The approximation of infinite precision is good enough for realistic
numbers of additions and subtractions of INTEGER_CSTs (where
"realistic" includes any number less than 1 << 31) but it cannot
represent the result of multiplying the two largest supported
INTEGER_CSTs.  The overflow-checking form of wi::mul provides a way
of multiplying two arbitrary INTEGER_CSTs and checking that the
result is representable as a widest_int.

Note that any overflow checking done on these values is relative to
the range of widest_int rather than the range of a TREE_TYPE.

Calling this function should have no overhead in release builds,
so it is OK to call it several times for the same tree.  If it is
useful for readability reasons to reduce the number of calls,
it is more efficient to use:

  wi::tree_to_widest_ref wt = wi::to_widest (t);

instead of:

  widest_int wt = wi::to_widest (t).   

Referenced by arith_overflow_check_p(), assert_loop_rolls_lt(), build_range_check(), canonicalize_loop_induction_variables(), ccp_lattice_meet(), compute_objsize_r(), convert_mult_to_fma(), convert_to_integer_1(), derive_constant_upper_bound_ops(), div_if_zero_remainder(), do_warn_aggressive_loop_optimizations(), dr_step_indicator(), dump_generic_node(), dump_lattice_value(), estimate_numbers_of_iterations(), expr_to_aff_combination(), extract_bit_test_mask(), fold_binary_loc(), fold_builtin_bit_query(), get_default_value(), get_min_precision(), ipcp_bits_lattice::get_value_and_mask(), gimple_fold_partial_load_store_mem_ref(), gimplify_scan_omp_clauses(), strlen_pass::handle_integral_assign(), hash_tree(), idx_within_array_bound(), integer_onep(), is_nonwrapping_integer_induction(), loop_niters_no_overflow(), may_eliminate_iv(), maybe_canonicalize_mem_ref_addr(), maybe_optimize_pow2p_mod_cmp(), minmax_from_comparison(), multiple_of_p(), native_encode_int(), number_of_iterations_cond(), number_of_iterations_exit_assumptions(), number_of_iterations_lt_to_ne(), number_of_iterations_until_wrap(), omp_adjust_for_condition(), optimize_range_tests_to_bit_test(), optimize_spaceship(), output_constructor_bitfield(), predict_iv_comparison(), record_estimate(), reduction_var_overflows_first(), remove_redundant_iv_tests(), set_lattice_value(), should_interchange_loops(), simple_cst_equal(), spaceship_replacement(), tree_fits_poly_int64_p(), tree_fits_poly_uint64_p(), tree_fits_shwi_p(), tree_fits_uhwi_p(), tree_fold_binomial(), tree_int_cst_compare(), tree_int_cst_equal(), tree_int_cst_le(), tree_int_cst_lt(), try_peel_loop(), try_transform_to_exit_first_loop_alt(), try_unroll_loop_completely(), ubsan_expand_objsize_ifn(), ubsan_type_descriptor(), valid_constant_size_p(), valid_lattice_transition(), value_sat_pred_p(), value_to_wide_int(), valueized_wider_op(), vect_analyze_loop_costing(), vect_convert_input(), vect_determine_precisions_from_users(), vect_get_loop_variant_data_ptr_increment(), vect_iv_limit_for_partial_vectors(), vect_joust_widened_integer(), vect_min_prec_for_max_niters(), vect_recog_mulhs_pattern(), vect_truncate_gather_scatter_offset(), and vectorizable_reduction().
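
A small sketch of the points above (T1 and T2 are assumed to be
INTEGER_CSTs, possibly of different types): the sum cannot wrap in
widest_int, and fits_to_tree_p then checks it against a type.

  widest_int sum = wi::to_widest (t1) + wi::to_widest (t2);
  bool representable = wi::fits_to_tree_p (sum, TREE_TYPE (t1));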

◆ two()

wi::hwi_with_prec wi::two ( unsigned int precision)
inline
Return a wide int of 2 with precision PRECISION.   

References precision, and shwi().

Referenced by optimize_spaceship().

◆ udiv_ceil()

BINARY_FUNCTION wi::udiv_ceil ( const T1 & ,
const T2 &  )

Referenced by vect_transform_loop().

◆ udiv_floor()

◆ udiv_trunc()

◆ uhwi() [1/2]

template<unsigned int N>
poly_int< N, hwi_with_prec > wi::uhwi ( const poly_int< N, unsigned HOST_WIDE_INT > & a,
unsigned int precision )
inline

◆ uhwi() [2/2]

wi::hwi_with_prec wi::uhwi ( unsigned HOST_WIDE_INT val,
unsigned int precision )
inline
Return an unsigned integer that has value VAL and precision PRECISION.   

References precision, and UNSIGNED.

◆ umax()

BINARY_FUNCTION wi::umax ( const T1 & ,
const T2 &  )

◆ umin()

BINARY_FUNCTION wi::umin ( const T1 & ,
const T2 &  )

◆ umod_floor()

BINARY_FUNCTION wi::umod_floor ( const T1 & ,
const T2 &  )

Referenced by restructure_reference().

◆ umod_trunc()

◆ umul()

BINARY_FUNCTION wi::umul ( const T1 & ,
const T2 & ,
overflow_type *  )

Referenced by slow_safe_scale_64bit().

◆ xor_large()

unsigned int wi::xor_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * op0,
unsigned int op0len,
const HOST_WIDE_INT * op1,
unsigned int op1len,
unsigned int prec )
Set VAL to OP0 ^ OP1.  Return the number of blocks used.   

References canonize(), MAX, and top_bit_of().

◆ zero()

wi::hwi_with_prec wi::zero ( unsigned int precision)
inline
Return a wide int of 0 with precision PRECISION.   

References precision, and shwi().

Referenced by adjust_pointer_diff_expr(), irange_bitmask::adjust_range(), analyze_and_compute_bitwise_induction_effect(), contains_zero_p(), tree_switch_conversion::bit_test_cluster::emit(), fold_const_call_ss(), fold_convert_const_int_from_real(), cfn_clrsb::fold_range(), cfn_strlen::fold_range(), get_bitmask_from_range(), get_size_range(), get_stridx(), range_query::get_tree_range(), gimple_parm_array_size(), irange_bitmask::intersect(), prange::invert(), ipa_odr_read_section(), operator_plus::lhs_op1_relation(), maybe_diag_stxncpy_trunc(), maybe_set_strlen_range(), minus_op1_op2_relation_effect(), native_decode_rtx(), irange::nonzero_p(), cfn_signbit::op1_range(), operator_bitwise_or::op1_range(), operator_rshift::op1_range(), plus_minus_ranges(), pointer_may_wrap_p(), range_false(), range_is_either_true_or_false(), fold_using_range::range_of_address(), range_positives(), range_true_and_false(), real_to_integer(), irange::set_nonnegative(), prange::set_nonnegative(), irange::set_nonzero(), irange_bitmask::set_nonzero_bits(), vrange::set_nonzero_bits(), irange_bitmask::set_unknown(), prange::set_varying(), irange::set_zero(), prange::set_zero(), operator_bitwise_and::simple_op1_range_solver(), simplify_using_ranges::simplify(), simplify_using_ranges::simplify_truth_ops_using_ranges(), size_must_be_zero_p(), tree_single_nonzero_warnv_p(), vr_set_zero_nonzero_bits(), operator_abs::wi_fold(), operator_absu::wi_fold(), operator_bitwise_and::wi_fold(), operator_div::wi_fold(), operator_trunc_mod::wi_fold(), wi_set_zero_nonzero_bits(), and wi_zero_p().

◆ zext()

◆ zext_large()

unsigned int wi::zext_large ( HOST_WIDE_INT * val,
const HOST_WIDE_INT * xval,
unsigned int xlen,
unsigned int precision,
unsigned int offset )
Zero-extend the number represented by XVAL and XLEN into VAL,
starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
and VAL have PRECISION bits.   

References canonize(), HOST_BITS_PER_WIDE_INT, i, offset, precision, and zext_hwi().
