GCC Middle and Back End API Reference
tree-vectorizer.h File Reference
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
#include "internal-fn.h"
#include "tree-ssa-operands.h"
#include "gimple-match.h"

Data Structures

struct  stmt_info_for_cost
 
struct  vect_scalar_ops_slice
 
struct  vect_scalar_ops_slice_hash
 
struct  _slp_tree
 
class  _slp_instance
 
struct  scalar_cond_masked_key
 
struct  default_hash_traits< scalar_cond_masked_key >
 
class  vec_lower_bound
 
class  vec_info_shared
 
class  vec_info
 
struct  rgroup_controls
 
struct  vec_loop_masks
 
struct  vect_reusable_accumulator
 
class  _loop_vec_info
 
struct  slp_root
 
class  _bb_vec_info
 
class  dr_vec_info
 
class  _stmt_vec_info
 
struct  gather_scatter_info
 
class  vector_costs
 
class  auto_purge_vect_location
 
struct  vect_loop_form_info
 
class  vect_pattern
 

Macros

#define VECTORIZABLE_CYCLE_DEF(D)
 
#define SLP_INSTANCE_TREE(S)   (S)->root
 
#define SLP_INSTANCE_UNROLLING_FACTOR(S)   (S)->unrolling_factor
 
#define SLP_INSTANCE_LOADS(S)   (S)->loads
 
#define SLP_INSTANCE_ROOT_STMTS(S)   (S)->root_stmts
 
#define SLP_INSTANCE_REMAIN_DEFS(S)   (S)->remain_defs
 
#define SLP_INSTANCE_KIND(S)   (S)->kind
 
#define SLP_TREE_CHILDREN(S)   (S)->children
 
#define SLP_TREE_SCALAR_STMTS(S)   (S)->stmts
 
#define SLP_TREE_SCALAR_OPS(S)   (S)->ops
 
#define SLP_TREE_REF_COUNT(S)   (S)->refcnt
 
#define SLP_TREE_VEC_DEFS(S)   (S)->vec_defs
 
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)   (S)->vec_stmts_size
 
#define SLP_TREE_LOAD_PERMUTATION(S)   (S)->load_permutation
 
#define SLP_TREE_LANE_PERMUTATION(S)   (S)->lane_permutation
 
#define SLP_TREE_SIMD_CLONE_INFO(S)   (S)->simd_clone_info
 
#define SLP_TREE_DEF_TYPE(S)   (S)->def_type
 
#define SLP_TREE_VECTYPE(S)   (S)->vectype
 
#define SLP_TREE_REPRESENTATIVE(S)   (S)->representative
 
#define SLP_TREE_LANES(S)   (S)->lanes
 
#define SLP_TREE_CODE(S)   (S)->code
 
#define LOOP_VINFO_LOOP(L)   (L)->loop
 
#define LOOP_VINFO_IV_EXIT(L)   (L)->vec_loop_iv_exit
 
#define LOOP_VINFO_EPILOGUE_IV_EXIT(L)   (L)->vec_epilogue_loop_iv_exit
 
#define LOOP_VINFO_SCALAR_IV_EXIT(L)   (L)->scalar_loop_iv_exit
 
#define LOOP_VINFO_BBS(L)   (L)->bbs
 
#define LOOP_VINFO_NITERSM1(L)   (L)->num_itersm1
 
#define LOOP_VINFO_NITERS(L)   (L)->num_iters
 
#define LOOP_VINFO_NITERS_UNCHANGED(L)   (L)->num_iters_unchanged
 
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
 
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L)   (L)->th
 
#define LOOP_VINFO_VERSIONING_THRESHOLD(L)   (L)->versioning_threshold
 
#define LOOP_VINFO_VECTORIZABLE_P(L)   (L)->vectorizable
 
#define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L)   (L)->can_use_partial_vectors_p
 
#define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L)   (L)->using_partial_vectors_p
 
#define LOOP_VINFO_USING_DECREMENTING_IV_P(L)   (L)->using_decrementing_iv_p
 
#define LOOP_VINFO_USING_SELECT_VL_P(L)   (L)->using_select_vl_p
 
#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P(L)    (L)->epil_using_partial_vectors_p
 
#define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L)   (L)->partial_load_store_bias
 
#define LOOP_VINFO_VECT_FACTOR(L)   (L)->vectorization_factor
 
#define LOOP_VINFO_MAX_VECT_FACTOR(L)   (L)->max_vectorization_factor
 
#define LOOP_VINFO_MASKS(L)   (L)->masks
 
#define LOOP_VINFO_LENS(L)   (L)->lens
 
#define LOOP_VINFO_MASK_SKIP_NITERS(L)   (L)->mask_skip_niters
 
#define LOOP_VINFO_RGROUP_COMPARE_TYPE(L)   (L)->rgroup_compare_type
 
#define LOOP_VINFO_RGROUP_IV_TYPE(L)   (L)->rgroup_iv_type
 
#define LOOP_VINFO_PARTIAL_VECTORS_STYLE(L)   (L)->partial_vector_style
 
#define LOOP_VINFO_PTR_MASK(L)   (L)->ptr_mask
 
#define LOOP_VINFO_N_STMTS(L)   (L)->shared->n_stmts
 
#define LOOP_VINFO_LOOP_NEST(L)   (L)->shared->loop_nest
 
#define LOOP_VINFO_DATAREFS(L)   (L)->shared->datarefs
 
#define LOOP_VINFO_DDRS(L)   (L)->shared->ddrs
 
#define LOOP_VINFO_INT_NITERS(L)   (TREE_INT_CST_LOW ((L)->num_iters))
 
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L)   (L)->peeling_for_alignment
 
#define LOOP_VINFO_UNALIGNED_DR(L)   (L)->unaligned_dr
 
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
 
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)   (L)->may_alias_ddrs
 
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)   (L)->comp_alias_ddrs
 
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)   (L)->check_unequal_addrs
 
#define LOOP_VINFO_CHECK_NONZERO(L)   (L)->check_nonzero
 
#define LOOP_VINFO_LOWER_BOUNDS(L)   (L)->lower_bounds
 
#define LOOP_VINFO_GROUPED_STORES(L)   (L)->grouped_stores
 
#define LOOP_VINFO_SLP_INSTANCES(L)   (L)->slp_instances
 
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L)   (L)->slp_unrolling_factor
 
#define LOOP_VINFO_REDUCTIONS(L)   (L)->reductions
 
#define LOOP_VINFO_REDUCTION_CHAINS(L)   (L)->reduction_chains
 
#define LOOP_VINFO_PEELING_FOR_GAPS(L)   (L)->peeling_for_gaps
 
#define LOOP_VINFO_PEELING_FOR_NITER(L)   (L)->peeling_for_niter
 
#define LOOP_VINFO_EARLY_BREAKS(L)   (L)->early_breaks
 
#define LOOP_VINFO_EARLY_BRK_STORES(L)   (L)->early_break_stores
 
#define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED(L)    (single_pred ((L)->loop->latch) != (L)->vec_loop_iv_exit->src)
 
#define LOOP_VINFO_EARLY_BRK_DEST_BB(L)   (L)->early_break_dest_bb
 
#define LOOP_VINFO_EARLY_BRK_VUSES(L)   (L)->early_break_vuses
 
#define LOOP_VINFO_LOOP_CONDS(L)   (L)->conds
 
#define LOOP_VINFO_LOOP_IV_COND(L)   (L)->loop_iv_cond
 
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L)   (L)->no_data_dependencies
 
#define LOOP_VINFO_SCALAR_LOOP(L)   (L)->scalar_loop
 
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L)   (L)->scalar_loop_scaling
 
#define LOOP_VINFO_HAS_MASK_STORE(L)   (L)->has_mask_store
 
#define LOOP_VINFO_SCALAR_ITERATION_COST(L)   (L)->scalar_cost_vec
 
#define LOOP_VINFO_ORIG_LOOP_INFO(L)   (L)->orig_loop_info
 
#define LOOP_VINFO_SIMD_IF_COND(L)   (L)->simd_if_cond
 
#define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L)   (L)->inner_loop_cost_factor
 
#define LOOP_VINFO_FULLY_MASKED_P(L)
 
#define LOOP_VINFO_FULLY_WITH_LENGTH_P(L)
 
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)    ((L)->may_misalign_stmts.length () > 0)
 
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)
 
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)    (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
 
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)    (LOOP_VINFO_SIMD_IF_COND (L))
 
#define LOOP_REQUIRES_VERSIONING(L)
 
#define LOOP_VINFO_NITERS_KNOWN_P(L)    (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
 
#define LOOP_VINFO_EPILOGUE_P(L)    (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
 
#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L)    (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
 
#define BB_VINFO_BB(B)   (B)->bb
 
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
 
#define BB_VINFO_SLP_INSTANCES(B)   (B)->slp_instances
 
#define BB_VINFO_DATAREFS(B)   (B)->shared->datarefs
 
#define BB_VINFO_DDRS(B)   (B)->shared->ddrs
 
#define STMT_VINFO_TYPE(S)   (S)->type
 
#define STMT_VINFO_STMT(S)   (S)->stmt
 
#define STMT_VINFO_RELEVANT(S)   (S)->relevant
 
#define STMT_VINFO_LIVE_P(S)   (S)->live
 
#define STMT_VINFO_VECTYPE(S)   (S)->vectype
 
#define STMT_VINFO_VEC_STMTS(S)   (S)->vec_stmts
 
#define STMT_VINFO_VECTORIZABLE(S)   (S)->vectorizable
 
#define STMT_VINFO_DATA_REF(S)   ((S)->dr_aux.dr + 0)
 
#define STMT_VINFO_GATHER_SCATTER_P(S)   (S)->gather_scatter_p
 
#define STMT_VINFO_STRIDED_P(S)   (S)->strided_p
 
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
 
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
 
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S)   (S)->induc_cond_initial_val
 
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S)   (S)->reduc_epilogue_adjustment
 
#define STMT_VINFO_REDUC_IDX(S)   (S)->reduc_idx
 
#define STMT_VINFO_FORCE_SINGLE_CYCLE(S)   (S)->force_single_cycle
 
#define STMT_VINFO_DR_WRT_VEC_LOOP(S)   (S)->dr_wrt_vec_loop
 
#define STMT_VINFO_DR_BASE_ADDRESS(S)   (S)->dr_wrt_vec_loop.base_address
 
#define STMT_VINFO_DR_INIT(S)   (S)->dr_wrt_vec_loop.init
 
#define STMT_VINFO_DR_OFFSET(S)   (S)->dr_wrt_vec_loop.offset
 
#define STMT_VINFO_DR_STEP(S)   (S)->dr_wrt_vec_loop.step
 
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)   (S)->dr_wrt_vec_loop.base_alignment
 
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_misalignment
 
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.offset_alignment
 
#define STMT_VINFO_DR_STEP_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.step_alignment
 
#define STMT_VINFO_DR_INFO(S)    (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
 
#define STMT_VINFO_IN_PATTERN_P(S)   (S)->in_pattern_p
 
#define STMT_VINFO_RELATED_STMT(S)   (S)->related_stmt
 
#define STMT_VINFO_PATTERN_DEF_SEQ(S)   (S)->pattern_def_seq
 
#define STMT_VINFO_SIMD_CLONE_INFO(S)   (S)->simd_clone_info
 
#define STMT_VINFO_DEF_TYPE(S)   (S)->def_type
 
#define STMT_VINFO_GROUPED_ACCESS(S)    ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S)   (S)->loop_phi_evolution_base_unchanged
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S)   (S)->loop_phi_evolution_part
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE(S)   (S)->loop_phi_evolution_type
 
#define STMT_VINFO_MIN_NEG_DIST(S)   (S)->min_neg_dist
 
#define STMT_VINFO_REDUC_TYPE(S)   (S)->reduc_type
 
#define STMT_VINFO_REDUC_CODE(S)   (S)->reduc_code
 
#define STMT_VINFO_REDUC_FN(S)   (S)->reduc_fn
 
#define STMT_VINFO_REDUC_DEF(S)   (S)->reduc_def
 
#define STMT_VINFO_REDUC_VECTYPE(S)   (S)->reduc_vectype
 
#define STMT_VINFO_REDUC_VECTYPE_IN(S)   (S)->reduc_vectype_in
 
#define STMT_VINFO_SLP_VECT_ONLY(S)   (S)->slp_vect_only_p
 
#define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S)   (S)->slp_vect_pattern_only_p
 
#define DR_GROUP_FIRST_ELEMENT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
 
#define DR_GROUP_NEXT_ELEMENT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
 
#define DR_GROUP_SIZE(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
 
#define DR_GROUP_STORE_COUNT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
 
#define DR_GROUP_GAP(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
 
#define REDUC_GROUP_FIRST_ELEMENT(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
 
#define REDUC_GROUP_NEXT_ELEMENT(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
 
#define REDUC_GROUP_SIZE(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)
 
#define STMT_VINFO_RELEVANT_P(S)   ((S)->relevant != vect_unused_in_scope)
 
#define HYBRID_SLP_STMT(S)   ((S)->slp_type == hybrid)
 
#define PURE_SLP_STMT(S)   ((S)->slp_type == pure_slp)
 
#define STMT_SLP_TYPE(S)   (S)->slp_type
 
#define VECT_MAX_COST   1000
 
#define MAX_INTERM_CVT_STEPS   3
 
#define MAX_VECTORIZATION_FACTOR   INT_MAX
 
#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE)
 
#define DR_MISALIGNMENT_UNKNOWN   (-1)
 
#define DR_MISALIGNMENT_UNINITIALIZED   (-2)
 
#define SET_DR_MISALIGNMENT(DR, VAL)   set_dr_misalignment (DR, VAL)
 
#define DR_TARGET_ALIGNMENT(DR)   dr_target_alignment (DR)
 
#define SET_DR_TARGET_ALIGNMENT(DR, VAL)   set_dr_target_alignment (DR, VAL)
 
#define DUMP_VECT_SCOPE(MSG)    AUTO_DUMP_SCOPE (MSG, vect_location)
 

Typedefs

typedef class _stmt_vec_info * stmt_vec_info
 
typedef struct _slp_tree * slp_tree
 
typedef vec< stmt_info_for_cost > stmt_vector_for_cost
 
typedef hash_map< tree_operand_hash, std::pair< stmt_vec_info, innermost_loop_behavior * > > vec_base_alignments
 
typedef vec< std::pair< unsigned, unsigned > > lane_permutation_t
 
typedef auto_vec< std::pair< unsigned, unsigned >, 16 > auto_lane_permutation_t
 
typedef vec< unsigned > load_permutation_t
 
typedef auto_vec< unsigned, 16 > auto_load_permutation_t
 
typedef class _slp_instance * slp_instance
 
typedef hash_set< scalar_cond_masked_key > scalar_cond_masked_set_type
 
typedef pair_hash< tree_operand_hash, tree_operand_hash > tree_cond_mask_hash
 
typedef hash_set< tree_cond_mask_hash > vec_cond_masked_set_type
 
typedef std::pair< tree, tree > vec_object_pair
 
typedef auto_vec< rgroup_controls > vec_loop_lens
 
typedef auto_vec< std::pair< data_reference *, tree > > drs_init_vec
 
typedef _loop_vec_info * loop_vec_info
 
typedef opt_pointer_wrapper< loop_vec_info > opt_loop_vec_info
 
typedef _bb_vec_info * bb_vec_info
 
typedef struct data_reference * dr_p
 
typedef enum _complex_perm_kinds complex_perm_kinds_t
 
typedef hash_map< slp_tree, complex_perm_kinds_t > slp_tree_to_load_perm_map_t
 
typedef pair_hash< nofree_ptr_hash< _slp_tree >, nofree_ptr_hash< _slp_tree > > slp_node_hash
 
typedef hash_map< slp_node_hash, bool > slp_compat_nodes_map_t
 
typedef vect_pattern *(* vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *, slp_compat_nodes_map_t *, slp_tree *)
 

Enumerations

enum  vect_var_kind { vect_simple_var , vect_pointer_var , vect_scalar_var , vect_mask_var }
 
enum  operation_type { unary_op = 1 , binary_op , ternary_op }
 
enum  dr_alignment_support {
  dr_unaligned_unsupported , dr_unaligned_supported , dr_explicit_realign , dr_explicit_realign_optimized ,
  dr_aligned
}
 
enum  vect_def_type {
  vect_uninitialized_def = 0 , vect_constant_def = 1 , vect_external_def , vect_internal_def ,
  vect_induction_def , vect_reduction_def , vect_double_reduction_def , vect_nested_cycle ,
  vect_first_order_recurrence , vect_condition_def , vect_unknown_def_type
}
 
enum  vect_induction_op_type {
  vect_step_op_add = 0 , vect_step_op_neg , vect_step_op_mul , vect_step_op_shl ,
  vect_step_op_shr
}
 
enum  vect_reduction_type {
  TREE_CODE_REDUCTION , COND_REDUCTION , INTEGER_INDUC_COND_REDUCTION , CONST_COND_REDUCTION ,
  EXTRACT_LAST_REDUCTION , FOLD_LEFT_REDUCTION
}
 
enum  slp_instance_kind {
  slp_inst_kind_store , slp_inst_kind_reduc_group , slp_inst_kind_reduc_chain , slp_inst_kind_bb_reduc ,
  slp_inst_kind_ctor
}
 
enum  vect_partial_vector_style { vect_partial_vectors_none , vect_partial_vectors_while_ult , vect_partial_vectors_avx512 , vect_partial_vectors_len }
 
enum  stmt_vec_info_type {
  undef_vec_info_type = 0 , load_vec_info_type , store_vec_info_type , shift_vec_info_type ,
  op_vec_info_type , call_vec_info_type , call_simd_clone_vec_info_type , assignment_vec_info_type ,
  condition_vec_info_type , comparison_vec_info_type , reduc_vec_info_type , induc_vec_info_type ,
  type_promotion_vec_info_type , type_demotion_vec_info_type , type_conversion_vec_info_type , cycle_phi_info_type ,
  lc_phi_info_type , phi_info_type , recurr_info_type , loop_exit_ctrl_vec_info_type
}
 
enum  vect_relevant {
  vect_unused_in_scope = 0 , vect_used_only_live , vect_used_in_outer_by_reduction , vect_used_in_outer ,
  vect_used_by_reduction , vect_used_in_scope
}
 
enum  slp_vect_type { loop_vect = 0 , pure_slp , hybrid }
 
enum  vec_load_store_type { VLS_LOAD , VLS_STORE , VLS_STORE_INVARIANT }
 
enum  vect_memory_access_type {
  VMAT_INVARIANT , VMAT_CONTIGUOUS , VMAT_CONTIGUOUS_DOWN , VMAT_CONTIGUOUS_PERMUTE ,
  VMAT_CONTIGUOUS_REVERSE , VMAT_LOAD_STORE_LANES , VMAT_ELEMENTWISE , VMAT_STRIDED_SLP ,
  VMAT_GATHER_SCATTER
}
 
enum  _complex_perm_kinds {
  PERM_UNKNOWN , PERM_EVENODD , PERM_ODDEVEN , PERM_ODDODD ,
  PERM_EVENEVEN , PERM_TOP
}
 

Functions

loop_vec_info loop_vec_info_for_loop (class loop *loop)
 
bool nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
 
tree vect_phi_initial_value (gphi *phi)
 
bool vect_use_mask_type_p (stmt_vec_info stmt_info)
 
bool is_pattern_stmt_p (stmt_vec_info stmt_info)
 
stmt_vec_info vect_orig_stmt (stmt_vec_info stmt_info)
 
stmt_vec_info get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
 
stmt_vec_info vect_stmt_to_vectorize (stmt_vec_info stmt_info)
 
bool is_loop_header_bb_p (basic_block bb)
 
int vect_pow2 (int x)
 
int builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign)
 
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
 
vector_costs * init_cost (vec_info *vinfo, bool costing_for_scalar)
 
void dump_stmt_cost (FILE *, int, enum vect_cost_for_stmt, stmt_vec_info, slp_tree, tree, int, unsigned, enum vect_cost_model_location)
 
unsigned add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, slp_tree node, tree vectype, int misalign, enum vect_cost_model_location where)
 
unsigned add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind, enum vect_cost_model_location where)
 
unsigned add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
 
void finish_cost (vector_costs *costs, const vector_costs *scalar_costs, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost, unsigned *suggested_unroll_factor=NULL)
 
void add_stmt_costs (vector_costs *costs, stmt_vector_for_cost *cost_vec)
 
void set_dr_misalignment (dr_vec_info *dr_info, int val)
 
int dr_misalignment (dr_vec_info *dr_info, tree vectype, poly_int64 offset=0)
 
const poly_uint64 dr_target_alignment (dr_vec_info *dr_info)
 
void set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
 
bool aligned_access_p (dr_vec_info *dr_info, tree vectype)
 
bool known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
 
unsigned int vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype)
 
innermost_loop_behavior * vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
 
tree get_dr_vinfo_offset (vec_info *vinfo, dr_vec_info *dr_info, bool check_outer=false)
 
enum vect_cost_model loop_cost_model (loop_p loop)
 
bool unlimited_cost_model (loop_p loop)
 
bool vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
 
unsigned int vect_get_num_vectors (poly_uint64 nunits, tree vectype)
 
unsigned int vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
 
void vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
 
void vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
 
unsigned int vect_vf_for_cost (loop_vec_info loop_vinfo)
 
unsigned int vect_nunits_for_cost (tree vec_type)
 
unsigned HOST_WIDE_INT vect_max_vf (loop_vec_info loop_vinfo)
 
unsigned int vect_get_scalar_dr_size (dr_vec_info *dr_info)
 
bool vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
 
void vect_set_loop_condition (class loop *, edge, loop_vec_info, tree, tree, tree, bool)
 
bool slpeel_can_duplicate_loop_p (const class loop *, const_edge, const_edge)
 
class loop * slpeel_tree_duplicate_loop_to_edge_cfg (class loop *, edge, class loop *, edge, edge, edge *, bool=true, vec< basic_block > *=NULL)
 
class loop * vect_loop_versioning (loop_vec_info, gimple *)
 
class loop * vect_do_peeling (loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool, tree *)
 
tree vect_get_main_loop_result (loop_vec_info, tree, tree)
 
void vect_prepare_for_masked_peels (loop_vec_info)
 
dump_user_location_t find_loop_location (class loop *)
 
bool vect_can_advance_ivs_p (loop_vec_info)
 
void vect_update_inits_of_drs (loop_vec_info, tree, tree_code)
 
edge vec_init_loop_exit_info (class loop *)
 
void vect_iv_increment_position (edge, gimple_stmt_iterator *, bool *)
 
tree get_related_vectype_for_scalar_type (machine_mode, tree, poly_uint64=0)
 
tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int=0)
 
tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree)
 
tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int=0)
 
tree get_mask_type_for_scalar_type (vec_info *, tree, slp_tree)
 
tree get_same_sized_vectype (tree, tree)
 
bool vect_chooses_same_modes_p (vec_info *, machine_mode)
 
bool vect_get_loop_mask_type (loop_vec_info)
 
bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, stmt_vec_info *=NULL, gimple **=NULL)
 
bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info *=NULL, gimple **=NULL)
 
bool vect_is_simple_use (vec_info *, stmt_vec_info, slp_tree, unsigned, tree *, slp_tree *, enum vect_def_type *, tree *, stmt_vec_info *=NULL)
 
bool vect_maybe_update_slp_op_vectype (slp_tree, tree)
 
tree perm_mask_for_reverse (tree)
 
bool supportable_widening_operation (vec_info *, code_helper, stmt_vec_info, tree, tree, code_helper *, code_helper *, int *, vec< tree > *)
 
bool supportable_narrowing_operation (code_helper, tree, tree, code_helper *, int *, vec< tree > *)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, tree, int, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, slp_tree, tree, int, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where)
 
void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *)
 
void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *, gimple_stmt_iterator *)
 
opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *)
 
tree vect_get_store_rhs (stmt_vec_info)
 
void vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info, unsigned, tree op, vec< tree > *, tree=NULL)
 
void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned, tree, vec< tree > *, tree=NULL, vec< tree > *=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, vec< tree > *=NULL)
 
void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned, tree, tree, vec< tree > *, tree=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, tree=NULL, vec< tree > *=NULL)
 
tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree, gimple_stmt_iterator *)
 
tree vect_get_slp_vect_def (slp_tree, unsigned)
 
bool vect_transform_stmt (vec_info *, stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance)
 
void vect_remove_stores (vec_info *, stmt_vec_info)
 
bool vect_nop_conversion_p (stmt_vec_info)
 
opt_result vect_analyze_stmt (vec_info *, stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *)
 
void vect_get_load_cost (vec_info *, stmt_vec_info, int, dr_alignment_support, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool)
 
void vect_get_store_cost (vec_info *, stmt_vec_info, int, dr_alignment_support, int, unsigned int *, stmt_vector_for_cost *)
 
bool vect_supportable_shift (vec_info *, enum tree_code, tree)
 
tree vect_gen_perm_mask_any (tree, const vec_perm_indices &)
 
tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &)
 
void optimize_mask_stores (class loop *)
 
tree vect_gen_while (gimple_seq *, tree, tree, tree, const char *=nullptr)
 
tree vect_gen_while_not (gimple_seq *, tree, tree, tree)
 
opt_result vect_get_vector_types_for_stmt (vec_info *, stmt_vec_info, tree *, tree *, unsigned int=0)
 
opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int=0)
 
bool ref_within_array_bound (gimple *, tree)
 
bool vect_can_force_dr_alignment_p (const_tree, poly_uint64)
 
enum dr_alignment_support vect_supportable_dr_alignment (vec_info *, dr_vec_info *, tree, int)
 
tree vect_get_smallest_scalar_type (stmt_vec_info, tree)
 
opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *)
 
bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance)
 
opt_result vect_enhance_data_refs_alignment (loop_vec_info)
 
opt_result vect_analyze_data_refs_alignment (loop_vec_info)
 
bool vect_slp_analyze_instance_alignment (vec_info *, slp_instance)
 
opt_result vect_analyze_data_ref_accesses (vec_info *, vec< int > *)
 
opt_result vect_prune_runtime_alias_test_list (loop_vec_info)
 
bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree, tree, int, internal_fn *, tree *)
 
bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, gather_scatter_info *)
 
opt_result vect_find_stmt_data_reference (loop_p, gimple *, vec< data_reference_p > *, vec< int > *, int)
 
opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *)
 
void vect_record_base_alignments (vec_info *)
 
tree vect_create_data_ref_ptr (vec_info *, stmt_vec_info, tree, class loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree=NULL_TREE)
 
tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree)
 
void vect_copy_ref_info (tree, tree)
 
tree vect_create_destination_var (tree, tree)
 
bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT)
 
internal_fn vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool)
 
bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT)
 
internal_fn vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool)
 
void vect_permute_store_chain (vec_info *, vec< tree > &, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec< tree > *)
 
tree vect_setup_realignment (vec_info *, stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, class loop **)
 
void vect_transform_grouped_load (vec_info *, stmt_vec_info, vec< tree >, int, gimple_stmt_iterator *)
 
void vect_record_grouped_load_vectors (vec_info *, stmt_vec_info, vec< tree >)
 
tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *)
 
tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char *=NULL)
 
tree vect_create_addr_base_for_vector_ref (vec_info *, stmt_vec_info, gimple_seq *, tree)
 
tree neutral_op_for_reduction (tree, code_helper, tree, bool=true)
 
widest_int vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo)
 
bool vect_rgroup_iv_might_wrap_p (loop_vec_info, rgroup_controls *)
 
opt_result vect_determine_partial_vectors_and_peeling (loop_vec_info)
 
bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree, enum tree_code)
 
bool needs_fold_left_reduction_p (tree, code_helper)
 
opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *)
 
tree vect_build_loop_niters (loop_vec_info, bool *=NULL)
 
void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, tree *, bool)
 
tree vect_halve_mask_nunits (tree, machine_mode)
 
tree vect_double_mask_nunits (tree, machine_mode)
 
void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree, tree)
 
tree vect_get_loop_mask (loop_vec_info, gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int)
 
void vect_record_loop_len (loop_vec_info, vec_loop_lens *, unsigned int, tree, unsigned int)
 
tree vect_get_loop_len (loop_vec_info, gimple_stmt_iterator *, vec_loop_lens *, unsigned int, tree, unsigned int, unsigned int)
 
tree vect_gen_loop_len_mask (loop_vec_info, gimple_stmt_iterator *, gimple_stmt_iterator *, vec_loop_lens *, unsigned int, tree, tree, unsigned int, unsigned int)
 
gimple_seq vect_gen_len (tree, tree, tree, tree)
 
stmt_vec_info info_for_reduction (vec_info *, stmt_vec_info)
 
bool reduction_fn_for_scalar_code (code_helper, internal_fn *)
 
class loop * vect_transform_loop (loop_vec_info, gimple *)
 
opt_result vect_analyze_loop_form (class loop *, vect_loop_form_info *)
 
loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *, const vect_loop_form_info *, loop_vec_info=nullptr)
 
bool vectorizable_live_operation (vec_info *, stmt_vec_info, slp_tree, slp_instance, int, bool, stmt_vector_for_cost *)
 
bool vectorizable_reduction (loop_vec_info, stmt_vec_info, slp_tree, slp_instance, stmt_vector_for_cost *)
 
bool vectorizable_induction (loop_vec_info, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vect_transform_reduction (loop_vec_info, stmt_vec_info, gimple_stmt_iterator *, gimple **, slp_tree)
 
bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info, gimple **, slp_tree, slp_instance)
 
bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info, gimple **, slp_tree)
 
bool vectorizable_phi (vec_info *, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vectorizable_recurr (loop_vec_info, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vect_emulated_vector_p (tree)
 
bool vect_can_vectorize_without_simd_p (tree_code)
 
bool vect_can_vectorize_without_simd_p (code_helper)
 
int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *)
 
tree cse_and_gimplify_to_preheader (loop_vec_info, tree)
 
tree vect_peel_nonlinear_iv_init (gimple_seq *, tree, tree, tree, enum vect_induction_op_type)
 
void vect_slp_init (void)
 
void vect_slp_fini (void)
 
void vect_free_slp_instance (slp_instance)
 
bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec< tree > &, gimple_stmt_iterator *, poly_uint64, bool, unsigned *, unsigned *=nullptr, bool=false)
 
bool vect_slp_analyze_operations (vec_info *)
 
void vect_schedule_slp (vec_info *, const vec< slp_instance > &)
 
opt_result vect_analyze_slp (vec_info *, unsigned)
 
bool vect_make_slp_decision (loop_vec_info)
 
void vect_detect_hybrid_slp (loop_vec_info)
 
void vect_optimize_slp (vec_info *)
 
void vect_gather_slp_loads (vec_info *)
 
void vect_get_slp_defs (slp_tree, vec< tree > *)
 
void vect_get_slp_defs (vec_info *, slp_tree, vec< vec< tree > > *, unsigned n=-1U)
 
bool vect_slp_if_converted_bb (basic_block bb, loop_p orig_loop)
 
bool vect_slp_function (function *)
 
stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree)
 
stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree)
 
bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info)
 
bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree, unsigned int *=NULL, tree *=NULL, tree *=NULL)
 
void duplicate_and_interleave (vec_info *, gimple_seq *, tree, const vec< tree > &, unsigned int, vec< tree > &)
 
int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info)
 
slp_tree vect_create_new_slp_node (unsigned, tree_code)
 
void vect_free_slp_tree (slp_tree)
 
bool compatible_calls_p (gcall *, gcall *)
 
int vect_slp_child_index_for_operand (const gimple *, int op, bool)
 
void vect_mark_pattern_stmts (vec_info *, stmt_vec_info, gimple *, tree)
 
bool vect_get_range_info (tree, wide_int *, wide_int *)
 
void vect_pattern_recog (vec_info *)
 
unsigned vectorize_loops (void)
 
void vect_free_loop_info_assumptions (class loop *)
 
gimple * vect_loop_vectorized_call (class loop *, gcond **cond=NULL)
 
bool vect_stmt_dominates_stmt_p (gimple *, gimple *)
 
bool vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
 
bool vect_is_reduction (stmt_vec_info stmt_info)
 
int vect_reduc_type (vec_info *vinfo, stmt_vec_info stmt_info)
 
tree vect_embedded_comparison_type (stmt_vec_info stmt_info)
 
tree vect_comparison_type (stmt_vec_info stmt_info)
 
bool vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
 
bool vect_is_integer_truncation (stmt_vec_info stmt_info)
 
gimple * vect_gimple_build (tree, code_helper, tree, tree=NULL_TREE)
 

Variables

dump_user_location_t vect_location
 
vect_pattern_decl_t slp_patterns []
 
size_t num__slp_patterns
 

Macro Definition Documentation

◆ BB_VINFO_BB

#define BB_VINFO_BB ( B)    (B)->bb

◆ BB_VINFO_DATAREFS

#define BB_VINFO_DATAREFS ( B)    (B)->shared->datarefs

Referenced by vect_slp_region().

◆ BB_VINFO_DDRS

#define BB_VINFO_DDRS ( B)    (B)->shared->ddrs

◆ BB_VINFO_GROUPED_STORES

#define BB_VINFO_GROUPED_STORES ( B)    (B)->grouped_stores

◆ BB_VINFO_SLP_INSTANCES

#define BB_VINFO_SLP_INSTANCES ( B)    (B)->slp_instances

◆ DR_GROUP_FIRST_ELEMENT

◆ DR_GROUP_GAP

◆ DR_GROUP_NEXT_ELEMENT

◆ DR_GROUP_SIZE

◆ DR_GROUP_STORE_COUNT

#define DR_GROUP_STORE_COUNT ( S)     (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)

Referenced by vect_transform_stmt().

◆ DR_MISALIGNMENT_UNINITIALIZED

#define DR_MISALIGNMENT_UNINITIALIZED   (-2)

◆ DR_MISALIGNMENT_UNKNOWN

◆ DR_TARGET_ALIGNMENT

◆ DUMP_VECT_SCOPE

#define DUMP_VECT_SCOPE ( MSG)     AUTO_DUMP_SCOPE (MSG, vect_location)
A macro for calling:
  dump_begin_scope (MSG, vect_location);
via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
and then calling
  dump_end_scope ();
once the object goes out of scope, thus capturing the nesting of
the scopes.

These scopes affect dump messages within them: dump messages at the
top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.   

Referenced by move_early_exit_stmts(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_early_break_dependences(), vect_analyze_loop(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_bb_partition_graph(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_loop_niters(), vect_make_slp_decision(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_pattern_recog(), vect_prune_runtime_alias_test_list(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_transform_loop(), vect_update_inits_of_drs(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_shift(), and vectorizable_simd_clone_call().
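
As a minimal usage sketch (the function name here is hypothetical), this
is how such a scope is typically opened at the top of a vectorizer
routine; the RAII object closes it on every return path:

  static bool
  vect_analyze_something (loop_vec_info loop_vinfo)
  {
    /* Prints "=== vect_analyze_something ===" to the dump file; the
       scope ends automatically when the function returns.  */
    DUMP_VECT_SCOPE ("vect_analyze_something");

    if (dump_enabled_p ())
      /* Nested inside the scope, so this message implicitly defaults
         to MSG_PRIORITY_INTERNALS.  */
      dump_printf_loc (MSG_NOTE, vect_location, "analyzing loop\n");
    return true;
  }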

◆ HYBRID_SLP_STMT

#define HYBRID_SLP_STMT ( S)    ((S)->slp_type == hybrid)

◆ LOOP_REQUIRES_VERSIONING

#define LOOP_REQUIRES_VERSIONING ( L)
Value:
(LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))

Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
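
A hedged sketch of typical use (the surrounding transform code and the
loop_vectorized_call variable are assumptions, not quoted from GCC): the
macro guards the decision to emit a versioned copy of the loop before
vectorizing it.

  /* Version the loop when any versioning condition (alignment, alias,
     niters assumptions, or an OpenMP simd if-condition) holds.
     loop_vectorized_call is a gimple * obtained elsewhere (assumed).  */
  if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
    vect_loop_versioning (loop_vinfo, loop_vectorized_call);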

◆ LOOP_REQUIRES_VERSIONING_FOR_ALIAS

#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS ( L)
Value:
((L)->comp_alias_ddrs.length () > 0 \
|| (L)->check_unequal_addrs.length () > 0 \
|| (L)->lower_bounds.length () > 0)

Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().

◆ LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT ( L)     ((L)->may_misalign_stmts.length () > 0)

◆ LOOP_REQUIRES_VERSIONING_FOR_NITERS

#define LOOP_REQUIRES_VERSIONING_FOR_NITERS ( L)     (LOOP_VINFO_NITERS_ASSUMPTIONS (L))

◆ LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND

#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND ( L)     (LOOP_VINFO_SIMD_IF_COND (L))

Referenced by vect_loop_versioning().

◆ LOOP_VINFO_BBS

◆ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P

◆ LOOP_VINFO_CHECK_NONZERO

#define LOOP_VINFO_CHECK_NONZERO ( L)    (L)->check_nonzero

◆ LOOP_VINFO_CHECK_UNEQUAL_ADDRS

#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS ( L)    (L)->check_unequal_addrs

◆ LOOP_VINFO_COMP_ALIAS_DDRS

#define LOOP_VINFO_COMP_ALIAS_DDRS ( L)    (L)->comp_alias_ddrs

◆ LOOP_VINFO_COST_MODEL_THRESHOLD

◆ LOOP_VINFO_DATAREFS

◆ LOOP_VINFO_DDRS

#define LOOP_VINFO_DDRS ( L)    (L)->shared->ddrs

◆ LOOP_VINFO_EARLY_BREAKS

◆ LOOP_VINFO_EARLY_BREAKS_VECT_PEELED

◆ LOOP_VINFO_EARLY_BRK_DEST_BB

#define LOOP_VINFO_EARLY_BRK_DEST_BB ( L)    (L)->early_break_dest_bb

◆ LOOP_VINFO_EARLY_BRK_STORES

#define LOOP_VINFO_EARLY_BRK_STORES ( L)    (L)->early_break_stores

◆ LOOP_VINFO_EARLY_BRK_VUSES

#define LOOP_VINFO_EARLY_BRK_VUSES ( L)    (L)->early_break_vuses

◆ LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P

#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P ( L)     (L)->epil_using_partial_vectors_p

◆ LOOP_VINFO_EPILOGUE_IV_EXIT

#define LOOP_VINFO_EPILOGUE_IV_EXIT ( L)    (L)->vec_epilogue_loop_iv_exit

Referenced by vect_do_peeling().

◆ LOOP_VINFO_EPILOGUE_P

◆ LOOP_VINFO_FULLY_MASKED_P

◆ LOOP_VINFO_FULLY_WITH_LENGTH_P

◆ LOOP_VINFO_GROUPED_STORES

#define LOOP_VINFO_GROUPED_STORES ( L)    (L)->grouped_stores

◆ LOOP_VINFO_HAS_MASK_STORE

#define LOOP_VINFO_HAS_MASK_STORE ( L)    (L)->has_mask_store

Referenced by vectorizable_store().

◆ LOOP_VINFO_INNER_LOOP_COST_FACTOR

#define LOOP_VINFO_INNER_LOOP_COST_FACTOR ( L)    (L)->inner_loop_cost_factor

◆ LOOP_VINFO_INT_NITERS

◆ LOOP_VINFO_IV_EXIT

◆ LOOP_VINFO_LENS

◆ LOOP_VINFO_LOOP

#define LOOP_VINFO_LOOP ( L)    (L)->loop
Access Functions.   

Referenced by vector_costs::compare_inside_loop_cost(), cse_and_gimplify_to_preheader(), get_group_load_store_type(), get_initial_def_for_reduction(), vec_info::insert_seq_on_entry(), loop_niters_no_overflow(), move_early_exit_stmts(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), stmt_in_inner_loop_p(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_operations(), vect_analyze_possibly_independent_ddr(), vect_analyze_scalar_cycles(), vect_analyze_scalar_cycles_1(), vect_better_loop_vinfo_p(), vect_build_loop_niters(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_compute_single_scalar_iteration_cost(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_dr_behavior(), vect_emit_reduction_init_stmts(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_is_simple_reduction(), vect_iv_limit_for_partial_vectors(), vect_known_niters_smaller_than_vf(), vect_loop_versioning(), vect_mark_for_runtime_alias_test(), vect_mark_stmts_to_be_vectorized(), vect_min_prec_for_max_niters(), vect_model_reduction_cost(), vect_need_peeling_or_partial_vectors_p(), vect_pattern_recog(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_phi_first_order_recurrence_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_schedule_slp_node(), vect_setup_realignment(), vect_stmt_relevant_p(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_reduction(), vect_truncate_gather_scatter_offset(), vect_update_ivs_after_vectorizer(), vect_update_vf_for_slp(), vectorizable_call(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ LOOP_VINFO_LOOP_CONDS

#define LOOP_VINFO_LOOP_CONDS ( L)    (L)->conds

Referenced by vect_create_loop_vinfo().

◆ LOOP_VINFO_LOOP_IV_COND

#define LOOP_VINFO_LOOP_IV_COND ( L)    (L)->loop_iv_cond

◆ LOOP_VINFO_LOOP_NEST

#define LOOP_VINFO_LOOP_NEST ( L)    (L)->shared->loop_nest

◆ LOOP_VINFO_LOWER_BOUNDS

#define LOOP_VINFO_LOWER_BOUNDS ( L)    (L)->lower_bounds

◆ LOOP_VINFO_MASK_SKIP_NITERS

◆ LOOP_VINFO_MASKS

◆ LOOP_VINFO_MAX_VECT_FACTOR

#define LOOP_VINFO_MAX_VECT_FACTOR ( L)    (L)->max_vectorization_factor

◆ LOOP_VINFO_MAY_ALIAS_DDRS

#define LOOP_VINFO_MAY_ALIAS_DDRS ( L)    (L)->may_alias_ddrs

◆ LOOP_VINFO_MAY_MISALIGN_STMTS

#define LOOP_VINFO_MAY_MISALIGN_STMTS ( L)    (L)->may_misalign_stmts

◆ LOOP_VINFO_N_STMTS

#define LOOP_VINFO_N_STMTS ( L)    (L)->shared->n_stmts

Referenced by vect_analyze_loop_2().

◆ LOOP_VINFO_NITERS

◆ LOOP_VINFO_NITERS_ASSUMPTIONS

#define LOOP_VINFO_NITERS_ASSUMPTIONS ( L)    (L)->num_iters_assumptions

◆ LOOP_VINFO_NITERS_KNOWN_P

◆ LOOP_VINFO_NITERS_UNCHANGED

#define LOOP_VINFO_NITERS_UNCHANGED ( L)    (L)->num_iters_unchanged
Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
prologue peeling, this retains the total unchanged scalar loop iteration
count for the cost model.

Referenced by vect_create_loop_vinfo(), vect_transform_loop(), and vectorizable_simd_clone_call().

◆ LOOP_VINFO_NITERSM1

◆ LOOP_VINFO_NO_DATA_DEPENDENCIES

#define LOOP_VINFO_NO_DATA_DEPENDENCIES ( L)    (L)->no_data_dependencies

◆ LOOP_VINFO_ORIG_LOOP_INFO

◆ LOOP_VINFO_ORIG_MAX_VECT_FACTOR

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR ( L)     (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

◆ LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS

◆ LOOP_VINFO_PARTIAL_VECTORS_STYLE

◆ LOOP_VINFO_PEELING_FOR_ALIGNMENT

◆ LOOP_VINFO_PEELING_FOR_GAPS

◆ LOOP_VINFO_PEELING_FOR_NITER

#define LOOP_VINFO_PEELING_FOR_NITER ( L)    (L)->peeling_for_niter

◆ LOOP_VINFO_PTR_MASK

#define LOOP_VINFO_PTR_MASK ( L)    (L)->ptr_mask

◆ LOOP_VINFO_REDUCTION_CHAINS

#define LOOP_VINFO_REDUCTION_CHAINS ( L)    (L)->reduction_chains

◆ LOOP_VINFO_REDUCTIONS

#define LOOP_VINFO_REDUCTIONS ( L)    (L)->reductions

◆ LOOP_VINFO_RGROUP_COMPARE_TYPE

◆ LOOP_VINFO_RGROUP_IV_TYPE

◆ LOOP_VINFO_SCALAR_ITERATION_COST

◆ LOOP_VINFO_SCALAR_IV_EXIT

#define LOOP_VINFO_SCALAR_IV_EXIT ( L)    (L)->scalar_loop_iv_exit

◆ LOOP_VINFO_SCALAR_LOOP

#define LOOP_VINFO_SCALAR_LOOP ( L)    (L)->scalar_loop

◆ LOOP_VINFO_SCALAR_LOOP_SCALING

#define LOOP_VINFO_SCALAR_LOOP_SCALING ( L)    (L)->scalar_loop_scaling

◆ LOOP_VINFO_SIMD_IF_COND

#define LOOP_VINFO_SIMD_IF_COND ( L)    (L)->simd_if_cond

Referenced by vect_analyze_loop_2().

◆ LOOP_VINFO_SLP_INSTANCES

#define LOOP_VINFO_SLP_INSTANCES ( L)    (L)->slp_instances

◆ LOOP_VINFO_SLP_UNROLLING_FACTOR

#define LOOP_VINFO_SLP_UNROLLING_FACTOR ( L)    (L)->slp_unrolling_factor

◆ LOOP_VINFO_UNALIGNED_DR

#define LOOP_VINFO_UNALIGNED_DR ( L)    (L)->unaligned_dr

◆ LOOP_VINFO_USING_DECREMENTING_IV_P

#define LOOP_VINFO_USING_DECREMENTING_IV_P ( L)    (L)->using_decrementing_iv_p

◆ LOOP_VINFO_USING_PARTIAL_VECTORS_P

◆ LOOP_VINFO_USING_SELECT_VL_P

◆ LOOP_VINFO_VECT_FACTOR

#define LOOP_VINFO_VECT_FACTOR ( L)    (L)->vectorization_factor

◆ LOOP_VINFO_VECTORIZABLE_P

#define LOOP_VINFO_VECTORIZABLE_P ( L)    (L)->vectorizable

◆ LOOP_VINFO_VERSIONING_THRESHOLD

#define LOOP_VINFO_VERSIONING_THRESHOLD ( L)    (L)->versioning_threshold

◆ MAX_INTERM_CVT_STEPS

#define MAX_INTERM_CVT_STEPS   3
The maximum number of intermediate steps required in multi-step type
conversion.   

Referenced by supportable_narrowing_operation(), and supportable_widening_operation().

◆ MAX_VECTORIZATION_FACTOR

#define MAX_VECTORIZATION_FACTOR   INT_MAX

◆ PURE_SLP_STMT

◆ REDUC_GROUP_FIRST_ELEMENT

◆ REDUC_GROUP_NEXT_ELEMENT

◆ REDUC_GROUP_SIZE

#define REDUC_GROUP_SIZE ( S)     (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

◆ SET_DR_MISALIGNMENT

◆ SET_DR_TARGET_ALIGNMENT

#define SET_DR_TARGET_ALIGNMENT ( DR, VAL )   set_dr_target_alignment (DR, VAL)

◆ SLP_INSTANCE_KIND

◆ SLP_INSTANCE_LOADS

◆ SLP_INSTANCE_REMAIN_DEFS

#define SLP_INSTANCE_REMAIN_DEFS ( S)    (S)->remain_defs

◆ SLP_INSTANCE_ROOT_STMTS

◆ SLP_INSTANCE_TREE

◆ SLP_INSTANCE_UNROLLING_FACTOR

#define SLP_INSTANCE_UNROLLING_FACTOR ( S)    (S)->unrolling_factor

◆ SLP_TREE_CHILDREN

#define SLP_TREE_CHILDREN ( S)    (S)->children

Referenced by _slp_tree::_slp_tree(), complex_add_pattern::build(), complex_mul_pattern::build(), complex_fms_pattern::build(), addsub_pattern::build(), vect_optimize_slp_pass::build_graph(), vect_optimize_slp_pass::build_vertices(), vect_optimize_slp_pass::change_vec_perm_layout(), compatible_complex_nodes_p(), dot_slp_tree(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), complex_add_pattern::matches(), complex_mul_pattern::matches(), complex_fms_pattern::matches(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_detect_pair_op(), vect_detect_pair_op(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_get_gather_scatter_ops(), vect_get_slp_defs(), vect_get_vec_defs(), vect_is_simple_use(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_match_slp_patterns_2(), vect_print_slp_graph(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_validate_multiplication(), vectorizable_condition(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and _slp_tree::~_slp_tree().

◆ SLP_TREE_CODE

◆ SLP_TREE_DEF_TYPE

◆ SLP_TREE_LANE_PERMUTATION

◆ SLP_TREE_LANES

◆ SLP_TREE_LOAD_PERMUTATION

◆ SLP_TREE_NUMBER_OF_VEC_STMTS

◆ SLP_TREE_REF_COUNT

◆ SLP_TREE_REPRESENTATIVE

◆ SLP_TREE_SCALAR_OPS

◆ SLP_TREE_SCALAR_STMTS

◆ SLP_TREE_SIMD_CLONE_INFO

#define SLP_TREE_SIMD_CLONE_INFO ( S)    (S)->simd_clone_info

◆ SLP_TREE_VEC_DEFS

◆ SLP_TREE_VECTYPE

◆ STMT_SLP_TYPE

◆ STMT_VINFO_DATA_REF

◆ STMT_VINFO_DEF_TYPE

#define STMT_VINFO_DEF_TYPE ( S)    (S)->def_type

Referenced by can_vectorize_live_stmts(), info_for_reduction(), iv_phi_p(), maybe_set_vectorized_backedge_value(), vec_info::new_stmt_vec_info(), parloops_is_simple_reduction(), parloops_valid_reduction_input_p(), process_use(), supportable_widening_operation(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_compute_single_scalar_iteration_cost(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_internal_def(), vect_init_pattern_stmt(), vect_inner_phi_in_double_reduction_p(), vect_is_simple_reduction(), vect_is_simple_use(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_reassociating_reduction_p(), vect_recog_mixed_size_cond_pattern(), vect_recog_over_widening_pattern(), vect_schedule_scc(), vect_stmt_relevant_p(), vect_transform_cycle_phi(), vect_transform_loop(), vect_transform_reduction(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().

◆ STMT_VINFO_DR_BASE_ADDRESS

#define STMT_VINFO_DR_BASE_ADDRESS ( S)    (S)->dr_wrt_vec_loop.base_address

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_BASE_ALIGNMENT

#define STMT_VINFO_DR_BASE_ALIGNMENT ( S)    (S)->dr_wrt_vec_loop.base_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_BASE_MISALIGNMENT

#define STMT_VINFO_DR_BASE_MISALIGNMENT ( S)     (S)->dr_wrt_vec_loop.base_misalignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_INFO

◆ STMT_VINFO_DR_INIT

#define STMT_VINFO_DR_INIT ( S)    (S)->dr_wrt_vec_loop.init

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_OFFSET

#define STMT_VINFO_DR_OFFSET ( S)    (S)->dr_wrt_vec_loop.offset

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_OFFSET_ALIGNMENT

#define STMT_VINFO_DR_OFFSET_ALIGNMENT ( S)     (S)->dr_wrt_vec_loop.offset_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_STEP

#define STMT_VINFO_DR_STEP ( S)    (S)->dr_wrt_vec_loop.step

◆ STMT_VINFO_DR_STEP_ALIGNMENT

#define STMT_VINFO_DR_STEP_ALIGNMENT ( S)     (S)->dr_wrt_vec_loop.step_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_WRT_VEC_LOOP

#define STMT_VINFO_DR_WRT_VEC_LOOP ( S)    (S)->dr_wrt_vec_loop

◆ STMT_VINFO_FORCE_SINGLE_CYCLE

#define STMT_VINFO_FORCE_SINGLE_CYCLE ( S)    (S)->force_single_cycle

◆ STMT_VINFO_GATHER_SCATTER_P

◆ STMT_VINFO_GROUPED_ACCESS

◆ STMT_VINFO_IN_PATTERN_P

◆ STMT_VINFO_LIVE_P

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED

#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED ( S)    (S)->loop_phi_evolution_base_unchanged

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_PART

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE

◆ STMT_VINFO_MEMORY_ACCESS_TYPE

#define STMT_VINFO_MEMORY_ACCESS_TYPE ( S)    (S)->memory_access_type

◆ STMT_VINFO_MIN_NEG_DIST

#define STMT_VINFO_MIN_NEG_DIST ( S)    (S)->min_neg_dist

◆ STMT_VINFO_PATTERN_DEF_SEQ

◆ STMT_VINFO_REDUC_CODE

◆ STMT_VINFO_REDUC_DEF

◆ STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT

#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT ( S)    (S)->reduc_epilogue_adjustment

◆ STMT_VINFO_REDUC_FN

#define STMT_VINFO_REDUC_FN ( S)    (S)->reduc_fn

◆ STMT_VINFO_REDUC_IDX

◆ STMT_VINFO_REDUC_TYPE

◆ STMT_VINFO_REDUC_VECTYPE

#define STMT_VINFO_REDUC_VECTYPE ( S)    (S)->reduc_vectype

◆ STMT_VINFO_REDUC_VECTYPE_IN

#define STMT_VINFO_REDUC_VECTYPE_IN ( S)    (S)->reduc_vectype_in

◆ STMT_VINFO_RELATED_STMT

◆ STMT_VINFO_RELEVANT

◆ STMT_VINFO_RELEVANT_P

◆ STMT_VINFO_SIMD_CLONE_INFO

#define STMT_VINFO_SIMD_CLONE_INFO ( S)    (S)->simd_clone_info

◆ STMT_VINFO_SIMD_LANE_ACCESS_P

◆ STMT_VINFO_SLP_VECT_ONLY

#define STMT_VINFO_SLP_VECT_ONLY ( S)    (S)->slp_vect_only_p

◆ STMT_VINFO_SLP_VECT_ONLY_PATTERN

#define STMT_VINFO_SLP_VECT_ONLY_PATTERN ( S)    (S)->slp_vect_pattern_only_p

◆ STMT_VINFO_STMT

◆ STMT_VINFO_STRIDED_P

◆ STMT_VINFO_TYPE

◆ STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL

#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL ( S)    (S)->induc_cond_initial_val

◆ STMT_VINFO_VEC_STMTS

◆ STMT_VINFO_VECTORIZABLE

◆ STMT_VINFO_VECTYPE

#define STMT_VINFO_VECTYPE ( S)    (S)->vectype

Referenced by append_pattern_def_seq(), addsub_pattern::build(), complex_pattern::build(), bump_vector_ptr(), get_initial_defs_for_reduction(), get_misalign_in_elems(), record_stmt_cost(), stmt_vectype(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_build_one_gather_load_call(), vect_build_slp_instance(), vect_check_gather_scatter(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_create_cond_for_align_checks(), vect_describe_gather_scatter_call(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_find_reusable_accumulator(), vect_gen_prolog_loop_niters(), vect_get_data_access_cost(), vect_get_peeling_costs_all_drs(), vect_get_strided_load_store_ops(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_init_pattern_stmt(), vect_is_simple_use(), vect_model_reduction_cost(), vect_peeling_supportable(), vect_permute_load_chain(), vect_permute_store_chain(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_setup_realignment(), vect_shift_permute_load_chain(), vect_transform_cycle_phi(), vect_transform_grouped_load(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_reduction(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), vect_update_misalignment_for_peel(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ VECT_MAX_COST

#define VECT_MAX_COST   1000

◆ VECT_SCALAR_BOOLEAN_TYPE_P

#define VECT_SCALAR_BOOLEAN_TYPE_P ( TYPE)
Value:
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
 || ((TREE_CODE (TYPE) == INTEGER_TYPE \
      || TREE_CODE (TYPE) == ENUMERAL_TYPE) \
     && TYPE_PRECISION (TYPE) == 1 \
     && TYPE_UNSIGNED (TYPE)))
Nonzero if TYPE represents a (scalar) boolean type or type
in the middle-end compatible with it (unsigned precision 1 integral
types).  Used to determine which types should be vectorized as
VECTOR_BOOLEAN_TYPE_P.   

Referenced by check_bool_pattern(), get_same_sized_vectype(), integer_type_for_mask(), possible_vector_mask_operation_p(), vect_check_scalar_mask(), vect_determine_mask_precision(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_narrowable_type_p(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), vectorizable_comparison_1(), and vectorizable_operation().
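
For illustration only (not from the header): both the standard boolean
type and a 1-bit unsigned integral type satisfy the predicate;
boolean_type_node and build_nonstandard_integer_type are the usual
tree.h facilities.

  tree b  = boolean_type_node;
  tree u1 = build_nonstandard_integer_type (1, /*unsignedp=*/1);
  /* Both are vectorized as VECTOR_BOOLEAN_TYPE_P vectors.  */
  gcc_checking_assert (VECT_SCALAR_BOOLEAN_TYPE_P (b));
  gcc_checking_assert (VECT_SCALAR_BOOLEAN_TYPE_P (u1));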

◆ VECTORIZABLE_CYCLE_DEF

#define VECTORIZABLE_CYCLE_DEF ( D)
Value:
(((D) == vect_reduction_def) \
 || ((D) == vect_double_reduction_def) \
 || ((D) == vect_nested_cycle))

Referenced by info_for_reduction(), maybe_set_vectorized_backedge_value(), vect_compute_single_scalar_iteration_cost(), vect_update_vf_for_slp(), and vectorizable_reduction().

Typedef Documentation

◆ auto_lane_permutation_t

◆ auto_load_permutation_t

◆ bb_vec_info

◆ complex_perm_kinds_t

All possible load permute values that could result from the partial data-flow
analysis.   

◆ dr_p

◆ drs_init_vec

◆ lane_permutation_t

◆ load_permutation_t

◆ loop_vec_info

Info on vectorized loops.                                        

◆ opt_loop_vec_info

Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
value signifies success, and a NULL value signifies failure, supporting
propagating an opt_problem * describing the failure back up the call
stack.   
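
A minimal sketch of the calling convention, using vect_analyze_loop
(declared above); the failure handling shown is an assumption, not a
quote from GCC:

  vec_info_shared shared;
  opt_loop_vec_info loop_vinfo = vect_analyze_loop (loop, &shared);
  if (!loop_vinfo)
    /* NULL signifies failure; the wrapper can carry an opt_problem *
       describing why analysis failed back up the call stack.  */
    return false;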

◆ scalar_cond_masked_set_type

◆ slp_compat_nodes_map_t

◆ slp_instance

SLP instance is a sequence of stmts in a loop that can be packed into
SIMD stmts.   

◆ slp_node_hash

Cache from nodes pair to being compatible or not.   

◆ slp_tree

◆ slp_tree_to_load_perm_map_t

Cache from nodes to the load permutation they represent.   

◆ stmt_vec_info

Vectorizer
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.   

◆ stmt_vector_for_cost

◆ tree_cond_mask_hash

Key and map that records association between vector conditions and
corresponding loop mask, and is populated by prepare_vec_mask.   

◆ vec_base_alignments

Maps base addresses to an innermost_loop_behavior and the stmt it was
derived from that gives the maximum known alignment for that base.   

◆ vec_cond_masked_set_type

◆ vec_loop_lens

◆ vec_object_pair

Describes two objects whose addresses must be unequal for the vectorized
loop to be valid.   

◆ vect_pattern_decl_t

Function pointer to create a new pattern matcher from a generic type.   
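
A hedged sketch of a matcher with this signature (the name and body are
hypothetical; the real matchers are the entries of slp_patterns[]):

  static vect_pattern *
  my_recognize (slp_tree_to_load_perm_map_t *perm_cache,
                slp_compat_nodes_map_t *compat_cache,
                slp_tree *node)
  {
    /* Inspect *node (and the caches) and return a new pattern object
       on a match, or NULL when the node does not match.  */
    return NULL;
  }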

Enumeration Type Documentation

◆ _complex_perm_kinds

All possible load permute values that could result from the partial data-flow
analysis.   
Enumerator
PERM_UNKNOWN 
PERM_EVENODD 
PERM_ODDEVEN 
PERM_ODDODD 
PERM_EVENEVEN 
PERM_TOP 

◆ dr_alignment_support

Define type of available alignment support.   
Enumerator
dr_unaligned_unsupported 
dr_unaligned_supported 
dr_explicit_realign 
dr_explicit_realign_optimized 
dr_aligned 

◆ operation_type

Defines type of operation.   
Enumerator
unary_op 
binary_op 
ternary_op 

◆ slp_instance_kind

The enum describes the type of operations that an SLP instance
can perform.  
Enumerator
slp_inst_kind_store 
slp_inst_kind_reduc_group 
slp_inst_kind_reduc_chain 
slp_inst_kind_bb_reduc 
slp_inst_kind_ctor 

◆ slp_vect_type

The type of vectorization that can be applied to the stmt: regular loop-based
vectorization; pure SLP - the stmt is part of SLP instances and has no
uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
part of an SLP instance and must also be loop-based vectorized, since it
has uses outside SLP sequences.

In the loop context the meanings of pure and hybrid SLP are slightly
different. By saying that pure SLP is applied to the loop, we mean that we
exploit only intra-iteration parallelism in the loop; i.e., the loop can be
vectorized without doing any conceptual unrolling, because we don't pack
together stmts from different iterations, only within a single iteration.
Loop hybrid SLP means that we exploit both intra-iteration and
inter-iteration parallelism (e.g., the number of elements in the vector is 4
and the slp-group-size is 2, in which case we don't have enough parallelism
within an iteration, so we obtain the rest of the parallelism from subsequent
iterations by unrolling the loop by 2).   
Enumerator
loop_vect 
pure_slp 
hybrid 

◆ stmt_vec_info_type

Info on vectorized defs.                                         
Enumerator
undef_vec_info_type 
load_vec_info_type 
store_vec_info_type 
shift_vec_info_type 
op_vec_info_type 
call_vec_info_type 
call_simd_clone_vec_info_type 
assignment_vec_info_type 
condition_vec_info_type 
comparison_vec_info_type 
reduc_vec_info_type 
induc_vec_info_type 
type_promotion_vec_info_type 
type_demotion_vec_info_type 
type_conversion_vec_info_type 
cycle_phi_info_type 
lc_phi_info_type 
phi_info_type 
recurr_info_type 
loop_exit_ctrl_vec_info_type 

◆ vec_load_store_type

Says whether a statement is a load, a store of a vectorized statement
result, or a store of an invariant value.   
Enumerator
VLS_LOAD 
VLS_STORE 
VLS_STORE_INVARIANT 

◆ vect_def_type

Define type of def-use cross-iteration cycle.   
Enumerator
vect_uninitialized_def 
vect_constant_def 
vect_external_def 
vect_internal_def 
vect_induction_def 
vect_reduction_def 
vect_double_reduction_def 
vect_nested_cycle 
vect_first_order_recurrence 
vect_condition_def 
vect_unknown_def_type 

◆ vect_induction_op_type

Defines the operation type of a linear or non-linear induction variable.   
Enumerator
vect_step_op_add 
vect_step_op_neg 
vect_step_op_mul 
vect_step_op_shl 
vect_step_op_shr 

◆ vect_memory_access_type

Describes how we're going to vectorize an individual load or store,
or a group of loads or stores.   
Enumerator
VMAT_INVARIANT 
VMAT_CONTIGUOUS 
VMAT_CONTIGUOUS_DOWN 
VMAT_CONTIGUOUS_PERMUTE 
VMAT_CONTIGUOUS_REVERSE 
VMAT_LOAD_STORE_LANES 
VMAT_ELEMENTWISE 
VMAT_STRIDED_SLP 
VMAT_GATHER_SCATTER 

◆ vect_partial_vector_style

Enumerator
vect_partial_vectors_none 
vect_partial_vectors_while_ult 
vect_partial_vectors_avx512 
vect_partial_vectors_len 

◆ vect_reduction_type

Define type of reduction.   
Enumerator
TREE_CODE_REDUCTION 
COND_REDUCTION 
INTEGER_INDUC_COND_REDUCTION 
CONST_COND_REDUCTION 
EXTRACT_LAST_REDUCTION 
FOLD_LEFT_REDUCTION 

◆ vect_relevant

Indicates whether/how a variable is used in the scope of loop/basic
block.   
Enumerator
vect_unused_in_scope 
vect_used_only_live 
vect_used_in_outer_by_reduction 
vect_used_in_outer 
vect_used_by_reduction 
vect_used_in_scope 

◆ vect_var_kind

Used for naming of new temporaries.   
Enumerator
vect_simple_var 
vect_pointer_var 
vect_scalar_var 
vect_mask_var 

Function Documentation

◆ add_stmt_cost() [1/3]

◆ add_stmt_cost() [2/3]

unsigned add_stmt_cost ( vector_costs * costs,
int count,
enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info,
slp_tree node,
tree vectype,
int misalign,
enum vect_cost_model_location where )
inline

◆ add_stmt_cost() [3/3]

unsigned add_stmt_cost ( vector_costs * costs,
stmt_info_for_cost * i )
inline
Alias targetm.vectorize.add_stmt_cost.   

References add_stmt_cost(), and i.

◆ add_stmt_costs()

◆ aligned_access_p()

bool aligned_access_p ( dr_vec_info * dr_info,
tree vectype )
inline
Return true if data access DR_INFO is aligned to the target's
preferred alignment for VECTYPE (which may be less than a full vector).   

References dr_misalignment().

Referenced by vect_enhance_data_refs_alignment(), and vector_alignment_reachable_p().

◆ builtin_vectorization_cost()

int builtin_vectorization_cost ( enum vect_cost_for_stmt type_of_cost,
tree vectype,
int misalign )
inline
Alias targetm.vectorize.builtin_vectorization_cost.   

References ggc_alloc(), and targetm.

Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), and vect_get_stmt_cost().

◆ bump_vector_ptr()

tree bump_vector_ptr ( vec_info * vinfo,
tree dataref_ptr,
gimple * ptr_incr,
gimple_stmt_iterator * gsi,
stmt_vec_info stmt_info,
tree bump )
extern
Function bump_vector_ptr

  Increment a pointer (to a vector type) by vector-size.  If requested,
  i.e. if PTR_INCR is given, then also connect the new increment stmt
  to the existing def-use update-chain of the pointer, by modifying
  PTR_INCR as illustrated below:

  The pointer def-use update-chain before this function:
                       DATAREF_PTR = phi (p_0, p_2)
                       ....
       PTR_INCR:       p_2 = DATAREF_PTR + step

  The pointer def-use update-chain after this function:
                       DATAREF_PTR = phi (p_0, p_2)
                       ....
                       NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                       ....
       PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

  Input:
  DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                in the loop.
  PTR_INCR - optional. The stmt that updates the pointer in each iteration of
             the loop.  The increment amount across iterations is expected
             to be vector_size.
  GSI - location where the new update stmt is to be placed.
  STMT_INFO - the original scalar memory-access stmt that is being vectorized.
  BUMP - optional. The offset by which to bump the pointer. If not given,
         the offset is assumed to be vector_size.

  Output: Return NEW_DATAREF_PTR as illustrated above.

References build1(), copy_ssa_name(), DR_PTR_INFO, duplicate_ssa_name_ptr_info(), fold_build2, fold_convert, fold_stmt(), follow_all_ssa_edges(), FOR_EACH_SSA_USE_OPERAND, gcc_assert, ggc_alloc(), gimple_build_assign(), gsi_for_stmt(), gsi_stmt(), is_gimple_min_invariant(), make_ssa_name(), mark_ptr_info_alignment_unknown(), operand_equal_p(), ptr_type_node, SET_USE, SSA_NAME_PTR_INFO, SSA_OP_USE, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_SIZE_UNIT, update_stmt(), USE_FROM_PTR, and vect_finish_stmt_generation().

Referenced by vectorizable_load(), and vectorizable_store().
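
A minimal sketch of a typical call (DATAREF_PTR, PTR_INCR, GSI,
STMT_INFO and VECTYPE are hypothetical locals of a load/store
vectorizer):

    /* Bump the pointer by one vector of VECTYPE; passing a NULL_TREE
       BUMP instead defaults to the vector size.  */
    tree bump = TYPE_SIZE_UNIT (vectype);
    dataref_ptr = bump_vector_ptr (vinfo, dataref_ptr, ptr_incr, gsi,
                                   stmt_info, bump);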

◆ can_duplicate_and_interleave_p()

bool can_duplicate_and_interleave_p ( vec_info * vinfo,
unsigned int count,
tree elt_type,
unsigned int * nvectors_out,
tree * vector_type_out,
tree * permutes )
extern
Check whether it is possible to load COUNT elements of type ELT_TYPE
using the method implemented by duplicate_and_interleave.  Return true
if so, returning the number of intermediate vectors in *NVECTORS_OUT
(if nonnull) and the type of each intermediate vector in *VECTOR_TYPE_OUT
(if nonnull).   

References build_nonstandard_integer_type(), can_vec_perm_const_p(), count, GET_MODE_BITSIZE(), GET_MODE_NUNITS(), GET_MODE_SIZE(), GET_MODE_UNIT_SIZE, get_vectype_for_scalar_type(), ggc_alloc(), i, int_mode_for_size(), known_eq, TYPE_MODE, vect_gen_perm_mask_checked(), and VECTOR_MODE_P.

Referenced by duplicate_and_interleave(), vect_build_slp_tree_2(), and vectorizable_reduction().

◆ check_reduction_path()

bool check_reduction_path ( dump_user_location_t loc,
loop_p loop,
gphi * phi,
tree loop_arg,
enum tree_code )
extern
Used in gimple-loop-interchange.c and tree-parloops.cc.   

References check_reduction_path(), ggc_alloc(), and path.

◆ compatible_calls_p()

bool compatible_calls_p ( gcall * call1,
gcall * call2 )
extern

◆ cse_and_gimplify_to_preheader()

tree cse_and_gimplify_to_preheader ( loop_vec_info loop_vinfo,
tree expr )
extern

◆ dr_misalignment()

◆ dr_target_alignment()

const poly_uint64 dr_target_alignment ( dr_vec_info * dr_info)
inline
Only defined once DR_MISALIGNMENT is defined.   

References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.

◆ dump_stmt_cost()

◆ duplicate_and_interleave()

void duplicate_and_interleave ( vec_info * vinfo,
gimple_seq * seq,
tree vector_type,
const vec< tree > & elts,
unsigned int nresults,
vec< tree > & results )
extern
Build a variable-length vector in which the elements in ELTS are repeated
to fill NRESULTS vectors of type VECTOR_TYPE.  Store the vectors in
RESULTS and add any new instructions to SEQ.

The approach we use is:

(1) Find a vector mode VM with integer elements of mode IM.

(2) Replace ELTS[0:NELTS] with ELTS'[0:NELTS'], where each element of
    ELTS' has mode IM.  This involves creating NELTS' VIEW_CONVERT_EXPRs
    from small vectors to IM.

(3) Duplicate each ELTS'[I] into a vector of mode VM.

(4) Use a tree of interleaving VEC_PERM_EXPRs to create VMs with the
    correct byte contents.

(5) Use VIEW_CONVERT_EXPR to cast the final VMs to the required type.

We try to find the largest IM for which this sequence works, in order
to cut down on the number of interleaves.   

References build_vector_type(), can_duplicate_and_interleave_p(), gcc_unreachable, ggc_alloc(), gimple_build(), gimple_build_assign(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_seq_add_stmt(), i, make_ssa_name(), TREE_TYPE, and TYPE_VECTOR_SUBPARTS().

Referenced by get_initial_defs_for_reduction(), and vect_create_constant_vectors().
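
As a concrete illustration (fixed-length 4-element vectors are used
purely for readability; the routine matters most for variable-length
types), with ELTS = { a, b, c } and NRESULTS = 2 the concatenation of
RESULTS repeats ELTS:

    results[0] = { a, b, c, a }
    results[1] = { b, c, a, b }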

◆ find_loop_location()

◆ finish_cost()

void finish_cost ( vector_costs * costs,
const vector_costs * scalar_costs,
unsigned * prologue_cost,
unsigned * body_cost,
unsigned * epilogue_cost,
unsigned * suggested_unroll_factor = NULL )
inline
Alias targetm.vectorize.finish_cost.   

Referenced by vect_bb_vectorization_profitable_p(), and vect_estimate_min_profitable_iters().

◆ get_dr_vinfo_offset()

tree get_dr_vinfo_offset ( vec_info * vinfo,
dr_vec_info * dr_info,
bool check_outer = false )
inline
Return the offset calculated by adding the offset of this DR_INFO to the
corresponding data_reference's offset.  If CHECK_OUTER then use
vect_dr_behavior to select the appropriate data_reference to use.   

References dr_info::dr, fold_build2, fold_convert, ggc_alloc(), data_reference::innermost, offset, innermost_loop_behavior::offset, sizetype, TREE_TYPE, and vect_dr_behavior().

Referenced by check_scan_store(), vect_create_addr_base_for_vector_ref(), vectorizable_load(), and vectorizable_store().

◆ get_later_stmt()

◆ get_mask_type_for_scalar_type() [1/2]

tree get_mask_type_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
slp_tree node )
extern
Function get_mask_type_for_scalar_type.

Returns the mask type corresponding to a result of comparison
of vectors of specified SCALAR_TYPE as supported by target.
NODE, if nonnull, is the SLP tree node that will use the returned
vector type.   

References get_vectype_for_scalar_type(), ggc_alloc(), NULL, and truth_type_for().

◆ get_mask_type_for_scalar_type() [2/2]

tree get_mask_type_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
unsigned int group_size )
extern
Function get_mask_type_for_scalar_type.

Returns the mask type corresponding to a result of comparison
of vectors of specified SCALAR_TYPE as supported by target.
If GROUP_SIZE is nonzero and we're performing BB vectorization,
make sure that the number of elements in the vector is no bigger
than GROUP_SIZE.   

References get_vectype_for_scalar_type(), ggc_alloc(), NULL, and truth_type_for().

Referenced by check_bool_pattern(), vect_check_scalar_mask(), vect_convert_mask_for_vectype(), vect_determine_mask_precision(), vect_get_vector_types_for_stmt(), and vect_recog_mask_conversion_pattern().

◆ get_related_vectype_for_scalar_type()

tree get_related_vectype_for_scalar_type ( machine_mode prevailing_mode,
tree scalar_type,
poly_uint64 nunits )
extern
In tree-vect-stmts.cc.   
If NUNITS is nonzero, return a vector type that contains NUNITS
elements of type SCALAR_TYPE, or null if the target doesn't support
such a type.

If NUNITS is zero, return a vector type that contains elements of
type SCALAR_TYPE, choosing whichever vector size the target prefers.

If PREVAILING_MODE is VOIDmode, we have not yet chosen a vector mode
for this vectorization region and want to "autodetect" the best choice.
Otherwise, PREVAILING_MODE is a previously-chosen vector TYPE_MODE
and we want the new type to be interoperable with it.   PREVAILING_MODE
in this case can be a scalar integer mode or a vector mode; when it
is a vector mode, the function acts like a tree-level version of
related_vector_mode.   

References build_nonstandard_integer_type(), build_qualified_type(), build_vector_type_for_mode(), gcc_assert, GET_MODE_BITSIZE(), GET_MODE_SIZE(), ggc_alloc(), INTEGRAL_MODE_P, INTEGRAL_TYPE_P, is_float_mode(), is_int_mode(), KEEP_QUAL_ADDR_SPACE, known_eq, mode_for_vector(), NULL_TREE, POINTER_TYPE_P, related_vector_mode(), SCALAR_FLOAT_TYPE_P, SCALAR_INT_MODE_P, targetm, TREE_CODE, TYPE_ADDR_SPACE, TYPE_ALIGN_UNIT, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_PRECISION, TYPE_QUALS, TYPE_UNSIGNED, lang_hooks::types, and VECTOR_MODE_P.

Referenced by get_same_sized_vectype(), get_vec_alignment_for_array_type(), get_vectype_for_scalar_type(), vect_create_epilog_for_reduction(), vect_create_partial_epilog(), and vect_find_reusable_accumulator().
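
A minimal sketch of the two NUNITS conventions (integer_type_node
stands in for any scalar type):

    /* Autodetect: let the target pick its preferred number of lanes.  */
    tree v = get_related_vectype_for_scalar_type (VOIDmode,
                                                  integer_type_node, 0);
    /* Exact request: four lanes, or NULL_TREE if unsupported.  */
    tree v4 = get_related_vectype_for_scalar_type (VOIDmode,
                                                   integer_type_node, 4);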

◆ get_same_sized_vectype()

tree get_same_sized_vectype ( tree scalar_type,
tree vector_type )
extern
Function get_same_sized_vectype

Returns a vector type corresponding to SCALAR_TYPE of size
VECTOR_TYPE if supported by the target.   

References GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), ggc_alloc(), NULL_TREE, truth_type_for(), TYPE_MODE, and VECT_SCALAR_BOOLEAN_TYPE_P.

Referenced by vect_create_epilog_for_reduction(), vect_recog_rotate_pattern(), vectorizable_bswap(), vectorizable_conversion(), vectorizable_induction(), and vectorizable_reduction().

◆ get_vectype_for_scalar_type() [1/2]

tree get_vectype_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
slp_tree node )
extern
Return the vector type corresponding to SCALAR_TYPE as supported
by the target.  NODE, if nonnull, is the SLP tree node that will
use the returned vector type.   

References get_vectype_for_scalar_type(), ggc_alloc(), and SLP_TREE_LANES.

◆ get_vectype_for_scalar_type() [2/2]

tree get_vectype_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
unsigned int group_size )
extern
Function get_vectype_for_scalar_type.

Returns the vector type corresponding to SCALAR_TYPE as supported
by the target.  If GROUP_SIZE is nonzero and we're performing BB
vectorization, make sure that the number of elements in the vector
is no bigger than GROUP_SIZE.   

References hash_set< KeyId, Lazy, Traits >::add(), floor_log2(), gcc_assert, get_related_vectype_for_scalar_type(), ggc_alloc(), maybe_ge, vec_info::slp_instances, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vec_info::used_vector_modes, and vec_info::vector_mode.

Referenced by adjust_bool_pattern(), adjust_bool_pattern_cast(), can_duplicate_and_interleave_p(), check_bool_pattern(), get_initial_def_for_reduction(), get_mask_type_for_scalar_type(), get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), vect_add_conversion_to_pattern(), vect_analyze_data_refs(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_convert_input(), vect_determine_mask_precision(), vect_determine_vectorization_factor(), vect_gather_scatter_fn_p(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_is_simple_cond(), vect_phi_first_order_recurrence_p(), vect_recog_abd_pattern(), vect_recog_average_pattern(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_divmod_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_mulhs_pattern(), vect_recog_mult_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_recog_pow_pattern(), vect_recog_rotate_pattern(), vect_recog_sat_add_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), vect_slp_prefer_store_lanes_p(), vect_split_statement(), vect_supportable_direct_optab_p(), vect_supportable_shift(), vect_synth_mult_by_constant(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_conversion(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_simd_clone_call().
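
A minimal sketch of the GROUP_SIZE overload (VINFO and SCALAR_TYPE are
hypothetical locals of a BB-vectorization caller):

    /* Cap the number of lanes at the SLP group size (here 2) so that
       a two-element group does not get a wider vector type than it
       can fill.  */
    tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type, 2);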

◆ info_for_reduction()

◆ init_cost()

vector_costs * init_cost ( vec_info * vinfo,
bool costing_for_scalar )
inline
Alias targetm.vectorize.init_cost.   

References ggc_alloc(), and targetm.

◆ is_loop_header_bb_p()

bool is_loop_header_bb_p ( basic_block bb)
inline

◆ is_pattern_stmt_p()

◆ is_simple_and_all_uses_invariant()

bool is_simple_and_all_uses_invariant ( stmt_vec_info stmt_info,
loop_vec_info loop_vinfo )
extern
Function is_simple_and_all_uses_invariant

Return true if STMT_INFO is simple and all uses of it are invariant.   

References dump_enabled_p(), dump_printf_loc(), FOR_EACH_SSA_TREE_OPERAND, ggc_alloc(), MSG_MISSED_OPTIMIZATION, SSA_OP_USE, vect_constant_def, vect_external_def, vect_is_simple_use(), vect_location, and vect_uninitialized_def.

Referenced by vect_stmt_relevant_p(), and vectorizable_live_operation().

◆ known_alignment_for_access_p()

bool known_alignment_for_access_p ( dr_vec_info * dr_info,
tree vectype )
inline
Return TRUE if the (mis-)alignment of the data access is known with
respect to the target's preferred alignment for VECTYPE, and FALSE
otherwise.   

References dr_misalignment(), and DR_MISALIGNMENT_UNKNOWN.

Referenced by vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), vect_update_misalignment_for_peel(), and vector_alignment_reachable_p().

◆ loop_cost_model()

◆ loop_vec_info_for_loop()

loop_vec_info loop_vec_info_for_loop ( class loop * loop)
inline

◆ needs_fold_left_reduction_p()

bool needs_fold_left_reduction_p ( tree type,
code_helper code )
extern
Return true if we need an in-order reduction for operation CODE
on type TYPE, i.e. if the operation cannot safely be reassociated
(for example floating-point addition when IEEE semantics must be
honored).   

References ggc_alloc(), INTEGRAL_TYPE_P, code_helper::is_tree_code(), operation_no_trapping_overflow(), SAT_FIXED_POINT_TYPE_P, and SCALAR_FLOAT_TYPE_P.

Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_reassociating_reduction_p(), vect_slp_check_for_roots(), and vectorizable_reduction().
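
For example (a sketch): a floating-point sum that must honor IEEE
semantics cannot be reassociated, so it needs an in-order reduction:

    if (needs_fold_left_reduction_p (float_type_node, PLUS_EXPR))
      {
        /* Must use FOLD_LEFT_REDUCTION rather than a tree of
           partial sums.  */
      }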

◆ nested_in_vect_loop_p()

◆ neutral_op_for_reduction()

tree neutral_op_for_reduction ( tree scalar_type,
code_helper code,
tree initial_value,
bool as_initial )
extern
In tree-vect-loop.cc.   
If there is a neutral value X such that a reduction would not be affected
by the introduction of additional X elements, return that X, otherwise
return null.  CODE is the code of the reduction and SCALAR_TYPE is type
of the scalar elements.  If the reduction has just a single initial value
then INITIAL_VALUE is that value, otherwise it is null.
If AS_INITIAL is TRUE the value is supposed to be used as initial value.
In that case no signed zero is returned.   

References build_all_ones_cst(), build_one_cst(), build_real(), build_zero_cst(), dconstm0, ggc_alloc(), HONOR_SIGNED_ZEROS(), code_helper::is_tree_code(), and NULL_TREE.

Referenced by convert_scalar_cond_reduction(), vect_create_epilog_for_reduction(), vect_expand_fold_left(), vect_find_reusable_accumulator(), vect_transform_cycle_phi(), and vectorizable_reduction().
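
Illustrative expectations (a sketch; tree-vect-loop.cc holds the
authoritative mapping):

    /* PLUS_EXPR, MINUS_EXPR, BIT_IOR_EXPR, BIT_XOR_EXPR -> zero
       MULT_EXPR                                         -> one
       BIT_AND_EXPR                                      -> all-ones
       MIN_EXPR, MAX_EXPR                                -> INITIAL_VALUE  */
    tree neutral = neutral_op_for_reduction (integer_type_node, PLUS_EXPR,
                                             NULL_TREE, true);
    /* Here NEUTRAL is build_zero_cst (integer_type_node).  */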

◆ optimize_mask_stores()

void optimize_mask_stores ( class loop * loop)
extern
The code below performs a simple optimization - reverting
  if-conversion for masked stores: if the mask of a store is zero, do
  not perform the store, and if possible also skip the producers of
  the stored values.  For example,
    for (i=0; i<n; i++)
      if (c[i])
       {
         p1[i] += 1;
         p2[i] = p3[i] +2;
       }
  this transformation will produce the following semi-hammock:

  if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
    {
      vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
      vect__12.22_172 = vect__11.19_170 + vect_cst__171;
      MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
      vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
      vect__19.28_184 = vect__18.25_182 + vect_cst__183;
      MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
    }

References add_bb_to_loop(), add_phi_arg(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_empty_bb(), create_phi_node(), dom_info_available_p(), dump_enabled_p(), dump_printf_loc(), EDGE_SUCC, find_loop_location(), flow_loop_nested_p(), FOR_EACH_IMM_USE_FAST, free(), gcc_assert, get_loop_body(), ggc_alloc(), gimple_bb(), gimple_build_cond(), gimple_call_arg(), gimple_call_internal_p(), gimple_get_lhs(), gimple_has_volatile_ops(), gimple_set_vdef(), gimple_vdef(), gimple_vop(), gimple_vuse(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_last_bb(), gsi_move_before(), gsi_next(), gsi_prev(), gsi_remove(), GSI_SAME_STMT, gsi_start_bb(), gsi_stmt(), has_zero_uses(), i, basic_block_def::index, is_gimple_debug(), last, profile_probability::likely(), basic_block_def::loop_father, make_edge(), make_single_succ_edge(), make_ssa_name(), MSG_NOTE, NULL, NULL_TREE, loop::num_nodes, set_immediate_dominator(), split_block(), TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, USE_STMT, vect_location, VECTOR_TYPE_P, and worklist.

◆ perm_mask_for_reverse()

tree perm_mask_for_reverse ( tree vectype)
extern
If the target supports a permute mask that reverses the elements in
a vector of type VECTYPE, return that mask, otherwise return null.   

References can_vec_perm_const_p(), ggc_alloc(), i, NULL_TREE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), and vect_gen_perm_mask_checked().

Referenced by get_negative_load_store_type(), vectorizable_load(), and vectorizable_store().
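
For instance (a sketch), for a 4-lane vector type the returned mask
encodes the selector { 3, 2, 1, 0 }:

    tree mask = perm_mask_for_reverse (vectype);
    if (mask)
      {
        /* Emit VEC_PERM_EXPR <x, x, mask> to reverse the lanes of x.  */
      }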

◆ record_stmt_cost() [1/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
enum vect_cost_model_location )
extern

◆ record_stmt_cost() [2/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
slp_tree node,
tree vectype,
int misalign,
enum vect_cost_model_location )
extern

◆ record_stmt_cost() [3/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
stmt_vec_info stmt_info,
tree vectype,
int misalign,
enum vect_cost_model_location )
extern

References count, ggc_alloc(), NULL, and record_stmt_cost().

Referenced by record_stmt_cost().

◆ record_stmt_cost() [4/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info,
int misalign,
enum vect_cost_model_location where )
inline
Overload of record_stmt_cost with VECTYPE derived from STMT_INFO.   

References count, ggc_alloc(), record_stmt_cost(), and STMT_VINFO_VECTYPE.

◆ reduction_fn_for_scalar_code()

bool reduction_fn_for_scalar_code ( code_helper code,
internal_fn * reduc_fn )
extern
Function reduction_fn_for_scalar_code

Input:
CODE - tree_code of a reduction operation.

Output:
REDUC_FN - the corresponding internal function to be used to reduce the
   vector of partial results into a single scalar result, or IFN_LAST
   if the operation is a supported reduction operation, but does not have
   such an internal function.

Return FALSE if CODE currently cannot be vectorized as a reduction.   

References ggc_alloc(), IFN_LAST, and code_helper::is_tree_code().

Referenced by vect_slp_check_for_roots(), vectorizable_bb_reduc_epilogue(), vectorizable_reduction(), and vectorize_slp_instance_root_stmt().
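
Illustrative mapping (a sketch): PLUS_EXPR typically yields
IFN_REDUC_PLUS and MAX_EXPR yields IFN_REDUC_MAX:

    internal_fn reduc_fn;
    if (reduction_fn_for_scalar_code (PLUS_EXPR, &reduc_fn)
        && reduc_fn != IFN_LAST)
      {
        /* REDUC_FN is IFN_REDUC_PLUS; the epilogue can reduce the
           partial-result vector with a single call.  */
      }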

◆ ref_within_array_bound()

bool ref_within_array_bound ( gimple * stmt,
tree ref )
extern
In tree-if-conv.cc.   
Return TRUE if REF is a within-bound array reference.   

References for_each_index(), gcc_assert, idx_within_array_bound(), loop_containing_stmt(), and NULL.

Referenced by ifcvt_memrefs_wont_trap(), and vect_analyze_early_break_dependences().

◆ set_dr_misalignment()

void set_dr_misalignment ( dr_vec_info * dr_info,
int val )
inline

◆ set_dr_target_alignment()

void set_dr_target_alignment ( dr_vec_info * dr_info,
poly_uint64 val )
inline

◆ slpeel_can_duplicate_loop_p()

bool slpeel_can_duplicate_loop_p ( const class loop * loop,
const_edge exit_e,
const_edge e )
extern
This function verifies that the following restrictions apply to LOOP:
 (1) it consists of exactly 2 basic blocks - a header and an empty latch -
     for an innermost loop, and of 5 basic blocks for an outer loop.
 (2) it is single entry, single exit
 (3) its exit condition is the last stmt in the header
 (4) E is the entry/exit edge of LOOP.

References can_copy_bbs_p(), empty_block_p(), free(), get_loop_body_with_size(), get_loop_exit_condition(), ggc_alloc(), gsi_last_bb(), gsi_stmt(), loop::latch, loop_outer(), loop_preheader_edge(), and loop::num_nodes.

Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().

◆ slpeel_tree_duplicate_loop_to_edge_cfg()

class loop * slpeel_tree_duplicate_loop_to_edge_cfg ( class loop * loop,
edge loop_exit,
class loop * scalar_loop,
edge scalar_exit,
edge e,
edge * new_e,
bool flow_loops,
vec< basic_block > * updated_doms )
Given LOOP this function generates a new copy of it and puts it
on E which is either the entry or exit of LOOP.  If SCALAR_LOOP is
non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
entry or exit of LOOP.  If FLOW_LOOPS then connect LOOP to SCALAR_LOOP as a
continuation.  This is correct for cases where one loop continues from the
other like in the vectorizer, but not true for uses in e.g. loop distribution
where the contents of the loop body are split but the iteration space of both
copies remains the same.

If UPDATED_DOMS is not NULL it is updated with the list of basic blocks whose
dominators were updated during the peeling.  When doing early break vectorization
then LOOP_VINFO needs to be provided and is used to keep track of any newly created
memory references that need to be updated should we decide to vectorize.   

References add_phi_arg(), add_phi_args_after_copy(), adjust_debug_stmts(), adjust_phi_and_debug_stmts(), CDI_DOMINATORS, checking_verify_dominators(), copy_bbs(), copy_ssa_name(), create_phi_node(), delete_basic_block(), duplicate_loop(), duplicate_subloops(), EDGE_COUNT, EDGE_PRED, first_dom_son(), flow_bb_inside_loop_p(), flush_pending_stmts(), FOR_EACH_EDGE, free(), gcc_assert, get_all_dominated_blocks(), get_bb_copy(), get_immediate_dominator(), get_live_virtual_operand_on_edge(), get_loop_body_with_size(), get_loop_copy(), get_loop_exit_edges(), get_virtual_phi(), ggc_alloc(), gimple_phi_arg_def_from_edge(), gimple_phi_num_args(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_next(), gsi_start_phis(), gsi_stmt(), loop::header, i, loop::inner, iterate_fix_dominators(), loop::latch, loop_latch_edge(), loop_outer(), loop_preheader_edge(), MAY_HAVE_DEBUG_BIND_STMTS, next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_ARG_DEF_FROM_EDGE, PHI_ARG_DEF_PTR_FROM_EDGE, PHI_RESULT, queue, redirect_edge_and_branch(), redirect_edge_and_branch_force(), redirect_edge_pred(), redirect_edge_var_map_clear(), remove_phi_node(), rename_use_op(), rename_variables_in_bb(), set_immediate_dominator(), SET_PHI_ARG_DEF, SET_PHI_ARG_DEF_ON_EDGE, single_pred(), single_pred_edge(), single_succ_edge(), single_succ_p(), split_edge(), TREE_CODE, true, UNKNOWN_LOCATION, and virtual_operand_p().

Referenced by copy_loop_before(), and vect_do_peeling().

◆ supportable_narrowing_operation()

bool supportable_narrowing_operation ( code_helper code,
tree vectype_out,
tree vectype_in,
code_helper * code1,
int * multi_step_cvt,
vec< tree > * interm_types )
extern
Function supportable_narrowing_operation

Check whether an operation represented by the code CODE is a
narrowing operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE_IN
and producing a result of type VECTYPE_OUT).

Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
and FLOAT.  This function checks if these operations are supported by
the target platform directly via vector tree-codes.

Output:
- CODE1 is the code of a vector operation to be used when
vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like int->short->char - in that case
MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the
narrowing operation (short in the above example).    

References CASE_CONVERT, gcc_unreachable, ggc_alloc(), i, insn_data, code_helper::is_tree_code(), known_eq, MAX_INTERM_CVT_STEPS, insn_operand_data::mode, insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), SCALAR_INT_MODE_P, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_double_mask_nunits(), and VECTOR_BOOLEAN_TYPE_P.

Referenced by simple_integer_narrowing(), and vectorizable_conversion().
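
A hedged sketch of querying an int -> char truncation (VECTYPE_CHAR
and VECTYPE_INT are hypothetical vector types):

    code_helper code1;
    int multi_step_cvt = 0;
    auto_vec<tree> interm_types;
    if (supportable_narrowing_operation (NOP_EXPR, vectype_char,
                                         vectype_int, &code1,
                                         &multi_step_cvt, &interm_types))
      {
        /* Expect VEC_PACK_TRUNC_EXPR steps, with MULTI_STEP_CVT == 1
           and the intermediate short vector type in INTERM_TYPES.  */
      }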

◆ supportable_widening_operation()

bool supportable_widening_operation ( vec_info * vinfo,
code_helper code,
stmt_vec_info stmt_info,
tree vectype_out,
tree vectype_in,
code_helper * code1,
code_helper * code2,
int * multi_step_cvt,
vec< tree > * interm_types )
extern
Function supportable_widening_operation

Check whether an operation represented by the code CODE is a
widening operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE_IN
producing a result of type VECTYPE_OUT).

Widening operations we currently support are NOP (CONVERT), FLOAT,
FIX_TRUNC and WIDEN_MULT.  This function checks if these operations
are supported by the target platform either directly (via vector
tree-codes), or via target builtins.

Output:
- CODE1 and CODE2 are codes of vector operations to be used when
vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like char->short->int - in that case
MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the
widening operation (short in the above example).   

References as_combined_fn(), as_internal_fn(), build_vector_type_for_mode(), CASE_CONVERT, CONVERT_EXPR_CODE_P, direct_internal_fn_optab(), gcc_unreachable, GET_MODE_INNER, ggc_alloc(), gimple_assign_lhs(), i, insn_data, code_helper::is_tree_code(), known_eq, lookup_evenodd_internal_fn(), lookup_hilo_internal_fn(), LOOP_VINFO_LOOP, MAX_INTERM_CVT_STEPS, MAX_TREE_CODES, insn_operand_data::mode, nested_in_vect_loop_p(), NULL, insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), code_helper::safe_as_tree_code(), SCALAR_INT_MODE_P, STMT_VINFO_DEF_TYPE, STMT_VINFO_RELEVANT, supportable_widening_operation(), lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_halve_mask_nunits(), vect_reduction_def, vect_used_by_reduction, VECTOR_BOOLEAN_TYPE_P, VECTOR_MODE_P, and widening_fn_p().

Referenced by supportable_widening_operation(), vect_recog_abd_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), and vectorizable_conversion().
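
A hedged sketch of the reverse direction, widening char -> int
(VECTYPE_INT, VECTYPE_CHAR and STMT_INFO are hypothetical locals):

    code_helper code1, code2;
    int multi_step_cvt = 0;
    auto_vec<tree> interm_types;
    if (supportable_widening_operation (vinfo, NOP_EXPR, stmt_info,
                                        vectype_int, vectype_char,
                                        &code1, &code2, &multi_step_cvt,
                                        &interm_types))
      {
        /* CODE1/CODE2 are typically the VEC_UNPACK_LO_EXPR /
           VEC_UNPACK_HI_EXPR pair.  */
      }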

◆ unlimited_cost_model()

◆ vec_init_loop_exit_info()

◆ vect_analyze_data_ref_accesses()

◆ vect_analyze_data_ref_dependences()

opt_result vect_analyze_data_ref_dependences ( loop_vec_info loop_vinfo,
unsigned int * max_vf )
extern
Function vect_analyze_data_ref_dependences.

Examine all the data references in the loop, and make sure there do not
exist any data dependences between them.  Set *MAX_VF according to
the maximum vectorization factor the data dependences allow.   

References compute_all_dependences(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, gcc_assert, ggc_alloc(), i, LOOP_VINFO_DATAREFS, LOOP_VINFO_DDRS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP_NEST, LOOP_VINFO_NO_DATA_DEPENDENCIES, LOOP_VINFO_ORIG_MAX_VECT_FACTOR, opt_result::success(), vect_analyze_data_ref_dependence(), and vect_analyze_early_break_dependences().

Referenced by vect_analyze_loop_2().

◆ vect_analyze_data_refs()

opt_result vect_analyze_data_refs ( vec_info * vinfo,
poly_uint64 * min_vf,
bool * fatal )
extern
Function vect_analyze_data_refs.

 Find all the data references in the loop or basic block.

  The general structure of the analysis of data refs in the vectorizer is as
  follows:
  1- vect_analyze_data_refs(loop/bb): call
     compute_data_dependences_for_loop/bb to find and analyze all data-refs
     in the loop/bb and their dependences.
  2- vect_analyze_dependences(): apply dependence testing using ddrs.
  3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
  4- vect_analyze_drs_access(): check that ref_stmt.step is ok.

References data_reference::aux, build_fold_indirect_ref, vec_info_shared::datarefs, DECL_NONALIASED, dr_analyze_innermost(), DR_BASE_ADDRESS, DR_INIT, DR_IS_READ, DR_IS_WRITE, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, opt_result::failure_at(), fatal(), fold_build2, fold_build_pointer_plus, FOR_EACH_VEC_ELT, gcc_assert, get_base_address(), get_vectype_for_scalar_type(), ggc_alloc(), i, vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, offset, vec_info::shared, data_reference::stmt, STMT_VINFO_DR_BASE_ADDRESS, STMT_VINFO_DR_BASE_ALIGNMENT, STMT_VINFO_DR_BASE_MISALIGNMENT, STMT_VINFO_DR_INIT, STMT_VINFO_DR_OFFSET, STMT_VINFO_DR_OFFSET_ALIGNMENT, STMT_VINFO_DR_STEP, STMT_VINFO_DR_STEP_ALIGNMENT, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), TDF_DETAILS, TREE_CODE, TREE_THIS_VOLATILE, TREE_TYPE, TYPE_VECTOR_SUBPARTS(), unshare_expr(), VAR_P, vect_check_gather_scatter(), and vect_location.

Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().

◆ vect_analyze_data_refs_alignment()

opt_result vect_analyze_data_refs_alignment ( loop_vec_info loop_vinfo)
extern
Function vect_analyze_data_refs_alignment

Analyze the alignment of the data-references in the loop.
Return FALSE if a data reference is found that cannot be vectorized.   

References DR_GROUP_FIRST_ELEMENT, DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), vect_compute_data_ref_alignment(), and vect_record_base_alignments().

Referenced by vect_analyze_loop_2().

◆ vect_analyze_loop()

◆ vect_analyze_loop_form()

◆ vect_analyze_slp()

◆ vect_analyze_stmt()

opt_result vect_analyze_stmt ( vec_info * vinfo,
stmt_vec_info stmt_info,
bool * need_to_vectorize,
slp_tree node,
slp_instance node_instance,
stmt_vector_for_cost * cost_vec )
extern

◆ vect_apply_runtime_profitability_check_p()

bool vect_apply_runtime_profitability_check_p ( loop_vec_info loop_vinfo)
inline
Return true if LOOP_VINFO requires a runtime check for whether the
vector loop is profitable.   

References LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_NITERS_KNOWN_P, and vect_vf_for_cost().

Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_loop_versioning(), and vect_transform_loop().
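
Roughly (a sketch of the predicate, per the references above):

    /* A runtime guard is needed only when the iteration count is not
       known at compile time and the cost-model threshold is at least
       the vectorization factor used for costing.  */
    return (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
            && LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo)
               >= vect_vf_for_cost (loop_vinfo));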

◆ vect_build_loop_niters()

tree vect_build_loop_niters ( loop_vec_info loop_vinfo,
bool * new_var_p )
extern
This function builds ni_name = number of iterations.  Statements
are emitted on the loop preheader edge.  If NEW_VAR_P is not NULL, set
it to TRUE if a new ssa_var was generated.   

References create_tmp_var, force_gimple_operand(), ggc_alloc(), gsi_insert_seq_on_edge_immediate(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, NULL, TREE_CODE, TREE_TYPE, and unshare_expr().

Referenced by vect_do_peeling(), and vect_transform_loop().

◆ vect_can_advance_ivs_p()

bool vect_can_advance_ivs_p ( loop_vec_info loop_vinfo)
extern
Function vect_can_advance_ivs_p

In case the number of iterations that LOOP iterates is unknown at compile
time, an epilog loop will be generated, and the loop induction variables
(IVs) will be "advanced" to the value they are supposed to take just before
the epilog loop.  Here we check that the access function of the loop IVs
and the expression that represents the loop bound are simple enough.
These restrictions will be relaxed in the future.   

References dump_enabled_p(), dump_printf(), dump_printf_loc(), expr_invariant_in_loop_p(), ggc_alloc(), gsi_end_p(), gsi_next(), gsi_start_phis(), loop::header, iv_phi_p(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL_TREE, gphi_iterator::phi(), STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, tree_is_chrec(), vect_can_peel_nonlinear_iv_p(), vect_location, and vect_step_op_add.

Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().

◆ vect_can_force_dr_alignment_p()

bool vect_can_force_dr_alignment_p ( const_tree decl,
poly_uint64 alignment )
extern
In tree-vect-data-refs.cc.   
Function vect_can_force_dr_alignment_p.

Returns whether DECL can be forced to be aligned on an ALIGNMENT-bit
boundary.   

References decl_in_symtab_p(), symtab_node::get(), ggc_alloc(), known_le, MAX_OFILE_ALIGNMENT, MAX_STACK_ALIGNMENT, TREE_STATIC, and VAR_P.

Referenced by increase_alignment(), and vect_compute_data_ref_alignment().

◆ vect_can_vectorize_without_simd_p() [1/2]

bool vect_can_vectorize_without_simd_p ( code_helper code)
extern
Likewise, but taking a code_helper.   

References code_helper::is_tree_code(), and vect_can_vectorize_without_simd_p().

◆ vect_can_vectorize_without_simd_p() [2/2]

bool vect_can_vectorize_without_simd_p ( tree_code code)
extern
Return true if we can emulate CODE on an integer mode representation
of a vector.   

References ggc_alloc().

Referenced by vect_can_vectorize_without_simd_p(), vectorizable_operation(), and vectorizable_reduction().

◆ vect_check_gather_scatter()

bool vect_check_gather_scatter ( stmt_vec_info stmt_info,
loop_vec_info loop_vinfo,
gather_scatter_info * info )
extern
Return true if a non-affine read or write in STMT_INFO is suitable for a
gather load or scatter store.  Describe the operation in *INFO if so.   

References gather_scatter_info::base, build_fold_addr_expr, CASE_CONVERT, gather_scatter_info::decl, do_add(), DR_IS_READ, DR_REF, gather_scatter_info::element_type, expr_invariant_in_loop_p(), extract_ops_from_tree(), fold_convert, get_gimple_rhs_class(), get_inner_reference(), ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs_code(), gimple_call_internal_fn(), gimple_call_internal_p(), GIMPLE_TERNARY_RHS, gather_scatter_info::ifn, IFN_LAST, integer_zerop(), INTEGRAL_TYPE_P, internal_gather_scatter_fn_p(), is_gimple_assign(), LOOP_VINFO_LOOP, may_be_nonaddressable_p(), mem_ref_offset(), gather_scatter_info::memory_type, NULL_TREE, gather_scatter_info::offset, gather_scatter_info::offset_dt, gather_scatter_info::offset_vectype, operand_equal_p(), POINTER_TYPE_P, gather_scatter_info::scale, signed_char_type_node, size_binop, size_int, size_zero_node, sizetype, SSA_NAME_DEF_STMT, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, STRIP_NOPS, supports_vec_gather_load_p(), supports_vec_scatter_store_p(), targetm, TREE_CODE, tree_fits_shwi_p(), TREE_OPERAND, tree_to_shwi(), TREE_TYPE, TYPE_MODE, TYPE_PRECISION, TYPE_SIZE, unsigned_char_type_node, vect_describe_gather_scatter_call(), vect_gather_scatter_fn_p(), vect_unknown_def_type, and wide_int_to_tree().

Referenced by get_load_store_type(), vect_analyze_data_refs(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), and vect_use_strided_gather_scatters_p().

◆ vect_chooses_same_modes_p()

bool vect_chooses_same_modes_p ( vec_info * vinfo,
machine_mode vector_mode )
extern
Return true if replacing LOOP_VINFO->vector_mode with VECTOR_MODE
would not change the chosen vector modes.   

References hash_set< KeyId, Lazy, Traits >::begin(), hash_set< KeyId, Lazy, Traits >::end(), GET_MODE_INNER, i, related_vector_mode(), vec_info::used_vector_modes, and VECTOR_MODE_P.

Referenced by vect_analyze_loop_1(), and vect_slp_region().

◆ vect_comparison_type()

tree vect_comparison_type ( stmt_vec_info stmt_info)
inline
If STMT_INFO is a comparison or contains an embedded comparison, return the
scalar type of the values being compared.  Return null otherwise.   

References ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tcc_comparison, TREE_CODE_CLASS, TREE_TYPE, and vect_embedded_comparison_type().

◆ vect_copy_ref_info()

void vect_copy_ref_info ( tree dest,
tree src )
extern
Copy memory reference info such as base/clique from the SRC reference
to the DEST MEM_REF.   

References ggc_alloc(), handled_component_p(), MR_DEPENDENCE_BASE, MR_DEPENDENCE_CLIQUE, TREE_CODE, and TREE_OPERAND.

Referenced by vect_setup_realignment(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().

◆ vect_create_addr_base_for_vector_ref()

tree vect_create_addr_base_for_vector_ref ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple_seq * new_stmt_list,
tree offset )
extern
Function vect_create_addr_base_for_vector_ref.

Create an expression that computes the address of the first memory location
that will be accessed for a data reference.

Input:
STMT_INFO: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional.  If supplied, it is added to the initial address.
LOOP:    Specify relative to which loop-nest should the address be computed.
         For example, when the dataref is in an inner-loop nested in an
         outer-loop that is now being vectorized, LOOP can be either the
         outer-loop, or the inner-loop.  The first memory location accessed
         by the following dataref ('in' points to short):

        for (i=0; i<N; i++)
           for (j=0; j<M; j++)
             s += in[i+j]

         is as follows:
         if LOOP=i_loop:        &in             (relative to i_loop)
         if LOOP=j_loop:        &in+i*2B        (relative to j_loop)

Output:
1. Return an SSA_NAME whose value is the address of the memory location of
   the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert
   these statement(s) which define the returned SSA_NAME.

FORNOW: We are only handling array accesses with step 1.   

References build1(), build_pointer_type(), dr_info::dr, DR_PTR_INFO, DR_REF, dump_enabled_p(), dump_printf_loc(), fold_build2, fold_build_pointer_plus, fold_convert, force_gimple_operand(), gcc_assert, get_dr_vinfo_offset(), get_name(), ggc_alloc(), gimple_seq_add_seq(), MSG_NOTE, NULL, offset, size_binop, sizetype, SSA_NAME_PTR_INFO, SSA_NAME_VAR, ssize_int, STMT_VINFO_DR_INFO, strip_zero_offset_components(), TREE_CODE, TREE_TYPE, unshare_expr(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.

Referenced by get_misalign_in_elems(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), and vect_setup_realignment().

◆ vect_create_data_ref_ptr()

tree vect_create_data_ref_ptr ( vec_info * vinfo,
stmt_vec_info stmt_info,
tree aggr_type,
class loop * at_loop,
tree offset,
tree * initial_address,
gimple_stmt_iterator * gsi,
gimple ** ptr_incr,
bool only_init,
tree iv_step )
extern
Function vect_create_data_ref_ptr.

Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
location accessed in the loop by STMT_INFO, along with the def-use update
chain to appropriately advance the pointer through the loop iterations.
Also set aliasing information for the pointer.  This pointer is used by
the callers to this function to create a memory reference expression for
vector load/store access.

Input:
1. STMT_INFO: a stmt that references memory. Expected to be of the form
      GIMPLE_ASSIGN <name, data-ref> or
      GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector
     or an array.
3. AT_LOOP: the loop where the vector memref is to be created.
4. OFFSET (optional): a byte offset to be added to the initial address
     accessed by the data-ref in STMT_INFO.
5. GSI: location where the new stmts are to be placed if there is no loop
6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
     pointing to the initial address.
7. IV_STEP (optional, defaults to NULL): the amount that should be added
     to the IV during each iteration of the loop.  NULL says to move
     by one copy of AGGR_TYPE up or down, depending on the step of the
     data reference.

Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
   data reference (the initial address accessed by the data reference).
   For example, for vector of type V8HI, the following code is generated:

   v8hi *ap;
   ap = (v8hi *)initial_address;

   if OFFSET is not supplied:
      initial_address = &a[init];
   if OFFSET is supplied:
      initial_address = &a[init] + OFFSET;

   Return the initial_address in INITIAL_ADDRESS.

2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
   update the pointer in each iteration of the loop.

   Return the increment stmt that updates the pointer in PTR_INCR.

3. Return the pointer.   

References alias_sets_conflict_p(), build_pointer_type_for_mode(), create_iv(), dr_info::dr, DR_BASE_ADDRESS, DR_BASE_OBJECT, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, DR_GROUP_SIZE, DR_PTR_INFO, DR_REF, DR_STEP, dump_enabled_p(), dump_printf(), dump_printf_loc(), fold_build1, fold_convert, gcc_assert, gcc_unreachable, get_alias_set(), get_name(), get_tree_code_name(), ggc_alloc(), gimple_bb(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, offset, standard_iv_increment_position(), innermost_loop_behavior::step, STMT_VINFO_DATA_REF, STMT_VINFO_DR_INFO, TREE_CODE, tree_int_cst_sgn(), TREE_TYPE, TYPE_SIZE_UNIT, vect_create_addr_base_for_vector_ref(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.

Referenced by vect_setup_realignment(), vectorizable_load(), and vectorizable_store().

◆ vect_create_destination_var()

◆ vect_create_loop_vinfo()

◆ vect_create_new_slp_node()

◆ vect_detect_hybrid_slp()

◆ vect_determine_partial_vectors_and_peeling()

opt_result vect_determine_partial_vectors_and_peeling ( loop_vec_info loop_vinfo)
extern
Used in tree-vect-loop-manip.cc  
Determine if operating on full vectors for LOOP_VINFO might leave
 some scalar iterations still to do.  If so, decide how we should
 handle those scalar iterations.  The possibilities are:

 (1) Make LOOP_VINFO operate on partial vectors instead of full vectors.
     In this case:

       LOOP_VINFO_USING_PARTIAL_VECTORS_P == true
       LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
       LOOP_VINFO_PEELING_FOR_NITER == false

 (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop
     to handle the remaining scalar iterations.  In this case:

       LOOP_VINFO_USING_PARTIAL_VECTORS_P == false
       LOOP_VINFO_PEELING_FOR_NITER == true

     There are two choices:

     (2a) Consider vectorizing the epilogue loop at the same VF as the
          main loop, but using partial vectors instead of full vectors.
          In this case:

            LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true

     (2b) Consider vectorizing the epilogue loop at lower VFs only.
          In this case:

            LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false

References dump_enabled_p(), dump_printf_loc(), ggc_alloc(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_USING_SELECT_VL_P, MSG_NOTE, opt_result::success(), _loop_vec_info::suggested_unroll_factor, vect_known_niters_smaller_than_vf(), vect_location, and vect_need_peeling_or_partial_vectors_p().

Referenced by vect_analyze_loop_2(), and vect_do_peeling().

◆ vect_do_peeling()

class loop * vect_do_peeling ( loop_vec_info loop_vinfo,
tree niters,
tree nitersm1,
tree * niters_vector,
tree * step_vector,
tree * niters_vector_mult_vf_var,
int th,
bool check_profitability,
bool niters_no_overflow,
tree * advance )
extern
Function vect_do_peeling.

Input:
- LOOP_VINFO: Represent a loop to be vectorized, which looks like:

    preheader:
  LOOP:
    header_bb:
      loop_body
      if (exit_loop_cond) goto exit_bb
      else                goto header_bb
    exit_bb:

- NITERS: The number of iterations of the loop.
- NITERSM1: The number of iterations of the loop's latch.
- NITERS_NO_OVERFLOW: No overflow in computing NITERS.
- TH, CHECK_PROFITABILITY: Threshold of niters to vectorize loop if
                      CHECK_PROFITABILITY is true.
Output:
- *NITERS_VECTOR and *STEP_VECTOR describe how the main loop should
  iterate after vectorization; see vect_set_loop_condition for details.
- *NITERS_VECTOR_MULT_VF_VAR is either null or an SSA name that
  should be set to the number of scalar iterations handled by the
  vector loop.  The SSA name is only used on exit from the loop.

This function peels prolog and epilog from the loop, adds guards skipping
PROLOG and EPILOG for various conditions.  As a result, the changed CFG
would look like:

    guard_bb_1:
      if (prefer_scalar_loop) goto merge_bb_1
      else                    goto guard_bb_2

    guard_bb_2:
      if (skip_prolog) goto merge_bb_2
      else             goto prolog_preheader

    prolog_preheader:
  PROLOG:
    prolog_header_bb:
      prolog_body
      if (exit_prolog_cond) goto prolog_exit_bb
      else                  goto prolog_header_bb
    prolog_exit_bb:

    merge_bb_2:

    vector_preheader:
  VECTOR LOOP:
    vector_header_bb:
      vector_body
      if (exit_vector_cond) goto vector_exit_bb
      else                  goto vector_header_bb
    vector_exit_bb:

    guard_bb_3:
      if (skip_epilog) goto merge_bb_3
      else             goto epilog_preheader

    merge_bb_1:

    epilog_preheader:
  EPILOG:
    epilog_header_bb:
      epilog_body
      if (exit_epilog_cond) goto merge_bb_3
      else                  goto epilog_header_bb

    merge_bb_3:

Note this function peels prolog and epilog only if it's necessary,
as well as guards.
This function returns the epilogue loop if a decision was made to vectorize
it, otherwise NULL.

The analysis resulting in this epilogue loop's loop_vec_info was performed
in the same vect_analyze_loop call as the main loop's.  At that time
vect_analyze_loop constructs a list of accepted loop_vec_info's for lower
vectorization factors than the main loop.  This list is stored in the main
loop's loop_vec_info in the 'epilogue_vinfos' member.  Every time we decide to
vectorize the epilogue loop for a lower vectorization factor,  the
loop_vec_info sitting at the top of the epilogue_vinfos list is removed,
updated and linked to the epilogue loop.  This is later used to vectorize
the epilogue.  The reason the loop_vec_info needs updating is that it was
constructed based on the original main loop, and the epilogue loop is a
copy of this loop, so all links pointing to statements in the original loop
need updating.  Furthermore, these loop_vec_infos share the
data_references' records, which will also need to be updated.

TODO: Guard for prefer_scalar_loop should be emitted along with
versioning conditions if loop versioning is needed.   

References add_phi_arg(), adjust_vec, adjust_vec_debug_stmts(), advance(), profile_probability::always(), profile_probability::apply_scale(), boolean_type_node, build_int_cst(), build_one_cst(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_phi_node(), DEF_FROM_PTR, delete_update_ssa(), DR_TARGET_ALIGNMENT, EDGE_PRED, _loop_vec_info::epilogue_vinfos, first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_build2, FOR_EACH_IMM_USE_STMT, FOR_EACH_SSA_DEF_OPERAND, free(), free_original_copy_tables(), gcc_assert, gcc_checking_assert, get_bb_original(), get_immediate_dominator(), get_loop_body(), get_loop_copy(), get_loop_exit_edges(), ggc_alloc(), gimple_bb(), gimple_build_assign(), gimple_build_nop(), gimple_debug_bind_p(), gimple_debug_bind_reset_value(), gimple_phi_arg_def_from_edge(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), profile_probability::guessed_always(), i, initialize_original_copy_tables(), integer_onep(), poly_int< N, C >::is_constant(), iterate_fix_dominators(), LOOP_C_INFINITE, loop_constraint_clear(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING, LOOP_VINFO_BBS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_EPILOGUE_IV_EXIT, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERSM1, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_UNALIGNED_DR, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, make_ssa_name(), MAY_HAVE_DEBUG_BIND_STMTS, need_ssa_update_p(), next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_RESULT, queue, record_niter_bound(), reset_original_copy_tables(), scale_bbs_frequencies(), scale_loop_profile(), scev_reset(), set_immediate_dominator(), set_range_info(), single_pred_edge(), single_pred_p(), single_succ_edge(), slpeel_add_loop_guard(), slpeel_can_duplicate_loop_p(), slpeel_tree_duplicate_loop_to_edge_cfg(), slpeel_update_phi_nodes_for_guard1(), split_edge(), SSA_NAME_DEF_STMT, SSA_OP_DEF, poly_int< N, C >::to_constant(), wi::to_wide(), TREE_CODE, TREE_TYPE, TYPE_MAX_VALUE, ui, UNKNOWN_LOCATION, update_stmt(), vect_build_loop_niters(), vect_can_advance_ivs_p(), vect_determine_partial_vectors_and_peeling(), vect_gen_prolog_loop_niters(), vect_gen_scalar_loop_niters(), vect_gen_vector_loop_niters(), vect_gen_vector_loop_niters_mult_vf(), vect_set_loop_condition(), vect_update_inits_of_drs(), vect_update_ivs_after_vectorizer(), vect_use_loop_mask_for_alignment_p(), vect_vf_for_cost(), and virtual_operand_p().

Referenced by vect_transform_loop().

◆ vect_double_mask_nunits()

tree vect_double_mask_nunits ( tree old_type,
machine_mode new_mode )
extern
Return a mask type with twice as many elements as OLD_TYPE,
given that it should have mode NEW_MODE.   

References build_truth_vector_type_for_mode(), ggc_alloc(), new_mode(), and TYPE_VECTOR_SUBPARTS().

Referenced by supportable_narrowing_operation().

◆ vect_dr_behavior()

innermost_loop_behavior * vect_dr_behavior ( vec_info * vinfo,
dr_vec_info * dr_info )
inline

◆ vect_embedded_comparison_type()

tree vect_embedded_comparison_type ( stmt_vec_info stmt_info)
inline
If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
scalar type of the values being compared.  Return null otherwise.   

References COMPARISON_CLASS_P, ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs_code(), NULL_TREE, TREE_OPERAND, and TREE_TYPE.

Referenced by vect_comparison_type().

◆ vect_emulated_vector_p()

bool vect_emulated_vector_p ( tree vectype)
extern
Return true if VECTYPE represents a vector that requires lowering
by the vector lowering pass.   

References TREE_TYPE, TYPE_MODE, TYPE_PRECISION, VECTOR_BOOLEAN_TYPE_P, and VECTOR_MODE_P.

Referenced by vectorizable_call(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_shift().

◆ vect_enhance_data_refs_alignment()

opt_result vect_enhance_data_refs_alignment ( loop_vec_info loop_vinfo)
extern
Function vect_enhance_data_refs_alignment

This pass will use loop versioning and loop peeling in order to enhance
the alignment of data references in the loop.

FOR NOW: we assume that whatever versioning/peeling takes place, only the
original loop is to be vectorized.  Any other loops that are created by
the transformations performed in this pass are not supposed to be
vectorized.  This restriction will be relaxed.

This pass will require a cost model to guide it whether to apply peeling
or versioning or a combination of the two.  For example, the scheme that
Intel uses when given a loop with several memory accesses is as follows:
choose one memory access ('p') whose alignment you want to force by
peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
other accesses are not necessarily aligned, or (2) use loop versioning to
generate one loop in which all accesses are aligned, and another loop in
which only 'p' is necessarily aligned.

("Automatic Intra-Register Vectorization for the Intel Architecture",
Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

Devising a cost model is the most critical aspect of this work.  It will
guide us on which access to peel for, whether to use loop versioning, how
many versions to create, etc.  The cost model will probably consist of
generic considerations as well as target specific considerations (on
powerpc for example, misaligned stores are more painful than misaligned
loads).

Here are the general steps involved in alignment enhancements:

  -- original loop, before alignment analysis:
     for (i=0; i<N; i++){
       x = q[i];                        # DR_MISALIGNMENT(q) = unknown
       p[i] = y;                        # DR_MISALIGNMENT(p) = unknown
     }

  -- After vect_compute_data_refs_alignment:
     for (i=0; i<N; i++){
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = unknown
     }

  -- Possibility 1: we do loop versioning:
  if (p is aligned) {
     for (i=0; i<N; i++){       # loop 1A
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = 0
     }
  }
  else {
     for (i=0; i<N; i++){       # loop 1B
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = unaligned
     }
  }

  -- Possibility 2: we do loop peeling:
  for (i = 0; i < 3; i++){      # (scalar loop, not to be vectorized).
     x = q[i];
     p[i] = y;
  }
  for (i = 3; i < N; i++){      # loop 2A
     x = q[i];                  # DR_MISALIGNMENT(q) = 0
     p[i] = y;                  # DR_MISALIGNMENT(p) = unknown
  }

  -- Possibility 3: combination of loop peeling and versioning:
  for (i = 0; i < 3; i++){      # (scalar loop, not to be vectorized).
     x = q[i];
     p[i] = y;
  }
  if (p is aligned) {
     for (i = 3; i<N; i++){     # loop 3A
       x = q[i];                        # DR_MISALIGNMENT(q) = 0
       p[i] = y;                        # DR_MISALIGNMENT(p) = 0
     }
  }
  else {
     for (i = 3; i<N; i++){     # loop 3B
       x = q[i];                        # DR_MISALIGNMENT(q) = 0
       p[i] = y;                        # DR_MISALIGNMENT(p) = unaligned
     }
  }

  These loops are later passed to loop_transform to be vectorized.  The
  vectorizer will use the alignment information to guide the transformation
  (whether to generate regular loads/stores, or with special handling for
  misalignment).   

References aligned_access_p(), dr_align_group_sort_cmp(), DR_BASE_ADDRESS, DR_GROUP_SIZE, DR_IS_WRITE, dr_misalignment(), DR_MISALIGNMENT_UNKNOWN, DR_OFFSET, DR_STEP, DR_STEP_ALIGNMENT, DR_TARGET_ALIGNMENT, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, flow_loop_nested_p(), FOR_EACH_VEC_ELT, gcc_assert, GET_MODE_SIZE(), ggc_alloc(), i, loop::inner, INT_MAX, is_empty(), known_alignment_for_access_p(), known_le, vec_info::lookup_dr(), loop_cost_model(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_VINFO_DATAREFS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_MAY_MISALIGN_STMTS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PTR_MASK, LOOP_VINFO_SCALAR_ITERATION_COST, LOOP_VINFO_UNALIGNED_DR, LOOP_VINFO_VECT_FACTOR, MAX, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, operand_equal_p(), optimize_loop_nest_for_speed_p(), outermost_invariant_loop_for_expr(), SET_DR_MISALIGNMENT, size_zero_node, slpeel_can_duplicate_loop_p(), STMT_SLP_TYPE, STMT_VINFO_DR_INFO, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTYPE, opt_result::success(), target_align(), tree_int_cst_compare(), TREE_INT_CST_LOW, TREE_TYPE, TYPE_MODE, TYPE_SIZE_UNIT, TYPE_VECTOR_SUBPARTS(), unlimited_cost_model(), vect_can_advance_ivs_p(), VECT_COST_MODEL_CHEAP, vect_dr_aligned_if_related_peeled_dr_is(), vect_dr_misalign_for_aligned_access(), vect_get_known_peeling_cost(), vect_get_peeling_costs_all_drs(), vect_get_scalar_dr_size(), vect_location, vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_peeling_supportable(), vect_relevant_for_alignment_p(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), vect_vf_for_cost(), and vector_alignment_reachable_p().

Referenced by vect_analyze_loop_2().

◆ vect_find_first_scalar_stmt_in_slp()

stmt_vec_info vect_find_first_scalar_stmt_in_slp ( slp_tree node)
extern

◆ vect_find_last_scalar_stmt_in_slp()

◆ vect_find_stmt_data_reference()

◆ vect_finish_replace_stmt()

void vect_finish_replace_stmt ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple * vec_stmt )
extern
Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT,
which sets the same scalar result as STMT_INFO did.  Create and return a
stmt_vec_info for VEC_STMT.   

References gcc_assert, ggc_alloc(), gimple_get_lhs(), gsi_for_stmt(), gsi_replace(), scalar_stmt, vect_finish_stmt_generation_1(), and vect_orig_stmt().

Referenced by vectorizable_condition(), and vectorize_fold_left_reduction().

◆ vect_finish_stmt_generation()

void vect_finish_stmt_generation ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple * vec_stmt,
gimple_stmt_iterator * gsi )
extern
Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it
before *GSI.  Create and return a stmt_vec_info for VEC_STMT.   

References copy_ssa_name(), ECF_CONST, ECF_NOVOPS, ECF_PURE, gcc_assert, ggc_alloc(), gimple_assign_lhs(), gimple_call_flags(), gimple_call_lhs(), gimple_has_mem_ops(), gimple_set_modified(), gimple_set_vdef(), gimple_set_vuse(), gimple_vdef(), gimple_vuse(), gimple_vuse_op(), gsi_end_p(), gsi_insert_before(), GSI_SAME_STMT, gsi_stmt(), is_gimple_assign(), is_gimple_call(), is_gimple_reg(), SET_USE, TREE_CODE, and vect_finish_stmt_generation_1().

Referenced by bump_vector_ptr(), permute_vec_elements(), read_vector_array(), vect_add_slp_permutation(), vect_build_one_gather_load_call(), vect_build_one_scatter_store_call(), vect_clobber_variable(), vect_create_half_widening_stmts(), vect_create_vectorized_demotion_stmts(), vect_emulate_mixed_dot_prod(), vect_gen_widened_results_half(), vect_init_vector_1(), vect_permute_load_chain(), vect_permute_store_chain(), vect_shift_permute_load_chain(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_load(), vectorizable_operation(), vectorizable_recurr(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), vectorize_fold_left_reduction(), and write_vector_array().

◆ vect_free_loop_info_assumptions()

void vect_free_loop_info_assumptions ( class loop * loop)

◆ vect_free_slp_instance()

◆ vect_free_slp_tree()

◆ vect_gather_scatter_fn_p()

bool vect_gather_scatter_fn_p ( vec_info * vinfo,
bool read_p,
bool masked_p,
tree vectype,
tree memory_type,
tree offset_type,
int scale,
internal_fn * ifn_out,
tree * offset_vectype_out )
extern
Check whether we can use an internal function for a gather load
or scatter store.  READ_P is true for loads and false for stores.
MASKED_P is true if the load or store is conditional.  MEMORY_TYPE is
the type of the memory elements being loaded or stored.  OFFSET_TYPE
is the type of the offset that is being applied to the invariant
base address.  SCALE is the amount by which the offset should
be multiplied *after* it has been converted to address width.

Return true if the function is supported, storing the function id in
*IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT.   

References build_nonstandard_integer_type(), get_vectype_for_scalar_type(), ggc_alloc(), internal_gather_scatter_fn_supported_p(), POINTER_SIZE, tree_to_uhwi(), TYPE_PRECISION, TYPE_SIZE, TYPE_UNSIGNED, and vector_element_bits().

Referenced by vect_check_gather_scatter(), and vect_truncate_gather_scatter_offset().

◆ vect_gather_slp_loads()

void vect_gather_slp_loads ( vec_info * vinfo)
extern
Gather loads reachable from the individual SLP graph entries.   

References FOR_EACH_VEC_ELT, i, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, vec_info::slp_instances, vect_gather_slp_loads(), and visited.

◆ vect_gen_len()

gimple_seq vect_gen_len ( tree len,
tree start_index,
tree end_index,
tree len_limit )
extern
Generate and return statement sequence that sets vector length LEN that is:

min_of_start_and_end = min (START_INDEX, END_INDEX);
left_len = END_INDEX - min_of_start_and_end;
rhs = min (left_len, LEN_LIMIT);
LEN = rhs;

Note: the cost of the code generated by this function is modeled
by vect_estimate_min_profitable_iters, so changes here may need
corresponding changes there.   
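
A minimal self-contained C++ model of the value this sequence computes
(an illustration of the semantics only, not the GIMPLE that is emitted):

   #include <algorithm>
   #include <cassert>

   /* Scalar model of the sequence above: the remaining length is
      END_INDEX minus min (START_INDEX, END_INDEX), clamped to
      LEN_LIMIT, so the result is 0 once START_INDEX reaches
      END_INDEX.  */
   unsigned long
   model_vect_gen_len (unsigned long start, unsigned long end,
                       unsigned long limit)
   {
     unsigned long left_len = end - std::min (start, end);
     return std::min (left_len, limit);
   }

   int
   main ()
   {
     assert (model_vect_gen_len (0, 10, 4) == 4);   /* full vector left */
     assert (model_vect_gen_len (8, 10, 4) == 2);   /* 2-element tail */
     assert (model_vect_gen_len (10, 10, 4) == 0);  /* loop finished */
   }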

References gcc_assert, ggc_alloc(), gimple_build(), gimple_build_assign(), gimple_seq_add_stmt(), NULL, and TREE_TYPE.

Referenced by vect_set_loop_controls_directly().

◆ vect_gen_loop_len_mask()

tree vect_gen_loop_len_mask ( loop_vec_info loop_vinfo,
gimple_stmt_iterator * gsi,
gimple_stmt_iterator * cond_gsi,
vec_loop_lens * lens,
unsigned int nvectors,
tree vectype,
tree stmt,
unsigned int index,
unsigned int factor )
extern
Generate the tree for the loop len mask and return it.  Given LENS,
NVECTORS, VECTYPE, INDEX and FACTOR, generate the len mask as below:

  tree len_mask = VCOND_MASK_LEN (compare_mask, ones, zero, len, bias)
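
A hedged scalar model of the resulting mask; the per-lane rule below is
an assumption inferred from the ones/zero operands above, namely that
lane I keeps the compare mask when I is below the active length
LEN + BIAS and becomes false otherwise:

   #include <vector>

   /* Assumed VCOND_MASK_LEN semantics for this particular use.  */
   std::vector<bool>
   model_loop_len_mask (const std::vector<bool> &compare_mask,
                        long len, long bias)
   {
     std::vector<bool> result (compare_mask.size ());
     for (unsigned i = 0; i < compare_mask.size (); ++i)
       result[i] = (long) i < len + bias && compare_mask[i];
     return result;
   }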

References build_all_ones_cst(), build_int_cst(), build_zero_cst(), ggc_alloc(), gimple_build_call_internal(), gimple_call_set_lhs(), gsi_insert_before(), GSI_SAME_STMT, intQI_type_node, LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS, make_temp_ssa_name(), NULL, TREE_TYPE, and vect_get_loop_len().

Referenced by vectorizable_early_exit().

◆ vect_gen_perm_mask_any()

tree vect_gen_perm_mask_any ( tree vectype,
const vec_perm_indices & sel )
extern
Given a vector type VECTYPE, turns permutation SEL into the equivalent
VECTOR_CST mask.  No checks are made that the target platform supports the
mask, so callers may wish to test can_vec_perm_const_p separately, or use
vect_gen_perm_mask_checked.   

References build_vector_type(), gcc_assert, ggc_alloc(), known_eq, ssizetype, TYPE_VECTOR_SUBPARTS(), and vec_perm_indices_to_tree().

Referenced by vect_create_epilog_for_reduction(), vect_create_nonlinear_iv_init(), vect_gen_perm_mask_checked(), and vectorizable_scan_store().

◆ vect_gen_perm_mask_checked()

◆ vect_gen_vector_loop_niters()

void vect_gen_vector_loop_niters ( loop_vec_info loop_vinfo,
tree niters,
tree * niters_vector_ptr,
tree * step_vector_ptr,
bool niters_no_overflow )
extern
NITERS is the number of times that the original scalar loop executes
after peeling.  Work out the maximum number of iterations N that can
be handled by the vectorized form of the loop and then either:

a) set *STEP_VECTOR_PTR to the vectorization factor and generate:

     niters_vector = N

b) set *STEP_VECTOR_PTR to one and generate:

     niters_vector = N / vf

In both cases, store niters_vector in *NITERS_VECTOR_PTR and add
any new statements on the loop preheader edge.  NITERS_NO_OVERFLOW
is true if NITERS doesn't overflow (i.e. if NITERS is always nonzero).   
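
An illustrative check of the two representations (assumed example
numbers; the real code operates on trees, not integers).  Both drive the
same vectorized loop: case (a) counts scalar iterations in steps of VF,
case (b) counts vector iterations in steps of one:

   #include <cassert>

   int
   main ()
   {
     unsigned long niters = 10, vf = 4;
     /* (a) step == VF: the latch runs ceil (NITERS / VF) times, the
        last iteration possibly operating on partial vectors.  */
     unsigned long iters_a = (niters + vf - 1) / vf;
     /* (b) step == 1: the body runs NITERS / VF full vector
        iterations; the NITERS % VF remainder goes to an epilogue.  */
     unsigned long iters_b = niters / vf;
     assert (iters_a == 3 && iters_b == 2);
   }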

References build_int_cst(), build_one_cst(), create_tmp_var, exact_log2(), fold_build2, force_gimple_operand(), ggc_alloc(), gsi_insert_seq_on_edge_immediate(), poly_int< N, C >::is_constant(), is_gimple_val(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, wi::max_value(), NULL, NULL_TREE, wi::one(), wi::rshift(), set_range_info(), TREE_TYPE, TYPE_PRECISION, and TYPE_SIGN.

Referenced by vect_do_peeling(), and vect_transform_loop().

◆ vect_gen_while()

tree vect_gen_while ( gimple_seq * seq,
tree mask_type,
tree start_index,
tree end_index,
const char * name )
extern
Generate and return a vector mask of MASK_TYPE such that
mask[I] is true iff J + START_INDEX < END_INDEX for all J <= I.
Add the statements to SEQ.   
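
A scalar model of this mask (the WHILE_ULT-style condition stated
above): lane I is true exactly when START_INDEX + I < END_INDEX, so the
mask is a prefix of true lanes followed by false lanes.
vect_gen_while_not simply inverts each lane of this mask.

   #include <vector>

   std::vector<bool>
   model_while_mask (unsigned long start, unsigned long end,
                     unsigned lanes)
   {
     std::vector<bool> mask (lanes);
     for (unsigned i = 0; i < lanes; ++i)
       mask[i] = start + i < end;
     return mask;  /* start=6, end=8, lanes=4 -> {1, 1, 0, 0} */
   }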

References build_zero_cst(), direct_internal_fn_supported_p(), gcc_checking_assert, ggc_alloc(), gimple_build_call_internal(), gimple_call_set_lhs(), gimple_seq_add_stmt(), make_ssa_name(), make_temp_ssa_name(), NULL, OPTIMIZE_FOR_SPEED, and TREE_TYPE.

Referenced by vect_gen_while_not(), and vect_set_loop_controls_directly().

◆ vect_gen_while_not()

tree vect_gen_while_not ( gimple_seq * seq,
tree mask_type,
tree start_index,
tree end_index )
extern
Generate a vector mask of type MASK_TYPE for which index I is false iff
J + START_INDEX < END_INDEX for all J <= I.  Add the statements to SEQ.   

References ggc_alloc(), gimple_build(), and vect_gen_while().

Referenced by vect_set_loop_controls_directly().

◆ vect_get_known_peeling_cost()

int vect_get_known_peeling_cost ( loop_vec_info loop_vinfo,
int peel_iters_prologue,
int * peel_iters_epilogue,
stmt_vector_for_cost * scalar_cost_vec,
stmt_vector_for_cost * prologue_cost_vec,
stmt_vector_for_cost * epilogue_cost_vec )
extern

◆ vect_get_load_cost()

void vect_get_load_cost ( vec_info * ,
stmt_vec_info stmt_info,
int ncopies,
dr_alignment_support alignment_support_scheme,
int misalignment,
bool add_realign_cost,
unsigned int * inside_cost,
unsigned int * prologue_cost,
stmt_vector_for_cost * prologue_cost_vec,
stmt_vector_for_cost * body_cost_vec,
bool record_prologue_costs )
extern

◆ vect_get_loop_len()

tree vect_get_loop_len ( loop_vec_info loop_vinfo,
gimple_stmt_iterator * gsi,
vec_loop_lens * lens,
unsigned int nvectors,
tree vectype,
unsigned int index,
unsigned int factor )
extern
Given a complete set of lengths LENS, extract length number INDEX
for an rgroup that operates on NVECTORS vectors of type VECTYPE,
where 0 <= INDEX < NVECTORS.  Return a value that contains FACTOR
multiplied by the number of elements that should be processed.
Insert any set-up statements before GSI.   

References build_int_cst(), gcc_assert, ggc_alloc(), gimple_build(), gimple_build_nop(), gsi_insert_seq_before(), GSI_SAME_STMT, i, LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS, LOOP_VINFO_RGROUP_COMPARE_TYPE, LOOP_VINFO_RGROUP_IV_TYPE, make_temp_ssa_name(), NULL, NULL_TREE, SSA_NAME_DEF_STMT, and TYPE_VECTOR_SUBPARTS().

Referenced by vect_gen_loop_len_mask(), vect_get_loop_variant_data_ptr_increment(), vect_get_strided_load_store_ops(), vectorizable_call(), vectorizable_condition(), vectorizable_induction(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ vect_get_loop_mask()

◆ vect_get_loop_mask_type()

bool vect_get_loop_mask_type ( loop_vec_info )
extern

◆ vect_get_main_loop_result()

tree vect_get_main_loop_result ( loop_vec_info loop_vinfo,
tree main_loop_value,
tree skip_value )
extern
LOOP_VINFO is an epilogue loop whose corresponding main loop can be skipped.
Return a value that equals:

- MAIN_LOOP_VALUE when LOOP_VINFO is entered from the main loop and
- SKIP_VALUE when the main loop is skipped.   

References add_phi_arg(), create_phi_node(), gcc_assert, ggc_alloc(), _loop_vec_info::main_loop_edge, make_ssa_name(), _loop_vec_info::skip_main_loop_edge, TREE_TYPE, and UNKNOWN_LOCATION.

Referenced by vect_transform_cycle_phi().

◆ vect_get_mask_type_for_stmt()

opt_tree vect_get_mask_type_for_stmt ( stmt_vec_info ,
unsigned int = 0 )
extern

◆ vect_get_new_ssa_name()

◆ vect_get_new_vect_var()

tree vect_get_new_vect_var ( tree type,
enum vect_var_kind var_kind,
const char * name )
extern
Function vect_get_new_vect_var.

Returns a name for a new variable.  The current naming scheme uses the
prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) for
vectorizer generated variables, and appends NAME to it if provided.

References create_tmp_reg(), free(), gcc_unreachable, ggc_alloc(), NULL, vect_mask_var, vect_pointer_var, vect_scalar_var, and vect_simple_var.

Referenced by permute_vec_elements(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_create_destination_var(), vectorizable_induction(), vectorizable_nonlinear_induction(), and vectorizable_recurr().

◆ vect_get_num_copies()

◆ vect_get_num_vectors()

unsigned int vect_get_num_vectors ( poly_uint64 nunits,
tree vectype )
inline
Return the number of vectors of type VECTYPE that are needed to get
NUNITS elements.  NUNITS should be based on the vectorization factor,
so it is always a known multiple of the number of elements in VECTYPE.   
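
A worked example of the division performed here (assumed numbers): with
a vectorization factor of 16 elements and V4SI vectors of 4 elements,
four vectors are needed; the caller guarantees exact divisibility.

   constexpr unsigned
   model_num_vectors (unsigned nunits, unsigned subparts)
   {
     return nunits / subparts;
   }
   static_assert (model_num_vectors (16, 4) == 4, "VF 16 over V4SI");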

References ggc_alloc(), and TYPE_VECTOR_SUBPARTS().

Referenced by vect_get_num_copies(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), and vect_transform_cycle_phi().

◆ vect_get_place_in_interleaving_chain()

int vect_get_place_in_interleaving_chain ( stmt_vec_info stmt_info,
stmt_vec_info first_stmt_info )
extern
Find the place of the data-ref in STMT_INFO in the interleaving chain
that starts from FIRST_STMT_INFO.  Return -1 if the data-ref is not a part
of the chain.   

References DR_GROUP_FIRST_ELEMENT, DR_GROUP_GAP, DR_GROUP_NEXT_ELEMENT, and ggc_alloc().

Referenced by vect_build_slp_tree_2(), and vectorizable_load().

◆ vect_get_range_info()

bool vect_get_range_info ( tree var,
wide_int * min_value,
wide_int * max_value )
extern
Return true if we have a useful VR_RANGE range for VAR, storing it
in *MIN_VALUE and *MAX_VALUE if so.  Note the range in the dump files.   

References cfun, dump_enabled_p(), dump_generic_expr_loc(), dump_hex(), dump_printf(), get_legacy_range(), get_nonzero_bits(), get_range_query(), ggc_alloc(), intersect_range_with_nonzero_bits(), MSG_NOTE, path_range_query::range_of_expr(), irange::set_varying(), TDF_SLIM, wi::to_wide(), TREE_TYPE, TYPE_SIGN, vrange::undefined_p(), vect_location, and VR_RANGE.

Referenced by vect_determine_precisions_from_range(), and vectorizable_conversion().

◆ vect_get_scalar_dr_size()

unsigned int vect_get_scalar_dr_size ( dr_vec_info * dr_info)
inline
Return the size of the value accessed by unvectorized data reference
DR_INFO.  This is only valid once STMT_VINFO_VECTYPE has been calculated
for the associated gimple statement, since that guarantees that DR_INFO
accesses either a scalar or a scalar equivalent.  ("Scalar equivalent"
here includes things like V1SI, which can be vectorized in the same way
as a plain SI.)   

References dr_info::dr, DR_REF, tree_to_uhwi(), TREE_TYPE, and TYPE_SIZE_UNIT.

Referenced by get_group_load_store_type(), vect_enhance_data_refs_alignment(), vect_small_gap_p(), vect_truncate_gather_scatter_offset(), vectorizable_load(), and vectorizable_with_step_bound_p().

◆ vect_get_slp_defs() [1/2]

◆ vect_get_slp_defs() [2/2]

void vect_get_slp_defs ( vec_info * ,
slp_tree slp_node,
vec< vec< tree > > * vec_oprnds,
unsigned n )
extern
Get N vectorized definitions for SLP_NODE.   

References ggc_alloc(), i, SLP_TREE_CHILDREN, vect_get_slp_defs(), and vNULL.

◆ vect_get_slp_vect_def()

tree vect_get_slp_vect_def ( slp_tree slp_node,
unsigned i )
extern
Get the Ith vectorized definition from SLP_NODE.   

References ggc_alloc(), i, and SLP_TREE_VEC_DEFS.

Referenced by vect_create_epilog_for_reduction(), vect_schedule_scc(), vectorizable_induction(), and vectorizable_slp_permutation_1().

◆ vect_get_smallest_scalar_type()

tree vect_get_smallest_scalar_type ( stmt_vec_info stmt_info,
tree scalar_type )
extern
Return the smallest scalar part of STMT_INFO.
This is used to determine the vectype of the stmt.  We generally set the
vectype according to the type of the result (lhs).  For stmts whose
result-type is different than the type of the arguments (e.g., demotion,
promotion), vectype will be reset appropriately (later).  Note that we have
to visit the smallest datatype in this function, because that determines the
VF.  If the smallest datatype in the loop is present only as the rhs of a
promotion operation - we'd miss it.
Such a case, where a variable of this datatype does not appear in the lhs
anywhere in the loop, can only occur if it's an invariant: e.g.:
'int_x = (int) short_inv', which we'd expect to have been optimized away by
invariant motion.  However, we cannot rely on invariant motion to always
take invariants out of the loop, and so in the case of promotion we also
have to check the rhs.
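
An illustration of why the smallest type determines the VF (assuming
128-bit vectors): in a loop mixing shorts and ints, the shorts give
VF = 8, and the ints are then processed as two vectors per vector
iteration.

   constexpr unsigned vector_bits = 128;
   constexpr unsigned
   vf_for (unsigned type_bits)
   {
     return vector_bits / type_bits;
   }
   static_assert (vf_for (16) == 8, "short determines VF = 8");
   static_assert (vf_for (32) == 4, "int alone would give only 4");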

References ggc_alloc(), gimple_assign_cast_p(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_call_num_args(), i, internal_fn_mask_index(), internal_fn_stored_value_index(), internal_load_fn_p(), internal_store_fn_p(), SIMD_CLONE_ARG_TYPE_VECTOR, simd_clone_call_p(), tree_fits_uhwi_p(), TREE_INT_CST_LOW, TREE_TYPE, and TYPE_SIZE_UNIT.

Referenced by vect_get_vector_types_for_stmt().

◆ vect_get_stmt_cost()

int vect_get_stmt_cost ( enum vect_cost_for_stmt type_of_cost)
inline
Get cost by calling cost target builtin.   

References builtin_vectorization_cost(), ggc_alloc(), and NULL.

Referenced by vect_estimate_min_profitable_iters().

◆ vect_get_store_cost()

void vect_get_store_cost ( vec_info * ,
stmt_vec_info stmt_info,
int ncopies,
dr_alignment_support alignment_support_scheme,
int misalignment,
unsigned int * inside_cost,
stmt_vector_for_cost * body_cost_vec )
extern

◆ vect_get_store_rhs()

tree vect_get_store_rhs ( stmt_vec_info stmt_info)
extern

◆ vect_get_vec_defs() [1/2]

void vect_get_vec_defs ( vec_info * vinfo,
stmt_vec_info stmt_info,
slp_tree slp_node,
unsigned ncopies,
tree op0,
tree vectype0,
vec< tree > * vec_oprnds0,
tree op1,
tree vectype1,
vec< tree > * vec_oprnds1,
tree op2,
tree vectype2,
vec< tree > * vec_oprnds2,
tree op3,
tree vectype3,
vec< tree > * vec_oprnds3 )

◆ vect_get_vec_defs() [2/2]

void vect_get_vec_defs ( vec_info * vinfo,
stmt_vec_info stmt_info,
slp_tree slp_node,
unsigned ncopies,
tree op0,
vec< tree > * vec_oprnds0,
tree op1 = NULL,
vec< tree > * vec_oprnds1 = NULL,
tree op2 = NULL,
vec< tree > * vec_oprnds2 = NULL,
tree op3 = NULL,
vec< tree > * vec_oprnds3 = NULL )

◆ vect_get_vec_defs_for_operand()

void vect_get_vec_defs_for_operand ( vec_info * vinfo,
stmt_vec_info stmt_vinfo,
unsigned ncopies,
tree op,
vec< tree > * vec_oprnds,
tree vectype )
Function vect_get_vec_defs_for_operand.

OP is an operand in STMT_VINFO.  This function returns a vector of
NCOPIES defs that will be used in the vectorized stmts for STMT_VINFO.

In the case that OP is an SSA_NAME which is defined in the loop, then
STMT_VINFO_VEC_STMTS of the defining stmt holds the relevant defs.

In case OP is an invariant or constant, a new stmt that creates a vector def
needs to be introduced.  VECTYPE may be used to specify a required type for
vector invariant.   

References dump_enabled_p(), dump_printf_loc(), gcc_assert, get_vectype_for_scalar_type(), ggc_alloc(), gimple_get_lhs(), i, MSG_NOTE, NULL, stmt_vectype(), STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_TYPE, truth_type_for(), vect_constant_def, vect_external_def, vect_init_vector(), vect_is_simple_use(), vect_location, VECT_SCALAR_BOOLEAN_TYPE_P, vect_stmt_to_vectorize(), and VECTOR_BOOLEAN_TYPE_P.

Referenced by vect_get_gather_scatter_ops(), vect_get_vec_defs(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_call(), vectorizable_induction(), vectorizable_load(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ vect_get_vector_types_for_stmt()

opt_result vect_get_vector_types_for_stmt ( vec_info * vinfo,
stmt_vec_info stmt_info,
tree * stmt_vectype_out,
tree * nunits_vectype_out,
unsigned int group_size )
extern
Try to compute the vector types required to vectorize STMT_INFO,
returning true on success and false if vectorization isn't possible.
If GROUP_SIZE is nonzero and we're performing BB vectorization,
make sure that the number of elements in the vectors is no bigger
than GROUP_SIZE.

On success:

- Set *STMT_VECTYPE_OUT to:
  - NULL_TREE if the statement doesn't need to be vectorized;
  - the equivalent of STMT_VINFO_VECTYPE otherwise.

- Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum
  number of units needed to vectorize STMT_INFO, or NULL_TREE if the
  statement does not help to determine the overall number of units.   

References build_nonstandard_integer_type(), DR_REF, dump_dec(), dump_enabled_p(), dump_printf(), dump_printf_loc(), opt_result::failure_at(), gcc_assert, get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), ggc_alloc(), gimple_call_arg(), gimple_call_internal_p(), gimple_get_lhs(), MSG_NOTE, NULL_TREE, vec_info::slp_instances, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, opt_result::success(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vect_get_smallest_scalar_type(), vect_location, vect_use_mask_type_p(), VECTOR_BOOLEAN_TYPE_P, and VECTOR_MODE_P.

Referenced by vect_build_slp_tree_1(), and vect_determine_vf_for_stmt_1().

◆ vect_gimple_build()

◆ vect_grouped_load_supported()

bool vect_grouped_load_supported ( tree vectype,
bool single_element_p,
unsigned HOST_WIDE_INT count )
extern
Function vect_grouped_load_supported.

COUNT is the size of the load group (the number of statements plus the
number of gaps).  SINGLE_ELEMENT_P is true if there is actually
only one statement, with a gap of COUNT - 1.

Returns true if a suitable permute exists.   

References can_vec_perm_const_p(), count, dump_enabled_p(), dump_printf_loc(), exact_log2(), gcc_assert, GET_MODE_NUNITS(), ggc_alloc(), i, maybe_gt, MSG_MISSED_OPTIMIZATION, pow2p_hwi(), TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vect_location, and VECTOR_MODE_P.

Referenced by get_group_load_store_type(), and vect_analyze_loop_2().

◆ vect_grouped_store_supported()

bool vect_grouped_store_supported ( tree vectype,
unsigned HOST_WIDE_INT count )
extern
Function vect_grouped_store_supported.

Returns TRUE if interleave high and interleave low permutations
are supported, and FALSE otherwise.   

References can_vec_perm_const_p(), count, dump_enabled_p(), dump_printf(), dump_printf_loc(), exact_log2(), gcc_assert, GET_MODE_NUNITS(), ggc_alloc(), i, data_reference::indices, MSG_MISSED_OPTIMIZATION, pow2p_hwi(), TYPE_MODE, vect_location, and VECTOR_MODE_P.

Referenced by get_group_load_store_type(), and vect_analyze_loop_2().

◆ vect_halve_mask_nunits()

tree vect_halve_mask_nunits ( tree old_type,
machine_mode new_mode )
extern
Return a mask type with half the number of elements as OLD_TYPE,
given that it should have mode NEW_MODE.   

References build_truth_vector_type_for_mode(), ggc_alloc(), new_mode(), and TYPE_VECTOR_SUBPARTS().

Referenced by supportable_widening_operation(), and vect_maybe_permute_loop_masks().

◆ vect_init_vector()

tree vect_init_vector ( vec_info * vinfo,
stmt_vec_info stmt_info,
tree val,
tree type,
gimple_stmt_iterator * gsi )
extern
Function vect_init_vector.

Insert a new stmt (INIT_STMT) that initializes a new variable of type
TYPE with the value VAL.  If TYPE is a vector type and VAL does not have
vector type, a vector with all elements equal to VAL is created first.
Place the initialization at GSI if it is not NULL.  Otherwise, place the
initialization at the loop preheader.
Return the DEF of INIT_STMT.
It will be used in the vectorization of STMT_INFO.   

References build_all_ones_cst(), build_vector_from_val(), build_zero_cst(), CONSTANT_CLASS_P, gcc_assert, ggc_alloc(), gimple_build(), gimple_build_assign(), gimple_convert(), gsi_end_p(), gsi_remove(), gsi_start(), gsi_stmt(), integer_zerop(), INTEGRAL_TYPE_P, make_ssa_name(), NULL, TREE_TYPE, types_compatible_p(), useless_type_conversion_p(), vect_get_new_ssa_name(), vect_init_vector_1(), vect_simple_var, VECTOR_BOOLEAN_TYPE_P, and VECTOR_TYPE_P.

Referenced by vect_build_all_ones_mask(), vect_build_one_scatter_store_call(), vect_build_zero_merge_argument(), vect_create_nonlinear_iv_vec_step(), vect_get_vec_defs_for_operand(), vectorizable_induction(), vectorizable_load(), vectorizable_recurr(), and vectorizable_shift().

◆ vect_is_extending_load()

bool vect_is_extending_load ( class vec_info * vinfo,
stmt_vec_info stmt_info )
inline

◆ vect_is_integer_truncation()

bool vect_is_integer_truncation ( stmt_vec_info stmt_info)
inline

◆ vect_is_reduction()

bool vect_is_reduction ( stmt_vec_info stmt_info)
inline
Return true if STMT_INFO represents part of a reduction.   

References STMT_VINFO_REDUC_IDX.

◆ vect_is_simple_use() [1/3]

bool vect_is_simple_use ( tree operand,
vec_info * vinfo,
enum vect_def_type * dt,
stmt_vec_info * def_stmt_info_out,
gimple ** def_stmt_out )
extern
Function vect_is_simple_use.

Input:
VINFO - the vect info of the loop or basic block that is being vectorized.
OPERAND - operand in the loop or bb.
Output:
DEF_STMT_INFO_OUT (optional) - information about the defining stmt in
  case OPERAND is an SSA_NAME that is defined in the vectorizable region
DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME;
  the definition could be anywhere in the function
DT - the type of definition

Returns whether a stmt with OPERAND can be vectorized.
For loops, supportable operands are constants, loop invariants, and operands
that are defined by the current iteration of the loop.  Unsupportable
operands are those that are defined by a previous iteration of the loop (as
is the case in reduction/induction computations).
For basic blocks, supportable operands are constants and bb invariants.
For now, operands defined outside the basic block are not supported.   

References CONSTANT_CLASS_P, dump_enabled_p(), dump_generic_expr(), dump_gimple_expr(), dump_printf(), dump_printf_loc(), ggc_alloc(), is_gimple_min_invariant(), vec_info::lookup_def(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, SSA_NAME_DEF_STMT, SSA_NAME_IS_DEFAULT_DEF, STMT_VINFO_DEF_TYPE, TDF_SLIM, TREE_CODE, vect_condition_def, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_location, vect_nested_cycle, vect_reduction_def, vect_stmt_to_vectorize(), vect_uninitialized_def, and vect_unknown_def_type.

Referenced by get_group_load_store_type(), get_load_store_type(), is_simple_and_all_uses_invariant(), process_use(), type_conversion_p(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_get_and_check_slp_defs(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_is_simple_use(), vect_is_simple_use(), vect_look_through_possible_promotion(), vect_recog_mask_conversion_pattern(), vect_recog_rotate_pattern(), vect_slp_linearize_chain(), vect_widened_op_tree(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().

◆ vect_is_simple_use() [2/3]

bool vect_is_simple_use ( tree operand,
vec_info * vinfo,
enum vect_def_type * dt,
tree * vectype,
stmt_vec_info * def_stmt_info_out,
gimple ** def_stmt_out )
extern
Function vect_is_simple_use.

Same as vect_is_simple_use but also determines the vector operand
type of OPERAND and stores it to *VECTYPE.  If the definition of
OPERAND is vect_uninitialized_def, vect_constant_def or
vect_external_def *VECTYPE will be set to NULL_TREE and the caller
is responsible to compute the best suited vector type for the
scalar operand.   

References dump_enabled_p(), dump_printf_loc(), gcc_assert, gcc_unreachable, ggc_alloc(), MSG_NOTE, NULL_TREE, STMT_VINFO_VECTYPE, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_is_simple_use(), vect_location, vect_nested_cycle, vect_reduction_def, and vect_uninitialized_def.

◆ vect_is_simple_use() [3/3]

bool vect_is_simple_use ( vec_info * vinfo,
stmt_vec_info stmt,
slp_tree slp_node,
unsigned operand,
tree * op,
slp_tree * slp_def,
enum vect_def_type * dt,
tree * vectype,
stmt_vec_info * def_stmt_info_out )
extern
Function vect_is_simple_use.

Same as vect_is_simple_use but determines the operand by operand
position OPERAND from either STMT or SLP_NODE, filling in *OP
and *SLP_DEF (when SLP_NODE is not NULL).   

References COMPARISON_CLASS_P, gcc_unreachable, ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_get_lhs(), gimple_op(), NULL, SLP_TREE_CHILDREN, SLP_TREE_DEF_TYPE, SLP_TREE_REPRESENTATIVE, SLP_TREE_SCALAR_OPS, SLP_TREE_VECTYPE, TREE_OPERAND, vect_internal_def, and vect_is_simple_use().

◆ vect_is_store_elt_extraction()

bool vect_is_store_elt_extraction ( vect_cost_for_stmt kind,
stmt_vec_info stmt_info )
inline
----------------------------------------------------------------------
Target support routines
-----------------------------------------------------------------------
The following routines are provided to simplify costing decisions in
target code.  Please add more as needed.   
Return true if an operation of kind KIND for STMT_INFO represents
the extraction of an element from a vector in preparation for
storing the element to memory.

References DR_IS_WRITE, STMT_VINFO_DATA_REF, and vec_to_scalar.

◆ vect_iv_increment_position()

void vect_iv_increment_position ( edge loop_exit,
gimple_stmt_iterator * bsi,
bool * insert_after )
extern
Stores in BSI the standard position for an induction variable increment
belonging to LOOP_EXIT (just before the exit condition of the given
exit).  INSERT_AFTER is set to true if the increment should be inserted
after *BSI.

References ggc_alloc(), and gsi_last_bb().

Referenced by vect_create_epilog_for_reduction(), vect_set_loop_condition_normal(), vect_set_loop_condition_partial_vectors_avx512(), and vect_set_loop_controls_directly().

◆ vect_iv_limit_for_partial_vectors()

widest_int vect_iv_limit_for_partial_vectors ( loop_vec_info loop_vinfo)
extern
Decide whether it is possible to use a zero-based induction variable
when vectorizing LOOP_VINFO with partial vectors.  If it is, return
the value that the induction variable must be able to hold in order
to ensure that the rgroups eventually have no active vector elements.
Return -1 otherwise.   

References ggc_alloc(), LOOP_VINFO_LOOP, LOOP_VINFO_MASK_SKIP_NITERS, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_VECT_FACTOR, max_loop_iterations(), wi::to_widest(), TREE_CODE, and vect_max_vf().

Referenced by vect_rgroup_iv_might_wrap_p(), vect_verify_full_masking(), and vect_verify_full_masking_avx512().

◆ vect_known_alignment_in_bytes()

unsigned int vect_known_alignment_in_bytes ( dr_vec_info * dr_info,
tree vectype )
inline
Return the minimum alignment in bytes that the vectorized version
of DR_INFO is guaranteed to have.   

References dr_info::dr, dr_misalignment(), DR_MISALIGNMENT_UNKNOWN, DR_REF, DR_TARGET_ALIGNMENT, ggc_alloc(), TREE_TYPE, and TYPE_ALIGN_UNIT.

Referenced by get_group_load_store_type(), and vectorizable_load().

◆ vect_load_lanes_supported()

internal_fn vect_load_lanes_supported ( tree vectype,
unsigned HOST_WIDE_INT count,
bool masked_p )
extern
Return FN if vec_{masked_,mask_len_}load_lanes is available for COUNT vectors
of type VECTYPE.  MASKED_P says whether the masked form is needed.   

References count, ggc_alloc(), IFN_LAST, and vect_lanes_optab_supported_p().

Referenced by check_load_store_for_partial_vectors(), get_group_load_store_type(), and vect_analyze_loop_2().

◆ vect_loop_vectorized_call()

gimple * vect_loop_vectorized_call ( class loop * loop,
gcond ** cond )

◆ vect_loop_versioning()

class loop * vect_loop_versioning ( loop_vec_info loop_vinfo,
gimple * loop_vectorized_call )
Function vect_loop_versioning.

If the loop has data references that may or may not be aligned or/and
has data reference relations whose independence was not proven then
two versions of the loop need to be generated, one which is vectorized
and one which isn't.  A test is then generated to control which of the
loops is executed.  The test checks for the alignment of all of the
data references that may or may not be aligned.  An additional
sequence of runtime tests is generated for each pair of DDRs whose
independence was not proven.  The vectorized version of the loop is
executed only if both the alias and alignment tests are passed.

The test generated to check which version of loop is executed
is modified to also check for profitability as indicated by the
cost model threshold TH.

The versioning precondition(s) are placed in *COND_EXPR and
*COND_EXPR_STMT_LIST.   

References add_phi_arg(), adjust_phi_and_debug_stmts(), profile_probability::always(), bb_loop_depth(), boolean_false_node, boolean_true_node, boolean_type_node, build_int_cst(), build_zero_cst(), CDI_DOMINATORS, copy_ssa_name(), create_phi_node(), dom_info_available_p(), dominated_by_p(), dump_enabled_p(), dump_printf_loc(), EDGE_COMPLEX, EDGE_COUNT, EDGE_PRED, EDGE_SUCC, expr, EXPR_P, extract_true_false_edges_from_block(), first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), flush_pending_stmts(), fold_build2, fold_loop_internal_call(), FOR_EACH_SSA_USE_OPERAND, force_gimple_operand_1(), free_original_copy_tables(), gcc_assert, get_current_def(), dump_user_location_t::get_location_t(), get_loop_copy(), ggc_alloc(), gimple_bb(), gimple_build_cond(), gimple_call_lhs(), gimple_cond_set_condition_from_tree(), gimple_phi_arg_location_from_edge(), gimple_seq_add_seq(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_seq_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), GSI_SAME_STMT, gsi_start(), gsi_start_phis(), gsi_stmt(), loop::header, initialize_original_copy_tables(), loop::inner, profile_probability::invert(), is_gimple_condexpr_for_cond(), is_gimple_val(), profile_probability::likely(), LOCATION_LOCUS, LOOP_C_FINITE, loop_constraint_set_p(), loop_outer(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIAS, LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_REQUIRES_VERSIONING_FOR_NITERS, LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND, loop_version(), LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERSM1, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_SCALAR_LOOP_SCALING, LOOP_VINFO_VERSIONING_THRESHOLD, make_edge(), MSG_NOTE, MSG_OPTIMIZED_LOCATIONS, MSG_PRIORITY_USER_FACING, loop::next, next_dom_son(), NULL, NULL_TREE, loop::num, outermost_invariant_loop_for_expr(), gphi_iterator::phi(), PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, replace_uses_by(), scale_loop_frequencies(), set_immediate_dominator(), single_exit(), split_block(), split_edge(), profile_probability::sqrt(), SSA_NAME_DEF_STMT, SSA_OP_USE, ssa_redirect_edge(), superloop_at_depth(), TODO_update_ssa_no_phi, TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, unshare_expr(), update_ssa(), update_stmt(), USE_FROM_PTR, vect_apply_runtime_profitability_check_p(), vect_create_cond_for_alias_checks(), vect_create_cond_for_align_checks(), vect_create_cond_for_lower_bounds(), vect_create_cond_for_niters_checks(), vect_create_cond_for_unequal_addrs(), vect_free_loop_info_assumptions(), vect_location, and vect_loop_vectorized_call().

Referenced by vect_transform_loop().

◆ vect_make_slp_decision()

bool vect_make_slp_decision ( loop_vec_info loop_vinfo)
extern
For each possible SLP instance decide whether to SLP it and calculate overall
unrolling factor needed to SLP the loop.  Return TRUE if decided to SLP at
least one instance.   

References dump_dec(), dump_enabled_p(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, ggc_alloc(), i, LOOP_VINFO_SLP_INSTANCES, LOOP_VINFO_SLP_UNROLLING_FACTOR, MSG_NOTE, SLP_INSTANCE_TREE, SLP_INSTANCE_UNROLLING_FACTOR, vect_location, and vect_mark_slp_stmts().

Referenced by vect_analyze_loop_2().

◆ vect_mark_pattern_stmts()

◆ vect_mark_stmts_to_be_vectorized()

opt_result vect_mark_stmts_to_be_vectorized ( loop_vec_info loop_vinfo,
bool * fatal )
extern

◆ vect_max_vf()

unsigned HOST_WIDE_INT vect_max_vf ( loop_vec_info loop_vinfo)
inline
Return the maximum possible vectorization factor for LOOP_VINFO.   

References ggc_alloc(), LOOP_VINFO_VECT_FACTOR, and MAX_VECTORIZATION_FACTOR.

Referenced by vect_iv_limit_for_partial_vectors(), vect_truncate_gather_scatter_offset(), and vect_verify_full_masking_avx512().

◆ vect_maybe_update_slp_op_vectype()

◆ vect_nop_conversion_p()

bool vect_nop_conversion_p ( stmt_vec_info stmt_info)
extern
Return true if we can assume from the scalar form of STMT_INFO that
neither the scalar nor the vector forms will generate code.  STMT_INFO
is known not to involve a data reference.   

References CONVERT_EXPR_CODE_P, ggc_alloc(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tree_nop_conversion_p(), and TREE_TYPE.

Referenced by vect_bb_slp_scalar_cost(), vect_compute_single_scalar_iteration_cost(), and vectorizable_assignment().

◆ vect_nunits_for_cost()

unsigned int vect_nunits_for_cost ( tree vec_type)
inline
Estimate the number of elements in VEC_TYPE for costing purposes.
Pick a reasonable estimate if the exact number isn't known at
compile time.   

References estimated_poly_value(), and TYPE_VECTOR_SUBPARTS().

Referenced by vect_model_reduction_cost(), vectorizable_bb_reduc_epilogue(), vectorizable_load(), and vectorizable_store().

◆ vect_optimize_slp()

void vect_optimize_slp ( vec_info * vinfo)
extern
Optimize the SLP graph of VINFO.   

References vect_optimize_slp_pass::run(), and vec_info::slp_instances.

Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().

◆ vect_orig_stmt()

◆ vect_pattern_recog()

void vect_pattern_recog ( vec_info * vinfo)
Pattern recognition functions.
Additional pattern recognition functions can (and will) be added
in the future.   
Function vect_pattern_recog

Input:
LOOP_VINFO - a struct_loop_info of a loop in which we want to look for
     computation idioms.

Output - for each computation idiom that is detected we create a new stmt
     that provides the same functionality and that can be vectorized.  We
     also record some information in the struct_stmt_info of the relevant
     stmts, as explained below:

At the entry to this function we have the following stmts, with the
following initial value in the STMT_VINFO fields:

      stmt                     in_pattern_p  related_stmt    vec_stmt
      S1: a_i = ....                 -       -               -
      S2: a_2 = ..use(a_i)..         -       -               -
      S3: a_1 = ..use(a_2)..         -       -               -
      S4: a_0 = ..use(a_1)..         -       -               -
      S5: ... = ..use(a_0)..         -       -               -

Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be
represented by a single stmt.  We then:
- create a new stmt S6 equivalent to the pattern (the stmt is not
  inserted into the code)
- fill in the STMT_VINFO fields as follows:

                               in_pattern_p  related_stmt    vec_stmt
      S1: a_i = ....                 -       -               -
      S2: a_2 = ..use(a_i)..         -       -               -
      S3: a_1 = ..use(a_2)..         -       -               -
      S4: a_0 = ..use(a_1)..         true    S6              -
       '---> S6: a_new = ....        -       S4              -
      S5: ... = ..use(a_0)..         -       -               -

(the last stmt in the pattern (S4) and the new pattern stmt (S6) point
to each other through the RELATED_STMT field).

S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead
of S4 because it will replace all its uses.  Stmts {S1,S2,S3} will
remain irrelevant unless used by stmts other than S4.

If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3}
(because they are marked as irrelevant).  It will vectorize S6, and record
a pointer to the new vector stmt VS6 from S6 (as usual).
S4 will be skipped, and S5 will be vectorized as usual:

                               in_pattern_p  related_stmt    vec_stmt
      S1: a_i = ....                 -       -               -
      S2: a_2 = ..use(a_i)..         -       -               -
      S3: a_1 = ..use(a_2)..         -       -               -
    > VS6: va_new = ....             -       -               -
      S4: a_0 = ..use(a_1)..         true    S6              VS6
       '---> S6: a_new = ....        -       S4              VS6
    > VS5: ... = ..vuse(va_new)..    -       -               -
      S5: ... = ..use(a_0)..         -       -               -

DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used
elsewhere), and we'll end up with:

     VS6: va_new = ....
     VS5: ... = ..vuse(va_new)..

In case of more than one pattern statements, e.g., widen-mult with
intermediate type:

  S1  a_t = ;
  S2  a_T = (TYPE) a_t;
        '--> S3: a_it = (interm_type) a_t;
  S4  prod_T = a_T * CONST;
        '--> S5: prod_T' = a_it w* CONST;

there may be other users of a_T outside the pattern.  In that case S2 will
be marked as relevant (as well as S3), and both S2 and S3 will be analyzed
and vectorized.  The vector stmt VS2 will be recorded in S2, and VS3 will
be recorded in S3.   
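
A source-level view of the widen-mult example above, written as plain
C++ for illustration (S1-S5 are GIMPLE stmts in the real pass, and the
constant 7 is an arbitrary stand-in for CONST):

   #include <cassert>

   int
   widen_mult (short a_t)                /* S1: a_t = ...  */
   {
     int a_T = static_cast<int> (a_t);   /* S2: a_T = (TYPE) a_t  */
     return a_T * 7;                     /* S4: prod_T = a_T * CONST  */
   }
   /* After recognition, the pair (S2, S4) is represented by a single
      widening multiply, prod_T' = a_t w* 7, possibly via the
      intermediate-type stmts S3/S5 shown above.  */

   int main () { assert (widen_mult (3) == 21); }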

References DUMP_VECT_SCOPE, ggc_alloc(), gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_stmt(), i, is_gimple_debug(), vec_info::lookup_stmt(), LOOP_VINFO_BBS, LOOP_VINFO_LOOP, loop::num_nodes, NUM_PATTERNS, si, vec_info::stmt_vec_info_ro, STMT_VINFO_VECTORIZABLE, vect_determine_precisions(), vect_pattern_recog_1(), and vect_vect_recog_func_ptrs.

Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().

◆ vect_peel_nonlinear_iv_init()

◆ vect_permute_store_chain()

void vect_permute_store_chain ( vec_info * vinfo,
vec< tree > & dr_chain,
unsigned int length,
stmt_vec_info stmt_info,
gimple_stmt_iterator * gsi,
vec< tree > * result_chain )
extern
Function vect_permute_store_chain.

Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be
a power of 2 or equal to 3, generate interleave_high/low stmts to reorder
the data correctly for the stores.  Return the final references for stores
in RESULT_CHAIN.

E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8.
The input is 4 vectors each containing 8 elements.  We assign a number to
each element, the input sequence is:

1st vec:   0  1  2  3  4  5  6  7
2nd vec:   8  9 10 11 12 13 14 15
3rd vec:  16 17 18 19 20 21 22 23
4th vec:  24 25 26 27 28 29 30 31

The output sequence should be:

1st vec:  0  8 16 24  1  9 17 25
2nd vec:  2 10 18 26  3 11 19 27
3rd vec:  4 12 20 28  5 13 21 29
4th vec:  6 14 22 30  7 15 23 31

i.e., we interleave the contents of the four vectors in their order.

We use interleave_high/low instructions to create such output.  The input of
each interleave_high/low operation is two vectors:
1st vec    2nd vec
0 1 2 3    4 5 6 7
the even elements of the result vector are obtained left-to-right from the
high/low elements of the first vector.  The odd elements of the result are
obtained left-to-right from the high/low elements of the second vector.
The output of interleave_high will be:   0 4 1 5
and of interleave_low:                   2 6 3 7


The permutation is done in log LENGTH stages.  In each stage interleave_high
and interleave_low stmts are created for each pair of vectors in DR_CHAIN,
where the first argument is taken from the first half of DR_CHAIN and the
second argument from its second half.
In our example,

I1: interleave_high (1st vec, 3rd vec)
I2: interleave_low (1st vec, 3rd vec)
I3: interleave_high (2nd vec, 4th vec)
I4: interleave_low (2nd vec, 4th vec)

The output for the first stage is:

I1:  0 16  1 17  2 18  3 19
I2:  4 20  5 21  6 22  7 23
I3:  8 24  9 25 10 26 11 27
I4: 12 28 13 29 14 30 15 31

The output of the second stage, i.e. the final result is:

I1:  0  8 16 24  1  9 17 25
I2:  2 10 18 26  3 11 19 27
I3:  4 12 20 28  5 13 21 29
I4:  6 14 22 30  7 15 23 31.   
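
A self-contained model of the log LENGTH stages above, for LENGTH == 4
vectors of 8 elements (illustration only; the real code emits
VEC_PERM_EXPRs).  Note that, per the 0 4 1 5 / 2 6 3 7 example,
interleave_high draws from the first halves of its inputs and
interleave_low from the second halves.

   #include <array>
   #include <cassert>
   #include <vector>

   typedef std::array<int, 8> vec8;

   static vec8
   interleave_high (const vec8 &a, const vec8 &b)
   {
     vec8 r{};
     for (int i = 0; i < 4; ++i)
       { r[2 * i] = a[i]; r[2 * i + 1] = b[i]; }
     return r;
   }

   static vec8
   interleave_low (const vec8 &a, const vec8 &b)
   {
     vec8 r{};
     for (int i = 0; i < 4; ++i)
       { r[2 * i] = a[i + 4]; r[2 * i + 1] = b[i + 4]; }
     return r;
   }

   int
   main ()
   {
     std::vector<vec8> chain (4);
     for (int v = 0; v < 4; ++v)
       for (int e = 0; e < 8; ++e)
         chain[v][e] = 8 * v + e;              /* elements 0..31 */

     for (int stage = 0; stage < 2; ++stage)   /* log2 (4) stages */
       {
         std::vector<vec8> next (4);
         for (int k = 0; k < 2; ++k)
           {
             /* Pair vector K of the first half of the chain with
                vector K of the second half, as described above.  */
             next[2 * k] = interleave_high (chain[k], chain[k + 2]);
             next[2 * k + 1] = interleave_low (chain[k], chain[k + 2]);
           }
         chain = next;
       }

     const vec8 expect = {0, 8, 16, 24, 1, 9, 17, 25};
     assert (chain[0] == expect);              /* the final I1 above */
   }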

References exact_log2(), gcc_assert, ggc_alloc(), gimple_build_assign(), i, data_reference::indices, make_temp_ssa_name(), NULL, pow2p_hwi(), STMT_VINFO_VECTYPE, poly_int< N, C >::to_constant(), TYPE_VECTOR_SUBPARTS(), vect_finish_stmt_generation(), and vect_gen_perm_mask_checked().

Referenced by vectorizable_store().

◆ vect_phi_initial_value()

tree vect_phi_initial_value ( gphi * phi)
inline
PHI is either a scalar reduction phi or a scalar induction phi.
Return the initial value of the variable on entry to the containing
loop.   

References gcc_assert, gimple_bb(), basic_block_def::loop_father, loop_preheader_edge(), and PHI_ARG_DEF_FROM_EDGE.

Referenced by info_for_reduction(), vect_transform_cycle_phi(), vectorizable_induction(), vectorizable_nonlinear_induction(), and vectorizable_reduction().

◆ vect_pow2()

int vect_pow2 ( int x)
inline
Return 2 raised to the power X, i.e. pow2 (X).

References i.

Referenced by vectorizable_conversion().

◆ vect_prepare_for_masked_peels()

void vect_prepare_for_masked_peels ( loop_vec_info loop_vinfo)
extern

◆ vect_prune_runtime_alias_test_list()

opt_result vect_prune_runtime_alias_test_list ( loop_vec_info loop_vinfo)
extern

◆ vect_record_base_alignments()

void vect_record_base_alignments ( vec_info * vinfo)
extern
If the region we're going to vectorize is reached, all unconditional
data references occur at least once.  We can therefore pool the base
alignment guarantees from each unconditional reference.  Do this by
going through all the data references in VINFO and checking whether
the containing statement makes the reference unconditionally.  If so,
record the alignment of the base address in VINFO so that it can be
used for all other references with the same base.   

References vec_info_shared::datarefs, DR_INNERMOST, DR_IS_CONDITIONAL_IN_STMT, ggc_alloc(), vec_info::lookup_dr(), LOOP_VINFO_LOOP, nested_in_vect_loop_p(), NULL, vec_info::shared, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_VECTORIZABLE, and vect_record_base_alignment().

Referenced by vect_analyze_data_refs_alignment(), and vect_slp_analyze_bb_1().

◆ vect_record_grouped_load_vectors()

void vect_record_grouped_load_vectors ( vec_info * ,
stmt_vec_info stmt_info,
vec< tree > result_chain )
extern
RESULT_CHAIN contains the output of a group of grouped loads that were
generated as part of the vectorization of STMT_INFO.  Assign the statement
for each vector to the associated scalar statement.   

References DR_GROUP_FIRST_ELEMENT, DR_GROUP_GAP, DR_GROUP_NEXT_ELEMENT, FOR_EACH_VEC_ELT, ggc_alloc(), i, SSA_NAME_DEF_STMT, and STMT_VINFO_VEC_STMTS.

Referenced by vect_transform_grouped_load(), and vectorizable_load().

◆ vect_record_loop_len()

void vect_record_loop_len ( loop_vec_info loop_vinfo,
vec_loop_lens * lens,
unsigned int nvectors,
tree vectype,
unsigned int factor )
extern
Record that LOOP_VINFO would need LENS to contain a sequence of NVECTORS
lengths for controlling an operation on VECTYPE.  The operation splits
each element of VECTYPE into FACTOR separate subelements, measuring the
length as a number of these subelements.   

References gcc_assert, ggc_alloc(), LOOP_VINFO_VECT_FACTOR, and TYPE_VECTOR_SUBPARTS().

Referenced by check_load_store_for_partial_vectors(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_operation(), and vectorizable_reduction().

◆ vect_record_loop_mask()

void vect_record_loop_mask ( loop_vec_info loop_vinfo,
vec_loop_masks * masks,
unsigned int nvectors,
tree vectype,
tree scalar_mask )
extern
Record that a fully-masked version of LOOP_VINFO would need MASKS to
contain a sequence of NVECTORS masks that each control a vector of type
VECTYPE.  If SCALAR_MASK is nonnull, the fully-masked loop would AND
these vector masks with the vector version of SCALAR_MASK.   

References hash_set< KeyId, Lazy, Traits >::add(), gcc_assert, ggc_alloc(), vec_loop_masks::mask_set, and _loop_vec_info::scalar_cond_masked_set.

Referenced by check_load_store_for_partial_vectors(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_simd_clone_call().

◆ vect_reduc_type()

int vect_reduc_type ( vec_info * vinfo,
stmt_vec_info stmt_info )
inline
If STMT_INFO describes a reduction, return the vect_reduction_type
of the reduction it describes, otherwise return -1.   

References ggc_alloc(), info_for_reduction(), STMT_VINFO_REDUC_DEF, and STMT_VINFO_REDUC_TYPE.

◆ vect_remove_stores()

void vect_remove_stores ( vec_info * vinfo,
stmt_vec_info first_stmt_info )
extern
Remove a group of stores (for SLP or interleaving), free their
stmt_vec_info.   

References DR_GROUP_NEXT_ELEMENT, ggc_alloc(), vec_info::remove_stmt(), and vect_orig_stmt().

Referenced by vect_transform_loop().

◆ vect_rgroup_iv_might_wrap_p()

bool vect_rgroup_iv_might_wrap_p ( loop_vec_info loop_vinfo,
rgroup_controls * rgc )
For the given rgroup_controls RGC, check whether an induction variable
would ever hit a value that produces a set of all-false masks or zero
lengths before wrapping around.  Return true if it's possible to wrap
around before hitting the desirable value, otherwise return false.   

References ggc_alloc(), LOOP_VINFO_RGROUP_COMPARE_TYPE, wi::min_precision(), TYPE_PRECISION, UNSIGNED, and vect_iv_limit_for_partial_vectors().

Referenced by vect_estimate_min_profitable_iters(), and vect_set_loop_condition_partial_vectors().

◆ vect_schedule_slp()

◆ vect_set_loop_condition()

void vect_set_loop_condition ( class loop * loop,
edge loop_e,
loop_vec_info loop_vinfo,
tree niters,
tree step,
tree final_iv,
bool niters_maybe_zero )
extern
Simple loop peeling and versioning utility for the vectorizer's
purposes, defined in tree-vect-loop-manip.cc.
If we're using fully-masked loops, make LOOP iterate:

   N == (NITERS - 1) / STEP + 1

times.  When NITERS is zero, this is equivalent to making the loop
execute (1 << M) / STEP times, where M is the precision of NITERS.
NITERS_MAYBE_ZERO is true if this last case might occur.

If we're not using fully-masked loops, make LOOP iterate:

   N == (NITERS - STEP) / STEP + 1

times, where NITERS is known to be outside the range [1, STEP - 1].
This is equivalent to making the loop execute NITERS / STEP times
when NITERS is nonzero and (1 << M) / STEP times otherwise.
NITERS_MAYBE_ZERO again indicates whether this last case might occur.

If FINAL_IV is nonnull, it is an SSA name that should be set to
N * STEP on exit from the loop.

Assumption: the exit-condition of LOOP is the last stmt in the loop.   
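
As a concrete check of the two formulas, take NITERS = 10 and STEP = 4
(all divisions truncating):

   fully-masked:      N == (10 - 1) / 4 + 1 == 3    i.e. ceil (10 / 4)
   not fully-masked:  N == (10 - 4) / 4 + 1 == 2    i.e. 10 / 4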

References dump_enabled_p(), dump_printf_loc(), get_loop_exit_condition(), ggc_alloc(), gsi_for_stmt(), gsi_remove(), vec_info::lookup_stmt(), LOOP_VINFO_PARTIAL_VECTORS_STYLE, LOOP_VINFO_USING_PARTIAL_VECTORS_P, MSG_NOTE, vec_info::remove_stmt(), vect_location, vect_partial_vectors_avx512, vect_set_loop_condition_normal(), vect_set_loop_condition_partial_vectors(), and vect_set_loop_condition_partial_vectors_avx512().

Referenced by vect_do_peeling(), and vect_transform_loop().

◆ vect_setup_realignment()

tree vect_setup_realignment ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple_stmt_iterator * gsi,
tree * realignment_token,
enum dr_alignment_support alignment_support_scheme,
tree init_addr,
class loop ** at_loop )
extern
Function vect_setup_realignment

This function is called when vectorizing an unaligned load using
the dr_explicit_realign[_optimized] scheme.
This function generates the following code at the loop prolog:

   p = initial_addr;
x  msq_init = *(floor(p));   # prolog load
   realignment_token = call target_builtin;
 loop:
x  msq = phi (msq_init, ---)

The stmts marked with x are generated only for the case of
dr_explicit_realign_optimized.

The code above sets up a new (vector) pointer, pointing to the first
location accessed by STMT_INFO, and a "floor-aligned" load using that
pointer.  It also generates code to compute the "realignment-token"
(if the relevant target hook was defined), and creates a phi-node at the
loop-header bb whose arguments are the result of the prolog-load (created
by this function) and the result of a load that takes place in the loop
(to be created by the caller to this function).

For the case of dr_explicit_realign_optimized:
The caller to this function uses the phi-result (msq) to create the
realignment code inside the loop, and sets up the missing phi argument,
as follows:
 loop:
   msq = phi (msq_init, lsq)
   lsq = *(floor(p'));        # load in loop
   result = realign_load (msq, lsq, realignment_token);

For the case of dr_explicit_realign:
 loop:
   msq = *(floor(p));   # load in loop
   p' = p + (VS-1);
   lsq = *(floor(p'));  # load in loop
   result = realign_load (msq, lsq, realignment_token);

Input:
STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
            a memory location that may be unaligned.
GSI - place where new code is to be inserted.
ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
                      is used.

Output:
REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
                    target hook, if defined.
Return value - the result of the loop-header phi node.   

References add_phi_arg(), build2(), build_int_cst(), copy_ssa_name(), create_phi_node(), dr_info::dr, dr_explicit_realign, dr_explicit_realign_optimized, DR_REF, DR_TARGET_ALIGNMENT, fold_build2, gcc_assert, get_virtual_phi(), ggc_alloc(), gimple_assign_lhs(), gimple_assign_set_lhs(), gimple_bb(), gimple_build_assign(), gimple_build_call(), gimple_call_lhs(), gimple_call_return_type(), gimple_call_set_lhs(), gimple_set_vuse(), gimple_vuse(), gsi_insert_before(), gsi_insert_on_edge_immediate(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), loop::header, loop::inner, loop_preheader_edge(), LOOP_VINFO_LOOP, make_ssa_name(), nested_in_vect_loop_p(), NULL, NULL_TREE, PHI_ARG_DEF_FROM_EDGE, reference_alias_ptr_type(), size_zero_node, STMT_VINFO_DR_INFO, STMT_VINFO_DR_STEP, STMT_VINFO_VECTYPE, targetm, TREE_CODE, tree_int_cst_compare(), TREE_READONLY, TREE_TYPE, UNKNOWN_LOCATION, vect_copy_ref_info(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), and vect_create_destination_var().

Referenced by vectorizable_load().

◆ vect_slp_analyze_instance_alignment()

bool vect_slp_analyze_instance_alignment ( vec_info * vinfo,
slp_instance instance )
extern
Function vect_slp_analyze_instance_alignment

Analyze the alignment of the data-references in the SLP instance.
Return FALSE if a data reference is found that cannot be vectorized.   
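
A sketch of the expected call pattern during BB-vectorization analysis
(INSTANCE is assumed to come from the surrounding SLP analysis):

   if (!vect_slp_analyze_instance_alignment (vinfo, instance))
     /* Some data reference in the instance cannot be vectorized;
        discard the instance.  */
     return false;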

References DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, and vect_slp_analyze_node_alignment().

Referenced by vect_slp_analyze_bb_1().

◆ vect_slp_analyze_instance_dependence()

bool vect_slp_analyze_instance_dependence ( vec_info * vinfo,
slp_instance instance )
extern
Function vect_slp_analyze_instance_dependence.

Examine all the data references in the SLP instance and make sure there
do not exist any data dependences between them that would prevent
vectorization.  Return FALSE if such a dependence is found.

References DUMP_VECT_SCOPE, ggc_alloc(), gimple_set_visited(), NULL, slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, SLP_TREE_SCALAR_STMTS, vect_find_last_scalar_stmt_in_slp(), vect_slp_analyze_load_dependences(), vect_slp_analyze_store_dependences(), and vNULL.

Referenced by vect_slp_analyze_bb_1().

◆ vect_slp_analyze_operations()

◆ vect_slp_child_index_for_operand()

int vect_slp_child_index_for_operand ( const gimple * stmt,
int op,
bool gather_scatter_p )
extern
Return the SLP node child index for operand OP of STMT.   
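
A small sketch of the intended use (NODE and STMT are assumed to come
from the surrounding SLP analysis, OP is a gimple operand index):

   int idx = vect_slp_child_index_for_operand (stmt, op, false);
   slp_tree child = SLP_TREE_CHILDREN (node)[idx];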

References gcc_unreachable, ggc_alloc(), i, and vect_get_operand_map().

Referenced by vect_check_store_rhs(), vectorizable_load(), vectorizable_simd_clone_call(), and vectorizable_store().

◆ vect_slp_fini()

void vect_slp_fini ( void )
extern

References NULL, slp_first_node, and slp_tree_pool.

◆ vect_slp_function()

◆ vect_slp_if_converted_bb()

bool vect_slp_if_converted_bb ( basic_block bb,
loop_p orig_loop )
extern
Special entry for the BB vectorizer.  Analyze and transform a single
if-converted BB, with ORIG_LOOP's body being the not-if-converted
representation.  Returns true if anything in the basic block was
vectorized.

References vect_slp_bbs().

Referenced by try_vectorize_loop_1().

◆ vect_slp_init()

void vect_slp_init ( void )
extern
In tree-vect-slp.cc.   

References ggc_alloc(), and slp_tree_pool.

◆ vect_stmt_dominates_stmt_p()

◆ vect_stmt_to_vectorize()

◆ vect_store_lanes_supported()

internal_fn vect_store_lanes_supported ( tree vectype,
unsigned HOST_WIDE_INT count,
bool masked_p )
extern
Return FN if vec_{mask_,mask_len_}store_lanes is available for COUNT vectors
of type VECTYPE.  MASKED_P says whether the masked form is needed.   
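
Callers typically probe for the internal function and fall back when it
is unavailable; a sketch, with GROUP_SIZE and MASKED_P assumed from the
surrounding analysis:

   internal_fn lanes_ifn
     = vect_store_lanes_supported (vectype, group_size, masked_p);
   if (lanes_ifn == IFN_LAST)
     {
       /* No store-lanes support; use a permute-based strategy instead.  */
     }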

References count, ggc_alloc(), IFN_LAST, and vect_lanes_optab_supported_p().

Referenced by check_load_store_for_partial_vectors(), get_group_load_store_type(), vect_analyze_loop_2(), and vect_slp_prefer_store_lanes_p().

◆ vect_supportable_dr_alignment()

◆ vect_supportable_shift()

bool vect_supportable_shift ( vec_info * vinfo,
enum tree_code code,
tree scalar_type )
extern
Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE
either as shift by a scalar or by a vector.   
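
A hypothetical query, checking whether a left shift of OP's scalar type
could be vectorized at all:

   if (vect_supportable_shift (vinfo, LSHIFT_EXPR, TREE_TYPE (op)))
     {
       /* Either a vector-by-vector or vector-by-scalar shift exists.  */
     }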

References get_vectype_for_scalar_type(), ggc_alloc(), optab_for_tree_code(), optab_handler(), optab_scalar, optab_vector, and TYPE_MODE.

Referenced by vect_synth_mult_by_constant().

◆ vect_transform_cycle_phi()

bool vect_transform_cycle_phi ( loop_vec_info loop_vinfo,
stmt_vec_info stmt_info,
gimple ** vec_stmt,
slp_tree slp_node,
slp_instance slp_node_instance )
extern
Transform phase of a cycle PHI.   

References add_phi_arg(), build_vector_from_val(), build_vector_type(), build_vector_type_for_mode(), COND_REDUCTION, CONST_COND_REDUCTION, create_phi_node(), EXTRACT_LAST_REDUCTION, FOLD_LEFT_REDUCTION, gcc_assert, get_initial_def_for_reduction(), get_initial_defs_for_reduction(), ggc_alloc(), gimple_convert(), gimple_phi_result(), GSI_CONTINUE_LINKING, gsi_end_p(), gsi_insert_seq_after(), gsi_insert_seq_on_edge_immediate(), gsi_last_bb(), gsi_prev(), gsi_stmt(), loop::header, i, info_for_reduction(), loop::inner, INTEGER_INDUC_COND_REDUCTION, integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_VECT_FACTOR, _loop_vec_info::main_loop_edge, nested_in_vect_loop_p(), neutral_op_for_reduction(), NULL, NULL_TREE, num_phis(), operand_equal_p(), REDUC_GROUP_FIRST_ELEMENT, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_SCALAR_STMTS, stmt_ends_bb_p(), STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_REDUC_CODE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, tree_int_cst_lt(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), UNKNOWN_LOCATION, useless_type_conversion_p(), vect_create_destination_var(), vect_create_partial_epilog(), vect_find_reusable_accumulator(), vect_get_main_loop_result(), vect_get_num_copies(), vect_get_num_vectors(), vect_get_slp_defs(), vect_get_vec_defs_for_operand(), vect_phi_initial_value(), vect_reduction_def, and vect_stmt_to_vectorize().

Referenced by vect_transform_stmt().

◆ vect_transform_grouped_load()

void vect_transform_grouped_load ( vec_info * vinfo,
stmt_vec_info stmt_info,
vec< tree > dr_chain,
int size,
gimple_stmt_iterator * gsi )
extern
Function vect_transform_grouped_load.

Given a chain of input interleaved data-refs (in DR_CHAIN), build statements
to perform their permutation and attach the resulting vectorized statements
to the scalar statements.

References ggc_alloc(), pow2p_hwi(), STMT_VINFO_VECTYPE, targetm, TYPE_MODE, vect_permute_load_chain(), vect_record_grouped_load_vectors(), vect_shift_permute_load_chain(), and vNULL.

Referenced by vectorizable_load().

◆ vect_transform_loop()

class loop * vect_transform_loop ( loop_vec_info loop_vinfo,
gimple * loop_vectorized_call )
extern
Driver for the loop transformation stage.
Function vect_transform_loop.

The analysis phase has determined that the loop is vectorizable.
Vectorize the loop - create vectorized stmts to replace the scalar
stmts in the loop, and update the loop exit condition.
Returns scalar epilogue loop if any.   

References advance(), loop::any_estimate, loop::any_likely_upper_bound, loop::any_upper_bound, build_int_cst(), build_one_cst(), build_zero_cst(), vec_info_shared::check_datarefs(), conditional_internal_fn_code(), DR_GROUP_FIRST_ELEMENT, dump_enabled_p(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, EDGE_COUNT, FOR_EACH_VEC_ELT, loop::force_vectorize, gcc_assert, GET_MODE_NAME, ggc_alloc(), gimple_build_assign(), gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_call_num_args(), gimple_clobber_p(), gimple_get_lhs(), gsi_end_p(), gsi_next(), gsi_remove(), gsi_replace(), gsi_start(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), loop::header, i, loop::inner, integer_onep(), poly_int< N, C >::is_constant(), known_eq, vec_info::lookup_stmt(), loop_niters_no_overflow(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING, LOOP_VINFO_BBS, LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERS_UNCHANGED, LOOP_VINFO_NITERSM1, LOOP_VINFO_ORIG_LOOP_INFO, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_SCALAR_LOOP_SCALING, LOOP_VINFO_SLP_INSTANCES, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, LOOP_VINFO_VERSIONING_THRESHOLD, MAY_HAVE_DEBUG_BIND_STMTS, maybe_flat_loop_profile(), maybe_set_vectorized_backedge_value(), move_early_exit_stmts(), MSG_NOTE, loop::nb_iterations_estimate, loop::nb_iterations_likely_upper_bound, loop::nb_iterations_upper_bound, NULL, NULL_TREE, loop::num_nodes, basic_block_def::preds, PURE_SLP_STMT, release_defs(), vec_info::remove_stmt(), loop::safelen, scale_loop_frequencies(), scale_profile_for_vect_loop(), vec_info::shared, si, loop::simduid, single_pred_p(), vec_info::slp_instances, split_edge(), split_loop_exit_edge(), STMT_VINFO_DEF_TYPE, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_LIVE_P, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT_P, STMT_VINFO_VECTYPE, TREE_TYPE, TYPE_VECTOR_SUBPARTS(), wi::udiv_ceil(), wi::udiv_floor(), wi::umin(), unlink_stmt_vdef(), loop::unroll, unshare_expr(), update_epilogue_loop_vinfo(), vect_apply_runtime_profitability_check_p(), vect_build_loop_niters(), vect_do_peeling(), vect_double_reduction_def, vect_first_order_recurrence, vect_free_slp_instance(), vect_gen_vector_loop_niters(), vect_induction_def, vect_internal_def, vect_location, vect_loop_kill_debug_uses(), vect_loop_versioning(), vect_nested_cycle, vect_prepare_for_masked_peels(), vect_reduction_def, vect_remove_stores(), vect_schedule_slp(), vect_set_loop_condition(), vect_transform_loop_stmt(), vect_transform_stmt(), vect_use_loop_mask_for_alignment_p(), vect_vf_for_cost(), vec_info::vector_mode, and VECTOR_TYPE_P.

Referenced by vect_transform_loops().

◆ vect_transform_reduction()

bool vect_transform_reduction ( loop_vec_info loop_vinfo,
stmt_vec_info stmt_info,
gimple_stmt_iterator * gsi,
gimple ** vec_stmt,
slp_tree slp_node )
extern
Transform the definition stmt STMT_INFO of a reduction PHI backedge
value.   

References build_vect_cond_expr(), canonicalize_code(), gimple_match_op::code, commutative_binary_op_p(), conditional_internal_fn_code(), dump_enabled_p(), dump_printf_loc(), FOLD_LEFT_REDUCTION, FOR_EACH_VEC_ELT, gcc_assert, gcc_unreachable, get_conditional_internal_fn(), ggc_alloc(), gimple_build_assign(), gimple_build_call_internal(), gimple_call_set_lhs(), gimple_call_set_nothrow(), gimple_extract_op(), gimple_get_lhs(), gimple_set_lhs(), i, info_for_reduction(), loop::inner, internal_fn_else_index(), code_helper::is_internal_fn(), code_helper::is_tree_code(), LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASKS, make_ssa_name(), MSG_NOTE, nested_in_vect_loop_p(), NULL_TREE, gimple_match_op::num_ops, gimple_match_op::ops, SLP_TREE_NUMBER_OF_VEC_STMTS, STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_FN, STMT_VINFO_REDUC_IDX, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, truth_type_for(), gimple_match_op::type, use_mask_by_cond_expr_p(), vect_create_destination_var(), vect_double_reduction_def, vect_emulate_mixed_dot_prod(), vect_finish_stmt_generation(), vect_get_loop_mask(), vect_get_num_copies(), vect_get_vec_defs(), vect_get_vec_defs_for_operand(), vect_is_emulated_mixed_dot_prod(), vect_location, vect_orig_stmt(), and vectorize_fold_left_reduction().

Referenced by vect_transform_stmt().

◆ vect_transform_slp_perm_load()

bool vect_transform_slp_perm_load ( vec_info * ,
slp_tree ,
const vec< tree > & ,
gimple_stmt_iterator * ,
poly_uint64 ,
bool ,
unsigned * ,
unsigned * = nullptr,
bool = false )
extern

◆ vect_transform_stmt()

bool vect_transform_stmt ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple_stmt_iterator * gsi,
slp_tree slp_node,
slp_instance slp_node_instance )
extern

◆ vect_update_inits_of_drs()

void vect_update_inits_of_drs ( loop_vec_info loop_vinfo,
tree niters,
tree_code code )
extern
Function vect_update_inits_of_drs

Apply vect_update_inits_of_dr to all accesses in LOOP_VINFO.
CODE and NITERS are as for vect_update_inits_of_dr.   

References DUMP_VECT_SCOPE, fold_convert, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, sizetype, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, TREE_TYPE, types_compatible_p(), and vect_update_init_of_dr().

Referenced by update_epilogue_loop_vinfo(), vect_do_peeling(), and vect_prepare_for_masked_peels().

◆ vect_update_max_nunits() [1/2]

void vect_update_max_nunits ( poly_uint64 * max_nunits,
poly_uint64 nunits )
inline
Update maximum unit count *MAX_NUNITS so that it accounts for
NUNITS.  *MAX_NUNITS can be 1 if we haven't yet recorded anything.   

References ggc_alloc().

Referenced by vect_build_slp_tree(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_record_max_nunits(), and vect_update_max_nunits().

◆ vect_update_max_nunits() [2/2]

void vect_update_max_nunits ( poly_uint64 * max_nunits,
tree vectype )
inline
Update maximum unit count *MAX_NUNITS so that it accounts for
the number of units in vector type VECTYPE.  *MAX_NUNITS can be 1
if we haven't yet recorded any vector types.   
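
The typical pattern accumulates over every vector type encountered,
starting from 1; VECTYPES here stands in for whatever collection the
caller iterates over:

   poly_uint64 max_nunits = 1;   /* Nothing recorded yet.  */
   for (tree vectype : vectypes)
     vect_update_max_nunits (&max_nunits, vectype);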

References TYPE_VECTOR_SUBPARTS(), and vect_update_max_nunits().

◆ vect_use_loop_mask_for_alignment_p()

bool vect_use_loop_mask_for_alignment_p ( loop_vec_info loop_vinfo)
inline
Return true if the loop described by LOOP_VINFO is fully-masked and
if the first iteration should use a partial mask in order to achieve
alignment.   
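
A sketch loosely following its use in vect_transform_loop (one of the
listed callers):

   if (vect_use_loop_mask_for_alignment_p (loop_vinfo))
     /* Align via a partial first-iteration mask instead of peeling a
        scalar prologue.  */
     vect_prepare_for_masked_peels (loop_vinfo);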

References LOOP_VINFO_FULLY_MASKED_P, and LOOP_VINFO_PEELING_FOR_ALIGNMENT.

Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_prepare_for_masked_peels(), and vect_transform_loop().

◆ vect_use_mask_type_p()

bool vect_use_mask_type_p ( stmt_vec_info stmt_info)
inline
Return true if STMT_INFO should produce a vector mask type rather than
a normal nonmask type.   

References ggc_alloc().

Referenced by integer_type_for_mask(), vect_get_vector_types_for_stmt(), and vect_init_pattern_stmt().

◆ vect_vf_for_cost()

unsigned int vect_vf_for_cost ( loop_vec_info loop_vinfo)
inline
Return the vectorization factor that should be used for costing
purposes while vectorizing the loop described by LOOP_VINFO.
Pick a reasonable estimate if the vectorization factor isn't
known at compile time.   
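
A sketch of how a cost comparison might use the estimate
(SCALAR_ITER_COST and VECTOR_ITER_COST are hypothetical totals):

   unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
   /* One vector iteration replaces ASSUMED_VF scalar iterations.  */
   bool profitable = vector_iter_cost < scalar_iter_cost * assumed_vf;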

References estimated_poly_value(), and LOOP_VINFO_VECT_FACTOR.

Referenced by vect_analyze_loop_costing(), vect_apply_runtime_profitability_check_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), and vect_transform_loop().

◆ vectorizable_induction()

bool vectorizable_induction ( loop_vec_info loop_vinfo,
stmt_vec_info stmt_info,
gimple ** vec_stmt,
slp_tree slp_node,
stmt_vector_for_cost * cost_vec )
extern
Function vectorizable_induction

Check if STMT_INFO performs an induction computation that can be vectorized.
If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized
phi to replace it, put it in VEC_STMT, and add it to the same basic block.
Return true if STMT_INFO is vectorizable in this way.   

References add_phi_arg(), build1(), build_index_vector(), build_int_cst(), build_int_cstu(), build_real_from_wide(), build_vector_from_val(), CONSTANT_CLASS_P, create_phi_node(), directly_supported_p(), dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, expr, FLOAT_TYPE_P, flow_bb_inside_loop_p(), fold_convert, FOR_EACH_IMM_USE_FAST, FOR_EACH_VEC_ELT, force_gimple_operand(), gcc_assert, get_same_sized_vectype(), ggc_alloc(), gimple_assign_lhs(), gimple_bb(), gimple_build(), gimple_build_assign(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_convert(), gimple_get_lhs(), gimple_phi_arg_def(), gsi_after_labels(), GSI_CONTINUE_LINKING, gsi_for_stmt(), gsi_insert_on_edge_immediate(), gsi_insert_seq_after(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, i, induc_vec_info_type, init_expr(), loop::inner, integer_type_node, integer_zerop(), INTEGRAL_TYPE_P, is_gimple_debug(), least_common_multiple(), vec_info::lookup_stmt(), loop_latch_edge(), loop_preheader_edge(), LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASK_SKIP_NITERS, LOOP_VINFO_USING_SELECT_VL_P, LOOP_VINFO_VECT_FACTOR, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, record_stmt_cost(), SCALAR_FLOAT_TYPE_P, scalar_to_vec, si, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_SCALAR_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, STMT_VINFO_DEF_TYPE, STMT_VINFO_LIVE_P, STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, STMT_VINFO_RELEVANT_P, STMT_VINFO_TYPE, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, type_has_mode_precision_p(), TYPE_VECTOR_SUBPARTS(), UNKNOWN_LOCATION, unshare_expr(), UNSIGNED, USE_STMT, useless_type_conversion_p(), vect_body, vect_get_loop_len(), vect_get_new_ssa_name(), vect_get_new_vect_var(), vect_get_num_copies(), vect_get_slp_vect_def(), vect_get_vec_defs_for_operand(), vect_induction_def, vect_init_vector(), vect_location, vect_maybe_update_slp_op_vectype(), vect_phi_initial_value(), vect_prologue, vect_simple_var, vect_step_op_add, vector_stmt, and vectorizable_nonlinear_induction().

Referenced by vect_analyze_loop_operations(), vect_analyze_stmt(), and vect_transform_stmt().

◆ vectorizable_lc_phi()

◆ vectorizable_live_operation()

bool vectorizable_live_operation ( vec_info * vinfo,
stmt_vec_info stmt_info,
slp_tree slp_node,
slp_instance slp_node_instance,
int slp_index,
bool vec_stmt_p,
stmt_vector_for_cost * cost_vec )
extern
Function vectorizable_live_operation.

STMT_INFO computes a value that is used outside the loop.  Check if
it can be supported.   

References bitsize_int, build3(), build_nonstandard_integer_type(), build_zero_cst(), can_vec_extract_var_idx_p(), direct_internal_fn_supported_p(), dump_enabled_p(), dump_printf_loc(), EXTRACT_LAST_REDUCTION, flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_convert, FOLD_LEFT_REDUCTION, FOR_EACH_IMM_USE_ON_STMT, FOR_EACH_IMM_USE_STMT, force_gimple_operand(), gcc_assert, get_loop_exit_edges(), ggc_alloc(), gimple_bb(), gimple_build_assign(), gimple_get_lhs(), gimple_phi_arg_edge(), gimple_phi_result(), gsi_after_labels(), gsi_for_stmt(), gsi_insert_before(), gsi_insert_seq_after(), gsi_insert_seq_before(), GSI_SAME_STMT, info_for_reduction(), int_const_binop(), is_gimple_debug(), is_simple_and_all_uses_invariant(), vec_info::lookup_stmt(), loop_exit_edge_p(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_FULLY_WITH_LENGTH_P, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASKS, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, NULL_TREE, OPTIMIZE_FOR_SPEED, phi_arg_index_from_use(), PURE_SLP_STMT, record_stmt_cost(), REDUC_GROUP_FIRST_ELEMENT, remove_phi_node(), SET_USE, si, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, SSA_NAME_IS_DEFAULT_DEF, SSA_NAME_OCCURS_IN_ABNORMAL_PHI, STMT_VINFO_DEF_TYPE, STMT_VINFO_LIVE_P, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_TYPE, STMT_VINFO_RELEVANT_P, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, tree_to_uhwi(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), update_stmt(), vec_to_scalar, vect_create_epilog_for_reduction(), vect_epilogue, vect_get_num_copies(), vect_induction_def, vect_location, vect_orig_stmt(), vect_record_loop_len(), vect_record_loop_mask(), vect_stmt_dominates_stmt_p(), vect_stmt_to_vectorize(), VECTOR_BOOLEAN_TYPE_P, vector_element_bits_tree(), and vectorizable_live_operation_1().

Referenced by can_vectorize_live_stmts(), vect_analyze_loop_operations(), vect_bb_slp_mark_live_stmts(), vect_schedule_slp_node(), and vect_slp_analyze_node_operations_1().

◆ vectorizable_phi()

◆ vectorizable_recurr()

bool vectorizable_recurr ( loop_vec_info loop_vinfo,
stmt_vec_info stmt_info,
gimple ** vec_stmt,
slp_tree slp_node,
stmt_vector_for_cost * cost_vec )
extern
Vectorizes first order recurrences.  An overview of the transformation
is described below. Suppose we have the following loop.

  int t = 0;
  for (int i = 0; i < n; ++i)
    {
      b[i] = a[i] - t;
      t = a[i];
    }

There is a first-order recurrence on 't': each iteration uses the value
of a[i] from the previous iteration.  For this loop, the scalar IR
looks (simplified) like:

 scalar.preheader:
   init = 0;

 scalar.body:
   i = PHI <0(scalar.preheader), i+1(scalar.body)>
   _2 = PHI <init(scalar.preheader), _1(scalar.body)>
   _1 = a[i]
   b[i] = _1 - _2
   if (i < n) goto scalar.body

In this example, _2 is a recurrence because its value depends on the
previous iteration.  We vectorize this as (VF = 4)

 vector.preheader:
   vect_init = vect_cst(..., ..., ..., 0)

 vector.body
   i = PHI <0(vector.preheader), i+4(vector.body)>
   vect_1 = PHI <vect_init(vector.preheader), v2(vector.body)>
   vect_2 = a[i, i+1, i+2, i+3];
   vect_3 = vec_perm (vect_1, vect_2, { 3, 4, 5, 6 })
   b[i, i+1, i+2, i+3] = vect_2 - vect_3
   if (..) goto vector.body

In this function, vectorizable_recurr, we generate code for both the
vector PHI node and the permute, since together they compute the
vectorized value of the scalar PHI.  We do not yet have the backedge
value to fill in there or into the vec_perm; those are filled in by
maybe_set_vectorized_backedge_value and vect_schedule_scc.

TODO: Since the scalar loop does not have a use of the recurrence
outside of the loop, the natural way to implement peeling via
vectorizing the live value does not work.  For now, peeling of loops
with a recurrence is not implemented.  For SLP, the supported cases
are restricted to those requiring a single vector recurrence PHI.

References add_phi_arg(), build_vector_from_val(), can_vec_perm_const_p(), create_phi_node(), dump_enabled_p(), dump_printf_loc(), FOR_EACH_VEC_ELT, ggc_alloc(), gimple_bb(), gimple_build_assign(), gimple_convert(), gimple_phi_result(), gsi_for_stmt(), gsi_insert_seq_on_edge_immediate(), gsi_next(), i, vec_info::lookup_def(), loop_latch_edge(), loop_preheader_edge(), LOOP_VINFO_LOOP, make_ssa_name(), maybe_gt, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, NULL_TREE, PHI_ARG_DEF_FROM_EDGE, record_stmt_cost(), recurr_info_type, scalar_to_vec, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), types_compatible_p(), UNKNOWN_LOCATION, useless_type_conversion_p(), vect_body, vect_finish_stmt_generation(), vect_first_order_recurrence, vect_gen_perm_mask_checked(), vect_get_new_vect_var(), vect_get_num_copies(), vect_init_vector(), vect_location, vect_maybe_update_slp_op_vectype(), vect_prologue, vect_simple_var, vect_stmt_to_vectorize(), and vector_stmt.

Referenced by vect_analyze_loop_operations(), vect_analyze_stmt(), and vect_transform_stmt().

◆ vectorizable_reduction()

bool vectorizable_reduction ( loop_vec_info loop_vinfo,
stmt_vec_info stmt_info,
slp_tree slp_node,
slp_instance slp_node_instance,
stmt_vector_for_cost * cost_vec )
extern
Function vectorizable_reduction.

Check if STMT_INFO performs a reduction operation that can be vectorized;
the actual transformation is carried out later by vect_transform_cycle_phi
and vect_transform_reduction.  Return true if STMT_INFO is vectorizable
in this way.

This function also handles reduction idioms (patterns) that have been
recognized in advance during vect_pattern_recog.  In this case, STMT_INFO
may be of this form:
  X = pattern_expr (arg0, arg1, ..., X)
and its STMT_VINFO_RELATED_STMT points to the last stmt in the original
sequence that had been detected and replaced by the pattern-stmt
(STMT_INFO).

This function also handles reduction of condition expressions, for example:
  for (int i = 0; i < N; i++)
    if (a[i] < value)
      last = a[i];
This is handled by vectorizing the loop and creating an additional vector
containing the loop indexes for which "a[i] < value" was true.  In the
function epilogue this is reduced to a single max value and then used to
index into the vector of results.

In some cases of reduction patterns, the type of the reduction variable X is
different than the type of the other arguments of STMT_INFO.
In such cases, the vectype that is used when transforming STMT_INFO into
a vector stmt is different than the vectype that is used to determine the
vectorization factor, because it consists of a different number of elements
than the actual number of elements that are being operated upon in parallel.

For example, consider an accumulation of shorts into an int accumulator.
On some targets it's possible to vectorize this pattern operating on 8
shorts at a time (hence, the vectype for purposes of determining the
vectorization factor should be V8HI); on the other hand, the vectype that
is used to create the vector form is actually V4SI (the type of the result).

Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that
indicates what is the actual level of parallelism (V8HI in the example), so
that the right vectorization factor would be derived.  This vectype
corresponds to the type of arguments to the reduction stmt, and should *NOT*
be used to create the vectorized stmt.  The right vectype for the vectorized
stmt is obtained from the type of the result X:
   get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))

This means that, contrary to "regular" reductions (or "regular" stmts in
general), the following equation:
   STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (vinfo, TREE_TYPE (X))
does *NOT* necessarily hold for reduction patterns.   
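
The shorts-into-int example from the text, as scalar source (N is
assumed):

   short a[N];
   int sum = 0;
   for (int i = 0; i < N; i++)
     sum += a[i];   /* arguments vectorize as V8HI, the result as V4SI  */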

References associative_binary_op_p(), boolean_type_node, build_int_cst(), can_duplicate_and_interleave_p(), gimple_match_op::code, commutative_binary_op_p(), COMPARISON_CLASS_P, COND_REDUCTION, conditional_internal_fn_code(), CONST_COND_REDUCTION, CONVERT_EXPR_CODE_P, cycle_phi_info_type, direct_internal_fn_supported_p(), directly_supported_p(), dump_enabled_p(), dump_printf(), dump_printf_loc(), expand_vec_cond_expr_p(), EXTRACT_LAST_REDUCTION, FLOAT_TYPE_P, fold_binary, FOLD_LEFT_REDUCTION, fold_left_reduction_fn(), FOR_EACH_VEC_ELT, gcc_assert, gcc_unreachable, get_conditional_internal_fn(), get_masked_reduction_fn(), GET_MODE_PRECISION(), GET_MODE_SIZE(), get_same_sized_vectype(), get_vectype_for_scalar_type(), wi::geu_p(), ggc_alloc(), gimple_bb(), gimple_extract_op(), gimple_phi_result(), HONOR_SIGN_DEPENDENT_ROUNDING(), i, IFN_LAST, info_for_reduction(), loop::inner, int_const_binop(), INTEGER_INDUC_COND_REDUCTION, integer_one_node, integer_onep(), integer_zerop(), INTEGRAL_TYPE_P, internal_fn_mask_index(), is_nonwrapping_integer_induction(), known_eq, vec_info::lookup_def(), vec_info::lookup_stmt(), loop_latch_edge(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASKS, LOOP_VINFO_VECT_FACTOR, make_unsigned_type(), max_loop_iterations(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, needs_fold_left_reduction_p(), nested_in_vect_loop_p(), neutral_op_for_reduction(), NULL, NULL_TREE, gimple_match_op::num_ops, gimple_match_op::ops, optab_vector, OPTIMIZE_FOR_SPEED, PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, POINTER_TYPE_P, record_stmt_cost(), REDUC_GROUP_FIRST_ELEMENT, REDUC_GROUP_NEXT_ELEMENT, reduc_vec_info_type, reduction_fn_for_scalar_code(), SCALAR_FLOAT_TYPE_P, SCALAR_TYPE_MODE, single_imm_use(), SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_LIVE_P, STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED, STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_REDUC_CODE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_FN, STMT_VINFO_REDUC_IDX, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, STMT_VINFO_STMT, STMT_VINFO_TYPE, STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL, STMT_VINFO_VECTYPE, _loop_vec_info::suggested_unroll_factor, wi::to_widest(), TREE_CODE, TREE_CODE_REDUCTION, tree_int_cst_lt(), tree_int_cst_sgn(), tree_nop_conversion_p(), TREE_TYPE, truth_type_for(), gimple_match_op::type, type_has_mode_precision_p(), TYPE_MAX_VALUE, TYPE_MIN_VALUE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), types_compatible_p(), use_mask_by_cond_expr_p(), vect_body, vect_can_vectorize_without_simd_p(), vect_constant_def, vect_double_reduction_def, vect_emulated_vector_p(), vect_get_num_copies(), vect_induction_def, vect_internal_def, vect_is_emulated_mixed_dot_prod(), vect_is_simple_use(), vect_location, vect_maybe_update_slp_op_vectype(), vect_model_reduction_cost(), vect_nested_cycle, vect_orig_stmt(), vect_phi_initial_value(), vect_record_loop_len(), vect_record_loop_mask(), vect_reduction_def, vect_stmt_to_vectorize(), vect_unknown_def_type, vect_unused_in_scope, vect_used_in_outer, vect_used_only_live, vector_stmt, and VECTORIZABLE_CYCLE_DEF.

Referenced by vect_analyze_loop_operations(), and vect_analyze_stmt().

◆ vectorize_loops()

unsigned vectorize_loops ( void )
In tree-vectorizer.cc.   

Variable Documentation

◆ num__slp_patterns

size_t num__slp_patterns
extern
Number of supported pattern matchers.

Referenced by vect_match_slp_patterns_2().

◆ slp_patterns

vect_pattern_decl_t slp_patterns[]
extern
List of supported pattern matchers.   

Referenced by vect_match_slp_patterns_2().

◆ vect_location

dump_user_location_t vect_location
extern
Source location + hotness information.  
Vectorizer
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.   
Loop and basic block vectorizer.

 This file contains drivers for the three vectorizers:
 (1) loop vectorizer (inter-iteration parallelism),
 (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop
     vectorizer)
 (3) BB vectorizer (out-of-loops), aka SLP

 The rest of the vectorizer's code is organized as follows:
 - tree-vect-loop.cc - loop specific parts such as reductions, etc. These are
   used by drivers (1) and (2).
 - tree-vect-loop-manip.cc - vectorizer's loop control-flow utilities, used by
   drivers (1) and (2).
 - tree-vect-slp.cc - BB vectorization specific analysis and transformation,
   used by drivers (2) and (3).
 - tree-vect-stmts.cc - statements analysis and transformation (used by all).
 - tree-vect-data-refs.cc - vectorizer specific data-refs analysis and
   manipulations (used by all).
 - tree-vect-patterns.cc - vectorizable code patterns detector (used by all)

 Here's a poor attempt at illustrating that:

    tree-vectorizer.cc:
    loop_vect()  loop_aware_slp()  slp_vect()
         |        /           \          /
         |       /             \        /
         tree-vect-loop.cc  tree-vect-slp.cc
               | \      \  /      /   |
               |  \      \/      /    |
               |   \     /\     /     |
               |    \   /  \   /      |
        tree-vect-stmts.cc  tree-vect-data-refs.cc
                      \      /
                   tree-vect-patterns.cc
Loop or bb location, with hotness information.   

Referenced by check_load_store_for_partial_vectors(), check_scan_store(), dependence_distance_ge_vf(), vect_optimize_slp_pass::dump(), get_group_alias_ptr_type(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), increase_alignment(), is_simple_and_all_uses_invariant(), vect_optimize_slp_pass::materialize(), maybe_push_to_hybrid_worklist(), move_early_exit_stmts(), optimize_load_redistribution_1(), optimize_mask_stores(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), process_use(), report_ploop_op(), report_vect_op(), try_vectorize_loop_1(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_group_access_1(), vect_analyze_loop(), vect_analyze_loop_1(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_bb_partition_graph(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_build_slp_tree(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_check_lower_bound(), vect_check_nonzero_value(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_compute_data_ref_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_mask_precision(), vect_determine_min_output_precision_1(), vect_determine_partial_vectors_and_peeling(), vect_determine_precisions_from_range(), vect_determine_precisions_from_users(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt(), vect_determine_vf_for_stmt_1(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_finish_stmt_generation_1(), vect_gen_prolog_loop_niters(), vect_get_and_check_slp_defs(), vect_get_data_access_cost(), vect_get_load_cost(), vect_get_loop_niters(), vect_get_peel_iters_epilogue(), vect_get_range_info(), vect_get_store_cost(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_grouped_load_supported(), vect_grouped_store_supported(), vect_init_vector_1(), vect_is_simple_iv_evolution(), vect_is_simple_reduction(), vect_is_simple_use(), vect_is_simple_use(), vect_joust_loop_vinfos(), vect_lanes_optab_supported_p(), vect_loop_kill_debug_uses(), vect_loop_versioning(), vect_make_slp_decision(), vect_mark_for_runtime_alias_test(), vect_mark_pattern_stmts(), vect_mark_relevant(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_model_promotion_demotion_cost(), vect_model_simple_cost(), vect_pattern_detected(), vect_pattern_recog_1(), vect_pattern_validate_optab(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_recog_average_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_mulhs_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_record_base_alignment(), vect_record_max_nunits(), vect_schedule_slp(), vect_schedule_slp_node(), vect_set_loop_condition(), vect_shift_permute_load_chain(), vect_slp_analyze_bb_1(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_node_operations(), 
vect_slp_analyze_operations(), vect_slp_bbs(), vect_slp_convert_to_external(), vect_slp_function(), vect_slp_region(), vect_split_slp_store_group(), vect_split_statement(), vect_stmt_relevant_p(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_loops(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), vect_update_ivs_after_vectorizer(), vect_update_misalignment_for_peel(), vect_update_vf_for_slp(), vect_use_strided_gather_scatters_p(), vect_verify_loop_lens(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bb_reduc_epilogue(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), vectorize_fold_left_reduction(), and auto_purge_vect_location::~auto_purge_vect_location().