GCC Middle and Back End API Reference
tree-vectorizer.h File Reference
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
#include "internal-fn.h"
#include "tree-ssa-operands.h"
#include "gimple-match.h"
Include dependency graph for tree-vectorizer.h:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Data Structures

struct  stmt_info_for_cost
 
struct  vect_scalar_ops_slice
 
struct  vect_scalar_ops_slice_hash
 
struct  _slp_tree
 
class  _slp_instance
 
struct  scalar_cond_masked_key
 
struct  default_hash_traits< scalar_cond_masked_key >
 
class  vec_lower_bound
 
class  vec_info_shared
 
class  vec_info
 
struct  rgroup_controls
 
struct  vec_loop_masks
 
struct  vect_reusable_accumulator
 
class  _loop_vec_info
 
struct  slp_root
 
class  _bb_vec_info
 
class  dr_vec_info
 
class  _stmt_vec_info
 
struct  gather_scatter_info
 
class  vector_costs
 
class  auto_purge_vect_location
 
struct  vect_loop_form_info
 
class  vect_pattern
 

Macros

#define VECTORIZABLE_CYCLE_DEF(D)
 
#define SLP_INSTANCE_TREE(S)   (S)->root
 
#define SLP_INSTANCE_UNROLLING_FACTOR(S)   (S)->unrolling_factor
 
#define SLP_INSTANCE_LOADS(S)   (S)->loads
 
#define SLP_INSTANCE_ROOT_STMTS(S)   (S)->root_stmts
 
#define SLP_INSTANCE_REMAIN_DEFS(S)   (S)->remain_defs
 
#define SLP_INSTANCE_KIND(S)   (S)->kind
 
#define SLP_TREE_CHILDREN(S)   (S)->children
 
#define SLP_TREE_SCALAR_STMTS(S)   (S)->stmts
 
#define SLP_TREE_SCALAR_OPS(S)   (S)->ops
 
#define SLP_TREE_REF_COUNT(S)   (S)->refcnt
 
#define SLP_TREE_VEC_DEFS(S)   (S)->vec_defs
 
#define SLP_TREE_NUMBER_OF_VEC_STMTS(S)   (S)->vec_stmts_size
 
#define SLP_TREE_LOAD_PERMUTATION(S)   (S)->load_permutation
 
#define SLP_TREE_LANE_PERMUTATION(S)   (S)->lane_permutation
 
#define SLP_TREE_SIMD_CLONE_INFO(S)   (S)->simd_clone_info
 
#define SLP_TREE_DEF_TYPE(S)   (S)->def_type
 
#define SLP_TREE_VECTYPE(S)   (S)->vectype
 
#define SLP_TREE_REPRESENTATIVE(S)   (S)->representative
 
#define SLP_TREE_LANES(S)   (S)->lanes
 
#define SLP_TREE_CODE(S)   (S)->code
 
#define LOOP_VINFO_LOOP(L)   (L)->loop
 
#define LOOP_VINFO_IV_EXIT(L)   (L)->vec_loop_iv_exit
 
#define LOOP_VINFO_EPILOGUE_IV_EXIT(L)   (L)->vec_epilogue_loop_iv_exit
 
#define LOOP_VINFO_SCALAR_IV_EXIT(L)   (L)->scalar_loop_iv_exit
 
#define LOOP_VINFO_BBS(L)   (L)->bbs
 
#define LOOP_VINFO_NITERSM1(L)   (L)->num_itersm1
 
#define LOOP_VINFO_NITERS(L)   (L)->num_iters
 
#define LOOP_VINFO_NITERS_UNCHANGED(L)   (L)->num_iters_unchanged
 
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)   (L)->num_iters_assumptions
 
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L)   (L)->th
 
#define LOOP_VINFO_VERSIONING_THRESHOLD(L)   (L)->versioning_threshold
 
#define LOOP_VINFO_VECTORIZABLE_P(L)   (L)->vectorizable
 
#define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L)   (L)->can_use_partial_vectors_p
 
#define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L)   (L)->using_partial_vectors_p
 
#define LOOP_VINFO_USING_DECREMENTING_IV_P(L)   (L)->using_decrementing_iv_p
 
#define LOOP_VINFO_USING_SELECT_VL_P(L)   (L)->using_select_vl_p
 
#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P(L)    (L)->epil_using_partial_vectors_p
 
#define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L)   (L)->partial_load_store_bias
 
#define LOOP_VINFO_VECT_FACTOR(L)   (L)->vectorization_factor
 
#define LOOP_VINFO_MAX_VECT_FACTOR(L)   (L)->max_vectorization_factor
 
#define LOOP_VINFO_MASKS(L)   (L)->masks
 
#define LOOP_VINFO_LENS(L)   (L)->lens
 
#define LOOP_VINFO_MASK_SKIP_NITERS(L)   (L)->mask_skip_niters
 
#define LOOP_VINFO_RGROUP_COMPARE_TYPE(L)   (L)->rgroup_compare_type
 
#define LOOP_VINFO_RGROUP_IV_TYPE(L)   (L)->rgroup_iv_type
 
#define LOOP_VINFO_PARTIAL_VECTORS_STYLE(L)   (L)->partial_vector_style
 
#define LOOP_VINFO_PTR_MASK(L)   (L)->ptr_mask
 
#define LOOP_VINFO_N_STMTS(L)   (L)->shared->n_stmts
 
#define LOOP_VINFO_LOOP_NEST(L)   (L)->shared->loop_nest
 
#define LOOP_VINFO_DATAREFS(L)   (L)->shared->datarefs
 
#define LOOP_VINFO_DDRS(L)   (L)->shared->ddrs
 
#define LOOP_VINFO_INT_NITERS(L)   (TREE_INT_CST_LOW ((L)->num_iters))
 
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L)   (L)->peeling_for_alignment
 
#define LOOP_VINFO_UNALIGNED_DR(L)   (L)->unaligned_dr
 
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)   (L)->may_misalign_stmts
 
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)   (L)->may_alias_ddrs
 
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)   (L)->comp_alias_ddrs
 
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)   (L)->check_unequal_addrs
 
#define LOOP_VINFO_CHECK_NONZERO(L)   (L)->check_nonzero
 
#define LOOP_VINFO_LOWER_BOUNDS(L)   (L)->lower_bounds
 
#define LOOP_VINFO_GROUPED_STORES(L)   (L)->grouped_stores
 
#define LOOP_VINFO_SLP_INSTANCES(L)   (L)->slp_instances
 
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L)   (L)->slp_unrolling_factor
 
#define LOOP_VINFO_REDUCTIONS(L)   (L)->reductions
 
#define LOOP_VINFO_REDUCTION_CHAINS(L)   (L)->reduction_chains
 
#define LOOP_VINFO_PEELING_FOR_GAPS(L)   (L)->peeling_for_gaps
 
#define LOOP_VINFO_PEELING_FOR_NITER(L)   (L)->peeling_for_niter
 
#define LOOP_VINFO_EARLY_BREAKS(L)   (L)->early_breaks
 
#define LOOP_VINFO_EARLY_BRK_STORES(L)   (L)->early_break_stores
 
#define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED(L)    (single_pred ((L)->loop->latch) != (L)->vec_loop_iv_exit->src)
 
#define LOOP_VINFO_EARLY_BRK_DEST_BB(L)   (L)->early_break_dest_bb
 
#define LOOP_VINFO_EARLY_BRK_VUSES(L)   (L)->early_break_vuses
 
#define LOOP_VINFO_LOOP_CONDS(L)   (L)->conds
 
#define LOOP_VINFO_LOOP_IV_COND(L)   (L)->loop_iv_cond
 
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L)   (L)->no_data_dependencies
 
#define LOOP_VINFO_SCALAR_LOOP(L)   (L)->scalar_loop
 
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L)   (L)->scalar_loop_scaling
 
#define LOOP_VINFO_HAS_MASK_STORE(L)   (L)->has_mask_store
 
#define LOOP_VINFO_SCALAR_ITERATION_COST(L)   (L)->scalar_cost_vec
 
#define LOOP_VINFO_ORIG_LOOP_INFO(L)   (L)->orig_loop_info
 
#define LOOP_VINFO_SIMD_IF_COND(L)   (L)->simd_if_cond
 
#define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L)   (L)->inner_loop_cost_factor
 
#define LOOP_VINFO_FULLY_MASKED_P(L)
 
#define LOOP_VINFO_FULLY_WITH_LENGTH_P(L)
 
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)    ((L)->may_misalign_stmts.length () > 0)
 
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)
 
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)    (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
 
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)    (LOOP_VINFO_SIMD_IF_COND (L))
 
#define LOOP_REQUIRES_VERSIONING(L)
 
#define LOOP_VINFO_NITERS_KNOWN_P(L)    (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
 
#define LOOP_VINFO_EPILOGUE_P(L)    (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
 
#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L)    (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
 
#define BB_VINFO_BB(B)   (B)->bb
 
#define BB_VINFO_GROUPED_STORES(B)   (B)->grouped_stores
 
#define BB_VINFO_SLP_INSTANCES(B)   (B)->slp_instances
 
#define BB_VINFO_DATAREFS(B)   (B)->shared->datarefs
 
#define BB_VINFO_DDRS(B)   (B)->shared->ddrs
 
#define STMT_VINFO_TYPE(S)   (S)->type
 
#define STMT_VINFO_STMT(S)   (S)->stmt
 
#define STMT_VINFO_RELEVANT(S)   (S)->relevant
 
#define STMT_VINFO_LIVE_P(S)   (S)->live
 
#define STMT_VINFO_VECTYPE(S)   (S)->vectype
 
#define STMT_VINFO_VEC_STMTS(S)   (S)->vec_stmts
 
#define STMT_VINFO_VECTORIZABLE(S)   (S)->vectorizable
 
#define STMT_VINFO_DATA_REF(S)   ((S)->dr_aux.dr + 0)
 
#define STMT_VINFO_GATHER_SCATTER_P(S)   (S)->gather_scatter_p
 
#define STMT_VINFO_STRIDED_P(S)   (S)->strided_p
 
#define STMT_VINFO_MEMORY_ACCESS_TYPE(S)   (S)->memory_access_type
 
#define STMT_VINFO_SIMD_LANE_ACCESS_P(S)   (S)->simd_lane_access_p
 
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL(S)   (S)->induc_cond_initial_val
 
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT(S)   (S)->reduc_epilogue_adjustment
 
#define STMT_VINFO_REDUC_IDX(S)   (S)->reduc_idx
 
#define STMT_VINFO_FORCE_SINGLE_CYCLE(S)   (S)->force_single_cycle
 
#define STMT_VINFO_DR_WRT_VEC_LOOP(S)   (S)->dr_wrt_vec_loop
 
#define STMT_VINFO_DR_BASE_ADDRESS(S)   (S)->dr_wrt_vec_loop.base_address
 
#define STMT_VINFO_DR_INIT(S)   (S)->dr_wrt_vec_loop.init
 
#define STMT_VINFO_DR_OFFSET(S)   (S)->dr_wrt_vec_loop.offset
 
#define STMT_VINFO_DR_STEP(S)   (S)->dr_wrt_vec_loop.step
 
#define STMT_VINFO_DR_BASE_ALIGNMENT(S)   (S)->dr_wrt_vec_loop.base_alignment
 
#define STMT_VINFO_DR_BASE_MISALIGNMENT(S)    (S)->dr_wrt_vec_loop.base_misalignment
 
#define STMT_VINFO_DR_OFFSET_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.offset_alignment
 
#define STMT_VINFO_DR_STEP_ALIGNMENT(S)    (S)->dr_wrt_vec_loop.step_alignment
 
#define STMT_VINFO_DR_INFO(S)    (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
 
#define STMT_VINFO_IN_PATTERN_P(S)   (S)->in_pattern_p
 
#define STMT_VINFO_RELATED_STMT(S)   (S)->related_stmt
 
#define STMT_VINFO_PATTERN_DEF_SEQ(S)   (S)->pattern_def_seq
 
#define STMT_VINFO_SIMD_CLONE_INFO(S)   (S)->simd_clone_info
 
#define STMT_VINFO_DEF_TYPE(S)   (S)->def_type
 
#define STMT_VINFO_GROUPED_ACCESS(S)    ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S)   (S)->loop_phi_evolution_base_unchanged
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S)   (S)->loop_phi_evolution_part
 
#define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE(S)   (S)->loop_phi_evolution_type
 
#define STMT_VINFO_MIN_NEG_DIST(S)   (S)->min_neg_dist
 
#define STMT_VINFO_REDUC_TYPE(S)   (S)->reduc_type
 
#define STMT_VINFO_REDUC_CODE(S)   (S)->reduc_code
 
#define STMT_VINFO_REDUC_FN(S)   (S)->reduc_fn
 
#define STMT_VINFO_REDUC_DEF(S)   (S)->reduc_def
 
#define STMT_VINFO_REDUC_VECTYPE(S)   (S)->reduc_vectype
 
#define STMT_VINFO_REDUC_VECTYPE_IN(S)   (S)->reduc_vectype_in
 
#define STMT_VINFO_SLP_VECT_ONLY(S)   (S)->slp_vect_only_p
 
#define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S)   (S)->slp_vect_pattern_only_p
 
#define DR_GROUP_FIRST_ELEMENT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
 
#define DR_GROUP_NEXT_ELEMENT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
 
#define DR_GROUP_SIZE(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
 
#define DR_GROUP_STORE_COUNT(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)
 
#define DR_GROUP_GAP(S)    (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
 
#define REDUC_GROUP_FIRST_ELEMENT(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->first_element)
 
#define REDUC_GROUP_NEXT_ELEMENT(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->next_element)
 
#define REDUC_GROUP_SIZE(S)    (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)
 
#define STMT_VINFO_RELEVANT_P(S)   ((S)->relevant != vect_unused_in_scope)
 
#define HYBRID_SLP_STMT(S)   ((S)->slp_type == hybrid)
 
#define PURE_SLP_STMT(S)   ((S)->slp_type == pure_slp)
 
#define STMT_SLP_TYPE(S)   (S)->slp_type
 
#define VECT_MAX_COST   1000
 
#define MAX_INTERM_CVT_STEPS   3
 
#define MAX_VECTORIZATION_FACTOR   INT_MAX
 
#define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE)
 
#define DR_MISALIGNMENT_UNKNOWN   (-1)
 
#define DR_MISALIGNMENT_UNINITIALIZED   (-2)
 
#define SET_DR_MISALIGNMENT(DR, VAL)   set_dr_misalignment (DR, VAL)
 
#define DR_TARGET_ALIGNMENT(DR)   dr_target_alignment (DR)
 
#define SET_DR_TARGET_ALIGNMENT(DR, VAL)   set_dr_target_alignment (DR, VAL)
 
#define DUMP_VECT_SCOPE(MSG)    AUTO_DUMP_SCOPE (MSG, vect_location)
 

Typedefs

typedef class _stmt_vec_info * stmt_vec_info
 
typedef struct _slp_tree * slp_tree
 
typedef vec< stmt_info_for_cost > stmt_vector_for_cost
 
typedef hash_map< tree_operand_hash, std::pair< stmt_vec_info, innermost_loop_behavior * > > vec_base_alignments
 
typedef vec< std::pair< unsigned, unsigned > > lane_permutation_t
 
typedef auto_vec< std::pair< unsigned, unsigned >, 16 > auto_lane_permutation_t
 
typedef vec< unsigned > load_permutation_t
 
typedef auto_vec< unsigned, 16 > auto_load_permutation_t
 
typedef class _slp_instance * slp_instance
 
typedef hash_set< scalar_cond_masked_key > scalar_cond_masked_set_type
 
typedef pair_hash< tree_operand_hash, tree_operand_hash > tree_cond_mask_hash
 
typedef hash_set< tree_cond_mask_hash > vec_cond_masked_set_type
 
typedef std::pair< tree, tree > vec_object_pair
 
typedef auto_vec< rgroup_controls > vec_loop_lens
 
typedef auto_vec< std::pair< data_reference *, tree > > drs_init_vec
 
typedef _loop_vec_info * loop_vec_info
 
typedef opt_pointer_wrapper< loop_vec_info > opt_loop_vec_info
 
typedef _bb_vec_info * bb_vec_info
 
typedef struct data_reference * dr_p
 
typedef enum _complex_perm_kinds complex_perm_kinds_t
 
typedef hash_map< slp_tree, complex_perm_kinds_t > slp_tree_to_load_perm_map_t
 
typedef pair_hash< nofree_ptr_hash< _slp_tree >, nofree_ptr_hash< _slp_tree > > slp_node_hash
 
typedef hash_map< slp_node_hash, bool > slp_compat_nodes_map_t
 
typedef vect_pattern *(* vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *, slp_compat_nodes_map_t *, slp_tree *)
 

Enumerations

enum  vect_var_kind { vect_simple_var , vect_pointer_var , vect_scalar_var , vect_mask_var }
 
enum  operation_type { unary_op = 1 , binary_op , ternary_op }
 
enum  dr_alignment_support {
  dr_unaligned_unsupported , dr_unaligned_supported , dr_explicit_realign , dr_explicit_realign_optimized ,
  dr_aligned
}
 
enum  vect_def_type {
  vect_uninitialized_def = 0 , vect_constant_def = 1 , vect_external_def , vect_internal_def ,
  vect_induction_def , vect_reduction_def , vect_double_reduction_def , vect_nested_cycle ,
  vect_first_order_recurrence , vect_condition_def , vect_unknown_def_type
}
 
enum  vect_induction_op_type {
  vect_step_op_add = 0 , vect_step_op_neg , vect_step_op_mul , vect_step_op_shl ,
  vect_step_op_shr
}
 
enum  vect_reduction_type {
  TREE_CODE_REDUCTION , COND_REDUCTION , INTEGER_INDUC_COND_REDUCTION , CONST_COND_REDUCTION ,
  EXTRACT_LAST_REDUCTION , FOLD_LEFT_REDUCTION
}
 
enum  slp_instance_kind {
  slp_inst_kind_store , slp_inst_kind_reduc_group , slp_inst_kind_reduc_chain , slp_inst_kind_bb_reduc ,
  slp_inst_kind_ctor
}
 
enum  vect_partial_vector_style { vect_partial_vectors_none , vect_partial_vectors_while_ult , vect_partial_vectors_avx512 , vect_partial_vectors_len }
 
enum  stmt_vec_info_type {
  undef_vec_info_type = 0 , load_vec_info_type , store_vec_info_type , shift_vec_info_type ,
  op_vec_info_type , call_vec_info_type , call_simd_clone_vec_info_type , assignment_vec_info_type ,
  condition_vec_info_type , comparison_vec_info_type , reduc_vec_info_type , induc_vec_info_type ,
  type_promotion_vec_info_type , type_demotion_vec_info_type , type_conversion_vec_info_type , cycle_phi_info_type ,
  lc_phi_info_type , phi_info_type , recurr_info_type , loop_exit_ctrl_vec_info_type
}
 
enum  vect_relevant {
  vect_unused_in_scope = 0 , vect_used_only_live , vect_used_in_outer_by_reduction , vect_used_in_outer ,
  vect_used_by_reduction , vect_used_in_scope
}
 
enum  slp_vect_type { loop_vect = 0 , pure_slp , hybrid }
 
enum  vec_load_store_type { VLS_LOAD , VLS_STORE , VLS_STORE_INVARIANT }
 
enum  vect_memory_access_type {
  VMAT_INVARIANT , VMAT_CONTIGUOUS , VMAT_CONTIGUOUS_DOWN , VMAT_CONTIGUOUS_PERMUTE ,
  VMAT_CONTIGUOUS_REVERSE , VMAT_LOAD_STORE_LANES , VMAT_ELEMENTWISE , VMAT_STRIDED_SLP ,
  VMAT_GATHER_SCATTER
}
 
enum  _complex_perm_kinds {
  PERM_UNKNOWN , PERM_EVENODD , PERM_ODDEVEN , PERM_ODDODD ,
  PERM_EVENEVEN , PERM_TOP
}
 

Functions

loop_vec_info loop_vec_info_for_loop (class loop *loop)
 
bool nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
 
tree vect_phi_initial_value (gphi *phi)
 
bool vect_use_mask_type_p (stmt_vec_info stmt_info)
 
bool is_pattern_stmt_p (stmt_vec_info stmt_info)
 
stmt_vec_info vect_orig_stmt (stmt_vec_info stmt_info)
 
stmt_vec_info get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
 
stmt_vec_info vect_stmt_to_vectorize (stmt_vec_info stmt_info)
 
bool is_loop_header_bb_p (basic_block bb)
 
int vect_pow2 (int x)
 
int builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost, tree vectype, int misalign)
 
int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
 
vector_costs * init_cost (vec_info *vinfo, bool costing_for_scalar)
 
void dump_stmt_cost (FILE *, int, enum vect_cost_for_stmt, stmt_vec_info, slp_tree, tree, int, unsigned, enum vect_cost_model_location)
 
unsigned add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, slp_tree node, tree vectype, int misalign, enum vect_cost_model_location where)
 
unsigned add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind, enum vect_cost_model_location where)
 
unsigned add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
 
void finish_cost (vector_costs *costs, const vector_costs *scalar_costs, unsigned *prologue_cost, unsigned *body_cost, unsigned *epilogue_cost, unsigned *suggested_unroll_factor=NULL)
 
void add_stmt_costs (vector_costs *costs, stmt_vector_for_cost *cost_vec)
 
void set_dr_misalignment (dr_vec_info *dr_info, int val)
 
int dr_misalignment (dr_vec_info *dr_info, tree vectype, poly_int64 offset=0)
 
const poly_uint64 dr_target_alignment (dr_vec_info *dr_info)
 
void set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
 
bool aligned_access_p (dr_vec_info *dr_info, tree vectype)
 
bool known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
 
unsigned int vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype)
 
innermost_loop_behavior * vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
 
tree get_dr_vinfo_offset (vec_info *vinfo, dr_vec_info *dr_info, bool check_outer=false)
 
enum vect_cost_model loop_cost_model (loop_p loop)
 
bool unlimited_cost_model (loop_p loop)
 
bool vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
 
unsigned int vect_get_num_vectors (poly_uint64 nunits, tree vectype)
 
unsigned int vect_get_num_copies (loop_vec_info loop_vinfo, tree vectype)
 
void vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
 
void vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
 
unsigned int vect_vf_for_cost (loop_vec_info loop_vinfo)
 
unsigned int vect_nunits_for_cost (tree vec_type)
 
unsigned HOST_WIDE_INT vect_max_vf (loop_vec_info loop_vinfo)
 
unsigned int vect_get_scalar_dr_size (dr_vec_info *dr_info)
 
bool vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
 
void vect_set_loop_condition (class loop *, edge, loop_vec_info, tree, tree, tree, bool)
 
bool slpeel_can_duplicate_loop_p (const class loop *, const_edge, const_edge)
 
class loop * slpeel_tree_duplicate_loop_to_edge_cfg (class loop *, edge, class loop *, edge, edge, edge *, bool=true, vec< basic_block > *=NULL)
 
class loop * vect_loop_versioning (loop_vec_info, gimple *)
 
class loop * vect_do_peeling (loop_vec_info, tree, tree, tree *, tree *, tree *, int, bool, bool, tree *)
 
tree vect_get_main_loop_result (loop_vec_info, tree, tree)
 
void vect_prepare_for_masked_peels (loop_vec_info)
 
dump_user_location_t find_loop_location (class loop *)
 
bool vect_can_advance_ivs_p (loop_vec_info)
 
void vect_update_inits_of_drs (loop_vec_info, tree, tree_code)
 
edge vec_init_loop_exit_info (class loop *)
 
void vect_iv_increment_position (edge, gimple_stmt_iterator *, bool *)
 
tree get_related_vectype_for_scalar_type (machine_mode, tree, poly_uint64=0)
 
tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int=0)
 
tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree)
 
tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int=0)
 
tree get_mask_type_for_scalar_type (vec_info *, tree, slp_tree)
 
tree get_same_sized_vectype (tree, tree)
 
bool vect_chooses_same_modes_p (vec_info *, machine_mode)
 
bool vect_get_loop_mask_type (loop_vec_info)
 
bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, stmt_vec_info *=NULL, gimple **=NULL)
 
bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *, tree *, stmt_vec_info *=NULL, gimple **=NULL)
 
bool vect_is_simple_use (vec_info *, stmt_vec_info, slp_tree, unsigned, tree *, slp_tree *, enum vect_def_type *, tree *, stmt_vec_info *=NULL)
 
bool vect_maybe_update_slp_op_vectype (slp_tree, tree)
 
tree perm_mask_for_reverse (tree)
 
bool supportable_widening_operation (vec_info *, code_helper, stmt_vec_info, tree, tree, code_helper *, code_helper *, int *, vec< tree > *)
 
bool supportable_narrowing_operation (code_helper, tree, tree, code_helper *, int *, vec< tree > *)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, stmt_vec_info, tree, int, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, slp_tree, tree, int, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *, int, enum vect_cost_for_stmt, enum vect_cost_model_location)
 
unsigned record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count, enum vect_cost_for_stmt kind, stmt_vec_info stmt_info, int misalign, enum vect_cost_model_location where)
 
void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *)
 
void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *, gimple_stmt_iterator *)
 
opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *)
 
tree vect_get_store_rhs (stmt_vec_info)
 
void vect_get_vec_defs_for_operand (vec_info *vinfo, stmt_vec_info, unsigned, tree op, vec< tree > *, tree=NULL)
 
void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned, tree, vec< tree > *, tree=NULL, vec< tree > *=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, vec< tree > *=NULL)
 
void vect_get_vec_defs (vec_info *, stmt_vec_info, slp_tree, unsigned, tree, tree, vec< tree > *, tree=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, tree=NULL, vec< tree > *=NULL, tree=NULL, tree=NULL, vec< tree > *=NULL)
 
tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree, gimple_stmt_iterator *)
 
tree vect_get_slp_vect_def (slp_tree, unsigned)
 
bool vect_transform_stmt (vec_info *, stmt_vec_info, gimple_stmt_iterator *, slp_tree, slp_instance)
 
void vect_remove_stores (vec_info *, stmt_vec_info)
 
bool vect_nop_conversion_p (stmt_vec_info)
 
opt_result vect_analyze_stmt (vec_info *, stmt_vec_info, bool *, slp_tree, slp_instance, stmt_vector_for_cost *)
 
void vect_get_load_cost (vec_info *, stmt_vec_info, int, dr_alignment_support, int, bool, unsigned int *, unsigned int *, stmt_vector_for_cost *, stmt_vector_for_cost *, bool)
 
void vect_get_store_cost (vec_info *, stmt_vec_info, int, dr_alignment_support, int, unsigned int *, stmt_vector_for_cost *)
 
bool vect_supportable_shift (vec_info *, enum tree_code, tree)
 
tree vect_gen_perm_mask_any (tree, const vec_perm_indices &)
 
tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &)
 
void optimize_mask_stores (class loop *)
 
tree vect_gen_while (gimple_seq *, tree, tree, tree, const char *=nullptr)
 
tree vect_gen_while_not (gimple_seq *, tree, tree, tree)
 
opt_result vect_get_vector_types_for_stmt (vec_info *, stmt_vec_info, tree *, tree *, unsigned int=0)
 
opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int=0)
 
bool ref_within_array_bound (gimple *, tree)
 
bool vect_can_force_dr_alignment_p (const_tree, poly_uint64)
 
enum dr_alignment_support vect_supportable_dr_alignment (vec_info *, dr_vec_info *, tree, int)
 
tree vect_get_smallest_scalar_type (stmt_vec_info, tree)
 
opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *)
 
bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance)
 
opt_result vect_enhance_data_refs_alignment (loop_vec_info)
 
opt_result vect_analyze_data_refs_alignment (loop_vec_info)
 
bool vect_slp_analyze_instance_alignment (vec_info *, slp_instance)
 
opt_result vect_analyze_data_ref_accesses (vec_info *, vec< int > *)
 
opt_result vect_prune_runtime_alias_test_list (loop_vec_info)
 
bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree, tree, int, internal_fn *, tree *)
 
bool vect_check_gather_scatter (stmt_vec_info, loop_vec_info, gather_scatter_info *)
 
opt_result vect_find_stmt_data_reference (loop_p, gimple *, vec< data_reference_p > *, vec< int > *, int)
 
opt_result vect_analyze_data_refs (vec_info *, poly_uint64 *, bool *)
 
void vect_record_base_alignments (vec_info *)
 
tree vect_create_data_ref_ptr (vec_info *, stmt_vec_info, tree, class loop *, tree, tree *, gimple_stmt_iterator *, gimple **, bool, tree=NULL_TREE)
 
tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *, stmt_vec_info, tree)
 
void vect_copy_ref_info (tree, tree)
 
tree vect_create_destination_var (tree, tree)
 
bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT)
 
internal_fn vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool)
 
bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT)
 
internal_fn vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT, bool)
 
void vect_permute_store_chain (vec_info *, vec< tree > &, unsigned int, stmt_vec_info, gimple_stmt_iterator *, vec< tree > *)
 
tree vect_setup_realignment (vec_info *, stmt_vec_info, gimple_stmt_iterator *, tree *, enum dr_alignment_support, tree, class loop **)
 
void vect_transform_grouped_load (vec_info *, stmt_vec_info, vec< tree >, int, gimple_stmt_iterator *)
 
void vect_record_grouped_load_vectors (vec_info *, stmt_vec_info, vec< tree >)
 
tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *)
 
tree vect_get_new_ssa_name (tree, enum vect_var_kind, const char *=NULL)
 
tree vect_create_addr_base_for_vector_ref (vec_info *, stmt_vec_info, gimple_seq *, tree)
 
tree neutral_op_for_reduction (tree, code_helper, tree, bool=true)
 
widest_int vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo)
 
bool vect_rgroup_iv_might_wrap_p (loop_vec_info, rgroup_controls *)
 
opt_result vect_determine_partial_vectors_and_peeling (loop_vec_info)
 
bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree, enum tree_code)
 
bool needs_fold_left_reduction_p (tree, code_helper)
 
opt_loop_vec_info vect_analyze_loop (class loop *, vec_info_shared *)
 
tree vect_build_loop_niters (loop_vec_info, bool *=NULL)
 
void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *, tree *, bool)
 
tree vect_halve_mask_nunits (tree, machine_mode)
 
tree vect_double_mask_nunits (tree, machine_mode)
 
void vect_record_loop_mask (loop_vec_info, vec_loop_masks *, unsigned int, tree, tree)
 
tree vect_get_loop_mask (loop_vec_info, gimple_stmt_iterator *, vec_loop_masks *, unsigned int, tree, unsigned int)
 
void vect_record_loop_len (loop_vec_info, vec_loop_lens *, unsigned int, tree, unsigned int)
 
tree vect_get_loop_len (loop_vec_info, gimple_stmt_iterator *, vec_loop_lens *, unsigned int, tree, unsigned int, unsigned int)
 
gimple_seq vect_gen_len (tree, tree, tree, tree)
 
stmt_vec_info info_for_reduction (vec_info *, stmt_vec_info)
 
bool reduction_fn_for_scalar_code (code_helper, internal_fn *)
 
class loop * vect_transform_loop (loop_vec_info, gimple *)
 
opt_result vect_analyze_loop_form (class loop *, vect_loop_form_info *)
 
loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *, const vect_loop_form_info *, loop_vec_info=nullptr)
 
bool vectorizable_live_operation (vec_info *, stmt_vec_info, slp_tree, slp_instance, int, bool, stmt_vector_for_cost *)
 
bool vectorizable_reduction (loop_vec_info, stmt_vec_info, slp_tree, slp_instance, stmt_vector_for_cost *)
 
bool vectorizable_induction (loop_vec_info, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vect_transform_reduction (loop_vec_info, stmt_vec_info, gimple_stmt_iterator *, gimple **, slp_tree)
 
bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info, gimple **, slp_tree, slp_instance)
 
bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info, gimple **, slp_tree)
 
bool vectorizable_phi (vec_info *, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vectorizable_recurr (loop_vec_info, stmt_vec_info, gimple **, slp_tree, stmt_vector_for_cost *)
 
bool vect_emulated_vector_p (tree)
 
bool vect_can_vectorize_without_simd_p (tree_code)
 
bool vect_can_vectorize_without_simd_p (code_helper)
 
int vect_get_known_peeling_cost (loop_vec_info, int, int *, stmt_vector_for_cost *, stmt_vector_for_cost *, stmt_vector_for_cost *)
 
tree cse_and_gimplify_to_preheader (loop_vec_info, tree)
 
tree vect_peel_nonlinear_iv_init (gimple_seq *, tree, tree, tree, enum vect_induction_op_type)
 
void vect_slp_init (void)
 
void vect_slp_fini (void)
 
void vect_free_slp_instance (slp_instance)
 
bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec< tree > &, gimple_stmt_iterator *, poly_uint64, bool, unsigned *, unsigned *=nullptr, bool=false)
 
bool vect_slp_analyze_operations (vec_info *)
 
void vect_schedule_slp (vec_info *, const vec< slp_instance > &)
 
opt_result vect_analyze_slp (vec_info *, unsigned)
 
bool vect_make_slp_decision (loop_vec_info)
 
void vect_detect_hybrid_slp (loop_vec_info)
 
void vect_optimize_slp (vec_info *)
 
void vect_gather_slp_loads (vec_info *)
 
void vect_get_slp_defs (slp_tree, vec< tree > *)
 
void vect_get_slp_defs (vec_info *, slp_tree, vec< vec< tree > > *, unsigned n=-1U)
 
bool vect_slp_if_converted_bb (basic_block bb, loop_p orig_loop)
 
bool vect_slp_function (function *)
 
stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree)
 
stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree)
 
bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info)
 
bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree, unsigned int *=NULL, tree *=NULL, tree *=NULL)
 
void duplicate_and_interleave (vec_info *, gimple_seq *, tree, const vec< tree > &, unsigned int, vec< tree > &)
 
int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info)
 
slp_tree vect_create_new_slp_node (unsigned, tree_code)
 
void vect_free_slp_tree (slp_tree)
 
bool compatible_calls_p (gcall *, gcall *)
 
int vect_slp_child_index_for_operand (const gimple *, int op, bool)
 
void vect_mark_pattern_stmts (vec_info *, stmt_vec_info, gimple *, tree)
 
bool vect_get_range_info (tree, wide_int *, wide_int *)
 
void vect_pattern_recog (vec_info *)
 
unsigned vectorize_loops (void)
 
void vect_free_loop_info_assumptions (class loop *)
 
gimple * vect_loop_vectorized_call (class loop *, gcond **cond=NULL)
 
bool vect_stmt_dominates_stmt_p (gimple *, gimple *)
 
bool vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
 
bool vect_is_reduction (stmt_vec_info stmt_info)
 
int vect_reduc_type (vec_info *vinfo, stmt_vec_info stmt_info)
 
tree vect_embedded_comparison_type (stmt_vec_info stmt_info)
 
tree vect_comparison_type (stmt_vec_info stmt_info)
 
bool vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
 
bool vect_is_integer_truncation (stmt_vec_info stmt_info)
 
gimple * vect_gimple_build (tree, code_helper, tree, tree=NULL_TREE)
 

Variables

dump_user_location_t vect_location
 
vect_pattern_decl_t slp_patterns []
 
size_t num__slp_patterns
 

Macro Definition Documentation

◆ BB_VINFO_BB

#define BB_VINFO_BB ( B)    (B)->bb

◆ BB_VINFO_DATAREFS

#define BB_VINFO_DATAREFS ( B)    (B)->shared->datarefs

Referenced by vect_slp_region().

◆ BB_VINFO_DDRS

#define BB_VINFO_DDRS ( B)    (B)->shared->ddrs

◆ BB_VINFO_GROUPED_STORES

#define BB_VINFO_GROUPED_STORES ( B)    (B)->grouped_stores

◆ BB_VINFO_SLP_INSTANCES

#define BB_VINFO_SLP_INSTANCES ( B)    (B)->slp_instances

◆ DR_GROUP_FIRST_ELEMENT

◆ DR_GROUP_GAP

◆ DR_GROUP_NEXT_ELEMENT

◆ DR_GROUP_SIZE

◆ DR_GROUP_STORE_COUNT

#define DR_GROUP_STORE_COUNT ( S)     (gcc_checking_assert ((S)->dr_aux.dr), (S)->store_count)

Referenced by vect_transform_stmt().

◆ DR_MISALIGNMENT_UNINITIALIZED

#define DR_MISALIGNMENT_UNINITIALIZED   (-2)

◆ DR_MISALIGNMENT_UNKNOWN

◆ DR_TARGET_ALIGNMENT

◆ DUMP_VECT_SCOPE

#define DUMP_VECT_SCOPE ( MSG)     AUTO_DUMP_SCOPE (MSG, vect_location)
A macro for calling:
  dump_begin_scope (MSG, vect_location);
via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
and then calling
  dump_end_scope ();
once the object goes out of scope, thus capturing the nesting of
the scopes.

These scopes affect dump messages within them: dump messages at the
top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.   

Referenced by move_early_exit_stmts(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_early_break_dependences(), vect_analyze_loop(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_bb_partition_graph(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_loop_niters(), vect_make_slp_decision(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_pattern_recog(), vect_prune_runtime_alias_test_list(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_transform_loop(), vect_update_inits_of_drs(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_shift(), and vectorizable_simd_clone_call().

◆ HYBRID_SLP_STMT

#define HYBRID_SLP_STMT ( S)    ((S)->slp_type == hybrid)

◆ LOOP_REQUIRES_VERSIONING

#define LOOP_REQUIRES_VERSIONING ( L)
Value:
(LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \
 || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))
Whether the loop needs to be versioned for any of the reasons above
(see LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, ..._FOR_ALIAS,
..._FOR_NITERS and ..._FOR_SIMD_IF_COND below).

Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().

◆ LOOP_REQUIRES_VERSIONING_FOR_ALIAS

#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS ( L)
Value:
((L)->comp_alias_ddrs.length () > 0 \
|| (L)->check_unequal_addrs.length () > 0 \
|| (L)->lower_bounds.length () > 0)

Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().

◆ LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT

#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT ( L)     ((L)->may_misalign_stmts.length () > 0)

◆ LOOP_REQUIRES_VERSIONING_FOR_NITERS

#define LOOP_REQUIRES_VERSIONING_FOR_NITERS ( L)     (LOOP_VINFO_NITERS_ASSUMPTIONS (L))

◆ LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND

#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND ( L)     (LOOP_VINFO_SIMD_IF_COND (L))

Referenced by vect_loop_versioning().

◆ LOOP_VINFO_BBS

◆ LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P

◆ LOOP_VINFO_CHECK_NONZERO

#define LOOP_VINFO_CHECK_NONZERO ( L)    (L)->check_nonzero

◆ LOOP_VINFO_CHECK_UNEQUAL_ADDRS

#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS ( L)    (L)->check_unequal_addrs

◆ LOOP_VINFO_COMP_ALIAS_DDRS

#define LOOP_VINFO_COMP_ALIAS_DDRS ( L)    (L)->comp_alias_ddrs

◆ LOOP_VINFO_COST_MODEL_THRESHOLD

◆ LOOP_VINFO_DATAREFS

◆ LOOP_VINFO_DDRS

#define LOOP_VINFO_DDRS ( L)    (L)->shared->ddrs

◆ LOOP_VINFO_EARLY_BREAKS

◆ LOOP_VINFO_EARLY_BREAKS_VECT_PEELED

◆ LOOP_VINFO_EARLY_BRK_DEST_BB

#define LOOP_VINFO_EARLY_BRK_DEST_BB ( L)    (L)->early_break_dest_bb

◆ LOOP_VINFO_EARLY_BRK_STORES

#define LOOP_VINFO_EARLY_BRK_STORES ( L)    (L)->early_break_stores

◆ LOOP_VINFO_EARLY_BRK_VUSES

#define LOOP_VINFO_EARLY_BRK_VUSES ( L)    (L)->early_break_vuses

◆ LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P

#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P ( L)     (L)->epil_using_partial_vectors_p

◆ LOOP_VINFO_EPILOGUE_IV_EXIT

#define LOOP_VINFO_EPILOGUE_IV_EXIT ( L)    (L)->vec_epilogue_loop_iv_exit

Referenced by vect_do_peeling().

◆ LOOP_VINFO_EPILOGUE_P

◆ LOOP_VINFO_FULLY_MASKED_P

◆ LOOP_VINFO_FULLY_WITH_LENGTH_P

◆ LOOP_VINFO_GROUPED_STORES

#define LOOP_VINFO_GROUPED_STORES ( L)    (L)->grouped_stores

◆ LOOP_VINFO_HAS_MASK_STORE

#define LOOP_VINFO_HAS_MASK_STORE ( L)    (L)->has_mask_store

Referenced by vectorizable_store().

◆ LOOP_VINFO_INNER_LOOP_COST_FACTOR

#define LOOP_VINFO_INNER_LOOP_COST_FACTOR ( L)    (L)->inner_loop_cost_factor

◆ LOOP_VINFO_INT_NITERS

◆ LOOP_VINFO_IV_EXIT

◆ LOOP_VINFO_LENS

◆ LOOP_VINFO_LOOP

#define LOOP_VINFO_LOOP ( L)    (L)->loop
Access Functions.   

Referenced by vector_costs::compare_inside_loop_cost(), cse_and_gimplify_to_preheader(), get_group_load_store_type(), get_initial_def_for_reduction(), vec_info::insert_seq_on_entry(), loop_niters_no_overflow(), move_early_exit_stmts(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), stmt_in_inner_loop_p(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_operations(), vect_analyze_possibly_independent_ddr(), vect_analyze_scalar_cycles(), vect_analyze_scalar_cycles_1(), vect_better_loop_vinfo_p(), vect_build_loop_niters(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_compute_single_scalar_iteration_cost(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_dr_behavior(), vect_emit_reduction_init_stmts(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_is_simple_reduction(), vect_iv_limit_for_partial_vectors(), vect_known_niters_smaller_than_vf(), vect_loop_versioning(), vect_mark_for_runtime_alias_test(), vect_mark_stmts_to_be_vectorized(), vect_min_prec_for_max_niters(), vect_model_reduction_cost(), vect_need_peeling_or_partial_vectors_p(), vect_pattern_recog(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_phi_first_order_recurrence_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_schedule_slp_node(), vect_setup_realignment(), vect_stmt_relevant_p(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), 
vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_reduction(), vect_truncate_gather_scatter_offset(), vect_update_ivs_after_vectorizer(), vect_update_vf_for_slp(), vectorizable_call(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ LOOP_VINFO_LOOP_CONDS

#define LOOP_VINFO_LOOP_CONDS ( L)    (L)->conds

Referenced by vect_create_loop_vinfo().

◆ LOOP_VINFO_LOOP_IV_COND

#define LOOP_VINFO_LOOP_IV_COND ( L)    (L)->loop_iv_cond

◆ LOOP_VINFO_LOOP_NEST

#define LOOP_VINFO_LOOP_NEST ( L)    (L)->shared->loop_nest

◆ LOOP_VINFO_LOWER_BOUNDS

#define LOOP_VINFO_LOWER_BOUNDS ( L)    (L)->lower_bounds

◆ LOOP_VINFO_MASK_SKIP_NITERS

◆ LOOP_VINFO_MASKS

◆ LOOP_VINFO_MAX_VECT_FACTOR

#define LOOP_VINFO_MAX_VECT_FACTOR ( L)    (L)->max_vectorization_factor

◆ LOOP_VINFO_MAY_ALIAS_DDRS

#define LOOP_VINFO_MAY_ALIAS_DDRS ( L)    (L)->may_alias_ddrs

◆ LOOP_VINFO_MAY_MISALIGN_STMTS

#define LOOP_VINFO_MAY_MISALIGN_STMTS ( L)    (L)->may_misalign_stmts

◆ LOOP_VINFO_N_STMTS

#define LOOP_VINFO_N_STMTS ( L)    (L)->shared->n_stmts

Referenced by vect_analyze_loop_2().

◆ LOOP_VINFO_NITERS

◆ LOOP_VINFO_NITERS_ASSUMPTIONS

#define LOOP_VINFO_NITERS_ASSUMPTIONS ( L)    (L)->num_iters_assumptions

◆ LOOP_VINFO_NITERS_KNOWN_P

◆ LOOP_VINFO_NITERS_UNCHANGED

#define LOOP_VINFO_NITERS_UNCHANGED ( L)    (L)->num_iters_unchanged
Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
prologue peeling retain total unchanged scalar loop iterations for
cost model.   

Referenced by vect_create_loop_vinfo(), vect_transform_loop(), and vectorizable_simd_clone_call().

◆ LOOP_VINFO_NITERSM1

◆ LOOP_VINFO_NO_DATA_DEPENDENCIES

#define LOOP_VINFO_NO_DATA_DEPENDENCIES ( L)    (L)->no_data_dependencies

◆ LOOP_VINFO_ORIG_LOOP_INFO

◆ LOOP_VINFO_ORIG_MAX_VECT_FACTOR

#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR ( L)     (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))

◆ LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS

◆ LOOP_VINFO_PARTIAL_VECTORS_STYLE

◆ LOOP_VINFO_PEELING_FOR_ALIGNMENT

◆ LOOP_VINFO_PEELING_FOR_GAPS

◆ LOOP_VINFO_PEELING_FOR_NITER

#define LOOP_VINFO_PEELING_FOR_NITER ( L)    (L)->peeling_for_niter

◆ LOOP_VINFO_PTR_MASK

#define LOOP_VINFO_PTR_MASK ( L)    (L)->ptr_mask

◆ LOOP_VINFO_REDUCTION_CHAINS

#define LOOP_VINFO_REDUCTION_CHAINS ( L)    (L)->reduction_chains

◆ LOOP_VINFO_REDUCTIONS

#define LOOP_VINFO_REDUCTIONS ( L)    (L)->reductions

◆ LOOP_VINFO_RGROUP_COMPARE_TYPE

◆ LOOP_VINFO_RGROUP_IV_TYPE

◆ LOOP_VINFO_SCALAR_ITERATION_COST

◆ LOOP_VINFO_SCALAR_IV_EXIT

#define LOOP_VINFO_SCALAR_IV_EXIT ( L)    (L)->scalar_loop_iv_exit

◆ LOOP_VINFO_SCALAR_LOOP

#define LOOP_VINFO_SCALAR_LOOP ( L)    (L)->scalar_loop

◆ LOOP_VINFO_SCALAR_LOOP_SCALING

#define LOOP_VINFO_SCALAR_LOOP_SCALING ( L)    (L)->scalar_loop_scaling

◆ LOOP_VINFO_SIMD_IF_COND

#define LOOP_VINFO_SIMD_IF_COND ( L)    (L)->simd_if_cond

Referenced by vect_analyze_loop_2().

◆ LOOP_VINFO_SLP_INSTANCES

#define LOOP_VINFO_SLP_INSTANCES ( L)    (L)->slp_instances

◆ LOOP_VINFO_SLP_UNROLLING_FACTOR

#define LOOP_VINFO_SLP_UNROLLING_FACTOR ( L)    (L)->slp_unrolling_factor

◆ LOOP_VINFO_UNALIGNED_DR

#define LOOP_VINFO_UNALIGNED_DR ( L)    (L)->unaligned_dr

◆ LOOP_VINFO_USING_DECREMENTING_IV_P

#define LOOP_VINFO_USING_DECREMENTING_IV_P ( L)    (L)->using_decrementing_iv_p

◆ LOOP_VINFO_USING_PARTIAL_VECTORS_P

◆ LOOP_VINFO_USING_SELECT_VL_P

◆ LOOP_VINFO_VECT_FACTOR

#define LOOP_VINFO_VECT_FACTOR ( L)    (L)->vectorization_factor

◆ LOOP_VINFO_VECTORIZABLE_P

#define LOOP_VINFO_VECTORIZABLE_P ( L)    (L)->vectorizable

◆ LOOP_VINFO_VERSIONING_THRESHOLD

#define LOOP_VINFO_VERSIONING_THRESHOLD ( L)    (L)->versioning_threshold

◆ MAX_INTERM_CVT_STEPS

#define MAX_INTERM_CVT_STEPS   3
The maximum number of intermediate steps required in multi-step type
conversion.   

Referenced by supportable_narrowing_operation(), and supportable_widening_operation().

◆ MAX_VECTORIZATION_FACTOR

#define MAX_VECTORIZATION_FACTOR   INT_MAX

◆ PURE_SLP_STMT

◆ REDUC_GROUP_FIRST_ELEMENT

◆ REDUC_GROUP_NEXT_ELEMENT

◆ REDUC_GROUP_SIZE

#define REDUC_GROUP_SIZE ( S)     (gcc_checking_assert (!(S)->dr_aux.dr), (S)->size)

◆ SET_DR_MISALIGNMENT

◆ SET_DR_TARGET_ALIGNMENT

#define SET_DR_TARGET_ALIGNMENT ( DR,
VAL )   set_dr_target_alignment (DR, VAL)

◆ SLP_INSTANCE_KIND

◆ SLP_INSTANCE_LOADS

◆ SLP_INSTANCE_REMAIN_DEFS

#define SLP_INSTANCE_REMAIN_DEFS ( S)    (S)->remain_defs

◆ SLP_INSTANCE_ROOT_STMTS

◆ SLP_INSTANCE_TREE

◆ SLP_INSTANCE_UNROLLING_FACTOR

#define SLP_INSTANCE_UNROLLING_FACTOR ( S)    (S)->unrolling_factor

◆ SLP_TREE_CHILDREN

#define SLP_TREE_CHILDREN ( S)    (S)->children

Referenced by _slp_tree::_slp_tree(), complex_add_pattern::build(), complex_mul_pattern::build(), complex_fms_pattern::build(), addsub_pattern::build(), vect_optimize_slp_pass::build_graph(), vect_optimize_slp_pass::build_vertices(), vect_optimize_slp_pass::change_vec_perm_layout(), compatible_complex_nodes_p(), dot_slp_tree(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), complex_add_pattern::matches(), complex_mul_pattern::matches(), complex_fms_pattern::matches(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_detect_pair_op(), vect_detect_pair_op(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_get_gather_scatter_ops(), vect_get_slp_defs(), vect_get_vec_defs(), vect_is_simple_use(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_match_slp_patterns_2(), vect_print_slp_graph(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_validate_multiplication(), vectorizable_condition(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and _slp_tree::~_slp_tree().

◆ SLP_TREE_CODE

◆ SLP_TREE_DEF_TYPE

◆ SLP_TREE_LANE_PERMUTATION

◆ SLP_TREE_LANES

◆ SLP_TREE_LOAD_PERMUTATION

◆ SLP_TREE_NUMBER_OF_VEC_STMTS

◆ SLP_TREE_REF_COUNT

◆ SLP_TREE_REPRESENTATIVE

◆ SLP_TREE_SCALAR_OPS

◆ SLP_TREE_SCALAR_STMTS

◆ SLP_TREE_SIMD_CLONE_INFO

#define SLP_TREE_SIMD_CLONE_INFO ( S)    (S)->simd_clone_info

◆ SLP_TREE_VEC_DEFS

◆ SLP_TREE_VECTYPE

◆ STMT_SLP_TYPE

◆ STMT_VINFO_DATA_REF

◆ STMT_VINFO_DEF_TYPE

#define STMT_VINFO_DEF_TYPE ( S)    (S)->def_type

Referenced by can_vectorize_live_stmts(), info_for_reduction(), iv_phi_p(), maybe_set_vectorized_backedge_value(), vec_info::new_stmt_vec_info(), parloops_is_simple_reduction(), parloops_valid_reduction_input_p(), process_use(), supportable_widening_operation(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_compute_single_scalar_iteration_cost(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_internal_def(), vect_init_pattern_stmt(), vect_inner_phi_in_double_reduction_p(), vect_is_simple_reduction(), vect_is_simple_use(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_reassociating_reduction_p(), vect_recog_mixed_size_cond_pattern(), vect_recog_over_widening_pattern(), vect_schedule_scc(), vect_stmt_relevant_p(), vect_transform_cycle_phi(), vect_transform_loop(), vect_transform_reduction(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().

◆ STMT_VINFO_DR_BASE_ADDRESS

#define STMT_VINFO_DR_BASE_ADDRESS ( S)    (S)->dr_wrt_vec_loop.base_address

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_BASE_ALIGNMENT

#define STMT_VINFO_DR_BASE_ALIGNMENT ( S)    (S)->dr_wrt_vec_loop.base_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_BASE_MISALIGNMENT

#define STMT_VINFO_DR_BASE_MISALIGNMENT ( S)     (S)->dr_wrt_vec_loop.base_misalignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_INFO

◆ STMT_VINFO_DR_INIT

#define STMT_VINFO_DR_INIT ( S)    (S)->dr_wrt_vec_loop.init

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_OFFSET

#define STMT_VINFO_DR_OFFSET ( S)    (S)->dr_wrt_vec_loop.offset

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_OFFSET_ALIGNMENT

#define STMT_VINFO_DR_OFFSET_ALIGNMENT ( S)     (S)->dr_wrt_vec_loop.offset_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_STEP

#define STMT_VINFO_DR_STEP ( S)    (S)->dr_wrt_vec_loop.step

◆ STMT_VINFO_DR_STEP_ALIGNMENT

#define STMT_VINFO_DR_STEP_ALIGNMENT ( S)     (S)->dr_wrt_vec_loop.step_alignment

Referenced by vect_analyze_data_refs().

◆ STMT_VINFO_DR_WRT_VEC_LOOP

#define STMT_VINFO_DR_WRT_VEC_LOOP ( S)    (S)->dr_wrt_vec_loop

◆ STMT_VINFO_FORCE_SINGLE_CYCLE

#define STMT_VINFO_FORCE_SINGLE_CYCLE ( S)    (S)->force_single_cycle

◆ STMT_VINFO_GATHER_SCATTER_P

◆ STMT_VINFO_GROUPED_ACCESS

◆ STMT_VINFO_IN_PATTERN_P

◆ STMT_VINFO_LIVE_P

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED

#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED ( S)    (S)->loop_phi_evolution_base_unchanged

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_PART

◆ STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE

◆ STMT_VINFO_MEMORY_ACCESS_TYPE

#define STMT_VINFO_MEMORY_ACCESS_TYPE ( S)    (S)->memory_access_type

◆ STMT_VINFO_MIN_NEG_DIST

#define STMT_VINFO_MIN_NEG_DIST ( S)    (S)->min_neg_dist

◆ STMT_VINFO_PATTERN_DEF_SEQ

◆ STMT_VINFO_REDUC_CODE

◆ STMT_VINFO_REDUC_DEF

◆ STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT

#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT ( S)    (S)->reduc_epilogue_adjustment

◆ STMT_VINFO_REDUC_FN

#define STMT_VINFO_REDUC_FN ( S)    (S)->reduc_fn

◆ STMT_VINFO_REDUC_IDX

◆ STMT_VINFO_REDUC_TYPE

◆ STMT_VINFO_REDUC_VECTYPE

#define STMT_VINFO_REDUC_VECTYPE ( S)    (S)->reduc_vectype

◆ STMT_VINFO_REDUC_VECTYPE_IN

#define STMT_VINFO_REDUC_VECTYPE_IN ( S)    (S)->reduc_vectype_in

◆ STMT_VINFO_RELATED_STMT

◆ STMT_VINFO_RELEVANT

◆ STMT_VINFO_RELEVANT_P

◆ STMT_VINFO_SIMD_CLONE_INFO

#define STMT_VINFO_SIMD_CLONE_INFO ( S)    (S)->simd_clone_info

◆ STMT_VINFO_SIMD_LANE_ACCESS_P

◆ STMT_VINFO_SLP_VECT_ONLY

#define STMT_VINFO_SLP_VECT_ONLY ( S)    (S)->slp_vect_only_p

◆ STMT_VINFO_SLP_VECT_ONLY_PATTERN

#define STMT_VINFO_SLP_VECT_ONLY_PATTERN ( S)    (S)->slp_vect_pattern_only_p

◆ STMT_VINFO_STMT

◆ STMT_VINFO_STRIDED_P

◆ STMT_VINFO_TYPE

◆ STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL

#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL ( S)    (S)->induc_cond_initial_val

◆ STMT_VINFO_VEC_STMTS

◆ STMT_VINFO_VECTORIZABLE

◆ STMT_VINFO_VECTYPE

#define STMT_VINFO_VECTYPE ( S)    (S)->vectype

Referenced by append_pattern_def_seq(), addsub_pattern::build(), complex_pattern::build(), bump_vector_ptr(), get_initial_defs_for_reduction(), get_misalign_in_elems(), record_stmt_cost(), stmt_vectype(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_build_one_gather_load_call(), vect_build_slp_instance(), vect_check_gather_scatter(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_create_cond_for_align_checks(), vect_describe_gather_scatter_call(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_find_reusable_accumulator(), vect_gen_prolog_loop_niters(), vect_get_data_access_cost(), vect_get_peeling_costs_all_drs(), vect_get_strided_load_store_ops(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_init_pattern_stmt(), vect_is_simple_use(), vect_model_reduction_cost(), vect_peeling_supportable(), vect_permute_load_chain(), vect_permute_store_chain(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_setup_realignment(), vect_shift_permute_load_chain(), vect_transform_cycle_phi(), vect_transform_grouped_load(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_reduction(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), vect_update_misalignment_for_peel(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_recurr(), vectorizable_reduction(), 
vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().

◆ VECT_MAX_COST

#define VECT_MAX_COST   1000

◆ VECT_SCALAR_BOOLEAN_TYPE_P

#define VECT_SCALAR_BOOLEAN_TYPE_P ( TYPE)
Value:
(TREE_CODE (TYPE) == BOOLEAN_TYPE \
 || ((TREE_CODE (TYPE) == INTEGER_TYPE \
      || TREE_CODE (TYPE) == ENUMERAL_TYPE) \
     && TYPE_PRECISION (TYPE) == 1 \
     && TYPE_UNSIGNED (TYPE)))
Nonzero if TYPE represents a (scalar) boolean type or type
in the middle-end compatible with it (unsigned precision 1 integral
types).  Used to determine which types should be vectorized as
VECTOR_BOOLEAN_TYPE_P.   

Referenced by check_bool_pattern(), get_same_sized_vectype(), integer_type_for_mask(), possible_vector_mask_operation_p(), vect_check_scalar_mask(), vect_determine_mask_precision(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_narrowable_type_p(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), vectorizable_comparison_1(), and vectorizable_operation().

◆ VECTORIZABLE_CYCLE_DEF

#define VECTORIZABLE_CYCLE_DEF ( D)
Value:
(((D) == vect_reduction_def) \
 || ((D) == vect_double_reduction_def) \
 || ((D) == vect_nested_cycle))

Referenced by info_for_reduction(), maybe_set_vectorized_backedge_value(), vect_compute_single_scalar_iteration_cost(), vect_update_vf_for_slp(), and vectorizable_reduction().

Typedef Documentation

◆ auto_lane_permutation_t

◆ auto_load_permutation_t

◆ bb_vec_info

◆ complex_perm_kinds_t

All possible load permute values that could result from the partial data-flow
analysis.   

◆ dr_p

◆ drs_init_vec

◆ lane_permutation_t

◆ load_permutation_t

◆ loop_vec_info

Info on vectorized loops.                                        

◆ opt_loop_vec_info

Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
value signifies success, and a NULL value signifies failure, supporting
propagating an opt_problem * describing the failure back up the call
stack.   

◆ scalar_cond_masked_set_type

◆ slp_compat_nodes_map_t

◆ slp_instance

SLP instance is a sequence of stmts in a loop that can be packed into
SIMD stmts.   

◆ slp_node_hash

Cache from nodes pair to being compatible or not.   

◆ slp_tree

◆ slp_tree_to_load_perm_map_t

Cache from nodes to the load permutation they represent.   

◆ stmt_vec_info

Vectorizer
   Copyright (C) 2003-2024 Free Software Foundation, Inc.
   Contributed by Dorit Naishlos <dorit@il.ibm.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.   

◆ stmt_vector_for_cost

◆ tree_cond_mask_hash

Key and map that records association between vector conditions and
corresponding loop mask, and is populated by prepare_vec_mask.   

◆ vec_base_alignments

Maps base addresses to an innermost_loop_behavior and the stmt it was
derived from that gives the maximum known alignment for that base.   

◆ vec_cond_masked_set_type

◆ vec_loop_lens

◆ vec_object_pair

Describes two objects whose addresses must be unequal for the vectorized
loop to be valid.   

◆ vect_pattern_decl_t

Function pointer to create a new pattern matcher from a generic type.   

Enumeration Type Documentation

◆ _complex_perm_kinds

All possible load permute values that could result from the partial data-flow
analysis.   
Enumerator
PERM_UNKNOWN 
PERM_EVENODD 
PERM_ODDEVEN 
PERM_ODDODD 
PERM_EVENEVEN 
PERM_TOP 

◆ dr_alignment_support

Define type of available alignment support.   
Enumerator
dr_unaligned_unsupported 
dr_unaligned_supported 
dr_explicit_realign 
dr_explicit_realign_optimized 
dr_aligned 

◆ operation_type

Defines type of operation.   
Enumerator
unary_op 
binary_op 
ternary_op 

◆ slp_instance_kind

The enum describes the type of operations that an SLP instance
can perform.  
Enumerator
slp_inst_kind_store 
slp_inst_kind_reduc_group 
slp_inst_kind_reduc_chain 
slp_inst_kind_bb_reduc 
slp_inst_kind_ctor 

◆ slp_vect_type

The type of vectorization that can be applied to the stmt: regular loop-based
vectorization; pure SLP - the stmt is a part of SLP instances and does not
have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is
a part of SLP instance and also must be loop-based vectorized, since it has
uses outside SLP sequences.

In the loop context the meanings of pure and hybrid SLP are slightly
different. By saying that pure SLP is applied to the loop, we mean that we
exploit only intra-iteration parallelism in the loop; i.e., the loop can be
vectorized without doing any conceptual unrolling, because we don't pack
together stmts from different iterations, only within a single iteration.
Loop hybrid SLP means that we exploit both intra-iteration and
inter-iteration parallelism (e.g., number of elements in the vector is 4
and the slp-group-size is 2, in which case we don't have enough parallelism
within an iteration, so we obtain the rest of the parallelism from subsequent
iterations by unrolling the loop by 2).   
Enumerator
loop_vect 
pure_slp 
hybrid 

◆ stmt_vec_info_type

Info on vectorized defs.                                         
Enumerator
undef_vec_info_type 
load_vec_info_type 
store_vec_info_type 
shift_vec_info_type 
op_vec_info_type 
call_vec_info_type 
call_simd_clone_vec_info_type 
assignment_vec_info_type 
condition_vec_info_type 
comparison_vec_info_type 
reduc_vec_info_type 
induc_vec_info_type 
type_promotion_vec_info_type 
type_demotion_vec_info_type 
type_conversion_vec_info_type 
cycle_phi_info_type 
lc_phi_info_type 
phi_info_type 
recurr_info_type 
loop_exit_ctrl_vec_info_type 

◆ vec_load_store_type

Says whether a statement is a load, a store of a vectorized statement
result, or a store of an invariant value.   
Enumerator
VLS_LOAD 
VLS_STORE 
VLS_STORE_INVARIANT 

◆ vect_def_type

Define type of def-use cross-iteration cycle.   
Enumerator
vect_uninitialized_def 
vect_constant_def 
vect_external_def 
vect_internal_def 
vect_induction_def 
vect_reduction_def 
vect_double_reduction_def 
vect_nested_cycle 
vect_first_order_recurrence 
vect_condition_def 
vect_unknown_def_type 

◆ vect_induction_op_type

Define operation type of linear/non-linear induction variable.   
Enumerator
vect_step_op_add 
vect_step_op_neg 
vect_step_op_mul 
vect_step_op_shl 
vect_step_op_shr 

◆ vect_memory_access_type

Describes how we're going to vectorize an individual load or store,
or a group of loads or stores.   
Enumerator
VMAT_INVARIANT 
VMAT_CONTIGUOUS 
VMAT_CONTIGUOUS_DOWN 
VMAT_CONTIGUOUS_PERMUTE 
VMAT_CONTIGUOUS_REVERSE 
VMAT_LOAD_STORE_LANES 
VMAT_ELEMENTWISE 
VMAT_STRIDED_SLP 
VMAT_GATHER_SCATTER 

◆ vect_partial_vector_style

Enumerator
vect_partial_vectors_none 
vect_partial_vectors_while_ult 
vect_partial_vectors_avx512 
vect_partial_vectors_len 

◆ vect_reduction_type

Define type of reduction.   
Enumerator
TREE_CODE_REDUCTION 
COND_REDUCTION 
INTEGER_INDUC_COND_REDUCTION 
CONST_COND_REDUCTION 
EXTRACT_LAST_REDUCTION 
FOLD_LEFT_REDUCTION 

◆ vect_relevant

Indicates whether/how a variable is used in the scope of loop/basic
block.   
Enumerator
vect_unused_in_scope 
vect_used_only_live 
vect_used_in_outer_by_reduction 
vect_used_in_outer 
vect_used_by_reduction 
vect_used_in_scope 

◆ vect_var_kind

Used for naming of new temporaries.   
Enumerator
vect_simple_var 
vect_pointer_var 
vect_scalar_var 
vect_mask_var 

Function Documentation

◆ add_stmt_cost() [1/3]

◆ add_stmt_cost() [2/3]

unsigned add_stmt_cost ( vector_costs * costs,
int count,
enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info,
slp_tree node,
tree vectype,
int misalign,
enum vect_cost_model_location where )
inline

◆ add_stmt_cost() [3/3]

unsigned add_stmt_cost ( vector_costs * costs,
stmt_info_for_cost * i )
inline
Alias targetm.vectorize.add_stmt_cost.   

References add_stmt_cost(), and i.

◆ add_stmt_costs()

◆ aligned_access_p()

bool aligned_access_p ( dr_vec_info * dr_info,
tree vectype )
inline
Return true if data access DR_INFO is aligned to the targets
preferred alignment for VECTYPE (which may be less than a full vector).   

References dr_misalignment().

Referenced by vect_enhance_data_refs_alignment(), and vector_alignment_reachable_p().

◆ builtin_vectorization_cost()

int builtin_vectorization_cost ( enum vect_cost_for_stmt type_of_cost,
tree vectype,
int misalign )
inline
Alias targetm.vectorize.builtin_vectorization_cost.   

References ggc_alloc(), and targetm.

Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), and vect_get_stmt_cost().

◆ bump_vector_ptr()

tree bump_vector_ptr ( vec_info * vinfo,
tree dataref_ptr,
gimple * ptr_incr,
gimple_stmt_iterator * gsi,
stmt_vec_info stmt_info,
tree bump )
extern
Function bump_vector_ptr

  Increment a pointer (to a vector type) by vector-size. If requested,
  i.e. if PTR-INCR is given, then also connect the new increment stmt
  to the existing def-use update-chain of the pointer, by modifying
  the PTR_INCR as illustrated below:

  The pointer def-use update-chain before this function:
                       DATAREF_PTR = phi (p_0, p_2)
                       ....
       PTR_INCR:       p_2 = DATAREF_PTR + step

  The pointer def-use update-chain after this function:
                       DATAREF_PTR = phi (p_0, p_2)
                       ....
                       NEW_DATAREF_PTR = DATAREF_PTR + BUMP
                       ....
       PTR_INCR:       p_2 = NEW_DATAREF_PTR + step

  Input:
  DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
                in the loop.
  PTR_INCR - optional. The stmt that updates the pointer in each iteration of
             the loop.  The increment amount across iterations is expected
             to be vector_size.
  BSI - location where the new update stmt is to be placed.
  STMT_INFO - the original scalar memory-access stmt that is being vectorized.
  BUMP - optional. The offset by which to bump the pointer. If not given,
         the offset is assumed to be vector_size.

  Output: Return NEW_DATAREF_PTR as illustrated above.

References build1(), copy_ssa_name(), DR_PTR_INFO, duplicate_ssa_name_ptr_info(), fold_build2, fold_convert, fold_stmt(), follow_all_ssa_edges(), FOR_EACH_SSA_USE_OPERAND, gcc_assert, ggc_alloc(), gimple_build_assign(), gsi_for_stmt(), gsi_stmt(), is_gimple_min_invariant(), make_ssa_name(), mark_ptr_info_alignment_unknown(), operand_equal_p(), ptr_type_node, SET_USE, SSA_NAME_PTR_INFO, SSA_OP_USE, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_SIZE_UNIT, update_stmt(), USE_FROM_PTR, and vect_finish_stmt_generation().

Referenced by vectorizable_load(), and vectorizable_store().

◆ can_duplicate_and_interleave_p()

bool can_duplicate_and_interleave_p ( vec_info * vinfo,
unsigned int count,
tree elt_type,
unsigned int * nvectors_out,
tree * vector_type_out,
tree * permutes )
extern
Check whether it is possible to load COUNT elements of type ELT_TYPE
using the method implemented by duplicate_and_interleave.  Return true
if so, returning the number of intermediate vectors in *NVECTORS_OUT
(if nonnull) and the type of each intermediate vector in *VECTOR_TYPE_OUT
(if nonnull).   

References build_nonstandard_integer_type(), can_vec_perm_const_p(), count, GET_MODE_BITSIZE(), GET_MODE_NUNITS(), GET_MODE_SIZE(), GET_MODE_UNIT_SIZE, get_vectype_for_scalar_type(), ggc_alloc(), i, int_mode_for_size(), known_eq, TYPE_MODE, vect_gen_perm_mask_checked(), and VECTOR_MODE_P.

Referenced by duplicate_and_interleave(), vect_build_slp_tree_2(), and vectorizable_reduction().

◆ check_reduction_path()

bool check_reduction_path ( dump_user_location_t loc,
loop_p loop,
gphi * phi,
tree loop_arg,
enum tree_code )
extern
Used in gimple-loop-interchange.c and tree-parloops.cc.   

References check_reduction_path(), ggc_alloc(), and path.

◆ compatible_calls_p()

bool compatible_calls_p ( gcall * call1,
gcall * call2 )
extern

◆ cse_and_gimplify_to_preheader()

tree cse_and_gimplify_to_preheader ( loop_vec_info loop_vinfo,
tree expr )
extern

◆ dr_misalignment()

◆ dr_target_alignment()

const poly_uint64 dr_target_alignment ( dr_vec_info * dr_info)
inline
Only defined once DR_MISALIGNMENT is defined.   

References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.

◆ dump_stmt_cost()

◆ duplicate_and_interleave()

void duplicate_and_interleave ( vec_info * vinfo,
gimple_seq * seq,
tree vector_type,
const vec< tree > & elts,
unsigned int nresults,
vec< tree > & results )
extern
Build a variable-length vector in which the elements in ELTS are repeated
to fill NRESULTS vectors of type VECTOR_TYPE.  Store the vectors in
RESULTS and add any new instructions to SEQ.

The approach we use is:

(1) Find a vector mode VM with integer elements of mode IM.

(2) Replace ELTS[0:NELTS] with ELTS'[0:NELTS'], where each element of
    ELTS' has mode IM.  This involves creating NELTS' VIEW_CONVERT_EXPRs
    from small vectors to IM.

(3) Duplicate each ELTS'[I] into a vector of mode VM.

(4) Use a tree of interleaving VEC_PERM_EXPRs to create VMs with the
    correct byte contents.

(5) Use VIEW_CONVERT_EXPR to cast the final VMs to the required type.

We try to find the largest IM for which this sequence works, in order
to cut down on the number of interleaves.   

References build_vector_type(), can_duplicate_and_interleave_p(), gcc_unreachable, ggc_alloc(), gimple_build(), gimple_build_assign(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_seq_add_stmt(), i, make_ssa_name(), TREE_TYPE, and TYPE_VECTOR_SUBPARTS().

Referenced by get_initial_defs_for_reduction(), and vect_create_constant_vectors().

◆ find_loop_location()

◆ finish_cost()

void finish_cost ( vector_costs * costs,
const vector_costs * scalar_costs,
unsigned * prologue_cost,
unsigned * body_cost,
unsigned * epilogue_cost,
unsigned * suggested_unroll_factor = NULL )
inline
Alias targetm.vectorize.finish_cost.   

Referenced by vect_bb_vectorization_profitable_p(), and vect_estimate_min_profitable_iters().

◆ get_dr_vinfo_offset()

tree get_dr_vinfo_offset ( vec_info * vinfo,
dr_vec_info * dr_info,
bool check_outer = false )
inline
Return the offset calculated by adding the offset of this DR_INFO to the
corresponding data_reference's offset.  If CHECK_OUTER then use
vect_dr_behavior to select the appropriate data_reference to use.   

References dr_info::dr, fold_build2, fold_convert, ggc_alloc(), data_reference::innermost, offset, innermost_loop_behavior::offset, sizetype, TREE_TYPE, and vect_dr_behavior().

Referenced by check_scan_store(), vect_create_addr_base_for_vector_ref(), vectorizable_load(), and vectorizable_store().

◆ get_later_stmt()

◆ get_mask_type_for_scalar_type() [1/2]

tree get_mask_type_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
slp_tree node )
extern
Function get_mask_type_for_scalar_type.

Returns the mask type corresponding to a result of comparison
of vectors of specified SCALAR_TYPE as supported by target.
NODE, if nonnull, is the SLP tree node that will use the returned
vector type.   

References get_vectype_for_scalar_type(), ggc_alloc(), NULL, and truth_type_for().

◆ get_mask_type_for_scalar_type() [2/2]

tree get_mask_type_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
unsigned int group_size )
extern
Function get_mask_type_for_scalar_type.

Returns the mask type corresponding to a result of comparison
of vectors of specified SCALAR_TYPE as supported by target.
If GROUP_SIZE is nonzero and we're performing BB vectorization,
make sure that the number of elements in the vector is no bigger
than GROUP_SIZE.   

References get_vectype_for_scalar_type(), ggc_alloc(), NULL, and truth_type_for().

Referenced by check_bool_pattern(), vect_check_scalar_mask(), vect_convert_mask_for_vectype(), vect_determine_mask_precision(), vect_get_vector_types_for_stmt(), and vect_recog_mask_conversion_pattern().

◆ get_related_vectype_for_scalar_type()

tree get_related_vectype_for_scalar_type ( machine_mode prevailing_mode,
tree scalar_type,
poly_uint64 nunits )
extern
In tree-vect-stmts.cc.   
If NUNITS is nonzero, return a vector type that contains NUNITS
elements of type SCALAR_TYPE, or null if the target doesn't support
such a type.

If NUNITS is zero, return a vector type that contains elements of
type SCALAR_TYPE, choosing whichever vector size the target prefers.

If PREVAILING_MODE is VOIDmode, we have not yet chosen a vector mode
for this vectorization region and want to "autodetect" the best choice.
Otherwise, PREVAILING_MODE is a previously-chosen vector TYPE_MODE
and we want the new type to be interoperable with it.   PREVAILING_MODE
in this case can be a scalar integer mode or a vector mode; when it
is a vector mode, the function acts like a tree-level version of
related_vector_mode.   

References build_nonstandard_integer_type(), build_qualified_type(), build_vector_type_for_mode(), gcc_assert, GET_MODE_BITSIZE(), GET_MODE_SIZE(), ggc_alloc(), INTEGRAL_MODE_P, INTEGRAL_TYPE_P, is_float_mode(), is_int_mode(), KEEP_QUAL_ADDR_SPACE, known_eq, mode_for_vector(), NULL_TREE, POINTER_TYPE_P, related_vector_mode(), SCALAR_FLOAT_TYPE_P, SCALAR_INT_MODE_P, targetm, TREE_CODE, TYPE_ADDR_SPACE, TYPE_ALIGN_UNIT, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_PRECISION, TYPE_QUALS, TYPE_UNSIGNED, lang_hooks::types, and VECTOR_MODE_P.

Referenced by get_same_sized_vectype(), get_vec_alignment_for_array_type(), get_vectype_for_scalar_type(), vect_create_epilog_for_reduction(), vect_create_partial_epilog(), and vect_find_reusable_accumulator().

◆ get_same_sized_vectype()

tree get_same_sized_vectype ( tree scalar_type,
tree vector_type )
extern
Function get_same_sized_vectype

Returns a vector type corresponding to SCALAR_TYPE of size
VECTOR_TYPE if supported by the target.   

References GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), ggc_alloc(), NULL_TREE, truth_type_for(), TYPE_MODE, and VECT_SCALAR_BOOLEAN_TYPE_P.

Referenced by vect_create_epilog_for_reduction(), vect_recog_rotate_pattern(), vectorizable_bswap(), vectorizable_conversion(), vectorizable_induction(), and vectorizable_reduction().

◆ get_vectype_for_scalar_type() [1/2]

tree get_vectype_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
slp_tree node )
extern
Return the vector type corresponding to SCALAR_TYPE as supported
by the target.  NODE, if nonnull, is the SLP tree node that will
use the returned vector type.   

References get_vectype_for_scalar_type(), ggc_alloc(), and SLP_TREE_LANES.

◆ get_vectype_for_scalar_type() [2/2]

tree get_vectype_for_scalar_type ( vec_info * vinfo,
tree scalar_type,
unsigned int group_size )
extern
Function get_vectype_for_scalar_type.

Returns the vector type corresponding to SCALAR_TYPE as supported
by the target.  If GROUP_SIZE is nonzero and we're performing BB
vectorization, make sure that the number of elements in the vector
is no bigger than GROUP_SIZE.   

References hash_set< KeyId, Lazy, Traits >::add(), floor_log2(), gcc_assert, get_related_vectype_for_scalar_type(), ggc_alloc(), maybe_ge, vec_info::slp_instances, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vec_info::used_vector_modes, and vec_info::vector_mode.

Referenced by adjust_bool_pattern(), adjust_bool_pattern_cast(), can_duplicate_and_interleave_p(), check_bool_pattern(), get_initial_def_for_reduction(), get_mask_type_for_scalar_type(), get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), vect_add_conversion_to_pattern(), vect_analyze_data_refs(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_convert_input(), vect_determine_mask_precision(), vect_determine_vectorization_factor(), vect_gather_scatter_fn_p(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_is_simple_cond(), vect_phi_first_order_recurrence_p(), vect_recog_abd_pattern(), vect_recog_average_pattern(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_divmod_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), vect_recog_mixed_size_cond_pattern(), vect_recog_mulhs_pattern(), vect_recog_mult_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_recog_pow_pattern(), vect_recog_rotate_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), vect_slp_prefer_store_lanes_p(), vect_split_statement(), vect_supportable_direct_optab_p(), vect_supportable_shift(), vect_synth_mult_by_constant(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_conversion(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_simd_clone_call().

◆ info_for_reduction()

◆ init_cost()

vector_costs * init_cost ( vec_info * vinfo,
bool costing_for_scalar )
inline
Alias targetm.vectorize.init_cost.   

References ggc_alloc(), and targetm.

◆ is_loop_header_bb_p()

bool is_loop_header_bb_p ( basic_block bb)
inline

◆ is_pattern_stmt_p()

◆ is_simple_and_all_uses_invariant()

bool is_simple_and_all_uses_invariant ( stmt_vec_info stmt_info,
loop_vec_info loop_vinfo )
extern
Function is_simple_and_all_uses_invariant

Return true if STMT_INFO is simple and all uses of it are invariant.   

References dump_enabled_p(), dump_printf_loc(), FOR_EACH_SSA_TREE_OPERAND, ggc_alloc(), MSG_MISSED_OPTIMIZATION, SSA_OP_USE, vect_constant_def, vect_external_def, vect_is_simple_use(), vect_location, and vect_uninitialized_def.

Referenced by vect_stmt_relevant_p(), and vectorizable_live_operation().

◆ known_alignment_for_access_p()

bool known_alignment_for_access_p ( dr_vec_info * dr_info,
tree vectype )
inline
Return TRUE if the (mis-)alignment of the data access is known with
respect to the targets preferred alignment for VECTYPE, and FALSE
otherwise.   

References dr_misalignment(), and DR_MISALIGNMENT_UNKNOWN.

Referenced by vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), vect_update_misalignment_for_peel(), and vector_alignment_reachable_p().

◆ loop_cost_model()

◆ loop_vec_info_for_loop()

loop_vec_info loop_vec_info_for_loop ( class loop * loop)
inline

◆ needs_fold_left_reduction_p()

bool needs_fold_left_reduction_p ( tree type,
code_helper code )
extern
Return true if we need an in-order reduction for operation CODE
on type TYPE.  NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer
overflow must wrap.   

References ggc_alloc(), INTEGRAL_TYPE_P, code_helper::is_tree_code(), operation_no_trapping_overflow(), SAT_FIXED_POINT_TYPE_P, and SCALAR_FLOAT_TYPE_P.

Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_reassociating_reduction_p(), vect_slp_check_for_roots(), and vectorizable_reduction().

◆ nested_in_vect_loop_p()

◆ neutral_op_for_reduction()

tree neutral_op_for_reduction ( tree scalar_type,
code_helper code,
tree initial_value,
bool as_initial )
extern
In tree-vect-loop.cc.   
If there is a neutral value X such that a reduction would not be affected
by the introduction of additional X elements, return that X, otherwise
return null.  CODE is the code of the reduction and SCALAR_TYPE is type
of the scalar elements.  If the reduction has just a single initial value
then INITIAL_VALUE is that value, otherwise it is null.
If AS_INITIAL is TRUE the value is supposed to be used as initial value.
In that case no signed zero is returned.   

References build_all_ones_cst(), build_one_cst(), build_real(), build_zero_cst(), dconstm0, ggc_alloc(), HONOR_SIGNED_ZEROS(), code_helper::is_tree_code(), and NULL_TREE.

Referenced by convert_scalar_cond_reduction(), vect_create_epilog_for_reduction(), vect_expand_fold_left(), vect_find_reusable_accumulator(), vect_transform_cycle_phi(), and vectorizable_reduction().

◆ optimize_mask_stores()

void optimize_mask_stores ( class loop * loop)
extern
The code below is trying to perform simple optimization - revert
  if-conversion for masked stores, i.e. if the mask of a store is zero
  do not perform it and all stored value producers also if possible.
  For example,
    for (i=0; i<n; i++)
      if (c[i])
       {
         p1[i] += 1;
         p2[i] = p3[i] +2;
       }
  this transformation will produce the following semi-hammock:

  if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 })
    {
      vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165);
      vect__12.22_172 = vect__11.19_170 + vect_cst__171;
      MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172);
      vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165);
      vect__19.28_184 = vect__18.25_182 + vect_cst__183;
      MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184);
    }

References add_bb_to_loop(), add_phi_arg(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_empty_bb(), create_phi_node(), dom_info_available_p(), dump_enabled_p(), dump_printf_loc(), EDGE_SUCC, find_loop_location(), flow_loop_nested_p(), FOR_EACH_IMM_USE_FAST, free(), gcc_assert, get_loop_body(), ggc_alloc(), gimple_bb(), gimple_build_cond(), gimple_call_arg(), gimple_call_internal_p(), gimple_get_lhs(), gimple_has_volatile_ops(), gimple_set_vdef(), gimple_vdef(), gimple_vop(), gimple_vuse(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_last_bb(), gsi_move_before(), gsi_next(), gsi_prev(), gsi_remove(), GSI_SAME_STMT, gsi_start_bb(), gsi_stmt(), has_zero_uses(), i, basic_block_def::index, is_gimple_debug(), last, profile_probability::likely(), basic_block_def::loop_father, make_edge(), make_single_succ_edge(), make_ssa_name(), MSG_NOTE, NULL, NULL_TREE, loop::num_nodes, set_immediate_dominator(), split_block(), TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, USE_STMT, vect_location, VECTOR_TYPE_P, and worklist.

◆ perm_mask_for_reverse()

tree perm_mask_for_reverse ( tree vectype)
extern
If the target supports a permute mask that reverses the elements in
a vector of type VECTYPE, return that mask, otherwise return null.   

References can_vec_perm_const_p(), ggc_alloc(), i, NULL_TREE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), and vect_gen_perm_mask_checked().

Referenced by get_negative_load_store_type(), vectorizable_load(), and vectorizable_store().

◆ record_stmt_cost() [1/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
enum vect_cost_model_location )
extern

◆ record_stmt_cost() [2/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
slp_tree node,
tree vectype,
int misalign,
enum vect_cost_model_location )
extern

◆ record_stmt_cost() [3/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt,
stmt_vec_info stmt_info,
tree vectype,
int misalign,
enum vect_cost_model_location )
extern

References count, ggc_alloc(), NULL, and record_stmt_cost().

Referenced by record_stmt_cost().

◆ record_stmt_cost() [4/4]

unsigned record_stmt_cost ( stmt_vector_for_cost * body_cost_vec,
int count,
enum vect_cost_for_stmt kind,
stmt_vec_info stmt_info,
int misalign,
enum vect_cost_model_location where )
inline
Overload of record_stmt_cost with VECTYPE derived from STMT_INFO.   

References count, ggc_alloc(), record_stmt_cost(), and STMT_VINFO_VECTYPE.

◆ reduction_fn_for_scalar_code()

bool reduction_fn_for_scalar_code ( code_helper code,
internal_fn * reduc_fn )
extern
Function reduction_fn_for_scalar_code

Input:
CODE - tree_code of a reduction operation.

Output:
REDUC_FN - the corresponding internal function to be used to reduce the
   vector of partial results into a single scalar result, or IFN_LAST
   if the operation is a supported reduction operation, but does not have
   such an internal function.

Return FALSE if CODE currently cannot be vectorized as reduction.   

References ggc_alloc(), IFN_LAST, and code_helper::is_tree_code().

Referenced by vect_slp_check_for_roots(), vectorizable_bb_reduc_epilogue(), vectorizable_reduction(), and vectorize_slp_instance_root_stmt().

◆ ref_within_array_bound()

bool ref_within_array_bound ( gimple * stmt,
tree ref )
extern
In tree-if-conv.cc.   
Return TRUE if REF is a within-bound array reference.   

References for_each_index(), gcc_assert, idx_within_array_bound(), loop_containing_stmt(), and NULL.

Referenced by ifcvt_memrefs_wont_trap(), and vect_analyze_early_break_dependences().

◆ set_dr_misalignment()

void set_dr_misalignment ( dr_vec_info * dr_info,
int val )
inline

◆ set_dr_target_alignment()

void set_dr_target_alignment ( dr_vec_info * dr_info,
poly_uint64 val )
inline

◆ slpeel_can_duplicate_loop_p()

bool slpeel_can_duplicate_loop_p ( const class loop * loop,
const_edge exit_e,
const_edge e )
extern
This function verifies that the following restrictions apply to LOOP:
 (1) it consists of exactly 2 basic blocks - header, and an empty latch
     for innermost loop and 5 basic blocks for outer-loop.
 (2) it is single entry, single exit
 (3) its exit condition is the last stmt in the header
 (4) E is the entry/exit edge of LOOP.

References can_copy_bbs_p(), empty_block_p(), free(), get_loop_body_with_size(), get_loop_exit_condition(), ggc_alloc(), gsi_last_bb(), gsi_stmt(), loop::latch, loop_outer(), loop_preheader_edge(), and loop::num_nodes.

Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().

◆ slpeel_tree_duplicate_loop_to_edge_cfg()

class loop * slpeel_tree_duplicate_loop_to_edge_cfg ( class loop * loop,
edge loop_exit,
class loop * scalar_loop,
edge scalar_exit,
edge e,
edge * new_e,
bool flow_loops,
vec< basic_block > * updated_doms )
Given LOOP this function generates a new copy of it and puts it
on E which is either the entry or exit of LOOP.  If SCALAR_LOOP is
non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the
basic blocks from SCALAR_LOOP instead of LOOP, but to either the
entry or exit of LOOP.  If FLOW_LOOPS then connect LOOP to SCALAR_LOOP as a
continuation.  This is correct for cases where one loop continues from the
other like in the vectorizer, but not true for uses in e.g. loop distribution
where the contents of the loop body are split but the iteration space of both
copies remains the same.

If UPDATED_DOMS is not NULL it is updated with the list of basic blocks whose
dominators were updated during the peeling.  When doing early break vectorization
then LOOP_VINFO needs to be provided and is used to keep track of any newly created
memory references that need to be updated should we decide to vectorize.   

References add_phi_arg(), add_phi_args_after_copy(), adjust_debug_stmts(), adjust_phi_and_debug_stmts(), CDI_DOMINATORS, checking_verify_dominators(), copy_bbs(), copy_ssa_name(), create_phi_node(), delete_basic_block(), duplicate_loop(), duplicate_subloops(), EDGE_COUNT, EDGE_PRED, first_dom_son(), flow_bb_inside_loop_p(), flush_pending_stmts(), FOR_EACH_EDGE, free(), gcc_assert, get_all_dominated_blocks(), get_bb_copy(), get_immediate_dominator(), get_live_virtual_operand_on_edge(), get_loop_body_with_size(), get_loop_copy(), get_loop_exit_edges(), get_virtual_phi(), ggc_alloc(), gimple_phi_arg_def_from_edge(), gimple_phi_num_args(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_next(), gsi_start_phis(), gsi_stmt(), loop::header, i, loop::inner, iterate_fix_dominators(), loop::latch, loop_latch_edge(), loop_outer(), loop_preheader_edge(), MAY_HAVE_DEBUG_BIND_STMTS, next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_ARG_DEF_FROM_EDGE, PHI_ARG_DEF_PTR_FROM_EDGE, PHI_RESULT, queue, redirect_edge_and_branch(), redirect_edge_and_branch_force(), redirect_edge_pred(), redirect_edge_var_map_clear(), remove_phi_node(), rename_use_op(), rename_variables_in_bb(), set_immediate_dominator(), SET_PHI_ARG_DEF, SET_PHI_ARG_DEF_ON_EDGE, single_pred(), single_pred_edge(), single_succ_edge(), single_succ_p(), split_edge(), TREE_CODE, true, UNKNOWN_LOCATION, and virtual_operand_p().

Referenced by copy_loop_before(), and vect_do_peeling().

◆ supportable_narrowing_operation()

bool supportable_narrowing_operation ( code_helper code,
tree vectype_out,
tree vectype_in,
code_helper * code1,
int * multi_step_cvt,
vec< tree > * interm_types )
extern
Function supportable_narrowing_operation

Check whether an operation represented by the code CODE is a
narrowing operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE_IN
and producing a result of type VECTYPE_OUT).

Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC
and FLOAT.  This function checks if these operations are supported by
the target platform directly via vector tree-codes.

Output:
- CODE1 is the code of a vector operation to be used when
vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like int->short->char - in that case
MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the
narrowing operation (short in the above example).    

References CASE_CONVERT, gcc_unreachable, ggc_alloc(), i, insn_data, code_helper::is_tree_code(), known_eq, MAX_INTERM_CVT_STEPS, insn_operand_data::mode, insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), SCALAR_INT_MODE_P, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_double_mask_nunits(), and VECTOR_BOOLEAN_TYPE_P.

Referenced by simple_integer_narrowing(), and vectorizable_conversion().

◆ supportable_widening_operation()

bool supportable_widening_operation ( vec_info * vinfo,
code_helper code,
stmt_vec_info stmt_info,
tree vectype_out,
tree vectype_in,
code_helper * code1,
code_helper * code2,
int * multi_step_cvt,
vec< tree > * interm_types )
extern
Function supportable_widening_operation

Check whether an operation represented by the code CODE is a
widening operation that is supported by the target platform in
vector form (i.e., when operating on arguments of type VECTYPE_IN
producing a result of type VECTYPE_OUT).

Widening operations we currently support are NOP (CONVERT), FLOAT,
FIX_TRUNC and WIDEN_MULT.  This function checks if these operations
are supported by the target platform either directly (via vector
tree-codes), or via target builtins.

Output:
- CODE1 and CODE2 are codes of vector operations to be used when
vectorizing the operation, if available.
- MULTI_STEP_CVT determines the number of required intermediate steps in
case of multi-step conversion (like char->short->int - in that case
MULTI_STEP_CVT will be 1).
- INTERM_TYPES contains the intermediate type required to perform the
widening operation (short in the above example).   

References as_combined_fn(), as_internal_fn(), build_vector_type_for_mode(), CASE_CONVERT, CONVERT_EXPR_CODE_P, direct_internal_fn_optab(), gcc_unreachable, GET_MODE_INNER, ggc_alloc(), gimple_assign_lhs(), i, insn_data, code_helper::is_tree_code(), known_eq, lookup_evenodd_internal_fn(), lookup_hilo_internal_fn(), LOOP_VINFO_LOOP, MAX_INTERM_CVT_STEPS, MAX_TREE_CODES, insn_operand_data::mode, nested_in_vect_loop_p(), NULL, insn_data_d::operand, optab_default, optab_for_tree_code(), optab_handler(), code_helper::safe_as_tree_code(), SCALAR_INT_MODE_P, STMT_VINFO_DEF_TYPE, STMT_VINFO_RELEVANT, supportable_widening_operation(), lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_halve_mask_nunits(), vect_reduction_def, vect_used_by_reduction, VECTOR_BOOLEAN_TYPE_P, VECTOR_MODE_P, and widening_fn_p().

Referenced by supportable_widening_operation(), vect_recog_abd_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), and vectorizable_conversion().

◆ unlimited_cost_model()

◆ vec_init_loop_exit_info()

◆ vect_analyze_data_ref_accesses()

◆ vect_analyze_data_ref_dependences()

opt_result vect_analyze_data_ref_dependences ( loop_vec_info loop_vinfo,
unsigned int * max_vf )
extern
Function vect_analyze_data_ref_dependences.

Examine all the data references in the loop, and make sure there do not
exist any data dependences between them.  Set *MAX_VF according to
the maximum vectorization factor the data dependences allow.   

References compute_all_dependences(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, gcc_assert, ggc_alloc(), i, LOOP_VINFO_DATAREFS, LOOP_VINFO_DDRS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP_NEST, LOOP_VINFO_NO_DATA_DEPENDENCIES, LOOP_VINFO_ORIG_MAX_VECT_FACTOR, opt_result::success(), vect_analyze_data_ref_dependence(), and vect_analyze_early_break_dependences().

Referenced by vect_analyze_loop_2().

◆ vect_analyze_data_refs()

opt_result vect_analyze_data_refs ( vec_info * vinfo,
poly_uint64 * min_vf,
bool * fatal )
extern
Function vect_analyze_data_refs.

 Find all the data references in the loop or basic block.

  The general structure of the analysis of data refs in the vectorizer is as
  follows:
  1- vect_analyze_data_refs(loop/bb): call
     compute_data_dependences_for_loop/bb to find and analyze all data-refs
     in the loop/bb and their dependences.
  2- vect_analyze_dependences(): apply dependence testing using ddrs.
  3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
  4- vect_analyze_drs_access(): check that ref_stmt.step is ok.

References data_reference::aux, build_fold_indirect_ref, vec_info_shared::datarefs, DECL_NONALIASED, dr_analyze_innermost(), DR_BASE_ADDRESS, DR_INIT, DR_IS_READ, DR_IS_WRITE, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, opt_result::failure_at(), fatal(), fold_build2, fold_build_pointer_plus, FOR_EACH_VEC_ELT, gcc_assert, get_base_address(), get_vectype_for_scalar_type(), ggc_alloc(), i, vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, offset, vec_info::shared, data_reference::stmt, STMT_VINFO_DR_BASE_ADDRESS, STMT_VINFO_DR_BASE_ALIGNMENT, STMT_VINFO_DR_BASE_MISALIGNMENT, STMT_VINFO_DR_INIT, STMT_VINFO_DR_OFFSET, STMT_VINFO_DR_OFFSET_ALIGNMENT, STMT_VINFO_DR_STEP, STMT_VINFO_DR_STEP_ALIGNMENT, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), TDF_DETAILS, TREE_CODE, TREE_THIS_VOLATILE, TREE_TYPE, TYPE_VECTOR_SUBPARTS(), unshare_expr(), VAR_P, vect_check_gather_scatter(), and vect_location.

Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().

◆ vect_analyze_data_refs_alignment()

opt_result vect_analyze_data_refs_alignment ( loop_vec_info loop_vinfo)
extern
Function vect_analyze_data_refs_alignment

Analyze the alignment of the data-references in the loop.
Return FALSE if a data reference is found that cannot be vectorized.   

References DR_GROUP_FIRST_ELEMENT, DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), vect_compute_data_ref_alignment(), and vect_record_base_alignments().

Referenced by vect_analyze_loop_2().

◆ vect_analyze_loop()

◆ vect_analyze_loop_form()

◆ vect_analyze_slp()

◆ vect_analyze_stmt()

opt_result vect_analyze_stmt ( vec_info * vinfo,
stmt_vec_info stmt_info,
bool * need_to_vectorize,
slp_tree node,
slp_instance node_instance,
stmt_vector_for_cost * cost_vec )
extern

◆ vect_apply_runtime_profitability_check_p()

bool vect_apply_runtime_profitability_check_p ( loop_vec_info loop_vinfo)
inline
Return true if LOOP_VINFO requires a runtime check for whether the
vector loop is profitable.   

References LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_NITERS_KNOWN_P, and vect_vf_for_cost().

Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_loop_versioning(), and vect_transform_loop().

◆ vect_build_loop_niters()

tree vect_build_loop_niters ( loop_vec_info loop_vinfo,
bool * new_var_p )
extern
This function builds ni_name = number of iterations.  Statements
are emitted on the loop preheader edge.  If NEW_VAR_P is not NULL, set
it to TRUE if new ssa_var is generated.   

References create_tmp_var, force_gimple_operand(), ggc_alloc(), gsi_insert_seq_on_edge_immediate(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, NULL, TREE_CODE, TREE_TYPE, and unshare_expr().

Referenced by vect_do_peeling(), and vect_transform_loop().

◆ vect_can_advance_ivs_p()

bool vect_can_advance_ivs_p ( loop_vec_info loop_vinfo)
extern
Function vect_can_advance_ivs_p

In case the number of iterations that LOOP iterates is unknown at compile
time, an epilog loop will be generated, and the loop induction variables
(IVs) will be "advanced" to the value they are supposed to take just before
the epilog loop.  Here we check that the access function of the loop IVs
and the expression that represents the loop bound are simple enough.
These restrictions will be relaxed in the future.   

References dump_enabled_p(), dump_printf(), dump_printf_loc(), expr_invariant_in_loop_p(), ggc_alloc(), gsi_end_p(), gsi_next(), gsi_start_phis(), loop::header, iv_phi_p(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL_TREE, gphi_iterator::phi(), STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, tree_is_chrec(), vect_can_peel_nonlinear_iv_p(), vect_location, and vect_step_op_add.

Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().

◆ vect_can_force_dr_alignment_p()

bool vect_can_force_dr_alignment_p ( const_tree decl,
poly_uint64 alignment )
extern
In tree-vect-data-refs.cc.   
Function vect_can_force_dr_alignment_p.

Returns whether the alignment of a DECL can be forced to be aligned
on ALIGNMENT bit boundary.   

References decl_in_symtab_p(), symtab_node::get(), ggc_alloc(), known_le, MAX_OFILE_ALIGNMENT, MAX_STACK_ALIGNMENT, TREE_STATIC, and VAR_P.

Referenced by increase_alignment(), and vect_compute_data_ref_alignment().

◆ vect_can_vectorize_without_simd_p() [1/2]

bool vect_can_vectorize_without_simd_p ( code_helper code)
extern
Likewise, but taking a code_helper.   

References code_helper::is_tree_code(), and vect_can_vectorize_without_simd_p().

◆ vect_can_vectorize_without_simd_p() [2/2]

bool vect_can_vectorize_without_simd_p ( tree_code code)
extern
Return true if we can emulate CODE on an integer mode representation
of a vector.   

References ggc_alloc().

Referenced by vect_can_vectorize_without_simd_p(), vectorizable_operation(), and vectorizable_reduction().

◆ vect_check_gather_scatter()

bool vect_check_gather_scatter ( stmt_vec_info stmt_info,
loop_vec_info loop_vinfo,
gather_scatter_info * info )
extern
Return true if a non-affine read or write in STMT_INFO is suitable for a
gather load or scatter store.  Describe the operation in *INFO if so.   

References gather_scatter_info::base, build_fold_addr_expr, CASE_CONVERT, gather_scatter_info::decl, do_add(), DR_IS_READ, DR_REF, gather_scatter_info::element_type, expr_invariant_in_loop_p(), extract_ops_from_tree(), fold_convert, get_gimple_rhs_class(), get_inner_reference(), ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs_code(), gimple_call_internal_fn(), gimple_call_internal_p(), GIMPLE_TERNARY_RHS, gather_scatter_info::ifn, IFN_LAST, integer_zerop(), INTEGRAL_TYPE_P, internal_gather_scatter_fn_p(), is_gimple_assign(), LOOP_VINFO_LOOP, may_be_nonaddressable_p(), mem_ref_offset(), gather_scatter_info::memory_type, NULL_TREE, gather_scatter_info::offset, gather_scatter_info::offset_dt, gather_scatter_info::offset_vectype, operand_equal_p(), POINTER_TYPE_P, gather_scatter_info::scale, signed_char_type_node, size_binop, size_int, size_zero_node, sizetype, SSA_NAME_DEF_STMT, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, STRIP_NOPS, supports_vec_gather_load_p(), supports_vec_scatter_store_p(), targetm, TREE_CODE, tree_fits_shwi_p(), TREE_OPERAND, tree_to_shwi(), TREE_TYPE, TYPE_MODE, TYPE_PRECISION, TYPE_SIZE, unsigned_char_type_node, vect_describe_gather_scatter_call(), vect_gather_scatter_fn_p(), vect_unknown_def_type, and wide_int_to_tree().

Referenced by get_load_store_type(), vect_analyze_data_refs(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), and vect_use_strided_gather_scatters_p().

◆ vect_chooses_same_modes_p()

bool vect_chooses_same_modes_p ( vec_info * vinfo,
machine_mode vector_mode )
extern
Return true if replacing LOOP_VINFO->vector_mode with VECTOR_MODE
would not change the chosen vector modes.   

References hash_set< KeyId, Lazy, Traits >::begin(), hash_set< KeyId, Lazy, Traits >::end(), GET_MODE_INNER, i, related_vector_mode(), vec_info::used_vector_modes, and VECTOR_MODE_P.

Referenced by vect_analyze_loop_1(), and vect_slp_region().

◆ vect_comparison_type()

tree vect_comparison_type ( stmt_vec_info stmt_info)
inline
If STMT_INFO is a comparison or contains an embedded comparison, return the
scalar type of the values being compared.  Return null otherwise.   

References ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tcc_comparison, TREE_CODE_CLASS, TREE_TYPE, and vect_embedded_comparison_type().

◆ vect_copy_ref_info()

void vect_copy_ref_info ( tree dest,
tree src )
extern
Copy memory reference info such as base/clique from the SRC reference
to the DEST MEM_REF.   

References ggc_alloc(), handled_component_p(), MR_DEPENDENCE_BASE, MR_DEPENDENCE_CLIQUE, TREE_CODE, and TREE_OPERAND.

Referenced by vect_setup_realignment(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().

◆ vect_create_addr_base_for_vector_ref()

tree vect_create_addr_base_for_vector_ref ( vec_info * vinfo,
stmt_vec_info stmt_info,
gimple_seq * new_stmt_list,
tree offset )
extern
Function vect_create_addr_base_for_vector_ref.

Create an expression that computes the address of the first memory location
that will be accessed for a data reference.

Input:
STMT_INFO: The statement containing the data reference.
NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
OFFSET: Optional. If supplied, it is to be added to the initial address.
LOOP:    Specify relative to which loop-nest should the address be computed.
         For example, when the dataref is in an inner-loop nested in an
         outer-loop that is now being vectorized, LOOP can be either the
         outer-loop, or the inner-loop.  The first memory location accessed
         by the following dataref ('in' points to short):

        for (i=0; i<N; i++)
           for (j=0; j<M; j++)
             s += in[i+j]

         is as follows:
         if LOOP=i_loop:        &in             (relative to i_loop)
         if LOOP=j_loop:        &in+i*2B        (relative to j_loop)

Output:
1. Return an SSA_NAME whose value is the address of the memory location of
   the first vector of the data reference.
2. If new_stmt_list is not NULL_TREE after return then the caller must insert
   these statement(s) which define the returned SSA_NAME.

FORNOW: We are only handling array accesses with step 1.   

References build1(), build_pointer_type(), dr_info::dr, DR_PTR_INFO, DR_REF, dump_enabled_p(), dump_printf_loc(), fold_build2, fold_build_pointer_plus, fold_convert, force_gimple_operand(), gcc_assert, get_dr_vinfo_offset(), get_name(), ggc_alloc(), gimple_seq_add_seq(), MSG_NOTE, NULL, offset, size_binop, sizetype, SSA_NAME_PTR_INFO, SSA_NAME_VAR, ssize_int, STMT_VINFO_DR_INFO, strip_zero_offset_components(), TREE_CODE, TREE_TYPE, unshare_expr(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.

Referenced by get_misalign_in_elems(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), and vect_setup_realignment().

◆ vect_create_data_ref_ptr()

tree vect_create_data_ref_ptr ( vec_info * vinfo,
stmt_vec_info stmt_info,
tree aggr_type,
class loop * at_loop,
tree offset,
tree * initial_address,
gimple_stmt_iterator * gsi,
gimple ** ptr_incr,
bool only_init,
tree iv_step )
extern
Function vect_create_data_ref_ptr.

Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
location accessed in the loop by STMT_INFO, along with the def-use update
chain to appropriately advance the pointer through the loop iterations.
Also set aliasing information for the pointer.  This pointer is used by
the callers to this function to create a memory reference expression for
vector load/store access.

Input:
1. STMT_INFO: a stmt that references memory. Expected to be of the form
      GIMPLE_ASSIGN <name, data-ref> or
      GIMPLE_ASSIGN <data-ref, name>.
2. AGGR_TYPE: the type of the reference, which should be either a vector
     or an array.
3. AT_LOOP: the loop where the vector memref is to be created.
4. OFFSET (optional): a byte offset to be added to the initial address
     accessed by the data-ref in STMT_INFO.
5. BSI: location where the new stmts are to be placed if there is no loop
6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
     pointing to the initial address.
7. IV_STEP (optional, defaults to NULL): the amount that should be added
     to the IV during each iteration of the loop.  NULL says to move
     by one copy of AGGR_TYPE up or down, depending on the step of the
     data reference.

Output:
1. Declare a new ptr to vector_type, and have it point to the base of the
   data reference (initial address accessed by the data reference).
   For example, for vector of type V8HI, the following code is generated:

   v8hi *ap;
   ap = (v8hi *)initial_address;

   if OFFSET is not supplied:
      initial_address = &a[init];
   if OFFSET is supplied:
      initial_address = &a[init] + OFFSET;
   if BYTE_OFFSET is supplied:
      initial_address = &a[init] + BYTE_OFFSET;

   Return the initial_address in INITIAL_ADDRESS.

2. If ONLY_INIT is true, just return the initial pointer.  Otherwise, also
   update the pointer in each iteration of the loop.

   Return the increment stmt that updates the pointer in PTR_INCR.

3. Return the pointer.   

References alias_sets_conflict_p(), build_pointer_type_for_mode(), create_iv(), dr_info::dr, DR_BASE_ADDRESS, DR_BASE_OBJECT, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, DR_GROUP_SIZE, DR_PTR_INFO, DR_REF, DR_STEP, dump_enabled_p(), dump_printf(), dump_printf_loc(), fold_build1, fold_convert, gcc_assert, gcc_unreachable, get_alias_set(), get_name(), get_tree_code_name(), ggc_alloc(), gimple_bb(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, offset, standard_iv_increment_position(), innermost_loop_behavior::step, STMT_VINFO_DATA_REF, STMT_VINFO_DR_INFO, TREE_CODE, tree_int_cst_sgn(), TREE_TYPE, TYPE_SIZE_UNIT, vect_create_addr_base_for_vector_ref(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.

Referenced by vect_setup_realignment(), vectorizable_load(), and vectorizable_store().

◆ vect_create_destination_var()

◆ vect_create_loop_vinfo()

◆ vect_create_new_slp_node()

◆ vect_detect_hybrid_slp()

◆ vect_determine_partial_vectors_and_peeling()

opt_result vect_determine_partial_vectors_and_peeling ( loop_vec_info loop_vinfo)
extern
Used in tree-vect-loop-manip.cc  
Determine if operating on full vectors for LOOP_VINFO might leave
 some scalar iterations still to do.  If so, decide how we should
 handle those scalar iterations.  The possibilities are:

 (1) Make LOOP_VINFO operate on partial vectors instead of full vectors.
     In this case:

       LOOP_VINFO_USING_PARTIAL_VECTORS_P == true
       LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
       LOOP_VINFO_PEELING_FOR_NITER == false

 (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop
     to handle the remaining scalar iterations.  In this case:

       LOOP_VINFO_USING_PARTIAL_VECTORS_P == false
       LOOP_VINFO_PEELING_FOR_NITER == true

     There are two choices:

     (2a) Consider vectorizing the epilogue loop at the same VF as the
          main loop, but using partial vectors instead of full vectors.
          In this case:

            LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true

     (2b) Consider vectorizing the epilogue loop at lower VFs only.
          In this case:

            LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false

References dump_enabled_p(), dump_printf_loc(), ggc_alloc(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_USING_SELECT_VL_P, MSG_NOTE, opt_result::success(), _loop_vec_info::suggested_unroll_factor, vect_known_niters_smaller_than_vf(), vect_location, and vect_need_peeling_or_partial_vectors_p().

Referenced by vect_analyze_loop_2(), and vect_do_peeling().

◆ vect_do_peeling()

class loop * vect_do_peeling ( loop_vec_info loop_vinfo,
tree niters,
tree nitersm1,
tree * niters_vector,
tree * step_vector,
tree * niters_vector_mult_vf_var,
int th,
bool check_profitability,
bool niters_no_overflow,
tree * advance )
extern
Function vect_do_peeling.

Input:
- LOOP_VINFO: Represent a loop to be vectorized, which looks like:

    preheader:
  LOOP:
    header_bb:
      loop_body
      if (exit_loop_cond) goto exit_bb
      else                goto header_bb
    exit_bb:

- NITERS: The number of iterations of the loop.
- NITERSM1: The number of iterations of the loop's latch.
- NITERS_NO_OVERFLOW: No overflow in computing NITERS.
- TH, CHECK_PROFITABILITY: Threshold of niters to vectorize loop if
                      CHECK_PROFITABILITY is true.
Output:
- *NITERS_VECTOR and *STEP_VECTOR describe how the main loop should
  iterate after vectorization; see vect_set_loop_condition for details.
- *NITERS_VECTOR_MULT_VF_VAR is either null or an SSA name that
  should be set to the number of scalar iterations handled by the
  vector loop.  The SSA name is only used on exit from the loop.

This function peels prolog and epilog from the loop, adds guards skipping
PROLOG and EPILOG for various conditions.  As a result, the changed CFG
would look like:

    guard_bb_1:
      if (prefer_scalar_loop) goto merge_bb_1
      else                    goto guard_bb_2

    guard_bb_2:
      if (skip_prolog) goto merge_bb_2
      else             goto prolog_preheader

    prolog_preheader:
  PROLOG:
    prolog_header_bb:
      prolog_body
      if (exit_prolog_cond) goto prolog_exit_bb
      else                  goto prolog_header_bb
    prolog_exit_bb:

    merge_bb_2:

    vector_preheader:
  VECTOR LOOP:
    vector_header_bb:
      vector_body
      if (exit_vector_cond) goto vector_exit_bb
      else                  goto vector_header_bb
    vector_exit_bb:

    guard_bb_3:
      if (skip_epilog) goto merge_bb_3
      else             goto epilog_preheader

    merge_bb_1:

    epilog_preheader:
  EPILOG:
    epilog_header_bb:
      epilog_body
      if (exit_epilog_cond) goto merge_bb_3
      else                  goto epilog_header_bb

    merge_bb_3:

Note this function peels prolog and epilog only if it's necessary,
as well as guards.
This function returns the epilogue loop if a decision was made to vectorize
it, otherwise NULL.

The analysis resulting in this epilogue loop's loop_vec_info was performed
in the same vect_analyze_loop call as the main loop's.  At that time
vect_analyze_loop constructs a list of accepted loop_vec_info's for lower
vectorization factors than the main loop.  This list is stored in the main
loop's loop_vec_info in the 'epilogue_vinfos' member.  Every time we decide to
vectorize the epilogue loop for a lower vectorization factor,  the
loop_vec_info sitting at the top of the epilogue_vinfos list is removed,
updated and linked to the epilogue loop.  This is later used to vectorize
the epilogue.  The reason the loop_vec_info needs updating is that it was
constructed based on the original main loop, and the epilogue loop is a
copy of this loop, so all links pointing to statements in the original loop
need updating.  Furthermore, these loop_vec_infos share the
data_reference's records, which will also need to be updated.

TODO: Guard for prefer_scalar_loop should be emitted along with
versioning conditions if loop versioning is needed.   

References add_phi_arg(), adjust_vec, adjust_vec_debug_stmts(), advance(), profile_probability::always(), profile_probability::apply_scale(), boolean_type_node, build_int_cst(), build_one_cst(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_phi_node(), DEF_FROM_PTR, delete_update_ssa(), DR_TARGET_ALIGNMENT, EDGE_PRED, _loop_vec_info::epilogue_vinfos, first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_build2, FOR_EACH_IMM_USE_STMT, FOR_EACH_SSA_DEF_OPERAND, free(), free_original_copy_tables(), gcc_assert, gcc_checking_assert, get_bb_original(), get_immediate_dominator(), get_loop_body(), get_loop_copy(), get_loop_exit_edges(), ggc_alloc(), gimple_bb(), gimple_build_assign(), gimple_build_nop(), gimple_debug_bind_p(), gimple_debug_bind_reset_value(), gimple_phi_arg_def_from_edge(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), profile_probability::guessed_always(), i, initialize_original_copy_tables(), integer_onep(), poly_int< N, C >::is_constant(), iterate_fix_dominators(), LOOP_C_INFINITE, loop_constraint_clear(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING, LOOP_VINFO_BBS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_EPILOGUE_IV_EXIT, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERSM1, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_UNALIGNED_DR, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, make_ssa_name(), MAY_HAVE_DEBUG_BIND_STMTS, need_ssa_update_p(), next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_RESULT, queue, record_niter_bound(), reset_original_copy_tables(), scale_bbs_frequencies(), scale_loop_profile(), scev_reset(), set_immediate_dominator(), 
set_range_info(), single_pred_edge(), single_pred_p(), single_succ_edge(), slpeel_add_loop_guard(), slpeel_can_duplicate_loop_p(), slpeel_tree_duplicate_loop_to_edge_cfg(), slpeel_update_phi_nodes_for_guard1(), split_edge(), SSA_NAME_DEF_STMT, SSA_OP_DEF, poly_int< N, C >::to_constant(), wi::to_wide(), TREE_CODE, TREE_TYPE, TYPE_MAX_VALUE, ui, UNKNOWN_LOCATION, update_stmt(), vect_build_loop_niters(), vect_can_advance_ivs_p(), vect_determine_partial_vectors_and_peeling(), vect_gen_prolog_loop_niters(), vect_gen_scalar_loop_niters(), vect_gen_vector_loop_niters(), vect_gen_vector_loop_niters_mult_vf(), vect_set_loop_condition(), vect_update_inits_of_drs(), vect_update_ivs_after_vectorizer(), vect_use_loop_mask_for_alignment_p(), vect_vf_for_cost(), and virtual_operand_p().

Referenced by vect_transform_loop().

◆ vect_double_mask_nunits()

tree vect_double_mask_nunits ( tree old_type,
machine_mode new_mode )
extern
Return a mask type with twice as many elements as OLD_TYPE,
given that it should have mode NEW_MODE.   

References build_truth_vector_type_for_mode(), ggc_alloc(), new_mode(), and TYPE_VECTOR_SUBPARTS().

Referenced by supportable_narrowing_operation().

◆ vect_dr_behavior()

innermost_loop_behavior * vect_dr_behavior ( vec_info * vinfo,
dr_vec_info * dr_info )
inline

◆ vect_embedded_comparison_type()

tree vect_embedded_comparison_type ( stmt_vec_info stmt_info)
inline
If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
scalar type of the values being compared.  Return null otherwise.   

References COMPARISON_CLASS_P, ggc_alloc(), gimple_assign_rhs1(), gimple_assign_rhs_code(), NULL_TREE, TREE_OPERAND, and TREE_TYPE.

Referenced by vect_comparison_type().

◆ vect_emulated_vector_p()

bool vect_emulated_vector_p ( tree vectype)
extern
Return true if VECTYPE represents a vector that requires lowering
by the vector lowering pass.   

References TREE_TYPE, TYPE_MODE, TYPE_PRECISION, VECTOR_BOOLEAN_TYPE_P, and VECTOR_MODE_P.

Referenced by vectorizable_call(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_shift().

◆ vect_enhance_data_refs_alignment()

opt_result vect_enhance_data_refs_alignment ( loop_vec_info loop_vinfo)
extern
Function vect_enhance_data_refs_alignment

This pass will use loop versioning and loop peeling in order to enhance
the alignment of data references in the loop.

FOR NOW: we assume that whatever versioning/peeling takes place, only the
original loop is to be vectorized.  Any other loops that are created by
the transformations performed in this pass - are not supposed to be
vectorized.  This restriction will be relaxed.

This pass will require a cost model to guide it whether to apply peeling
or versioning or a combination of the two.  For example, the scheme that
intel uses when given a loop with several memory accesses, is as follows:
choose one memory access ('p') which alignment you want to force by doing
peeling.  Then, either (1) generate a loop in which 'p' is aligned and all
other accesses are not necessarily aligned, or (2) use loop versioning to
generate one loop in which all accesses are aligned, and another loop in
which only 'p' is necessarily aligned.

("Automatic Intra-Register Vectorization for the Intel Architecture",
Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)

Devising a cost model is the most critical aspect of this work.  It will
guide us on which access to peel for, whether to use loop versioning, how
many versions to create, etc.  The cost model will probably consist of
generic considerations as well as target specific considerations (on
powerpc for example, misaligned stores are more painful than misaligned
loads).

Here are the general steps involved in alignment enhancements:

  -- original loop, before alignment analysis:
     for (i=0; i<N; i++){
       x = q[i];                        # DR_MISALIGNMENT(q) = unknown
       p[i] = y;                        # DR_MISALIGNMENT(p) = unknown
     }

  -- After vect_compute_data_refs_alignment:
     for (i=0; i<N; i++){
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = unknown
     }

  -- Possibility 1: we do loop versioning:
  if (p is aligned) {
     for (i=0; i<N; i++){       # loop 1A
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = 0
     }
  }
  else {
     for (i=0; i<N; i++){       # loop 1B
       x = q[i];                        # DR_MISALIGNMENT(q) = 3
       p[i] = y;                        # DR_MISALIGNMENT(p) = unaligned
     }
  }

  -- Possibility 2: we do loop peeling:
  for (i = 0; i < 3; i++){      # (scalar loop, not to be vectorized).
     x = q[i];
     p[i] = y;
  }
  for (i = 3; i < N; i++){      # loop 2A
     x = q[i];                  # DR_MISALIGNMENT(q) = 0
     p[i] = y;                  # DR_MISALIGNMENT(p) = unknown
  }

  -- Possibility 3: combination of loop peeling and versioning:
  for (i = 0; i < 3; i++){      # (scalar loop, not to be vectorized).
     x = q[i];
     p[i] = y;
  }
  if (p is aligned) {
     for (i = 3; i<N; i++){     # loop 3A
       x = q[i];                        # DR_MISALIGNMENT(q) = 0
       p[i] = y;                        # DR_MISALIGNMENT(p) = 0
     }
  }
  else {
     for (i = 3; i<N; i++){     # loop 3B
       x = q[i];                        # DR_MISALIGNMENT(q) = 0
       p[i] = y;                        # DR_MISALIGNMENT(p) = unaligned
     }
  }

  These loops are later passed to loop_transform to be vectorized.  The
  vectorizer will use the alignment information to guide the transformation
  (whether to generate regular loads/stores, or with special handling for
  misalignment).   

References aligned_access_p(), dr_align_group_sort_cmp(), DR_BASE_ADDRESS, DR_GROUP_SIZE, DR_IS_WRITE, dr_misalignment(), DR_MISALIGNMENT_UNKNOWN, DR_OFFSET, DR_STEP, DR_STEP_ALIGNMENT, DR_TARGET_ALIGNMENT, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, flow_loop_nested_p(), FOR_EACH_VEC_ELT, gcc_assert, GET_MODE_SIZE(), ggc_alloc(), i, loop::inner, INT_MAX, is_empty(), known_alignment_for_access_p(), known_le, vec_info::lookup_dr(), loop_cost_model(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_VINFO_DATAREFS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_MAY_MISALIGN_STMTS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PTR_MASK, LOOP_VINFO_SCALAR_ITERATION_COST, LOOP_VINFO_UNALIGNED_DR, LOOP_VINFO_VECT_FACTOR, MAX, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, operand_equal_p(), optimize_loop_nest_for_speed_p(), outermost_invariant_loop_for_expr(), SET_DR_MISALIGNMENT, size_zero_node, slpeel_can_duplicate_loop_p(), STMT_SLP_TYPE, STMT_VINFO_DR_INFO, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTYPE, opt_result::success(), target_align(), tree_int_cst_compare(), TREE_INT_CST_LOW, TREE_TYPE, TYPE_MODE, TYPE_SIZE_UNIT, TYPE_VECTOR_SUBPARTS(), unlimited_cost_model(), vect_can_advance_ivs_p(), VECT_COST_MODEL_CHEAP, vect_dr_aligned_if_related_peeled_dr_is(), vect_dr_misalign_for_aligned_access(), vect_get_known_peeling_cost(), vect_get_peeling_costs_all_drs(), vect_get_scalar_dr_size(), vect_location, vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_peeling_supportable(), vect_relevant_for_alignment_p(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), vect_vf_for_cost(), and vector_alignment_reachable_p().

Referenced by vect_analyze_loop_2().

◆ vect_find_first_scalar_stmt_in_slp()

stmt_vec_info vect_find_first_scalar_stmt_in_slp ( slp_tree node)
extern

◆ vect_find_last_scalar_stmt_in_slp()

<