#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
#include "internal-fn.h"
#include "tree-ssa-operands.h"
#include "gimple-match.h"
#include "dominance.h"
Go to the source code of this file.
Data Structures
struct | stmt_info_for_cost |
struct | vect_scalar_ops_slice |
struct | vect_scalar_ops_slice_hash |
struct | _slp_tree |
class | _slp_instance |
struct | scalar_cond_masked_key |
struct | default_hash_traits< scalar_cond_masked_key > |
class | vec_lower_bound |
class | vec_info_shared |
class | vec_info |
struct | rgroup_controls |
struct | vec_loop_masks |
struct | vect_reusable_accumulator |
class | _loop_vec_info |
struct | slp_root |
class | _bb_vec_info |
class | dr_vec_info |
class | _stmt_vec_info |
struct | gather_scatter_info |
class | vector_costs |
class | auto_purge_vect_location |
struct | vect_loop_form_info |
class | vect_pattern |
Variables
dump_user_location_t | vect_location |
vect_pattern_decl_t | slp_patterns [] |
size_t | num__slp_patterns |
#define BB_VINFO_BBS | ( | B | ) |
#define BB_VINFO_DATAREFS | ( | B | ) |
Referenced by vect_slp_region().
#define BB_VINFO_DDRS | ( | B | ) |
#define BB_VINFO_GROUPED_STORES | ( | B | ) |
Referenced by vect_analyze_group_access_1().
#define BB_VINFO_NBBS | ( | B | ) |
#define BB_VINFO_SLP_INSTANCES | ( | B | ) |
Referenced by vect_slp_analyze_bb_1(), and vect_slp_region().
#define DR_GROUP_FIRST_ELEMENT | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), dr_misalignment(), dr_safe_speculative_read_required(), dr_set_safe_speculative_read_required(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs_alignment(), vect_analyze_group_access(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_prune_runtime_alias_test_list(), vect_record_grouped_load_vectors(), vect_relevant_for_alignment_p(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_load_dependences(), vect_slp_analyze_node_alignment(), vect_small_gap_p(), vect_split_slp_store_group(), vect_supportable_dr_alignment(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), vectorizable_store(), and vllp_cmp().
#define DR_GROUP_GAP | ( | S | ) |
Referenced by get_group_load_store_type(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_group_access_1(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_record_grouped_load_vectors(), vect_split_slp_store_group(), vect_vfa_access_size(), and vectorizable_load().
#define DR_GROUP_NEXT_ELEMENT | ( | S | ) |
Referenced by get_group_alias_ptr_type(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_group_access(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp_instance(), vect_build_slp_tree_2(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_record_grouped_load_vectors(), vect_remove_stores(), vect_slp_analyze_load_dependences(), vect_split_slp_store_group(), vectorizable_load(), and vectorizable_store().
#define DR_GROUP_SIZE | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_fixup_store_groups_with_patterns(), vect_lower_load_permutations(), vect_slp_analyze_instance_dependence(), vect_small_gap_p(), vect_split_slp_store_group(), vect_supportable_dr_alignment(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), and vectorizable_store().
#define DR_GROUP_STORE_COUNT | ( | S | ) |
Referenced by vect_transform_stmt().
#define DR_MISALIGNMENT_UNINITIALIZED (-2)
Referenced by dr_misalignment(), ensure_base_align(), vec_info::new_stmt_vec_info(), and vect_slp_analyze_node_alignment().
#define DR_MISALIGNMENT_UNKNOWN (-1)
Info on data references alignment.
Referenced by dr_misalignment(), get_group_load_store_type(), get_load_store_type(), known_alignment_for_access_p(), vect_compute_data_ref_alignment(), vect_dissolve_slp_only_groups(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_slp_analyze_node_alignment(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), vectorizable_load(), and vectorizable_store().
#define DR_SCALAR_KNOWN_BOUNDS | ( | DR | ) |
Referenced by get_load_store_type(), and vect_analyze_early_break_dependences().
#define DR_TARGET_ALIGNMENT | ( | DR | ) |
Referenced by ensure_base_align(), get_load_store_type(), get_misalign_in_elems(), vect_dr_aligned_if_related_peeled_dr_is(), vect_enhance_data_refs_alignment(), vect_gen_prolog_loop_niters(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_setup_realignment(), vect_update_misalignment_for_peel(), vectorizable_load(), and vectorizable_store().
#define DUMP_VECT_SCOPE | ( | MSG | ) |
A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc, and then calling dump_end_scope (); once the object goes out of scope, thus capturing the nesting of the scopes. These scopes affect dump messages within them: dump messages at the top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.
Referenced by move_early_exit_stmts(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_early_break_dependences(), vect_analyze_loop(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_bb_partition_graph(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_loop_niters(), vect_make_slp_decision(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_pattern_recog(), vect_prune_runtime_alias_test_list(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_transform_loop(), vect_update_inits_of_drs(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_shift(), and vectorizable_simd_clone_call().
#define HYBRID_SLP_STMT | ( | S | ) |
#define LOOP_REQUIRES_VERSIONING | ( | L | ) |
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_estimate_min_profitable_iters(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS | ( | L | ) |
Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT | ( | L | ) |
Referenced by vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS | ( | L | ) |
Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND | ( | L | ) |
Referenced by vect_loop_versioning().
#define LOOP_VINFO_BBS | ( | L | ) |
Referenced by update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_mark_stmts_to_be_vectorized(), vect_transform_loop(), and vect_update_vf_for_slp().
#define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P | ( | L | ) |
Referenced by check_load_store_for_partial_vectors(), get_group_load_store_type(), vect_analyze_loop_2(), vect_determine_partial_vectors_and_peeling(), vect_reduction_update_partial_vector_usage(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_simd_clone_call(), and vectorizable_store().
#define LOOP_VINFO_CHECK_NONZERO | ( | L | ) |
Referenced by vect_check_nonzero_value(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_create_cond_for_unequal_addrs(), vect_estimate_min_profitable_iters(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_COMP_ALIAS_DDRS | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_create_cond_for_alias_checks(), vect_estimate_min_profitable_iters(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_COST_MODEL_THRESHOLD | ( | L | ) |
#define LOOP_VINFO_DATAREFS | ( | L | ) |
Referenced by update_epilogue_loop_vinfo(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), and vect_update_inits_of_drs().
#define LOOP_VINFO_DDRS | ( | L | ) |
Referenced by vect_analyze_data_ref_dependences().
#define LOOP_VINFO_DRS_ADVANCED_BY | ( | L | ) |
Referenced by update_epilogue_loop_vinfo(), and vect_transform_loop().
#define LOOP_VINFO_EARLY_BREAKS | ( | L | ) |
Referenced by can_vectorize_live_stmts(), get_load_store_type(), vect_analyze_data_ref_dependences(), vect_analyze_loop(), vect_analyze_loop_2(), vect_create_loop_vinfo(), vect_do_peeling(), vect_recog_gcond_pattern(), vect_stmt_relevant_p(), vect_transform_loop(), vect_use_loop_mask_for_alignment_p(), vectorizable_induction(), vectorizable_live_operation(), and vectorizable_live_operation_1().
#define LOOP_VINFO_EARLY_BREAKS_LIVE_IVS | ( | L | ) |
Referenced by vect_analyze_slp(), and vect_stmt_relevant_p().
#define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED | ( | L | ) |
Referenced by vect_analyze_early_break_dependences(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_gen_vector_loop_niters_mult_vf(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), and vectorizable_live_operation().
#define LOOP_VINFO_EARLY_BRK_DEST_BB | ( | L | ) |
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EARLY_BRK_STORES | ( | L | ) |
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EARLY_BRK_VUSES | ( | L | ) |
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P | ( | L | ) |
Referenced by vect_determine_partial_vectors_and_peeling().
#define LOOP_VINFO_EPILOGUE_IV_EXIT | ( | L | ) |
Referenced by vect_do_peeling().
#define LOOP_VINFO_EPILOGUE_P | ( | L | ) |
Referenced by vect_analyze_data_ref_dependences(), vect_analyze_loop_1(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_check_gather_scatter(), vect_create_loop_vinfo(), vect_determine_partial_vectors_and_peeling(), vect_estimate_min_profitable_iters(), vect_transform_loop(), and vect_transform_loops().
#define LOOP_VINFO_FULLY_MASKED_P | ( | L | ) |
Referenced by check_scan_store(), vect_estimate_min_profitable_iters(), vect_schedule_slp_node(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vect_transform_reduction(), vect_use_loop_mask_for_alignment_p(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_FULLY_WITH_LENGTH_P | ( | L | ) |
Referenced by vect_estimate_min_profitable_iters(), vect_schedule_slp_node(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_GROUPED_STORES | ( | L | ) |
Referenced by vect_analyze_group_access_1().
#define LOOP_VINFO_HAS_MASK_STORE | ( | L | ) |
Referenced by vectorizable_store().
#define LOOP_VINFO_INNER_LOOP_COST_FACTOR | ( | L | ) |
Referenced by vector_costs::adjust_cost_for_freq(), vect_compute_single_scalar_iteration_cost(), and vect_create_loop_vinfo().
#define LOOP_VINFO_INT_NITERS | ( | L | ) |
Referenced by vector_costs::better_epilogue_loop_than_p(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_INV_PATTERN_DEF_SEQ | ( | L | ) |
Referenced by vect_transform_loop().
#define LOOP_VINFO_IV_EXIT | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_gen_vector_loop_niters_mult_vf(), vect_loop_versioning(), vect_set_loop_controls_directly(), vect_transform_loop(), vect_update_ivs_after_vectorizer(), and vectorizable_live_operation().
#define LOOP_VINFO_LENS | ( | L | ) |
Referenced by check_load_store_for_partial_vectors(), vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_reduction_update_partial_vector_usage(), vect_set_loop_condition_partial_vectors(), vect_transform_reduction(), vect_verify_loop_lens(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), and vectorizable_store().
#define LOOP_VINFO_LOOP | ( | L | ) |
Access Functions.
Referenced by vector_costs::compare_inside_loop_cost(), cse_and_gimplify_to_preheader(), get_group_load_store_type(), vec_info::insert_seq_on_entry(), loop_niters_no_overflow(), move_early_exit_stmts(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), stmt_in_inner_loop_p(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_operations(), vect_analyze_possibly_independent_ddr(), vect_analyze_scalar_cycles(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_better_loop_vinfo_p(), vect_build_loop_niters(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_compute_single_scalar_iteration_cost(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_dr_behavior(), vect_emit_reduction_init_stmts(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_is_simple_reduction(), vect_iv_limit_for_partial_vectors(), vect_known_niters_smaller_than_vf(), vect_loop_versioning(), vect_mark_for_runtime_alias_test(), vect_mark_stmts_to_be_vectorized(), vect_min_prec_for_max_niters(), vect_model_reduction_cost(), vect_need_peeling_or_partial_vectors_p(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_phi_first_order_recurrence_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_schedule_slp_node(), vect_setup_realignment(), vect_stmt_relevant_p(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), vect_transform_loop(), vect_transform_reduction(), 
vect_truncate_gather_scatter_offset(), vect_update_ivs_after_vectorizer(), vect_update_vf_for_slp(), vectorizable_call(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_LOOP_CONDS | ( | L | ) |
Referenced by vect_analyze_slp(), and vect_create_loop_vinfo().
#define LOOP_VINFO_LOOP_IV_COND | ( | L | ) |
Referenced by vect_create_loop_vinfo(), and vect_stmt_relevant_p().
#define LOOP_VINFO_LOOP_NEST | ( | L | ) |
Referenced by vect_analyze_data_ref_dependences(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_LOWER_BOUNDS | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_check_lower_bound(), vect_create_cond_for_lower_bounds(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MAIN_LOOP_INFO | ( | L | ) |
Referenced by vect_analyze_loop_costing(), and vect_create_loop_vinfo().
#define LOOP_VINFO_MASK_NITERS_PFA_OFFSET | ( | L | ) |
Referenced by vectorizable_induction(), and vectorizable_live_operation().
#define LOOP_VINFO_MASK_SKIP_NITERS | ( | L | ) |
Referenced by vect_can_peel_nonlinear_iv_p(), vect_iv_limit_for_partial_vectors(), vect_prepare_for_masked_peels(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vectorizable_induction(), and vectorizable_nonlinear_induction().
#define LOOP_VINFO_MASKS | ( | L | ) |
Referenced by can_produce_all_loop_masks_p(), check_load_store_for_partial_vectors(), vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_max_nscalars_per_iter(), vect_reduction_update_partial_vector_usage(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_transform_reduction(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), and vectorizable_store().
#define LOOP_VINFO_MAX_VECT_FACTOR | ( | L | ) |
Referenced by vect_analyze_loop_2(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MAY_ALIAS_DDRS | ( | L | ) |
Referenced by vect_mark_for_runtime_alias_test(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_MAY_MISALIGN_STMTS | ( | L | ) |
Referenced by vect_create_cond_for_align_checks(), vect_enhance_data_refs_alignment(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P | ( | L | ) |
Referenced by get_group_load_store_type(), get_load_store_type(), vect_analyze_loop_2(), and vect_determine_partial_vectors_and_peeling().
#define LOOP_VINFO_NBBS | ( | L | ) |
Referenced by update_epilogue_loop_vinfo().
#define LOOP_VINFO_NITERS | ( | L | ) |
Referenced by loop_niters_no_overflow(), vect_build_loop_niters(), vect_create_loop_vinfo(), vect_do_peeling(), vect_gen_prolog_loop_niters(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_transform_loop(), vect_verify_loop_lens(), and vectorizable_simd_clone_call().
#define LOOP_VINFO_NITERS_ASSUMPTIONS | ( | L | ) |
Referenced by vect_create_cond_for_niters_checks(), and vect_create_loop_vinfo().
#define LOOP_VINFO_NITERS_KNOWN_P | ( | L | ) |
Referenced by vector_costs::better_epilogue_loop_than_p(), loop_niters_no_overflow(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_apply_runtime_profitability_check_p(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_get_known_peeling_cost(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_NITERS_UNCHANGED | ( | L | ) |
Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue peeling retain total unchanged scalar loop iterations for cost model.
Referenced by vect_create_loop_vinfo(), vect_transform_loop(), and vectorizable_simd_clone_call().
#define LOOP_VINFO_NITERSM1 | ( | L | ) |
Referenced by loop_niters_no_overflow(), vect_analyze_loop_costing(), vect_create_loop_vinfo(), vect_do_peeling(), vect_loop_versioning(), vect_min_prec_for_max_niters(), and vect_transform_loop().
#define LOOP_VINFO_NO_DATA_DEPENDENCIES | ( | L | ) |
Referenced by vect_analyze_data_ref_dependence(), vect_analyze_data_ref_dependences(), vect_analyze_possibly_independent_ddr(), and vectorizable_load().
#define LOOP_VINFO_NON_LINEAR_IV | ( | L | ) |
Referenced by vect_analyze_scalar_cycles_1(), and vect_use_loop_mask_for_alignment_p().
#define LOOP_VINFO_ORIG_LOOP_INFO | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_better_loop_vinfo_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_create_loop_vinfo(), vect_find_reusable_accumulator(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR | ( | L | ) |
Referenced by vect_analyze_data_ref_dependences().
#define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_gen_loop_len_mask(), vect_get_loop_len(), vect_set_loop_controls_directly(), vect_verify_loop_lens(), vectorizable_call(), vectorizable_condition(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_PARTIAL_VECTORS_STYLE | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_loop_mask(), vect_set_loop_condition(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_compute_data_ref_alignment(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_prolog_loop_niters(), vect_iv_limit_for_partial_vectors(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_transform_loop(), and vect_use_loop_mask_for_alignment_p().
#define LOOP_VINFO_PEELING_FOR_GAPS | ( | L | ) |
Referenced by get_group_load_store_type(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_get_peel_iters_epilogue(), vect_need_peeling_or_partial_vectors_p(), vect_transform_loop(), and vectorizable_load().
#define LOOP_VINFO_PEELING_FOR_NITER | ( | L | ) |
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_determine_partial_vectors_and_peeling(), and vect_do_peeling().
#define LOOP_VINFO_PTR_MASK | ( | L | ) |
Referenced by vect_create_cond_for_align_checks(), and vect_enhance_data_refs_alignment().
#define LOOP_VINFO_REDUCTION_CHAINS | ( | L | ) |
Referenced by parloops_is_slp_reduction(), vect_analyze_loop_2(), vect_fixup_scalar_cycles_with_patterns(), and vect_is_simple_reduction().
#define LOOP_VINFO_REDUCTIONS | ( | L | ) |
Referenced by vect_analyze_scalar_cycles_1().
#define LOOP_VINFO_RGROUP_COMPARE_TYPE | ( | L | ) |
Referenced by vect_get_loop_len(), vect_rgroup_iv_might_wrap_p(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_RGROUP_IV_TYPE | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_loop_len(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_set_loop_controls_directly(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_SCALAR_ITERATION_COST | ( | L | ) |
Referenced by vect_compute_single_scalar_iteration_cost(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), and vect_peeling_hash_get_lowest_cost().
#define LOOP_VINFO_SCALAR_IV_EXIT | ( | L | ) |
Referenced by set_uid_loop_bbs(), vect_do_peeling(), and vect_transform_loop().
#define LOOP_VINFO_SCALAR_LOOP | ( | L | ) |
Referenced by set_uid_loop_bbs(), vect_do_peeling(), vect_loop_versioning(), and vect_transform_loop().
#define LOOP_VINFO_SCALAR_LOOP_SCALING | ( | L | ) |
Referenced by vect_loop_versioning(), and vect_transform_loop().
#define LOOP_VINFO_SIMD_IF_COND | ( | L | ) |
Referenced by vect_analyze_loop_2().
#define LOOP_VINFO_SLP_INSTANCES | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_analyze_slp(), vect_make_slp_decision(), and vect_transform_loop().
#define LOOP_VINFO_SLP_UNROLLING_FACTOR | ( | L | ) |
Referenced by vect_make_slp_decision(), and vect_update_vf_for_slp().
#define LOOP_VINFO_UNALIGNED_DR | ( | L | ) |
Referenced by get_misalign_in_elems(), vect_analyze_loop_2(), vect_enhance_data_refs_alignment(), and vect_gen_prolog_loop_niters().
#define LOOP_VINFO_USING_DECREMENTING_IV_P | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_set_loop_condition_partial_vectors(), and vect_set_loop_controls_directly().
#define LOOP_VINFO_USING_PARTIAL_VECTORS_P | ( | L | ) |
Referenced by vector_costs::better_epilogue_loop_than_p(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_determine_partial_vectors_and_peeling(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_set_loop_condition(), vect_transform_loop(), vect_transform_loops(), and vectorizable_load().
#define LOOP_VINFO_USING_SELECT_VL_P | ( | L | ) |
Referenced by vect_analyze_loop_2(), vect_determine_partial_vectors_and_peeling(), vect_get_data_ptr_increment(), vect_get_strided_load_store_ops(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vectorizable_induction(), vectorizable_load(), and vectorizable_store().
#define LOOP_VINFO_VECT_FACTOR | ( | L | ) |
Referenced by vector_costs::better_epilogue_loop_than_p(), check_load_store_for_partial_vectors(), vector_costs::compare_inside_loop_cost(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_better_loop_vinfo_p(), vect_can_peel_nonlinear_iv_p(), vect_compute_data_ref_alignment(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_gen_vector_loop_niters_mult_vf(), vect_get_loop_mask(), vect_get_num_copies(), vect_iv_limit_for_partial_vectors(), vect_max_vf(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_record_loop_len(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_set_loop_controls_directly(), vect_shift_permute_load_chain(), vect_small_gap_p(), vect_supportable_dr_alignment(), vect_transform_loop(), vect_transform_loops(), vect_update_vf_for_slp(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), vect_vf_for_cost(), vectorizable_induction(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), and vectorizable_store().
#define LOOP_VINFO_VECTORIZABLE_P | ( | L | ) |
Referenced by try_vectorize_loop_1(), vect_analyze_loop(), and vect_analyze_loop_2().
#define LOOP_VINFO_VERSIONING_THRESHOLD | ( | L | ) |
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_loop_versioning(), and vect_transform_loop().
#define MAX_INTERM_CVT_STEPS 3
The maximum number of intermediate steps required in multi-step type conversion.
Referenced by supportable_narrowing_operation(), and supportable_widening_operation().
#define MAX_VECTORIZATION_FACTOR INT_MAX
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), and vect_max_vf().
#define PURE_SLP_STMT | ( | S | ) |
Referenced by vec_slp_has_scalar_use(), vect_analyze_loop_operations(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_detect_hybrid_slp(), vect_get_data_access_cost(), vect_remove_slp_scalar_calls(), vect_transform_stmt(), vect_update_vf_for_slp(), vectorizable_live_operation(), and vectorizable_store().
#define REDUC_GROUP_FIRST_ELEMENT | ( | S | ) |
Referenced by parloops_is_slp_reduction(), vect_analyze_slp(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_create_epilog_for_reduction(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_and_check_slp_defs(), vect_is_simple_reduction(), vect_reassociating_reduction_p(), vect_transform_cycle_phi(), vectorizable_live_operation(), and vectorizable_reduction().
#define REDUC_GROUP_NEXT_ELEMENT | ( | S | ) |
#define REDUC_GROUP_SIZE | ( | S | ) |
Referenced by parloops_is_slp_reduction(), vect_analyze_slp_instance(), vect_fixup_reduc_chain(), and vect_is_simple_reduction().
#define SET_DR_MISALIGNMENT | ( | DR, VAL | ) |
Referenced by vect_compute_data_ref_alignment(), vect_enhance_data_refs_alignment(), and vect_update_misalignment_for_peel().
#define SET_DR_TARGET_ALIGNMENT | ( | DR, VAL | ) |
Referenced by vect_compute_data_ref_alignment().
#define SLP_INSTANCE_KIND | ( | S | ) |
#define SLP_INSTANCE_LOADS | ( | S | ) |
Referenced by vect_analyze_loop_2(), vect_build_slp_instance(), vect_free_slp_instance(), vect_gather_slp_loads(), vect_slp_analyze_instance_alignment(), and vect_slp_analyze_instance_dependence().
#define SLP_INSTANCE_REMAIN_DEFS | ( | S | ) |
Referenced by vect_bb_slp_mark_live_stmts(), vect_build_slp_instance(), vect_free_slp_instance(), and vectorize_slp_instance_root_stmt().
#define SLP_INSTANCE_ROOT_STMTS | ( | S | ) |
Referenced by vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_free_slp_instance(), vect_schedule_slp(), vect_slp_analyze_bb_1(), and vect_slp_analyze_operations().
#define SLP_INSTANCE_TREE | ( | S | ) |
Access Functions.
Referenced by vect_optimize_slp_pass::build_vertices(), debug(), dot_slp_tree(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_partition_graph(), vect_bb_slp_mark_live_stmts(), vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_free_slp_instance(), vect_gather_slp_loads(), vect_lower_load_permutations(), vect_make_slp_decision(), vect_match_slp_patterns(), vect_optimize_slp(), vect_schedule_slp(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_slp_convert_to_external(), vect_slp_region(), and vectorizable_bb_reduc_epilogue().
#define SLP_TREE_CHILDREN | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_add_pattern::build(), complex_fms_pattern::build(), complex_mul_pattern::build(), vect_optimize_slp_pass::build_graph(), vect_optimize_slp_pass::build_vertices(), vect_optimize_slp_pass::change_vec_perm_layout(), compatible_complex_nodes_p(), vect_optimize_slp_pass::decide_masked_load_lanes(), dot_slp_tree(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), complex_add_pattern::matches(), complex_fms_pattern::matches(), complex_mul_pattern::matches(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_detect_pair_op(), vect_detect_pair_op(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_get_gather_scatter_ops(), vect_get_slp_defs(), vect_get_vec_defs(), vect_is_simple_use(), vect_lower_load_permutations(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_match_slp_patterns_2(), vect_print_slp_graph(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_build_two_operator_nodes(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_update_slp_vf_for_node(), vect_validate_multiplication(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_slp_permutation(), vectorizable_store(), vectorize_fold_left_reduction(), and _slp_tree::~_slp_tree().
#define SLP_TREE_CODE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_optimize_slp_pass::backward_cost(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::dump(), vect_optimize_slp_pass::forward_pass(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_stmt(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_tree_2(), vect_create_new_slp_node(), vect_detect_pair_op(), vect_gather_slp_loads(), vect_is_simple_use(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), vect_transform_stmt(), and vect_update_slp_vf_for_node().
#define SLP_TREE_DEF_TYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), compatible_complex_nodes_p(), vect_optimize_slp_pass::create_partitions(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), optimize_load_redistribution_1(), vect_analyze_loop_2(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_check_scalar_mask(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_gather_slp_loads(), vect_get_slp_scalar_def(), vect_is_simple_use(), vect_is_slp_load_node(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_maybe_update_slp_op_vectype(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_operations(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_slp_tree_uniform_p(), vect_update_slp_vf_for_node(), vectorizable_phi(), vectorizable_shift(), and vectorizable_slp_permutation_1().
#define SLP_TREE_LANE_PERMUTATION | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_detect_pair_op(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_slp_build_two_operator_nodes(), vectorizable_slp_permutation(), vectorizable_slp_permutation_1(), and _slp_tree::~_slp_tree().
#define SLP_TREE_LANES | ( | S | ) |
Referenced by vect_optimize_slp_pass::change_layout_cost(), check_scan_store(), vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), get_vectype_for_scalar_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_compatible_layout(), optimize_load_redistribution_1(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), supportable_indirect_convert_operation(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_slp_scalar_cost(), vect_bb_vectorization_profitable_p(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_get_num_copies(), vect_lower_load_permutations(), vect_maybe_update_slp_op_vectype(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_transform_cycle_phi(), vect_update_slp_vf_for_node(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), and vllp_cmp().
#define SLP_TREE_LOAD_PERMUTATION | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), vect_optimize_slp_pass::materialize(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_slp(), vect_build_slp_tree_2(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_slp_convert_to_external(), vect_transform_slp_perm_load(), vectorizable_load(), vllp_cmp(), and _slp_tree::~_slp_tree().
#define SLP_TREE_MEMORY_ACCESS_TYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_mem_access_type(), vectorizable_load(), and vectorizable_store().
#define SLP_TREE_NUMBER_OF_VEC_STMTS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_create_constant_vectors(), vect_get_slp_defs(), vect_model_simple_cost(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_transform_cycle_phi(), vect_transform_slp_perm_load_1(), vectorizable_bswap(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_slp_permutation_1(), vectorizable_store(), and vectorize_slp_instance_root_stmt().
#define SLP_TREE_REF_COUNT | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_add_pattern::build(), complex_fms_pattern::build(), complex_mul_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), optimize_load_redistribution(), optimize_load_redistribution_1(), vect_build_combine_node(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_free_slp_tree(), vect_print_slp_tree(), and vect_slp_build_two_operator_nodes().
#define SLP_TREE_REPRESENTATIVE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::build_vertices(), compatible_complex_nodes_p(), vect_optimize_slp_pass::containing_loop(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::dump(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), record_stmt_cost(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_combine_node(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_is_simple_use(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_match_expression_p(), vect_pattern_validate_optab(), vect_print_slp_tree(), vect_schedule_scc(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), and vect_slp_node_weight().
#define SLP_TREE_SCALAR_OPS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), compatible_complex_nodes_p(), vect_optimize_slp_pass::get_result_with_layout(), vect_bb_slp_mark_live_stmts(), vect_create_new_slp_node(), vect_get_slp_scalar_def(), vect_is_simple_use(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_tree_uniform_p(), vectorizable_shift(), vectorizable_slp_permutation_1(), and _slp_tree::~_slp_tree().
#define SLP_TREE_SCALAR_STMTS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), can_vectorize_live_stmts(), get_group_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), _slp_instance::location(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_find_first_scalar_stmt_in_slp(), vect_find_last_scalar_stmt_in_slp(), vect_get_slp_scalar_def(), vect_lower_load_permutations(), vect_lower_load_permutations(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_load_dependences(), vect_slp_analyze_node_alignment(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_analyze_operations(), vect_slp_analyze_store_dependences(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_transform_slp_perm_load_1(), vectorizable_induction(), vectorizable_load(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_store(), vectorize_fold_left_reduction(), vllp_cmp(), and _slp_tree::~_slp_tree().
#define SLP_TREE_SIMD_CLONE_INFO | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vectorizable_simd_clone_call(), and _slp_tree::~_slp_tree().
#define SLP_TREE_VEC_DEFS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_optimize_slp_pass::create_partitions(), vect_optimize_slp_pass::get_result_with_layout(), vect_build_slp_tree_2(), vect_create_constant_vectors(), vect_create_epilog_for_reduction(), vect_get_slp_defs(), vect_get_slp_vect_def(), vect_prologue_cost_for_slp(), vect_schedule_scc(), vect_schedule_slp_node(), vect_transform_slp_perm_load_1(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_phi(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), vectorize_slp_instance_root_stmt(), and _slp_tree::~_slp_tree().
#define SLP_TREE_VECTYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), complex_mul_pattern::matches(), addsub_pattern::recognize(), record_stmt_cost(), vect_optimize_slp_pass::start_choosing_layouts(), vect_add_slp_permutation(), vect_analyze_slp(), vect_analyze_stmt(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_constant_vectors(), vect_get_load_cost(), vect_get_num_copies(), vect_get_store_cost(), vect_is_simple_use(), vect_lower_load_permutations(), vect_maybe_update_slp_op_vectype(), vect_pattern_validate_optab(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_node_alignment(), vect_slp_analyze_node_operations(), vect_slp_analyze_operations(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_slp_region(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vectorizable_bb_reduc_epilogue(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and vectorizable_slp_permutation_1().
#define STMT_SLP_TYPE | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), maybe_push_to_hybrid_worklist(), vec_info::new_stmt_vec_info(), vect_analyze_loop_2(), vect_detect_hybrid_slp(), vect_detect_hybrid_slp(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_free_slp_tree(), vect_mark_slp_stmts(), vect_slp_analyze_bb_1(), and vect_supportable_dr_alignment().
#define STMT_VINFO_DATA_REF | ( | S | ) |
Referenced by bump_vector_ptr(), compatible_complex_nodes_p(), exist_non_indexing_operands_for_use_p(), get_group_alias_ptr_type(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_early_break_dependences(), vect_analyze_group_access_1(), vect_bb_slp_scalar_cost(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_check_gather_scatter(), vect_compute_single_scalar_iteration_cost(), vect_cond_store_pattern_same_ref(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), vect_describe_gather_scatter_call(), vect_determine_vf_for_stmt_1(), vect_gather_slp_loads(), vect_get_and_check_slp_defs(), vect_get_strided_load_store_ops(), vect_get_vector_types_for_stmt(), vect_is_extending_load(), vect_is_slp_load_node(), vect_is_store_elt_extraction(), vect_preserves_scalar_order_p(), vect_recog_bool_pattern(), vect_recog_cond_store_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_mask_conversion_pattern(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_load_dependences(), vect_slp_analyze_store_dependences(), vect_slp_prefer_store_lanes_p(), vectorizable_assignment(), vectorizable_load(), vectorizable_operation(), and vectorizable_store().
#define STMT_VINFO_DEF_TYPE | ( | S | ) |
Referenced by can_vectorize_live_stmts(), info_for_reduction(), iv_phi_p(), maybe_push_to_hybrid_worklist(), vec_info::new_stmt_vec_info(), parloops_is_simple_reduction(), parloops_valid_reduction_input_p(), process_use(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_compute_single_scalar_iteration_cost(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_internal_def(), vect_init_pattern_stmt(), vect_inner_phi_in_double_reduction_p(), vect_is_simple_reduction(), vect_is_simple_use(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_reassociating_reduction_p(), vect_recog_over_widening_pattern(), vect_schedule_scc(), vect_stmt_relevant_p(), vect_transform_cycle_phi(), vect_transform_reduction(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_DR_BASE_ADDRESS | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_BASE_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_BASE_MISALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_INFO | ( | S | ) |
Referenced by check_scan_store(), compare_step_with_zero(), dr_misalignment(), dr_safe_speculative_read_required(), dr_set_safe_speculative_read_required(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), vec_info::lookup_dr(), vec_info::move_dr(), vect_analyze_data_ref_accesses(), vect_analyze_early_break_dependences(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_prune_runtime_alias_test_list(), vect_setup_realignment(), vect_slp_analyze_node_alignment(), vect_truncate_gather_scatter_offset(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
#define STMT_VINFO_DR_INIT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_OFFSET | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_OFFSET_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_STEP | ( | S | ) |
Referenced by vect_analyze_data_ref_access(), vect_analyze_data_refs(), and vect_setup_realignment().
#define STMT_VINFO_DR_STEP_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_WRT_VEC_LOOP | ( | S | ) |
Referenced by vec_info::move_dr(), vect_analyze_data_refs(), vect_dr_behavior(), and vect_record_base_alignments().
#define STMT_VINFO_FORCE_SINGLE_CYCLE | ( | S | ) |
Referenced by vect_transform_cycle_phi(), vect_transform_reduction(), and vectorizable_reduction().
#define STMT_VINFO_GATHER_SCATTER_P | ( | S | ) |
Referenced by get_load_store_type(), vec_info::move_dr(), record_stmt_cost(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_possibly_independent_ddr(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_check_store_rhs(), vect_compute_data_ref_alignment(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), vect_record_base_alignments(), vect_relevant_for_alignment_p(), vect_update_inits_of_drs(), vectorizable_load(), and vectorizable_store().
#define STMT_VINFO_GROUPED_ACCESS | ( | S | ) |
Referenced by check_scan_store(), vect_optimize_slp_pass::decide_masked_load_lanes(), dr_misalignment(), dr_safe_speculative_read_required(), dr_set_safe_speculative_read_required(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_data_ref_access(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_fixup_store_groups_with_patterns(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_relevant_for_alignment_p(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_load_dependences(), vect_supportable_dr_alignment(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vector_alignment_reachable_p(), vectorizable_load(), vectorizable_store(), and vllp_cmp().
#define STMT_VINFO_IN_PATTERN_P | ( | S | ) |
Referenced by vect_analyze_loop_2(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_fixup_scalar_cycles_with_patterns(), vect_fixup_store_groups_with_patterns(), vect_free_slp_tree(), vect_mark_relevant(), vect_pattern_recog_1(), vect_set_pattern_stmt(), vect_stmt_to_vectorize(), and vectorizable_reduction().
#define STMT_VINFO_LIVE_P | ( | S | ) |
Referenced by can_vectorize_live_stmts(), process_use(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_compute_single_scalar_iteration_cost(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_mark_relevant(), vect_print_slp_tree(), vect_schedule_slp_node(), vect_slp_analyze_node_operations_1(), vect_transform_loop(), vect_transform_stmt(), vectorizable_induction(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED | ( | S | ) |
Referenced by is_nonwrapping_integer_induction(), vect_analyze_scalar_cycles_1(), vect_is_nonlinear_iv_evolution(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART | ( | S | ) |
Referenced by is_nonwrapping_integer_induction(), vect_analyze_scalar_cycles_1(), vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_is_nonlinear_iv_evolution(), vect_update_ivs_after_vectorizer(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_nonlinear_induction(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE | ( | S | ) |
Referenced by vect_analyze_scalar_cycles_1(), vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_is_nonlinear_iv_evolution(), vect_update_ivs_after_vectorizer(), vectorizable_induction(), and vectorizable_nonlinear_induction().
#define STMT_VINFO_MEMORY_ACCESS_TYPE | ( | S | ) |
Referenced by update_epilogue_loop_vinfo(), vect_mem_access_type(), vectorizable_load(), and vectorizable_store().
#define STMT_VINFO_MIN_NEG_DIST | ( | S | ) |
Referenced by vect_analyze_data_ref_dependence(), and vectorizable_load().
#define STMT_VINFO_PATTERN_DEF_SEQ | ( | S | ) |
Referenced by append_pattern_def_seq(), update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_mark_pattern_stmts(), vect_pattern_recog_1(), vect_recog_popcount_clz_ctz_ffs_pattern(), and vect_split_statement().
#define STMT_VINFO_REDUC_CODE | ( | S | ) |
#define STMT_VINFO_REDUC_DEF | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), info_for_reduction(), parloops_force_simple_reduction(), supportable_widening_operation(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_scalar_cycles_1(), vect_analyze_slp_instance(), vect_create_epilog_for_reduction(), vect_reduc_type(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), vect_find_reusable_accumulator(), and vect_transform_cycle_phi().
#define STMT_VINFO_REDUC_FN | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_create_epilog_for_reduction(), vect_reduction_update_partial_vector_usage(), vect_transform_reduction(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_IDX | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_create_epilog_for_reduction(), vect_fixup_scalar_cycles_with_patterns(), vect_get_and_check_slp_defs(), vect_is_reduction(), vect_is_simple_reduction(), vect_mark_pattern_stmts(), vect_reassociating_reduction_p(), vect_transform_reduction(), vectorizable_call(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_TYPE | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), parloops_force_simple_reduction(), valid_reduction_p(), vect_create_epilog_for_reduction(), vect_find_reusable_accumulator(), vect_is_simple_reduction(), vect_reduc_type(), vect_reduction_update_partial_vector_usage(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_VECTYPE | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_VECTYPE_IN | ( | S | ) |
Referenced by vect_is_emulated_mixed_dot_prod(), vect_transform_reduction(), vectorizable_lane_reducing(), and vectorizable_reduction().
#define STMT_VINFO_RELATED_STMT | ( | S | ) |
Referenced by vec_info::add_pattern_stmt(), update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_fixup_store_groups_with_patterns(), vect_get_and_check_slp_defs(), vect_init_pattern_stmt(), vect_look_through_possible_promotion(), vect_mark_pattern_stmts(), vect_mark_relevant(), vect_orig_stmt(), vect_set_pattern_stmt(), vect_split_statement(), vect_stmt_to_vectorize(), and vectorizable_reduction().
#define STMT_VINFO_RELEVANT | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), vec_info::new_stmt_vec_info(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_detect_hybrid_slp(), vect_mark_relevant(), vect_mark_slp_stmts_relevant(), vect_mark_stmts_to_be_vectorized(), and vectorizable_reduction().
#define STMT_VINFO_RELEVANT_P | ( | S | ) |
Referenced by vect_active_double_reduction_p(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_compute_single_scalar_iteration_cost(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_relevant_for_alignment_p(), vect_transform_loop(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_SIMD_CLONE_INFO | ( | S | ) |
Referenced by vec_info::free_stmt_vec_info(), and vectorizable_simd_clone_call().
#define STMT_VINFO_SIMD_LANE_ACCESS_P | ( | S | ) |
Referenced by check_scan_store(), vec_info::move_dr(), vect_analyze_data_ref_accesses(), vect_analyze_data_refs(), vect_update_inits_of_drs(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
#define STMT_VINFO_SLP_VECT_ONLY | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), vec_info::new_stmt_vec_info(), vect_analyze_data_ref_accesses(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), and vectorizable_load().
#define STMT_VINFO_SLP_VECT_ONLY_PATTERN | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), vec_info::new_stmt_vec_info(), vect_analyze_loop_2(), and vect_free_slp_tree().
#define STMT_VINFO_STMT | ( | S | ) |
Referenced by complex_pattern::build(), check_scan_store(), compatible_complex_nodes_p(), vect_optimize_slp_pass::decide_masked_load_lanes(), dump_stmt_cost(), get_load_store_type(), vect_optimize_slp_pass::start_choosing_layouts(), stmt_in_inner_loop_p(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_accesses(), vect_analyze_slp(), vect_build_slp_tree_2(), vect_determine_mask_precision(), vect_get_slp_scalar_def(), vect_match_expression_p(), vect_pattern_validate_optab(), vect_recog_abd_pattern(), vect_recog_absolute_difference(), vect_recog_bitfield_ref_pattern(), vect_recog_build_binary_gimple_stmt(), vect_recog_cond_store_pattern(), vect_recog_gcond_pattern(), vect_recog_mod_var_pattern(), vect_recog_sat_add_pattern(), vect_recog_sat_sub_pattern(), vect_recog_sat_trunc_pattern(), vect_recog_widen_abd_pattern(), vect_slp_analyze_instance_dependence(), vect_stmt_relevant_p(), vectorizable_comparison_1(), vectorizable_early_exit(), vectorizable_reduction(), vectorizable_scan_store(), vectorize_slp_instance_root_stmt(), and vllp_cmp().
#define STMT_VINFO_STRIDED_P | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vec_info::move_dr(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_group_access_1(), vect_analyze_slp(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_lower_load_permutations(), and vect_relevant_for_alignment_p().
#define STMT_VINFO_TYPE | ( | S | ) |
Access Functions.
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_stmt(), vect_create_loop_vinfo(), vect_init_pattern_stmt(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_transform_stmt(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), vect_transform_cycle_phi(), and vectorizable_reduction().
#define STMT_VINFO_VEC_STMTS | ( | S | ) |
Referenced by vec_info::free_stmt_vec_info(), vec_info::new_stmt_vec_info(), vect_create_vectorized_demotion_stmts(), vect_get_vec_defs_for_operand(), vect_record_grouped_load_vectors(), vect_transform_stmt(), vect_vfa_access_size(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_load(), vectorizable_recurr(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_VECTORIZABLE | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_group_access_1(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_determine_precisions(), vect_pattern_recog(), and vect_record_base_alignments().
#define STMT_VINFO_VECTYPE | ( | S | ) |
Referenced by append_pattern_def_seq(), addsub_pattern::build(), complex_pattern::build(), bump_vector_ptr(), get_initial_defs_for_reduction(), get_misalign_in_elems(), record_stmt_cost(), stmt_vectype(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_slp(), vect_analyze_stmt(), vect_build_one_gather_load_call(), vect_build_slp_instance(), vect_check_gather_scatter(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_create_cond_for_align_checks(), vect_describe_gather_scatter_call(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_find_reusable_accumulator(), vect_gen_prolog_loop_niters(), vect_get_data_access_cost(), vect_get_load_cost(), vect_get_peeling_costs_all_drs(), vect_get_store_cost(), vect_get_strided_load_store_ops(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_init_pattern_stmt(), vect_is_emulated_mixed_dot_prod(), vect_is_simple_use(), vect_model_reduction_cost(), vect_peeling_supportable(), vect_permute_load_chain(), vect_permute_store_chain(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_setup_realignment(), vect_shift_permute_load_chain(), vect_transform_cycle_phi(), vect_transform_grouped_load(), vect_transform_lc_phi(), vect_transform_reduction(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), vect_update_misalignment_for_peel(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), 
vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define VECT_MAX_COST 1000 |
Referenced by vect_get_load_cost(), vect_get_store_cost(), and vect_peeling_hash_insert().
#define VECT_SCALAR_BOOLEAN_TYPE_P | ( | TYPE | ) |
Nonzero if TYPE represents a (scalar) boolean type or type in the middle-end compatible with it (unsigned precision 1 integral types). Used to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P.
Referenced by get_same_sized_vectype(), integer_type_for_mask(), possible_vector_mask_operation_p(), vect_check_scalar_mask(), vect_determine_mask_precision(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_narrowable_type_p(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), and vectorizable_operation().
#define VECTORIZABLE_CYCLE_DEF | ( | D | ) |
Referenced by info_for_reduction(), vect_compute_single_scalar_iteration_cost(), vect_stmt_relevant_p(), vect_update_vf_for_slp(), vectorizable_lane_reducing(), and vectorizable_reduction().
typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t |
typedef auto_vec<unsigned, 16> auto_load_permutation_t |
typedef _bb_vec_info * bb_vec_info |
typedef enum _complex_perm_kinds complex_perm_kinds_t |
All possible load permute values that could result from the partial data-flow analysis.
typedef struct data_reference* dr_p |
typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec |
typedef vec<std::pair<unsigned, unsigned> > lane_permutation_t |
typedef vec<unsigned> load_permutation_t |
typedef _loop_vec_info * loop_vec_info |
Info on vectorized loops.
Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL value signifies success, and a NULL value signifies failure, supporting propagating an opt_problem * describing the failure back up the call stack.
typedef hash_map<slp_node_hash, bool> slp_compat_nodes_map_t |
typedef class _slp_instance * slp_instance |
SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts.
typedef pair_hash<nofree_ptr_hash <_slp_tree>, nofree_ptr_hash <_slp_tree> > slp_node_hash |
Cache from nodes pair to being compatible or not.
Cache from nodes to the load permutation they represent.
typedef class _stmt_vec_info* stmt_vec_info |
Vectorizer Copyright (C) 2003-2025 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>.
typedef vec<stmt_info_for_cost> stmt_vector_for_cost |
Key and map that records association between vector conditions and corresponding loop mask, and is populated by prepare_vec_mask.
typedef hash_map<tree_operand_hash, std::pair<stmt_vec_info, innermost_loop_behavior *> > vec_base_alignments |
Maps base addresses to an innermost_loop_behavior and the stmt it was derived from that gives the maximum known alignment for that base.
typedef auto_vec<rgroup_controls> vec_loop_lens |
typedef std::pair<tree, tree> vec_object_pair |
Describes two objects whose addresses must be unequal for the vectorized loop to be valid.
typedef vect_pattern *(* vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *, slp_compat_nodes_map_t *, slp_tree *) |
Function pointer to create a new pattern matcher from a generic type.
enum _complex_perm_kinds |
enum dr_alignment_support |
enum operation_type |
enum slp_instance_kind |
enum slp_vect_type |
The type of vectorization that can be applied to the stmt: regular loop-based vectorization; pure SLP - the stmt is a part of SLP instances and does not have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is a part of SLP instance and also must be loop-based vectorized, since it has uses outside SLP sequences. In the loop context the meanings of pure and hybrid SLP are slightly different. By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, cause we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2).
Enumerator | |
---|---|
loop_vect | |
pure_slp | |
hybrid |
enum stmt_vec_info_type |
Info on vectorized defs.
enum vec_load_store_type |
enum vect_def_type |
enum vect_reduction_type |
enum vect_relevant |
enum vect_var_kind |
|
inline |
References add_stmt_cost(), cond_branch_not_taken, cond_branch_taken, count, gcc_assert, NULL, NULL_TREE, and scalar_stmt.
|
inline |
Dump and add costs.
References count, dump_file, dump_flags, dump_stmt_cost(), and TDF_DETAILS.
Referenced by add_stmt_cost(), add_stmt_cost(), add_stmt_costs(), vect_bb_vectorization_profitable_p(), and vect_estimate_min_profitable_iters().
|
inline |
References add_stmt_cost(), and i.
|
inline |
References add_stmt_cost(), stmt_info_for_cost::count, FOR_EACH_VEC_ELT, i, stmt_info_for_cost::kind, stmt_info_for_cost::misalign, stmt_info_for_cost::node, stmt_info_for_cost::stmt_info, stmt_info_for_cost::vectype, and stmt_info_for_cost::where.
Referenced by vect_compute_single_scalar_iteration_cost(), and vect_slp_analyze_operations().
|
inline |
Return true if data access DR_INFO is aligned to the targets preferred alignment for VECTYPE (which may be less than a full vector).
References dr_misalignment().
Referenced by vect_enhance_data_refs_alignment(), and vector_alignment_reachable_p().
|
inline |
Alias targetm.vectorize.builtin_vectorization_cost.
References targetm.
Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), and vect_get_stmt_cost().
|
extern |
Function bump_vector_ptr Increment a pointer (to a vector type) by vector-size. If requested, i.e. if PTR-INCR is given, then also connect the new increment stmt to the existing def-use update-chain of the pointer, by modifying the PTR_INCR as illustrated below: The pointer def-use update-chain before this function: DATAREF_PTR = phi (p_0, p_2) .... PTR_INCR: p_2 = DATAREF_PTR + step The pointer def-use update-chain after this function: DATAREF_PTR = phi (p_0, p_2) .... NEW_DATAREF_PTR = DATAREF_PTR + BUMP .... PTR_INCR: p_2 = NEW_DATAREF_PTR + step Input: DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated in the loop. PTR_INCR - optional. The stmt that updates the pointer in each iteration of the loop. The increment amount across iterations is expected to be vector_size. BSI - location where the new update stmt is to be placed. STMT_INFO - the original scalar memory-access stmt that is being vectorized. BUMP - optional. The offset by which to bump the pointer. If not given, the offset is assumed to be vector_size. Output: Return NEW_DATAREF_PTR as illustrated above.
References build1(), copy_ssa_name(), DR_PTR_INFO, duplicate_ssa_name_ptr_info(), fold_build2, fold_convert, fold_stmt(), follow_all_ssa_edges(), FOR_EACH_SSA_USE_OPERAND, gcc_assert, gimple_build_assign(), gsi_for_stmt(), gsi_stmt(), is_gimple_min_invariant(), make_ssa_name(), mark_ptr_info_alignment_unknown(), operand_equal_p(), ptr_type_node, SET_USE, SSA_NAME_PTR_INFO, SSA_OP_USE, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_SIZE_UNIT, update_stmt(), USE_FROM_PTR, and vect_finish_stmt_generation().
Referenced by vectorizable_load(), and vectorizable_store().
|
extern |
Check whether it is possible to load COUNT elements of type ELT_TYPE using the method implemented by duplicate_and_interleave. Return true if so, returning the number of intermediate vectors in *NVECTORS_OUT (if nonnull) and the type of each intermediate vector in *VECTOR_TYPE_OUT (if nonnull).
References build_nonstandard_integer_type(), can_vec_perm_const_p(), count, GET_MODE_BITSIZE(), GET_MODE_NUNITS(), GET_MODE_SIZE(), GET_MODE_UNIT_SIZE, get_vectype_for_scalar_type(), i, int_mode_for_size(), known_eq, TYPE_MODE, vect_gen_perm_mask_checked(), and VECTOR_MODE_P.
Referenced by duplicate_and_interleave(), vect_build_slp_tree_2(), and vectorizable_reduction().
|
extern |
Used in gimple-loop-interchange.c and tree-parloops.cc.
References check_reduction_path(), and path.
|
extern |
STMT_INFO is a non-strided load or store, meaning that it accesses elements with a known constant step. Return -1 if that step is negative, 0 if it is zero, and 1 if it is greater than zero.
References size_zero_node, STMT_VINFO_DR_INFO, tree_int_cst_compare(), and vect_dr_behavior().
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vect_analyze_slp(), vect_build_slp_instance(), and vect_lower_load_permutations().
Return true if call statements CALL1 and CALL2 are similar enough to be combined into the same SLP group.
References gimple_call_arg(), gimple_call_combined_fn(), gimple_call_fn(), gimple_call_fntype(), gimple_call_internal_p(), gimple_call_lhs(), gimple_call_num_args(), i, map, operand_equal_p(), TREE_TYPE, types_compatible_p(), and vect_get_operand_map().
Referenced by compatible_complex_nodes_p(), and vect_build_slp_tree_1().
|
extern |
Return an invariant or register for EXPR and emit necessary computations in the LOOP_VINFO loop preheader.
References force_gimple_operand(), hash_map< KeyId, Value, Traits >::get_or_insert(), gsi_insert_seq_on_edge_immediate(), is_gimple_min_invariant(), is_gimple_reg(), _loop_vec_info::ivexpr_map, loop_preheader_edge(), LOOP_VINFO_LOOP, NULL, NULL_TREE, and unshare_expr().
Referenced by vect_get_strided_load_store_ops(), vectorizable_load(), and vectorizable_store().
|
extern |
Return the misalignment of DR_INFO accessed in VECTYPE with OFFSET applied.
References dr_info::dr, dr_vec_info::dr, DR_GROUP_FIRST_ELEMENT, DR_INIT, DR_MISALIGNMENT_UNINITIALIZED, DR_MISALIGNMENT_UNKNOWN, gcc_assert, known_eq, STMT_VINFO_DR_INFO, STMT_VINFO_GROUPED_ACCESS, targetm, and TREE_INT_CST_LOW.
Referenced by aligned_access_p(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), known_alignment_for_access_p(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), and vectorizable_store().
|
inline |
Return whether the stmt_vec_info requires peeling for alignment.
References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.
Referenced by get_load_store_type(), vect_compute_data_ref_alignment(), and vect_supportable_dr_alignment().
|
inline |
Set the safe_speculative_read_required for the stmt_vec_info; if it is a group access then set it on the first element, otherwise set it on the DR directly.
References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.
Referenced by vect_analyze_early_break_dependences().
|
inline |
Only defined once DR_MISALIGNMENT is defined.
References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.
Referenced by vector_alignment_reachable_p().
|
extern |
Dump a cost entry according to args to F.
References cond_branch_not_taken, cond_branch_taken, count, print_gimple_expr(), scalar_load, scalar_stmt, scalar_store, scalar_to_vec, STMT_VINFO_STMT, TDF_SLIM, unaligned_load, unaligned_store, vec_construct, vec_perm, vec_promote_demote, vec_to_scalar, vect_body, vect_epilogue, vect_prologue, vector_gather_load, vector_load, vector_scatter_store, vector_stmt, and vector_store.
Referenced by add_stmt_cost().
|
extern |
Build a variable-length vector in which the elements in ELTS are repeated to fill NRESULTS vectors of type VECTOR_TYPE. Store the vectors in RESULTS and add any new instructions to SEQ. The approach we use is: (1) Find a vector mode VM with integer elements of mode IM. (2) Replace ELTS[0:NELTS] with ELTS'[0:NELTS'], where each element of ELTS' has mode IM. This involves creating NELTS' VIEW_CONVERT_EXPRs from small vectors to IM. (3) Duplicate each ELTS'[I] into a vector of mode VM. (4) Use a tree of interleaving VEC_PERM_EXPRs to create VMs with the correct byte contents. (5) Use VIEW_CONVERT_EXPR to cast the final VMs to the required type. We try to find the largest IM for which this sequence works, in order to cut down on the number of interleaves.
References build_vector_type(), can_duplicate_and_interleave_p(), gcc_unreachable, gimple_build(), gimple_build_assign(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_seq_add_stmt(), i, make_ssa_name(), tree_vector_builder::new_vector(), TREE_TYPE, and TYPE_VECTOR_SUBPARTS().
Referenced by get_initial_defs_for_reduction(), and vect_create_constant_vectors().
|
extern |
Function find_loop_location. Extract the location of the loop in the source code. If the loop is not well formed for vectorization, an estimated location is calculated. Return the loop location on success and NULL if not.
References BUILTINS_LOCATION, cfun, dump_user_location_t::from_function_decl(), get_loop_exit_condition(), get_loop_exit_edges(), gimple_location(), gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_stmt(), loop::header, LOCATION_LOCUS, loop_outer(), LOOPS_HAVE_RECORDED_EXITS, loops_state_satisfies_p(), NULL, and si.
Referenced by canonicalize_loop_induction_variables(), loop_distribution::execute(), find_loop_guard(), hoist_guard(), tree_loop_interchange::interchange(), optimize_mask_stores(), parallelize_loops(), tree_loop_unroll_and_jam(), tree_ssa_iv_optimize_loop(), tree_ssa_unswitch_loops(), tree_unswitch_outer_loop(), and try_vectorize_loop_1().
|
inline |
Return the offset calculated by adding the offset of this DR_INFO to the corresponding data_reference's offset. If CHECK_OUTER then use vect_dr_behavior to select the appropriate data_reference to use.
References dr_info::dr, fold_build2, fold_convert, data_reference::innermost, innermost_loop_behavior::offset, sizetype, TREE_TYPE, and vect_dr_behavior().
Referenced by check_scan_store(), vect_create_addr_base_for_vector_ref(), vectorizable_load(), and vectorizable_store().
|
inline |
Return the later statement between STMT1_INFO and STMT2_INFO.
References CDI_DOMINATORS, dominated_by_p(), gcc_unreachable, gimple_bb(), gimple_uid(), and vect_orig_stmt().
Referenced by vect_create_constant_vectors(), vect_find_first_scalar_stmt_in_slp(), vect_find_last_scalar_stmt_in_slp(), vect_preserves_scalar_order_p(), vect_prune_runtime_alias_test_list(), and vect_slp_analyze_load_dependences().
Function get_mask_type_for_scalar_type. Returns the mask type corresponding to a result of comparison of vectors of specified SCALAR_TYPE as supported by target. NODE, if nonnull, is the SLP tree node that will use the returned vector type.
References get_vectype_for_scalar_type(), NULL, and truth_type_for().
|
extern |
Function get_mask_type_for_scalar_type. Returns the mask type corresponding to a result of comparison of vectors of specified SCALAR_TYPE as supported by target. If GROUP_SIZE is nonzero and we're performing BB vectorization, make sure that the number of elements in the vector is no bigger than GROUP_SIZE.
References get_vectype_for_scalar_type(), NULL, and truth_type_for().
Referenced by vect_check_scalar_mask(), vect_convert_mask_for_vectype(), vect_determine_mask_precision(), vect_get_vector_types_for_stmt(), vect_recog_bool_pattern(), vect_recog_cond_store_pattern(), vect_recog_gcond_pattern(), and vect_recog_mask_conversion_pattern().
|
extern |
In tree-vect-stmts.cc.
If NUNITS is nonzero, return a vector type that contains NUNITS elements of type SCALAR_TYPE, or null if the target doesn't support such a type. If NUNITS is zero, return a vector type that contains elements of type SCALAR_TYPE, choosing whichever vector size the target prefers. If PREVAILING_MODE is VOIDmode, we have not yet chosen a vector mode for this vectorization region and want to "autodetect" the best choice. Otherwise, PREVAILING_MODE is a previously-chosen vector TYPE_MODE and we want the new type to be interoperable with it. PREVAILING_MODE in this case can be a scalar integer mode or a vector mode; when it is a vector mode, the function acts like a tree-level version of related_vector_mode.
References build_nonstandard_integer_type(), build_qualified_type(), build_vector_type_for_mode(), gcc_assert, GET_MODE_BITSIZE(), GET_MODE_SIZE(), INTEGRAL_MODE_P, INTEGRAL_TYPE_P, is_float_mode(), is_int_mode(), KEEP_QUAL_ADDR_SPACE, known_eq, mode_for_vector(), NULL_TREE, POINTER_TYPE_P, related_vector_mode(), SCALAR_FLOAT_TYPE_P, SCALAR_INT_MODE_P, targetm, TREE_CODE, TYPE_ADDR_SPACE, TYPE_ALIGN_UNIT, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_PRECISION, TYPE_QUALS, TYPE_UNSIGNED, lang_hooks::types, and VECTOR_MODE_P.
Referenced by get_same_sized_vectype(), get_vec_alignment_for_array_type(), get_vectype_for_scalar_type(), supportable_indirect_convert_operation(), vect_create_epilog_for_reduction(), vect_create_partial_epilog(), and vect_find_reusable_accumulator().
Function get_same_sized_vectype Returns a vector type corresponding to SCALAR_TYPE of size VECTOR_TYPE if supported by the target.
References GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), NULL_TREE, truth_type_for(), TYPE_MODE, and VECT_SCALAR_BOOLEAN_TYPE_P.
Referenced by vect_create_epilog_for_reduction(), vect_recog_rotate_pattern(), vectorizable_bswap(), vectorizable_conversion(), vectorizable_induction(), and vectorizable_reduction().
Return the vector type corresponding to SCALAR_TYPE as supported by the target. NODE, if nonnull, is the SLP tree node that will use the returned vector type.
References get_vectype_for_scalar_type(), and SLP_TREE_LANES.
|
extern |
Function get_vectype_for_scalar_type. Returns the vector type corresponding to SCALAR_TYPE as supported by the target. If GROUP_SIZE is nonzero and we're performing BB vectorization, make sure that the number of elements in the vector is no bigger than GROUP_SIZE.
References hash_set< KeyId, Lazy, Traits >::add(), floor_log2(), gcc_assert, get_related_vectype_for_scalar_type(), is_a(), maybe_ge, vec_info::slp_instances, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vec_info::used_vector_modes, and vec_info::vector_mode.
Referenced by can_duplicate_and_interleave_p(), get_mask_type_for_scalar_type(), get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), vect_add_conversion_to_pattern(), vect_analyze_data_refs(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_convert_input(), vect_determine_mask_precision(), vect_determine_vectorization_factor(), vect_gather_scatter_fn_p(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_is_simple_cond(), vect_phi_first_order_recurrence_p(), vect_recog_abd_pattern(), vect_recog_average_pattern(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_bool_pattern(), vect_recog_build_binary_gimple_stmt(), vect_recog_cast_forwprop_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_cond_store_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_divmod_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_mask_conversion_pattern(), vect_recog_mod_var_pattern(), vect_recog_mulhs_pattern(), vect_recog_mult_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_recog_pow_pattern(), vect_recog_rotate_pattern(), vect_recog_sat_sub_pattern_transform(), vect_recog_sat_trunc_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), vect_slp_prefer_store_lanes_p(), vect_split_statement(), vect_supportable_conv_optab_p(), vect_supportable_direct_optab_p(), vect_supportable_shift(), vect_synth_mult_by_constant(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_conversion(), vectorizable_lane_reducing(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_simd_clone_call().
|
extern |
For a statement STMT_INFO taking part in a reduction operation return the stmt_vec_info the meta information is stored on.
References as_a(), gcc_assert, gimple_phi_num_args(), is_a(), vec_info::lookup_def(), STMT_VINFO_DEF_TYPE, STMT_VINFO_REDUC_DEF, vect_double_reduction_def, vect_nested_cycle, vect_orig_stmt(), vect_phi_initial_value(), and VECTORIZABLE_CYCLE_DEF.
Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_create_epilog_for_reduction(), vect_reduc_type(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_live_operation(), and vectorizable_reduction().
|
inline |
Alias targetm.vectorize.init_cost.
References targetm.
|
inline |
Return true if BB is a loop header.
References basic_block_def::loop_father.
Referenced by vec_info::new_stmt_vec_info(), parloops_valid_reduction_input_p(), and vect_analyze_loop_operations().
|
inline |
Return TRUE if a statement represented by STMT_INFO is a part of a pattern.
Referenced by vec_info::lookup_dr(), vec_info::move_dr(), vect_build_slp_tree_2(), vect_contains_pattern_stmt_p(), vect_get_and_check_slp_defs(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_orig_stmt(), vect_recog_bitfield_ref_pattern(), vect_slp_linearize_chain(), vect_split_statement(), vectorizable_shift(), and vectorizable_store().
|
extern |
Function is_simple_and_all_uses_invariant Return true if STMT_INFO is simple and all uses of it are invariant.
References dump_enabled_p(), dump_printf_loc(), dyn_cast(), FOR_EACH_SSA_TREE_OPERAND, MSG_MISSED_OPTIMIZATION, SSA_OP_USE, vect_constant_def, vect_external_def, vect_is_simple_use(), vect_location, and vect_uninitialized_def.
Referenced by vect_stmt_relevant_p(), and vectorizable_live_operation().
|
inline |
Return TRUE if the (mis-)alignment of the data access is known with respect to the targets preferred alignment for VECTYPE, and FALSE otherwise.
References dr_misalignment(), and DR_MISALIGNMENT_UNKNOWN.
Referenced by vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), vect_update_misalignment_for_peel(), and vector_alignment_reachable_p().
|
inline |
Return true if CODE is a lane-reducing opcode.
Referenced by lane_reducing_stmt_p(), vect_transform_reduction(), and vectorizable_reduction().
Return true if STMT is a lane-reducing statement.
References dyn_cast(), gimple_assign_rhs_code(), and lane_reducing_op_p().
Referenced by vect_analyze_slp(), and vectorizable_lane_reducing().
|
inline |
Return the vect cost model for LOOP.
References loop::force_vectorize, NULL, and VECT_COST_MODEL_DEFAULT.
Referenced by unlimited_cost_model(), vect_analyze_loop(), vect_analyze_loop_costing(), vect_enhance_data_refs_alignment(), and vect_prune_runtime_alias_test_list().
|
inline |
References loop::aux.
Referenced by update_epilogue_loop_vinfo(), vect_analyze_loop(), and vect_transform_loops().
|
extern |
Return true if we need an in-order reduction for operation CODE on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer overflow must wrap.
References INTEGRAL_TYPE_P, code_helper::is_tree_code(), operation_no_trapping_overflow(), SAT_FIXED_POINT_TYPE_P, and SCALAR_FLOAT_TYPE_P.
Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_reassociating_reduction_p(), vect_slp_check_for_roots(), and vectorizable_reduction().
|
inline |
References gimple_bb(), and loop::inner.
Referenced by vec_info::insert_seq_on_entry(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_dr_behavior(), vect_model_reduction_cost(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_setup_realignment(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_induction(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
In tree-vect-loop.cc.
If there is a neutral value X such that a reduction would not be affected by the introduction of additional X elements, return that X, otherwise return null. CODE is the code of the reduction and SCALAR_TYPE is type of the scalar elements. If the reduction has just a single initial value then INITIAL_VALUE is that value, otherwise it is null. If AS_INITIAL is TRUE the value is supposed to be used as initial value. In that case no signed zero is returned.
References build_all_ones_cst(), build_one_cst(), build_real(), build_zero_cst(), dconstm0, HONOR_SIGNED_ZEROS(), code_helper::is_tree_code(), and NULL_TREE.
Referenced by convert_scalar_cond_reduction(), vect_create_epilog_for_reduction(), vect_expand_fold_left(), vect_find_reusable_accumulator(), vect_transform_cycle_phi(), and vectorizable_reduction().
|
extern |
The code below is trying to perform simple optimization - revert if-conversion for masked stores, i.e. if the mask of a store is zero do not perform it and all stored value producers also if possible. For example, for (i=0; i<n; i++) if (c[i]) { p1[i] += 1; p2[i] = p3[i] +2; } this transformation will produce the following semi-hammock: if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 }) { vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165); vect__12.22_172 = vect__11.19_170 + vect_cst__171; MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172); vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165); vect__19.28_184 = vect__18.25_182 + vect_cst__183; MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184); }
References add_bb_to_loop(), add_phi_arg(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_empty_bb(), create_phi_node(), dom_info_available_p(), dump_enabled_p(), dump_printf_loc(), EDGE_SUCC, find_loop_location(), flow_loop_nested_p(), FOR_EACH_IMM_USE_FAST, free(), gcc_assert, get_loop_body(), gimple_bb(), gimple_build_cond(), gimple_call_arg(), gimple_call_internal_p(), gimple_get_lhs(), gimple_has_volatile_ops(), gimple_set_vdef(), gimple_vdef(), gimple_vop(), gimple_vuse(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_last_bb(), gsi_move_before(), gsi_next(), gsi_prev(), gsi_remove(), GSI_SAME_STMT, gsi_start_bb(), gsi_stmt(), has_zero_uses(), i, basic_block_def::index, is_gimple_debug(), last, profile_probability::likely(), basic_block_def::loop_father, make_edge(), make_single_succ_edge(), make_ssa_name(), MSG_NOTE, NULL, NULL_TREE, loop::num_nodes, release_defs(), set_immediate_dominator(), split_block(), TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, USE_STMT, vect_location, VECTOR_TYPE_P, and worklist.
If the target supports a permute mask that reverses the elements in a vector of type VECTYPE, return that mask, otherwise return null.
References can_vec_perm_const_p(), i, NULL_TREE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), and vect_gen_perm_mask_checked().
Referenced by get_negative_load_store_type(), vectorizable_load(), and vectorizable_store().
|
extern |
Return the mask input to a masked load or store. VEC_MASK is the vectorized form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask that needs to be applied to all loads and stores in a vectorized loop. Return VEC_MASK if LOOP_MASK is null or if VEC_MASK is already masked, otherwise return VEC_MASK & LOOP_MASK. MASK_TYPE is the type of both masks. If new statements are needed, insert them before GSI.
References hash_set< KeyId, Lazy, Traits >::contains(), gcc_assert, gimple_build_assign(), gsi_insert_before(), GSI_SAME_STMT, make_temp_ssa_name(), NULL, TREE_TYPE, useless_type_conversion_p(), and _loop_vec_info::vec_cond_masked_set.
Referenced by vectorizable_call(), vectorizable_early_exit(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
References cond_branch_not_taken, cond_branch_taken, count, gcc_assert, NULL, NULL_TREE, record_stmt_cost(), and scalar_stmt.
|
extern |
References count, record_stmt_cost(), and SLP_TREE_REPRESENTATIVE.
|
extern |
Record the cost of a statement, either by directly informing the target model or by saving it in a vector for later processing. Return a preliminary estimate of the statement's cost.
References builtin_vectorization_cost(), count, si, STMT_VINFO_GATHER_SCATTER_P, unaligned_load, unaligned_store, vector_gather_load, vector_load, vector_scatter_store, and vector_store.
Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), record_stmt_cost(), record_stmt_cost(), vect_bb_slp_scalar_cost(), vect_bb_vectorization_profitable_p(), vect_compute_single_scalar_iteration_cost(), vect_get_known_peeling_cost(), vect_get_load_cost(), vect_get_store_cost(), vect_model_promotion_demotion_cost(), vect_model_reduction_cost(), vect_model_simple_cost(), vect_prologue_cost_for_slp(), vectorizable_bb_reduc_epilogue(), vectorizable_bswap(), vectorizable_call(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and vectorizable_store().
|
extern |
References count, NULL, and record_stmt_cost().
Referenced by record_stmt_cost(), and record_stmt_cost().
|
inline |
Overload of record_stmt_cost with VECTYPE derived from SLP node.
References count, record_stmt_cost(), and SLP_TREE_VECTYPE.
|
inline |
Overload of record_stmt_cost with VECTYPE derived from STMT_INFO.
References count, record_stmt_cost(), and STMT_VINFO_VECTYPE.
|
extern |
Function reduction_fn_for_scalar_code Input: CODE - tree_code of a reduction operations. Output: REDUC_FN - the corresponding internal function to be used to reduce the vector of partial results into a single scalar result, or IFN_LAST if the operation is a supported reduction operation, but does not have such an internal function. Return FALSE if CODE currently cannot be vectorized as reduction.
References code_helper::is_tree_code().
Referenced by vect_slp_check_for_roots(), vectorizable_bb_reduc_epilogue(), vectorizable_reduction(), and vectorize_slp_instance_root_stmt().
In tree-if-conv.cc.
Return TRUE if REF is an array reference whose indices are within the array bounds.
References for_each_index(), gcc_assert, idx_within_array_bound(), loop_containing_stmt(), and NULL.
Referenced by ifcvt_memrefs_wont_trap(), and vect_analyze_early_break_dependences().
|
inline |
Referenced by vect_update_misalignment_for_peel().
|
inline |
|
extern |
This function verifies that the following restrictions apply to LOOP: (1) it consists of exactly 2 basic blocks - header, and an empty latch for innermost loop and 5 basic blocks for outer-loop. (2) it is single entry, single exit (3) its exit condition is the last stmt in the header (4) E is the entry/exit edge of LOOP.
References can_copy_bbs_p(), empty_block_p(), free(), get_loop_body_with_size(), get_loop_exit_condition(), gsi_last_bb(), gsi_stmt(), loop::latch, loop_outer(), loop_preheader_edge(), and loop::num_nodes.
Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().
class loop * slpeel_tree_duplicate_loop_to_edge_cfg | ( | class loop * | loop, |
edge | loop_exit, | ||
class loop * | scalar_loop, | ||
edge | scalar_exit, | ||
edge | e, | ||
edge * | new_e, | ||
bool | flow_loops, | ||
vec< basic_block > * | updated_doms ) |
Given LOOP this function generates a new copy of it and puts it on E which is either the entry or exit of LOOP. If SCALAR_LOOP is non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the basic blocks from SCALAR_LOOP instead of LOOP, but to either the entry or exit of LOOP. If FLOW_LOOPS then connect LOOP to SCALAR_LOOP as a continuation. This is correct for cases where one loop continues from the other like in the vectorizer, but not true for uses in e.g. loop distribution where the contents of the loop body are split but the iteration space of both copies remains the same. If UPDATED_DOMS is not NULL it is updated with the list of basic blocks whose dominators were updated during the peeling. When doing early break vectorization then LOOP_VINFO needs to be provided and is used to keep track of any newly created memory references that need to be updated should we decide to vectorize.
References add_phi_arg(), add_phi_args_after_copy(), adjust_debug_stmts(), adjust_phi_and_debug_stmts(), CDI_DOMINATORS, checking_verify_dominators(), copy_bbs(), copy_ssa_name(), basic_block_def::count, create_phi_node(), delete_basic_block(), duplicate_loop(), duplicate_subloops(), EDGE_COUNT, EDGE_PRED, first_dom_son(), flow_bb_inside_loop_p(), flush_pending_stmts(), FOR_EACH_EDGE, free(), gcc_assert, hash_map< KeyId, Value, Traits >::get(), get_all_dominated_blocks(), get_bb_copy(), get_immediate_dominator(), get_live_virtual_operand_on_edge(), get_loop_body_with_size(), get_loop_copy(), get_loop_exit_edges(), get_virtual_phi(), gimple_phi_arg_def_from_edge(), gimple_phi_num_args(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_next(), gsi_start_phis(), gsi_stmt(), loop::header, i, loop::inner, iterate_fix_dominators(), loop::latch, loop_latch_edge(), loop_outer(), loop_preheader_edge(), MAY_HAVE_DEBUG_BIND_STMTS, next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_ARG_DEF_FROM_EDGE, PHI_ARG_DEF_PTR_FROM_EDGE, PHI_RESULT, basic_block_def::preds, hash_map< KeyId, Value, Traits >::put(), queue, redirect_edge_and_branch(), redirect_edge_and_branch_force(), redirect_edge_pred(), redirect_edge_var_map_clear(), remove_phi_node(), rename_use_op(), rename_variables_in_bb(), set_immediate_dominator(), SET_PHI_ARG_DEF, SET_PHI_ARG_DEF_ON_EDGE, single_pred(), single_pred_edge(), single_succ_edge(), single_succ_p(), split_edge(), TREE_CODE, true, UNKNOWN_LOCATION, and virtual_operand_p().
Referenced by copy_loop_before(), and vect_do_peeling().
|
extern |
Function supportable_indirect_convert_operation Check whether the operation represented by the code CODE can be implemented, either as a single operation or as a sequence of operations, supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN producing a result of type VECTYPE_OUT). Convert operations we currently support directly are FIX_TRUNC and FLOAT. This function checks if these operations are supported by the target platform directly (via vector tree-codes). Output: - converts contains some pairs to perform the convert operation, the pair's first is the intermediate type, and its second is the code of a vector operation to be used when converting the operation from the previous type to the intermediate type.
References build_nonstandard_integer_type(), FOR_EACH_2XWIDER_MODE, GET_MODE_BITSIZE(), GET_MODE_INNER, GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), int_mode_for_size(), wi::min_precision(), NULL_TREE, opt_mode< T >::require(), SIGNED, SLP_TREE_LANES, SSA_NAME_RANGE_INFO, supportable_convert_operation(), TREE_CODE, TYPE_MODE, TYPE_PRECISION, TYPE_VECTOR_SUBPARTS(), vect_get_range_info(), and vect_get_slp_scalar_def().
Referenced by expand_vector_conversion(), and vectorizable_conversion().
|
extern |
Function supportable_narrowing_operation Check whether an operation represented by the code CODE is a narrowing operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN and producing a result of type VECTYPE_OUT). Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC and FLOAT. This function checks if these operations are supported by the target platform directly via vector tree-codes. Output: - CODE1 is the code of a vector operation to be used when vectorizing the operation, if available. - MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like int->short->char - in that case MULTI_STEP_CVT will be 1). - INTERM_TYPES contains the intermediate type required to perform the narrowing operation (short in the above example).
References CASE_CONVERT, gcc_unreachable, i, insn_data, code_helper::is_tree_code(), known_eq, MAX_INTERM_CVT_STEPS, optab_default, optab_for_tree_code(), optab_handler(), SCALAR_INT_MODE_P, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_double_mask_nunits(), and VECTOR_BOOLEAN_TYPE_P.
Referenced by simple_integer_narrowing(), and vectorizable_conversion().
|
extern |
Function supportable_widening_operation Check whether an operation represented by the code CODE is a widening operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN producing a result of type VECTYPE_OUT). Widening operations we currently support are NOP (CONVERT), FLOAT, FIX_TRUNC and WIDEN_MULT. This function checks if these operations are supported by the target platform either directly (via vector tree-codes), or via target builtins. Output: - CODE1 and CODE2 are codes of vector operations to be used when vectorizing the operation, if available. - MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like char->short->int - in that case MULTI_STEP_CVT will be 1). - INTERM_TYPES contains the intermediate type required to perform the widening operation (short in the above example).
References as_combined_fn(), as_internal_fn(), build_vector_type_for_mode(), CASE_CONVERT, CONVERT_EXPR_CODE_P, direct_internal_fn_optab(), dyn_cast(), gcc_unreachable, GET_MODE_INNER, gimple_assign_lhs(), i, insn_data, code_helper::is_tree_code(), known_eq, lookup_evenodd_internal_fn(), lookup_hilo_internal_fn(), LOOP_VINFO_LOOP, MAX_INTERM_CVT_STEPS, MAX_TREE_CODES, nested_in_vect_loop_p(), NULL, optab_default, optab_for_tree_code(), optab_handler(), code_helper::safe_as_tree_code(), SCALAR_INT_MODE_P, STMT_VINFO_REDUC_DEF, supportable_widening_operation(), lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_halve_mask_nunits(), vect_orig_stmt(), VECTOR_BOOLEAN_TYPE_P, VECTOR_MODE_P, and widening_fn_p().
Referenced by supportable_widening_operation(), vect_recog_abd_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), and vectorizable_conversion().
Return true if the vect cost model is unlimited.
References loop_cost_model(), and VECT_COST_MODEL_UNLIMITED.
Referenced by vect_analyze_loop(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), and vect_slp_region().
Determine the main loop exit for the vectorizer.
References candidate(), CDI_DOMINATORS, chrec_contains_undetermined(), COMPARISON_CLASS_P, dominated_by_p(), get_loop_exit_condition(), get_loop_exit_edges(), integer_nonzerop(), integer_zerop(), loop::latch, tree_niter_desc::may_be_zero, niter_desc::niter, NULL, number_of_iterations_exit_assumptions(), single_pred(), and single_pred_p().
Referenced by set_uid_loop_bbs(), and vect_analyze_loop_form().
|
extern |
Function vect_analyze_data_ref_accesses. Analyze the access pattern of all the data references in the loop. FORNOW: the only access pattern that is considered vectorizable is a simple step 1 (consecutive) access. FORNOW: handle only arrays and pointer accesses.
References absu_hwi(), hash_set< KeyId, Lazy, Traits >::add(), hash_set< KeyId, Lazy, Traits >::begin(), can_group_stmts_p(), data_ref_compare_tree(), vec_info_shared::datarefs, dr_vec_info::dr, DR_BASE_ADDRESS, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, dr_group_sort_cmp(), DR_INIT, DR_IS_READ, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, hash_set< KeyId, Lazy, Traits >::end(), opt_result::failure_at(), FOR_EACH_VEC_ELT, g, gcc_assert, gimple_bb(), gimple_uid(), dr_vec_info::group, i, basic_block_def::index, is_a(), vec_info::lookup_dr(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, loop::next, NULL, hash_set< KeyId, Lazy, Traits >::remove(), vec_info::shared, dr_vec_info::stmt, STMT_VINFO_DR_INFO, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_SLP_VECT_ONLY, STMT_VINFO_STMT, STMT_VINFO_VECTORIZABLE, opt_result::success(), tree_fits_shwi_p(), tree_fits_uhwi_p(), tree_int_cst_equal(), TREE_INT_CST_LOW, tree_to_shwi(), tree_to_uhwi(), TREE_TYPE, TYPE_SIZE_UNIT, types_compatible_p(), vect_analyze_data_ref_access(), and vect_location.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Function vect_analyze_data_ref_dependences. Examine all the data references in the loop, and make sure there do not exist any data dependences between them. Set *MAX_VF according to the maximum vectorization factor the data dependences allow.
References compute_all_dependences(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, gcc_assert, i, LOOP_VINFO_DATAREFS, LOOP_VINFO_DDRS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP_NEST, LOOP_VINFO_NO_DATA_DEPENDENCIES, LOOP_VINFO_ORIG_MAX_VECT_FACTOR, opt_result::success(), vect_analyze_data_ref_dependence(), and vect_analyze_early_break_dependences().
Referenced by vect_analyze_loop_2().
|
extern |
Function vect_analyze_data_refs. Find all the data references in the loop or basic block. The general structure of the analysis of data refs in the vectorizer is as follows: 1- vect_analyze_data_refs(loop/bb): call compute_data_dependences_for_loop/bb to find and analyze all data-refs in the loop/bb and their dependences. 2- vect_analyze_dependences(): apply dependence testing using ddrs. 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok. 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
References as_a(), data_reference::aux, build_fold_indirect_ref, vec_info_shared::datarefs, DECL_NONALIASED, dr_analyze_innermost(), DR_BASE_ADDRESS, DR_INIT, DR_IS_READ, DR_IS_WRITE, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), opt_result::failure_at(), fatal(), fold_build2, fold_build_pointer_plus, FOR_EACH_VEC_ELT, gcc_assert, get_base_address(), get_vectype_for_scalar_type(), i, is_a(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, gather_scatter_info::offset, vec_info::shared, data_reference::stmt, STMT_VINFO_DR_BASE_ADDRESS, STMT_VINFO_DR_BASE_ALIGNMENT, STMT_VINFO_DR_BASE_MISALIGNMENT, STMT_VINFO_DR_INIT, STMT_VINFO_DR_OFFSET, STMT_VINFO_DR_OFFSET_ALIGNMENT, STMT_VINFO_DR_STEP, STMT_VINFO_DR_STEP_ALIGNMENT, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), TDF_DETAILS, TREE_CODE, TREE_THIS_VOLATILE, TREE_TYPE, TYPE_VECTOR_SUBPARTS(), unshare_expr(), VAR_P, vect_check_gather_scatter(), and vect_location.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Function vect_analyze_data_refs_alignment Analyze the alignment of the data-references in the loop. Return FALSE if a data reference is found that cannot be vectorized.
References DR_GROUP_FIRST_ELEMENT, DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), vect_compute_data_ref_alignment(), and vect_record_base_alignments().
Referenced by vect_analyze_loop_2().
|
extern |
Drive for loop analysis stage.
Function vect_analyze_loop. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct.
References vect_loop_form_info::assumptions, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, _loop_vec_info::epilogue_vinfo, opt_pointer_wrapper< loop_vec_info >::failure_at(), fatal(), find_loop_nest(), free_numbers_of_iterations_estimates(), gcc_assert, GET_MODE_NAME, i, loop::inner, integer_onep(), known_eq, LOOP_C_FINITE, loop_constraint_set(), loop_cost_model(), vec_info_shared::loop_nest, loop_outer(), LOOP_REQUIRES_VERSIONING, loop_vec_info_for_loop(), LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_VECT_FACTOR, LOOP_VINFO_VECTORIZABLE_P, LOOP_VINFO_VERSIONING_THRESHOLD, maybe_ge, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, opt_pointer_wrapper< loop_vec_info >::propagate_failure(), scev_reset_htab(), loop::simdlen, loop::simduid, opt_pointer_wrapper< loop_vec_info >::success(), vector_costs::suggested_epilogue_mode(), _loop_vec_info::suggested_unroll_factor, targetm, unlimited_cost_model(), vect_analyze_loop_1(), vect_analyze_loop_form(), VECT_COMPARE_COSTS, VECT_COST_MODEL_VERY_CHEAP, vect_joust_loop_vinfos(), vect_location, _loop_vec_info::vector_costs, and vec_info::vector_mode.
Referenced by try_vectorize_loop_1().
|
extern |
Function vect_analyze_loop_form. Verify that certain CFG restrictions hold, including: - the loop has a pre-header - the loop has a single entry - nested loops can have only a single exit. - the loop exit condition is simple enough - the number of iterations can be analyzed, i.e, a countable loop. The niter could be analyzed under some assumptions.
References vect_loop_form_info::assumptions, cfun, chrec_contains_undetermined(), vect_loop_form_info::conds, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, EDGE_COUNT, EDGE_PRED, empty_block_p(), loop::exits, expr_invariant_in_loop_p(), opt_result::failure_at(), free(), get_loop(), get_loop_body(), get_loop_exit_edges(), gimple_bb(), gimple_call_arg(), gimple_seq_empty_p(), loop::header, i, loop::inner, vect_loop_form_info::inner_loop_cond, integer_onep(), integer_zerop(), loop::latch, vect_loop_form_info::loop_exit, loop_exits_from_bb_p(), loop_preheader_edge(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, loop::num_nodes, vect_loop_form_info::number_of_iterations, vect_loop_form_info::number_of_iterationsm1, phi_nodes(), basic_block_def::preds, single_exit(), single_pred(), single_succ_p(), opt_result::success(), TDF_DETAILS, tree_fits_shwi_p(), tree_to_shwi(), vec_init_loop_exit_info(), vect_analyze_loop_form(), vect_get_loop_niters(), and vect_location.
Referenced by gather_scalar_reductions(), vect_analyze_loop(), and vect_analyze_loop_form().
|
extern |
Check if there are stmts in the loop can be vectorized using SLP. Build SLP trees of packed scalar stmts if SLP is possible.
References as_a(), compare_step_with_zero(), vec_info_shared::datarefs, DR_GROUP_FIRST_ELEMENT, DR_GROUP_SIZE, DR_IS_WRITE, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), FOR_EACH_VEC_ELT, gcc_assert, hash_map< KeyId, Value, Traits >::get(), get_loop_exit_edges(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_cond_code(), gimple_cond_lhs(), gimple_cond_rhs(), gimple_phi_arg_def_from_edge(), vec_info::grouped_stores, gsi_end_p(), gsi_next(), gsi_start_phis(), i, internal_fn_mask_index(), is_a(), is_gimple_call(), lane_reducing_stmt_p(), last, vec_info::lookup_def(), vec_info::lookup_dr(), vec_info::lookup_stmt(), loop_latch_edge(), LOOP_VINFO_EARLY_BREAKS_LIVE_IVS, LOOP_VINFO_LOOP, LOOP_VINFO_LOOP_CONDS, LOOP_VINFO_SLP_INSTANCES, MSG_NOTE, loop::next, NULL, optimize_load_redistribution(), REDUC_GROUP_FIRST_ELEMENT, REDUC_GROUP_NEXT_ELEMENT, _loop_vec_info::reduction_chains, _loop_vec_info::reductions, release_scalar_stmts_to_slp_tree_map(), vec_info::shared, slp_inst_kind_gcond, slp_inst_kind_reduc_chain, slp_inst_kind_reduc_group, slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_TREE, SLP_TREE_LANES, SLP_TREE_LOAD_PERMUTATION, SLP_TREE_REPRESENTATIVE, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, dr_vec_info::stmt, STMT_VINFO_DEF_TYPE, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_LIVE_P, STMT_VINFO_REDUC_IDX, STMT_VINFO_RELEVANT, STMT_VINFO_RELEVANT_P, STMT_VINFO_STMT, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTYPE, opt_result::success(), TREE_CODE, vect_analyze_slp_instance(), vect_build_slp_instance(), vect_double_reduction_def, vect_free_slp_instance(), vect_gather_slp_loads(), vect_induction_def, vect_internal_def, vect_load_lanes_supported(), vect_location, vect_lower_load_permutations(), vect_match_slp_patterns(), vect_print_slp_graph(), vect_reduction_def, vect_slp_prefer_store_lanes_p(), vect_stmt_to_vectorize(), vect_store_lanes_supported(), vect_used_only_live, virtual_operand_p(), visited, vNULL, and zerop().
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Make sure the statement is vectorizable.
References as_a(), can_vectorize_live_stmts(), dump_enabled_p(), dump_printf_loc(), dyn_cast(), opt_result::failure_at(), gcc_assert, gcc_unreachable, gimple_call_lhs(), gimple_has_volatile_ops(), gsi_end_p(), gsi_next(), gsi_start(), gsi_stmt(), lc_phi_info_type, _slp_tree::ldst_lanes, vec_info::lookup_stmt(), MSG_NOTE, NULL, NULL_TREE, PURE_SLP_STMT, reduc_vec_info_type, si, SLP_TREE_CODE, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_LIVE_P, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, STMT_VINFO_RELEVANT_P, STMT_VINFO_TYPE, STMT_VINFO_VECTYPE, opt_result::success(), vect_analyze_stmt(), vect_condition_def, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_location, vect_nested_cycle, vect_reduction_def, vect_unknown_def_type, vect_unused_in_scope, vect_used_by_reduction, vect_used_in_outer, vect_used_in_outer_by_reduction, vect_used_only_live, vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
Referenced by vect_analyze_loop_operations(), vect_analyze_stmt(), and vect_slp_analyze_node_operations_1().
|
inline |
Return true if LOOP_VINFO requires a runtime check for whether the vector loop is profitable.
References LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_NITERS_KNOWN_P, and vect_vf_for_cost().
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_loop_versioning(), and vect_transform_loop().
|
extern |
This function builds ni_name = number of iterations. Statements are emitted on the loop preheader edge. If NEW_VAR_P is not NULL, set it to TRUE if a new ssa_var is generated.
References create_tmp_var, force_gimple_operand(), gsi_insert_seq_on_edge_immediate(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, NULL, TREE_CODE, TREE_TYPE, and unshare_expr().
Referenced by vect_do_peeling(), and vect_transform_loop().
|
extern |
Function vect_can_advance_ivs_p In case the number of iterations that LOOP iterates is unknown at compile time, an epilog loop will be generated, and the loop induction variables (IVs) will be "advanced" to the value they are supposed to take just before the epilog loop. Here we check that the access function of the loop IVs and the expression that represents the loop bound are simple enough. These restrictions will be relaxed in the future.
References dump_enabled_p(), dump_printf(), dump_printf_loc(), expr_invariant_in_loop_p(), gsi_end_p(), gsi_next(), gsi_start_phis(), loop::header, iv_phi_p(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL_TREE, gphi_iterator::phi(), STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, tree_is_chrec(), vect_can_peel_nonlinear_iv_p(), vect_location, and vect_step_op_add.
Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().
|
extern |
In tree-vect-data-refs.cc.
Function vect_force_dr_alignment_p. Returns whether the alignment of a DECL can be forced to be aligned on ALIGNMENT bit boundary.
References decl_in_symtab_p(), symtab_node::get(), known_le, MAX_OFILE_ALIGNMENT, MAX_STACK_ALIGNMENT, TREE_STATIC, and VAR_P.
Referenced by increase_alignment(), and vect_compute_data_ref_alignment().
|
extern |
Likewise, but taking a code_helper.
References code_helper::is_tree_code(), and vect_can_vectorize_without_simd_p().
Return true if we can emulate CODE on an integer mode representation of a vector.
Referenced by vect_can_vectorize_without_simd_p(), vectorizable_operation(), and vectorizable_reduction().
|
extern |
Return true if a non-affine read or write in STMT_INFO is suitable for a gather load or scatter store. Describe the operation in *INFO if so. If it is suitable and ELSVALS is nonzero store the supported else values in the vector it points to.
References gather_scatter_info::base, build_fold_addr_expr, CASE_CONVERT, gather_scatter_info::decl, do_add(), DR_IS_READ, DR_REF, dyn_cast(), gather_scatter_info::element_type, expr_invariant_in_loop_p(), extract_ops_from_tree(), fold_convert, get_gimple_rhs_class(), get_inner_reference(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs_code(), gimple_call_internal_fn(), gimple_call_internal_p(), GIMPLE_TERNARY_RHS, gather_scatter_info::ifn, integer_zerop(), INTEGRAL_TYPE_P, internal_fn_mask_index(), internal_gather_scatter_fn_p(), is_gimple_assign(), LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP, LOOP_VINFO_ORIG_LOOP_INFO, may_be_nonaddressable_p(), mem_ref_offset(), gather_scatter_info::memory_type, NULL_TREE, gather_scatter_info::offset, gather_scatter_info::offset_dt, gather_scatter_info::offset_vectype, operand_equal_p(), POINTER_TYPE_P, gather_scatter_info::scale, signed_char_type_node, size_binop, size_int, size_zero_node, sizetype, SSA_NAME_DEF_STMT, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, STRIP_NOPS, supports_vec_gather_load_p(), supports_vec_scatter_store_p(), targetm, TREE_CODE, tree_fits_shwi_p(), TREE_OPERAND, tree_to_shwi(), TREE_TYPE, TYPE_MODE, TYPE_PRECISION, TYPE_SIZE, unsigned_char_type_node, vect_describe_gather_scatter_call(), vect_gather_scatter_fn_p(), vect_unknown_def_type, and wide_int_to_tree().
Referenced by get_load_store_type(), vect_analyze_data_refs(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), and vect_use_strided_gather_scatters_p().
Return true if replacing LOOP_VINFO->vector_mode with VECTOR_MODE would not change the chosen vector modes.
References hash_set< KeyId, Lazy, Traits >::begin(), hash_set< KeyId, Lazy, Traits >::end(), GET_MODE_INNER, i, related_vector_mode(), vec_info::used_vector_modes, and VECTOR_MODE_P.
Referenced by vect_analyze_loop_1(), and vect_slp_region().
|
inline |
If STMT_INFO is a comparison or contains an embedded comparison, return the scalar type of the values being compared. Return null otherwise.
References dyn_cast(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tcc_comparison, TREE_CODE_CLASS, TREE_TYPE, and vect_embedded_comparison_type().
Copy memory reference info such as base/clique from the SRC reference to the DEST MEM_REF.
References handled_component_p(), MR_DEPENDENCE_BASE, MR_DEPENDENCE_CLIQUE, TREE_CODE, and TREE_OPERAND.
Referenced by vect_setup_realignment(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
|
extern |
Function vect_create_addr_base_for_vector_ref. Create an expression that computes the address of the first memory location that will be accessed for a data reference. Input: STMT_INFO: The statement containing the data reference. NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list. OFFSET: Optional. If supplied, it is be added to the initial address. LOOP: Specify relative to which loop-nest should the address be computed. For example, when the dataref is in an inner-loop nested in an outer-loop that is now being vectorized, LOOP can be either the outer-loop, or the inner-loop. The first memory location accessed by the following dataref ('in' points to short): for (i=0; i<N; i++) for (j=0; j<M; j++) s += in[i+j] is as follows: if LOOP=i_loop: &in (relative to i_loop) if LOOP=j_loop: &in+i*2B (relative to j_loop) Output: 1. Return an SSA_NAME whose value is the address of the memory location of the first vector of the data reference. 2. If new_stmt_list is not NULL_TREE after return then the caller must insert these statement(s) which define the returned SSA_NAME. FORNOW: We are only handling array accesses with step 1.
References innermost_loop_behavior::base_address, build1(), build_pointer_type(), dr_info::dr, DR_PTR_INFO, DR_REF, dump_enabled_p(), dump_printf_loc(), dyn_cast(), fold_build2, fold_build_pointer_plus, fold_convert, force_gimple_operand(), gcc_assert, get_dr_vinfo_offset(), get_name(), gimple_seq_add_seq(), innermost_loop_behavior::init, MSG_NOTE, NULL, size_binop, sizetype, SSA_NAME_PTR_INFO, SSA_NAME_VAR, ssize_int, STMT_VINFO_DR_INFO, strip_zero_offset_components(), TREE_CODE, TREE_TYPE, unshare_expr(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.
Referenced by get_misalign_in_elems(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), and vect_setup_realignment().
|
extern |
Function vect_create_data_ref_ptr. Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first location accessed in the loop by STMT_INFO, along with the def-use update chain to appropriately advance the pointer through the loop iterations. Also set aliasing information for the pointer. This pointer is used by the callers to this function to create a memory reference expression for vector load/store access. Input: 1. STMT_INFO: a stmt that references memory. Expected to be of the form GIMPLE_ASSIGN <name, data-ref> or GIMPLE_ASSIGN <data-ref, name>. 2. AGGR_TYPE: the type of the reference, which should be either a vector or an array. 3. AT_LOOP: the loop where the vector memref is to be created. 4. OFFSET (optional): a byte offset to be added to the initial address accessed by the data-ref in STMT_INFO. 5. BSI: location where the new stmts are to be placed if there is no loop. 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain pointing to the initial address. 7. IV_STEP (optional, defaults to NULL): the amount that should be added to the IV during each iteration of the loop. NULL says to move by one copy of AGGR_TYPE up or down, depending on the step of the data reference. Output: 1. Declare a new ptr to vector_type, and have it point to the base of the data reference (initial address accessed by the data reference). For example, for vector of type V8HI, the following code is generated: v8hi *ap; ap = (v8hi *)initial_address; if OFFSET is not supplied: initial_address = &a[init]; if OFFSET is supplied: initial_address = &a[init] + OFFSET; if BYTE_OFFSET is supplied: initial_address = &a[init] + BYTE_OFFSET; Return the initial_address in INITIAL_ADDRESS. 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also update the pointer in each iteration of the loop. Return the increment stmt that updates the pointer in PTR_INCR. 3. Return the pointer.
References alias_sets_conflict_p(), build_pointer_type_for_mode(), create_iv(), dr_info::dr, DR_BASE_ADDRESS, DR_BASE_OBJECT, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, DR_GROUP_SIZE, DR_PTR_INFO, DR_REF, DR_STEP, dump_enabled_p(), dump_printf(), dump_printf_loc(), dyn_cast(), fold_build1, fold_convert, gcc_assert, gcc_unreachable, get_alias_set(), get_name(), get_tree_code_name(), gimple_bb(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, standard_iv_increment_position(), innermost_loop_behavior::step, STMT_VINFO_DATA_REF, STMT_VINFO_DR_INFO, TREE_CODE, tree_int_cst_sgn(), TREE_TYPE, TYPE_SIZE_UNIT, vect_create_addr_base_for_vector_ref(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.
Referenced by vect_setup_realignment(), vectorizable_load(), and vectorizable_store().
Function vect_create_destination_var. Create a new temporary of type VECTYPE.
References free(), gcc_assert, get_name(), SSA_NAME_VERSION, TREE_CODE, TREE_TYPE, vect_get_new_vect_var(), vect_mask_var, vect_scalar_var, vect_simple_var, and VECTOR_BOOLEAN_TYPE_P.
Referenced by permute_vec_elements(), read_vector_array(), vect_create_epilog_for_reduction(), vect_setup_realignment(), vect_transform_cycle_phi(), vect_transform_lc_phi(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
Create a loop_vec_info for LOOP with SHARED and the vect_analyze_loop_form result.
References vect_loop_form_info::assumptions, vect_loop_form_info::conds, estimated_stmt_executions(), i, loop::inner, vect_loop_form_info::inner_loop_cond, integer_onep(), vec_info::lookup_stmt(), vect_loop_form_info::loop_exit, loop_exit_ctrl_vec_info_type, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_INNER_LOOP_COST_FACTOR, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP_CONDS, LOOP_VINFO_LOOP_IV_COND, LOOP_VINFO_MAIN_LOOP_INFO, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_ASSUMPTIONS, LOOP_VINFO_NITERS_UNCHANGED, LOOP_VINFO_NITERSM1, LOOP_VINFO_ORIG_LOOP_INFO, vect_loop_form_info::number_of_iterations, vect_loop_form_info::number_of_iterationsm1, wi::smin(), STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, and vect_condition_def.
Referenced by gather_scalar_reductions(), and vect_analyze_loop_1().
Create an SLP node for SCALAR_STMTS.
References SLP_TREE_CHILDREN, SLP_TREE_CODE, SLP_TREE_DEF_TYPE, SLP_TREE_SCALAR_STMTS, vect_internal_def, and vNULL.
Referenced by vect_optimize_slp_pass::get_result_with_layout(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), and vect_lower_load_permutations().
|
extern |
Find stmts that must be both vectorized and SLPed.
References DUMP_VECT_SCOPE, gsi_end_p(), gsi_last_bb(), gsi_next(), gsi_prev(), gsi_start(), gsi_start_phis(), gsi_stmt(), i, is_gimple_debug(), vec_info::lookup_stmt(), vdhs_data::loop_vinfo, LOOP_VINFO_BBS, LOOP_VINFO_LOOP, maybe_push_to_hybrid_worklist(), gather_scatter_info::offset, STMT_SLP_TYPE, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, vect_check_gather_scatter(), vect_detect_hybrid_slp(), walk_gimple_op(), vdhs_data::worklist, and worklist.
|
extern |
Used in tree-vect-loop-manip.cc
Determine if operating on full vectors for LOOP_VINFO might leave some scalar iterations still to do. If so, decide how we should handle those scalar iterations. The possibilities are: (1) Make LOOP_VINFO operate on partial vectors instead of full vectors. In this case: LOOP_VINFO_USING_PARTIAL_VECTORS_P == true LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false LOOP_VINFO_PEELING_FOR_NITER == false (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop to handle the remaining scalar iterations. In this case: LOOP_VINFO_USING_PARTIAL_VECTORS_P == false LOOP_VINFO_PEELING_FOR_NITER == true There are two choices: (2a) Consider vectorizing the epilogue loop at the same VF as the main loop, but using partial vectors instead of full vectors. In this case: LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true (2b) Consider vectorizing the epilogue loop at lower VFs only. In this case: LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
References dump_enabled_p(), dump_printf_loc(), opt_result::failure_at(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_USING_SELECT_VL_P, MSG_NOTE, opt_result::success(), _loop_vec_info::suggested_unroll_factor, vect_known_niters_smaller_than_vf(), vect_location, and vect_need_peeling_or_partial_vectors_p().
Referenced by vect_analyze_loop_2(), and vect_do_peeling().
|
extern |
Function vect_do_peeling. Input: - LOOP_VINFO: Represent a loop to be vectorized, which looks like: preheader: LOOP: header_bb: loop_body if (exit_loop_cond) goto exit_bb else goto header_bb exit_bb: - NITERS: The number of iterations of the loop. - NITERSM1: The number of iterations of the loop's latch. - NITERS_NO_OVERFLOW: No overflow in computing NITERS. - TH, CHECK_PROFITABILITY: Threshold of niters to vectorize loop if CHECK_PROFITABILITY is true. Output: - *NITERS_VECTOR and *STEP_VECTOR describe how the main loop should iterate after vectorization; see vect_set_loop_condition for details. - *NITERS_VECTOR_MULT_VF_VAR is either null or an SSA name that should be set to the number of scalar iterations handled by the vector loop. The SSA name is only used on exit from the loop. This function peels prolog and epilog from the loop, adds guards skipping PROLOG and EPILOG for various conditions. As a result, the changed CFG would look like: guard_bb_1: if (prefer_scalar_loop) goto merge_bb_1 else goto guard_bb_2 guard_bb_2: if (skip_prolog) goto merge_bb_2 else goto prolog_preheader prolog_preheader: PROLOG: prolog_header_bb: prolog_body if (exit_prolog_cond) goto prolog_exit_bb else goto prolog_header_bb prolog_exit_bb: merge_bb_2: vector_preheader: VECTOR LOOP: vector_header_bb: vector_body if (exit_vector_cond) goto vector_exit_bb else goto vector_header_bb vector_exit_bb: guard_bb_3: if (skip_epilog) goto merge_bb_3 else goto epilog_preheader merge_bb_1: epilog_preheader: EPILOG: epilog_header_bb: epilog_body if (exit_epilog_cond) goto merge_bb_3 else goto epilog_header_bb merge_bb_3: Note this function peels prolog and epilog only if it's necessary, as well as guards. This function returns the epilogue loop if a decision was made to vectorize it, otherwise NULL. The analysis resulting in this epilogue loop's loop_vec_info was performed in the same vect_analyze_loop call as the main loop's. 
At that time vect_analyze_loop constructs a list of accepted loop_vec_info's for lower vectorization factors than the main loop. This list is chained in the loop's loop_vec_info in the 'epilogue_vinfo' member. When we decide to vectorize the epilogue loop for a lower vectorization factor, the loop_vec_info in epilogue_vinfo is updated and linked to the epilogue loop. This is later used to vectorize the epilogue. The reason the loop_vec_info needs updating is that it was constructed based on the original main loop, and the epilogue loop is a copy of this loop, so all links pointing to statements in the original loop need updating. Furthermore, these loop_vec_infos share the data_reference's records, which will also need to be updated. TODO: Guard for prefer_scalar_loop should be emitted along with versioning conditions if loop versioning is needed.
References add_phi_arg(), adjust_vec, adjust_vec_debug_stmts(), advance(), profile_probability::always(), profile_probability::apply_scale(), boolean_type_node, build_int_cst(), build_one_cst(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_phi_node(), DEF_FROM_PTR, delete_update_ssa(), EDGE_PRED, EDGE_SUCC, _loop_vec_info::epilogue_vinfo, first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_build2, FOR_EACH_IMM_USE_STMT, FOR_EACH_SSA_DEF_OPERAND, loop::force_vectorize, free(), free_original_copy_tables(), gcc_assert, gcc_checking_assert, get_bb_original(), get_dominated_by(), get_immediate_dominator(), get_loop_body(), get_loop_copy(), get_loop_exit_edges(), gimple_bb(), gimple_build_assign(), gimple_build_nop(), gimple_debug_bind_p(), gimple_debug_bind_reset_value(), gimple_phi_arg_def_from_edge(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), profile_probability::guessed_always(), i, initialize_original_copy_tables(), profile_probability::initialized_p(), integer_onep(), profile_probability::invert(), poly_int< N, C >::is_constant(), iterate_fix_dominators(), LOOP_C_INFINITE, loop_constraint_clear(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_VINFO_BBS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_EPILOGUE_IV_EXIT, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERSM1, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, _loop_vec_info::main_loop_edge, make_ssa_name(), MAY_HAVE_DEBUG_BIND_STMTS, need_ssa_update_p(), next_dom_son(), profile_count::nonzero_p(), NULL, NULL_TREE, loop::num_nodes, PHI_RESULT, 
profile_count::probability_in(), queue, record_niter_bound(), reset_original_copy_tables(), scale_bbs_frequencies(), scale_loop_profile(), scev_reset(), set_immediate_dominator(), set_range_info(), single_pred(), single_pred_edge(), single_pred_p(), single_succ_edge(), _loop_vec_info::skip_main_loop_edge, _loop_vec_info::skip_this_loop_edge, slpeel_add_loop_guard(), slpeel_can_duplicate_loop_p(), slpeel_tree_duplicate_loop_to_edge_cfg(), slpeel_update_phi_nodes_for_guard1(), split_edge(), SSA_NAME_DEF_STMT, SSA_OP_DEF, poly_int< N, C >::to_constant(), wi::to_wide(), TREE_CODE, TREE_TYPE, TYPE_MAX_VALUE, ui, UNKNOWN_LOCATION, update_stmt(), vect_build_loop_niters(), vect_can_advance_ivs_p(), vect_determine_partial_vectors_and_peeling(), vect_gen_prolog_loop_niters(), vect_gen_scalar_loop_niters(), vect_gen_vector_loop_niters(), vect_gen_vector_loop_niters_mult_vf(), vect_set_loop_condition(), vect_update_inits_of_drs(), vect_update_ivs_after_vectorizer(), vect_use_loop_mask_for_alignment_p(), vect_vf_for_cost(), and virtual_operand_p().
Referenced by vect_transform_loop().
Return a mask type with twice as many elements as OLD_TYPE, given that it should have mode NEW_MODE.
References build_truth_vector_type_for_mode(), new_mode(), and TYPE_VECTOR_SUBPARTS().
Referenced by supportable_narrowing_operation().
|
inline |
Return the behavior of DR_INFO with respect to the vectorization context (which for outer loop vectorization might not be the behavior recorded in DR_INFO itself).
References dr_info::dr, DR_INNERMOST, dyn_cast(), LOOP_VINFO_LOOP, nested_in_vect_loop_p(), NULL, and STMT_VINFO_DR_WRT_VEC_LOOP.
Referenced by compare_step_with_zero(), get_dr_vinfo_offset(), vect_compute_data_ref_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_get_data_ptr_increment(), vect_get_loop_variant_data_ptr_increment(), vectorizable_load(), and vectorizable_store().
|
inline |
If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the scalar type of the values being compared. Return null otherwise.
References COMPARISON_CLASS_P, dyn_cast(), gimple_assign_rhs1(), gimple_assign_rhs_code(), NULL_TREE, TREE_OPERAND, and TREE_TYPE.
Referenced by vect_comparison_type().
Return true if VECTYPE represents a vector that requires lowering by the vector lowering pass.
References TREE_TYPE, TYPE_MODE, TYPE_PRECISION, VECTOR_BOOLEAN_TYPE_P, and VECTOR_MODE_P.
Referenced by vectorizable_call(), vectorizable_operation(), vectorizable_reduction(), and vectorizable_shift().
|
extern |
Function vect_enhance_data_refs_alignment This pass will use loop versioning and loop peeling in order to enhance the alignment of data references in the loop. FOR NOW: we assume that whatever versioning/peeling takes place, only the original loop is to be vectorized. Any other loops that are created by the transformations performed in this pass - are not supposed to be vectorized. This restriction will be relaxed. This pass will require a cost model to guide it whether to apply peeling or versioning or a combination of the two. For example, the scheme that intel uses when given a loop with several memory accesses, is as follows: choose one memory access ('p') which alignment you want to force by doing peeling. Then, either (1) generate a loop in which 'p' is aligned and all other accesses are not necessarily aligned, or (2) use loop versioning to generate one loop in which all accesses are aligned, and another loop in which only 'p' is necessarily aligned. ("Automatic Intra-Register Vectorization for the Intel Architecture", Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International Journal of Parallel Programming, Vol. 30, No. 2, April 2002.) Devising a cost model is the most critical aspect of this work. It will guide us on which access to peel for, whether to use loop versioning, how many versions to create, etc. The cost model will probably consist of generic considerations as well as target specific considerations (on powerpc for example, misaligned stores are more painful than misaligned loads). 
Here are the general steps involved in alignment enhancements: -- original loop, before alignment analysis: for (i=0; i<N; i++){ x = q[i]; # DR_MISALIGNMENT(q) = unknown p[i] = y; # DR_MISALIGNMENT(p) = unknown } -- After vect_compute_data_refs_alignment: for (i=0; i<N; i++){ x = q[i]; # DR_MISALIGNMENT(q) = 3 p[i] = y; # DR_MISALIGNMENT(p) = unknown } -- Possibility 1: we do loop versioning: if (p is aligned) { for (i=0; i<N; i++){ # loop 1A x = q[i]; # DR_MISALIGNMENT(q) = 3 p[i] = y; # DR_MISALIGNMENT(p) = 0 } } else { for (i=0; i<N; i++){ # loop 1B x = q[i]; # DR_MISALIGNMENT(q) = 3 p[i] = y; # DR_MISALIGNMENT(p) = unaligned } } -- Possibility 2: we do loop peeling: for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). x = q[i]; p[i] = y; } for (i = 3; i < N; i++){ # loop 2A x = q[i]; # DR_MISALIGNMENT(q) = 0 p[i] = y; # DR_MISALIGNMENT(p) = unknown } -- Possibility 3: combination of loop peeling and versioning: for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized). x = q[i]; p[i] = y; } if (p is aligned) { for (i = 3; i<N; i++){ # loop 3A x = q[i]; # DR_MISALIGNMENT(q) = 0 p[i] = y; # DR_MISALIGNMENT(p) = 0 } } else { for (i = 3; i<N; i++){ # loop 3B x = q[i]; # DR_MISALIGNMENT(q) = 0 p[i] = y; # DR_MISALIGNMENT(p) = unaligned } } These loops are later passed to loop_transform to be vectorized. The vectorizer will use the alignment information to guide the transformation (whether to generate regular loads/stores, or with special handling for misalignment).
References aligned_access_p(), _vect_peel_info::count, dr_vec_info::dr, dr_align_group_sort_cmp(), DR_BASE_ADDRESS, DR_GROUP_SIZE, _vect_peel_info::dr_info, DR_IS_WRITE, dr_misalignment(), DR_MISALIGNMENT_UNKNOWN, DR_OFFSET, DR_STEP, DR_STEP_ALIGNMENT, DR_TARGET_ALIGNMENT, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, flow_loop_nested_p(), FOR_EACH_VEC_ELT, gcc_assert, GET_MODE_SIZE(), i, loop::inner, _vect_peel_extended_info::inside_cost, INT_MAX, is_empty(), known_alignment_for_access_p(), known_le, vec_info::lookup_dr(), loop_cost_model(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_VINFO_DATAREFS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_MAY_MISALIGN_STMTS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PTR_MASK, LOOP_VINFO_SCALAR_ITERATION_COST, LOOP_VINFO_UNALIGNED_DR, LOOP_VINFO_VECT_FACTOR, MAX, MSG_MISSED_OPTIMIZATION, MSG_NOTE, _vect_peel_info::npeel, NULL, operand_equal_p(), optimize_loop_nest_for_speed_p(), outermost_invariant_loop_for_expr(), _vect_peel_extended_info::outside_cost, _vect_peel_extended_info::peel_info, SET_DR_MISALIGNMENT, size_zero_node, slpeel_can_duplicate_loop_p(), dr_vec_info::stmt, STMT_SLP_TYPE, STMT_VINFO_DR_INFO, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTYPE, opt_result::success(), target_align(), tree_int_cst_compare(), TREE_INT_CST_LOW, TREE_TYPE, TYPE_MODE, TYPE_SIZE_UNIT, TYPE_VECTOR_SUBPARTS(), unlimited_cost_model(), vect_can_advance_ivs_p(), VECT_COST_MODEL_CHEAP, vect_dr_aligned_if_related_peeled_dr_is(), vect_dr_misalign_for_aligned_access(), vect_get_known_peeling_cost(), vect_get_peeling_costs_all_drs(), vect_get_scalar_dr_size(), vect_location, vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_peeling_supportable(), vect_relevant_for_alignment_p(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), 
vect_vf_for_cost(), and vector_alignment_reachable_p().
Referenced by vect_analyze_loop_2().
|
extern |
Find the first stmt in NODE.
References get_later_stmt(), i, NULL, SLP_TREE_SCALAR_STMTS, and vect_orig_stmt().
Referenced by vect_schedule_slp_node(), vect_slp_analyze_load_dependences(), and vectorizable_load().
|
extern |
Find the last store in SLP INSTANCE.
References get_later_stmt(), i, last, NULL, SLP_TREE_SCALAR_STMTS, and vect_orig_stmt().
Referenced by vect_bb_slp_mark_live_stmts(), vect_schedule_slp_node(), vect_slp_analyze_instance_dependence(), and vect_slp_analyze_store_dependences().
|
extern |
Find the data references in STMT, analyze them with respect to LOOP and append them to DATAREFS. Return false if datarefs in this stmt cannot be handled.
References data_reference::aux, cfun, CONVERT_EXPR_CODE_P, CONVERT_EXPR_P, create_data_ref(), DECL_BIT_FIELD, DR_BASE_ADDRESS, DR_INIT, DR_IS_CONDITIONAL_IN_STMT, DR_IS_READ, DR_OFFSET, DR_OFFSET_ALIGNMENT, DR_REF, DR_STEP, DR_STEP_ALIGNMENT, dyn_cast(), opt_result::failure_at(), find_data_references_in_stmt(), free_data_ref(), gcc_assert, gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_clobber_p(), gimple_has_volatile_ops(), highest_pow2_factor(), integer_zerop(), INTEGRAL_TYPE_P, is_gimple_assign(), is_gimple_call(), loop_containing_stmt(), NULL, loop::simduid, SSA_NAME_DEF_STMT, SSA_NAME_VAR, ssize_int, data_reference::stmt, stmt_can_throw_internal(), STRIP_NOPS, opt_result::success(), TREE_CODE, tree_fits_uhwi_p(), tree_int_cst_equal(), TREE_OPERAND, tree_to_uhwi(), TREE_TYPE, TYPE_PRECISION, and TYPE_SIZE_UNIT.
Referenced by vect_get_datarefs_in_loop(), and vect_slp_bbs().
|
extern |
Replace the scalar statement STMT_INFO with a new vector statement VEC_STMT, which sets the same scalar result as STMT_INFO did. Create and return a stmt_vec_info for VEC_STMT.
References gcc_assert, gimple_get_lhs(), gsi_for_stmt(), gsi_replace(), scalar_stmt, vect_finish_stmt_generation_1(), and vect_orig_stmt().
Referenced by vectorizable_condition(), and vectorize_fold_left_reduction().
|
extern |
Add VEC_STMT to the vectorized implementation of STMT_INFO and insert it before *GSI. Create and return a stmt_vec_info for VEC_STMT.
References copy_ssa_name(), ECF_CONST, ECF_NOVOPS, ECF_PURE, gcc_assert, gimple_assign_lhs(), gimple_call_flags(), gimple_call_lhs(), gimple_has_mem_ops(), gimple_set_modified(), gimple_set_vdef(), gimple_set_vuse(), gimple_vdef(), gimple_vuse(), gimple_vuse_op(), gsi_end_p(), gsi_insert_before(), GSI_SAME_STMT, gsi_stmt(), is_gimple_assign(), is_gimple_call(), is_gimple_reg(), SET_USE, TREE_CODE, and vect_finish_stmt_generation_1().
Referenced by bump_vector_ptr(), permute_vec_elements(), read_vector_array(), vect_add_slp_permutation(), vect_build_one_gather_load_call(), vect_build_one_scatter_store_call(), vect_clobber_variable(), vect_create_half_widening_stmts(), vect_create_vectorized_demotion_stmts(), vect_emulate_mixed_dot_prod(), vect_gen_widened_results_half(), vect_init_vector_1(), vect_permute_load_chain(), vect_permute_store_chain(), vect_shift_permute_load_chain(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_load(), vectorizable_operation(), vectorizable_recurr(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), vectorize_fold_left_reduction(), and write_vector_array().
void vect_free_loop_info_assumptions | ( | class loop * | loop | ) |
A helper function to free scev and LOOP niter information, as well as clear loop constraint LOOP_C_FINITE.
References loop::any_likely_upper_bound, loop::any_upper_bound, free_numbers_of_iterations_estimates(), LOOP_C_FINITE, loop_constraint_clear(), and scev_reset_htab().
Referenced by fwd_jt_path_registry::mark_threaded_blocks(), back_threader::maybe_register_path(), try_vectorize_loop_1(), and vect_loop_versioning().
|
extern |
Free the memory allocated for the SLP instance.
References free(), SLP_INSTANCE_LOADS, SLP_INSTANCE_REMAIN_DEFS, SLP_INSTANCE_ROOT_STMTS, SLP_INSTANCE_TREE, and vect_free_slp_tree().
Referenced by vect_analyze_loop_2(), vect_analyze_slp(), vect_slp_analyze_bb_1(), vect_slp_analyze_operations(), vect_transform_loop(), and vec_info::~vec_info().
|
extern |
Recursively free the memory allocated for the SLP tree rooted at NODE.
References FOR_EACH_VEC_ELT, i, SLP_TREE_CHILDREN, SLP_TREE_REF_COUNT, SLP_TREE_REPRESENTATIVE, STMT_SLP_TYPE, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_SLP_VECT_ONLY_PATTERN, vect_free_slp_tree(), and vect_orig_stmt().
Referenced by addsub_pattern::build(), complex_add_pattern::build(), complex_fms_pattern::build(), complex_mul_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution(), optimize_load_redistribution_1(), release_scalar_stmts_to_slp_tree_map(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_cse_slp_nodes(), vect_free_slp_instance(), and vect_free_slp_tree().
|
extern |
Check whether we can use an internal function for a gather load or scatter store. READ_P is true for loads and false for stores. MASKED_P is true if the load or store is conditional. MEMORY_TYPE is the type of the memory elements being loaded or stored. OFFSET_TYPE is the type of the offset that is being applied to the invariant base address. SCALE is the amount by which the offset should be multiplied *after* it has been converted to address width. Return true if the function is supported, storing the function id in *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT. If we can use a gather/scatter internal function, also store the possible else values in ELSVALS.
References build_nonstandard_integer_type(), get_vectype_for_scalar_type(), internal_gather_scatter_fn_supported_p(), POINTER_SIZE, tree_to_uhwi(), TYPE_PRECISION, TYPE_SIZE, TYPE_UNSIGNED, and vector_element_bits().
Referenced by vect_check_gather_scatter(), and vect_truncate_gather_scatter_offset().
|
extern |
Gather loads reachable from the individual SLP graph entries.
References FOR_EACH_VEC_ELT, i, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, vec_info::slp_instances, vect_gather_slp_loads(), and visited.
|
extern |
Generate and return statement sequence that sets vector length LEN that is: min_of_start_and_end = min (START_INDEX, END_INDEX); left_len = END_INDEX - min_of_start_and_end; rhs = min (left_len, LEN_LIMIT); LEN = rhs; Note: the cost of the code generated by this function is modeled by vect_estimate_min_profitable_iters, so changes here may need corresponding changes there.
References gcc_assert, gimple_build(), gimple_build_assign(), gimple_seq_add_stmt(), NULL, and TREE_TYPE.
Referenced by vect_set_loop_controls_directly().
|
extern |
Generate the tree for the loop len mask and return it. Given LENS, NVECTORS, VECTYPE, INDEX and FACTOR, generate the len mask as below: tree len_mask = VCOND_MASK_LEN (compare_mask, ones, zero, len, bias)
References build_all_ones_cst(), build_int_cst(), build_zero_cst(), gimple_build_call_internal(), gimple_call_set_lhs(), gsi_insert_before(), GSI_SAME_STMT, intQI_type_node, LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS, make_temp_ssa_name(), NULL, TREE_TYPE, and vect_get_loop_len().
Referenced by vectorizable_early_exit().
|
extern |
Given a vector type VECTYPE, turns permutation SEL into the equivalent VECTOR_CST mask. No checks are made that the target platform supports the mask, so callers may wish to test can_vec_perm_const_p separately, or use vect_gen_perm_mask_checked.
References build_vector_type(), gcc_assert, known_eq, vec_perm_indices::length(), ssizetype, TYPE_VECTOR_SUBPARTS(), and vec_perm_indices_to_tree().
Referenced by vect_create_epilog_for_reduction(), vect_create_nonlinear_iv_init(), vect_gen_perm_mask_checked(), and vectorizable_scan_store().
|
extern |
Checked version of vect_gen_perm_mask_any. Asserts can_vec_perm_const_p, i.e. that the target supports the pattern _for arbitrary input vectors_.
References can_vec_perm_const_p(), gcc_assert, TYPE_MODE, and vect_gen_perm_mask_any().
Referenced by blend_vec_perm_simplify_seqs(), can_duplicate_and_interleave_p(), perm_mask_for_reverse(), recognise_vec_perm_simplify_seq(), vect_maybe_permute_loop_masks(), vect_permute_load_chain(), vect_permute_store_chain(), vect_shift_permute_load_chain(), vect_transform_slp_perm_load_1(), vectorizable_load(), vectorizable_recurr(), vectorizable_scan_store(), vectorizable_slp_permutation_1(), and vectorizable_store().
|
extern |
NITERS is the number of times that the original scalar loop executes after peeling. Work out the maximum number of iterations N that can be handled by the vectorized form of the loop and then either: a) set *STEP_VECTOR_PTR to the vectorization factor and generate: niters_vector = N b) set *STEP_VECTOR_PTR to one and generate: niters_vector = N / vf In both cases, store niters_vector in *NITERS_VECTOR_PTR and add any new statements on the loop preheader edge. NITERS_NO_OVERFLOW is true if NITERS doesn't overflow (i.e. if NITERS is always nonzero).
References build_int_cst(), build_one_cst(), create_tmp_var, exact_log2(), fold_build2, force_gimple_operand(), gsi_insert_seq_on_edge_immediate(), poly_int< N, C >::is_constant(), is_gimple_val(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, wi::max_value(), NULL, NULL_TREE, wi::one(), wi::rshift(), set_range_info(), TREE_TYPE, TYPE_PRECISION, and TYPE_SIGN.
Referenced by vect_do_peeling(), and vect_transform_loop().
|
extern |
Generate and return a vector mask of MASK_TYPE such that mask[I] is true iff J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ.
References build_zero_cst(), direct_internal_fn_supported_p(), gcc_checking_assert, gimple_build_call_internal(), gimple_call_set_lhs(), gimple_seq_add_stmt(), make_ssa_name(), make_temp_ssa_name(), NULL, OPTIMIZE_FOR_SPEED, and TREE_TYPE.
Referenced by vect_gen_while_not(), and vect_set_loop_controls_directly().
|
extern |
Generate a vector mask of type MASK_TYPE for which index I is false iff J + START_INDEX < END_INDEX for all J <= I. Add the statements to SEQ.
References gimple_build(), and vect_gen_while().
Referenced by vect_set_loop_controls_directly().
|
extern |
Calculate cost of peeling the loop PEEL_ITERS_PROLOGUE times.
References cond_branch_taken, FOR_EACH_VEC_ELT, LOOP_VINFO_NITERS_KNOWN_P, record_stmt_cost(), si, vect_epilogue, vect_get_peel_iters_epilogue(), and vect_prologue.
Referenced by vect_enhance_data_refs_alignment(), and vect_peeling_hash_get_lowest_cost().
|
extern |
Calculate cost of DR's memory access.
References dr_aligned, dr_explicit_realign, dr_explicit_realign_optimized, dr_unaligned_supported, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), gcc_unreachable, MSG_MISSED_OPTIMIZATION, MSG_NOTE, record_stmt_cost(), SLP_TREE_VECTYPE, STMT_VINFO_VECTYPE, targetm, unaligned_load, vec_perm, vect_body, vect_location, VECT_MAX_COST, vect_prologue, vector_load, and vector_stmt.
Referenced by vect_get_data_access_cost(), and vectorizable_load().
|
extern |
Given a complete set of lengths LENS, extract length number INDEX for an rgroup that operates on NVECTORS vectors of type VECTYPE, where 0 <= INDEX < NVECTORS. Return a value that contains FACTOR multiplied by the number of elements that should be processed. Insert any set-up statements before GSI.
References rgroup_controls::bias_adjusted_ctrl, build_int_cst(), rgroup_controls::controls, rgroup_controls::factor, gcc_assert, gimple_build(), gimple_build_nop(), gsi_insert_seq_before(), GSI_SAME_STMT, i, LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS, LOOP_VINFO_RGROUP_COMPARE_TYPE, LOOP_VINFO_RGROUP_IV_TYPE, make_temp_ssa_name(), NULL, NULL_TREE, SSA_NAME_DEF_STMT, rgroup_controls::type, and TYPE_VECTOR_SUBPARTS().
Referenced by vect_gen_loop_len_mask(), vect_get_loop_variant_data_ptr_increment(), vect_get_strided_load_store_ops(), vectorizable_call(), vectorizable_condition(), vectorizable_induction(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
Given a complete set of masks MASKS, extract mask number INDEX for an rgroup that operates on NVECTORS vectors of type VECTYPE, where 0 <= INDEX < NVECTORS. Insert any set-up statements before GSI. See the comment above vec_loop_masks for more details about the mask arrangement.
References build_int_cst(), rgroup_controls::controls, rgroup_controls::factor, gcc_assert, gcc_unreachable, GET_MODE_CLASS, gimple_build(), gimple_build_nop(), gimple_convert(), gsi_insert_seq_before(), GSI_SAME_STMT, i, integer_type_node, known_eq, LOOP_VINFO_PARTIAL_VECTORS_STYLE, LOOP_VINFO_VECT_FACTOR, make_temp_ssa_name(), NULL, vec_loop_masks::rgc_vec, SSA_NAME_DEF_STMT, TREE_TYPE, truth_type_for(), rgroup_controls::type, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, vect_partial_vectors_avx512, and vect_partial_vectors_while_ult.
Referenced by vect_transform_reduction(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
|
extern |
LOOP_VINFO is an epilogue loop whose corresponding main loop can be skipped. Return a value that equals: - MAIN_LOOP_VALUE when LOOP_VINFO is entered from the main loop and - SKIP_VALUE when the main loop is skipped.
References add_phi_arg(), create_phi_node(), gcc_assert, _loop_vec_info::main_loop_edge, make_ssa_name(), _loop_vec_info::skip_main_loop_edge, TREE_TYPE, and UNKNOWN_LOCATION.
Referenced by vect_transform_cycle_phi().
Return the corresponding else value for an else value constant ELSVAL with type TYPE.
References build_minus_one_cst(), build_zero_cst(), cfun, create_tmp_var, gcc_unreachable, get_or_create_ssa_default_def(), MASK_LOAD_ELSE_M1, MASK_LOAD_ELSE_UNDEFINED, MASK_LOAD_ELSE_ZERO, and TREE_NO_WARNING.
Referenced by predicate_load_or_store(), read_vector_array(), vect_recog_gather_scatter_pattern(), and vectorizable_load().
|
extern |
|
extern |
Like vect_get_new_vect_var but return an SSA name.
References free(), gcc_unreachable, make_temp_ssa_name(), NULL, vect_pointer_var, vect_scalar_var, and vect_simple_var.
Referenced by vect_build_one_gather_load_call(), vect_build_one_scatter_store_call(), vect_init_vector(), vectorizable_call(), vectorizable_load(), and vectorizable_store().
|
extern |
Function vect_get_new_vect_var. Returns a name for a new variable. The current naming scheme appends the prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to the name of vectorizer generated variables, and appends that to NAME if provided.
References create_tmp_reg(), free(), gcc_unreachable, NULL, vect_mask_var, vect_pointer_var, vect_scalar_var, and vect_simple_var.
Referenced by permute_vec_elements(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_create_destination_var(), vectorizable_induction(), vectorizable_nonlinear_induction(), and vectorizable_recurr().
|
inline |
Return the number of copies needed for loop vectorization when a statement operates on vectors of type VECTYPE. This is the vectorization factor divided by the number of elements in VECTYPE and is always known at compile time.
References NULL, and vect_get_num_copies().
Return the number of vectors in the context of vectorization region VINFO, needed for a group of statements, whose size is specified by lanes of NODE, if NULL, it is 1. The statements are supposed to be interleaved together with no gap, and all operate on vectors of type VECTYPE, if NULL, the vectype of NODE is used.
References dyn_cast(), LOOP_VINFO_VECT_FACTOR, NULL, SLP_TREE_LANES, SLP_TREE_VECTYPE, and vect_get_num_vectors().
Referenced by check_load_store_for_partial_vectors(), vect_get_data_access_cost(), vect_get_gather_scatter_ops(), vect_get_num_copies(), vect_reduction_update_partial_vector_usage(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_transform_reduction(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_shift(), and vectorizable_store().
|
inline |
Return the number of vectors of type VECTYPE that are needed to get NUNITS elements. NUNITS should be based on the vectorization factor, so it is always a known multiple of the number of elements in VECTYPE.
References TYPE_VECTOR_SUBPARTS().
Referenced by vect_get_num_copies().
|
extern |
Find the place of the data-ref in STMT_INFO in the interleaving chain that starts from FIRST_STMT_INFO. Return -1 if the data-ref is not a part of the chain.
References DR_GROUP_FIRST_ELEMENT, DR_GROUP_GAP, and DR_GROUP_NEXT_ELEMENT.
Referenced by vect_build_slp_tree_2(), and vectorizable_load().
Analysis Utilities for Loop Vectorization. Copyright (C) 2006-2025 Free Software Foundation, Inc. Contributed by Dorit Nuzman <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>.
TODO: Note the vectorizer still builds COND_EXPRs with GENERIC compares in the first operand. Disentangling this is future work; the IL is properly transferred to VEC_COND_EXPRs with separate compares.
Return true if we have a useful VR_RANGE range for VAR, storing it in *MIN_VALUE and *MAX_VALUE if so. Note the range in the dump files.
References cfun, dump_enabled_p(), dump_generic_expr_loc(), dump_hex(), dump_printf(), get_legacy_range(), get_nonzero_bits(), get_range_query(), intersect_range_with_nonzero_bits(), MSG_NOTE, path_range_query::range_of_expr(), irange::set_varying(), TDF_SLIM, wi::to_wide(), TREE_TYPE, TYPE_SIGN, vrange::undefined_p(), vect_location, and VR_RANGE.
Referenced by supportable_indirect_convert_operation(), vect_determine_precisions_from_range(), vect_determine_precisions_from_users(), and vectorizable_conversion().
|
inline |
Return the size of the value accessed by unvectorized data reference DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated for the associated gimple statement, since that guarantees that DR_INFO accesses either a scalar or a scalar equivalent. ("Scalar equivalent" here includes things like V1SI, which can be vectorized in the same way as a plain SI.)
References dr_info::dr, DR_REF, tree_to_uhwi(), TREE_TYPE, and TYPE_SIZE_UNIT.
Referenced by get_group_load_store_type(), vect_enhance_data_refs_alignment(), vect_small_gap_p(), vect_truncate_gather_scatter_offset(), vectorizable_load(), and vectorizable_with_step_bound_p().
Get the vectorized definitions of SLP_NODE in *VEC_DEFS.
References SLP_TREE_NUMBER_OF_VEC_STMTS, and SLP_TREE_VEC_DEFS.
Referenced by vect_get_gather_scatter_ops(), vect_get_slp_defs(), vect_get_vec_defs(), vect_transform_cycle_phi(), vectorizable_call(), vectorizable_load(), vectorizable_phi(), vectorizable_simd_clone_call(), vectorizable_store(), vectorize_fold_left_reduction(), and vectorize_slp_instance_root_stmt().
|
extern |
Get N vectorized definitions for SLP_NODE.
References i, SLP_TREE_CHILDREN, vect_get_slp_defs(), and vNULL.
Get the scalar definition of the Nth lane from SLP_NODE or NULL_TREE if there is no definition for it in the scalar IL or it is not known.
References gimple_get_lhs(), NULL_TREE, SLP_TREE_DEF_TYPE, SLP_TREE_SCALAR_OPS, SLP_TREE_SCALAR_STMTS, STMT_VINFO_STMT, and vect_internal_def.
Referenced by supportable_indirect_convert_operation(), and vectorizable_conversion().
Get the Ith vectorized definition from SLP_NODE.
References i, and SLP_TREE_VEC_DEFS.
Referenced by vect_create_epilog_for_reduction(), vect_schedule_scc(), vectorizable_induction(), and vectorizable_slp_permutation_1().
|
extern |
Return the smallest scalar part of STMT_INFO. This is used to determine the vectype of the stmt. We generally set the vectype according to the type of the result (lhs). For stmts whose result-type is different than the type of the arguments (e.g., demotion, promotion), vectype will be reset appropriately (later). Note that we have to visit the smallest datatype in this function, because that determines the VF. If the smallest datatype in the loop is present only as the rhs of a promotion operation - we'd miss it. Such a case, where a variable of this datatype does not appear in the lhs anywhere in the loop, can only occur if it's an invariant: e.g.: 'int_x = (int) short_inv', which we'd expect to have been optimized away by invariant motion. However, we cannot rely on invariant motion to always take invariants out of the loop, and so in the case of promotion we also have to check the rhs. LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding types.
References dyn_cast(), gimple_assign_cast_p(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_call_num_args(), i, internal_fn_mask_index(), internal_fn_stored_value_index(), internal_load_fn_p(), internal_store_fn_p(), SIMD_CLONE_ARG_TYPE_VECTOR, simd_clone_call_p(), tree_fits_uhwi_p(), TREE_INT_CST_LOW, TREE_TYPE, and TYPE_SIZE_UNIT.
Referenced by vect_get_vector_types_for_stmt().
|
inline |
Get cost by calling cost target builtin.
References builtin_vectorization_cost(), and NULL.
Referenced by vect_estimate_min_profitable_iters().
|
extern |
Calculate cost of DR's memory access.
References dr_aligned, dr_unaligned_supported, dr_unaligned_unsupported, dump_enabled_p(), dump_printf_loc(), gcc_unreachable, MSG_MISSED_OPTIMIZATION, MSG_NOTE, record_stmt_cost(), SLP_TREE_VECTYPE, STMT_VINFO_VECTYPE, unaligned_store, vect_body, vect_location, VECT_MAX_COST, and vector_store.
Referenced by vect_get_data_access_cost(), and vectorizable_store().
|
extern |
STMT_INFO is either a masked or unconditional store. Return the value being stored.
References dyn_cast(), gcc_assert, gcc_unreachable, gimple_assign_rhs1(), gimple_assign_single_p(), gimple_call_arg(), gimple_call_internal_fn(), and internal_fn_stored_value_index().
Referenced by get_group_load_store_type(), vect_recog_gather_scatter_pattern(), and vectorizable_store().
void vect_get_vec_defs | ( | vec_info * | vinfo, |
stmt_vec_info | stmt_info, | ||
slp_tree | slp_node, | ||
unsigned | ncopies, | ||
tree | op0, | ||
tree | vectype0, | ||
vec< tree > * | vec_oprnds0, | ||
tree | op1, | ||
tree | vectype1, | ||
vec< tree > * | vec_oprnds1, | ||
tree | op2, | ||
tree | vectype2, | ||
vec< tree > * | vec_oprnds2, | ||
tree | op3, | ||
tree | vectype3, | ||
vec< tree > * | vec_oprnds3 ) |
Get vectorized definitions for OP0 and OP1.
References SLP_TREE_CHILDREN, vect_get_slp_defs(), and vect_get_vec_defs_for_operand().
Referenced by vect_get_vec_defs(), vect_transform_lc_phi(), vect_transform_reduction(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_operation(), vectorizable_scan_store(), vectorizable_shift(), and vectorizable_store().
void vect_get_vec_defs | ( | vec_info * | vinfo, |
stmt_vec_info | stmt_info, | ||
slp_tree | slp_node, | ||
unsigned | ncopies, | ||
tree | op0, | ||
vec< tree > * | vec_oprnds0, | ||
tree | op1 = NULL, | ||
vec< tree > * | vec_oprnds1 = NULL, | ||
tree | op2 = NULL, | ||
vec< tree > * | vec_oprnds2 = NULL, | ||
tree | op3 = NULL, | ||
vec< tree > * | vec_oprnds3 = NULL ) |
References NULL_TREE, and vect_get_vec_defs().
void vect_get_vec_defs_for_operand | ( | vec_info * | vinfo, |
stmt_vec_info | stmt_vinfo, | ||
unsigned | ncopies, | ||
tree | op, | ||
vec< tree > * | vec_oprnds, | ||
tree | vectype ) |
Function vect_get_vec_defs_for_operand. OP is an operand in STMT_VINFO. This function returns a vector of NCOPIES defs that will be used in the vectorized stmts for STMT_VINFO. In the case that OP is an SSA_NAME which is defined in the loop, then STMT_VINFO_VEC_STMTS of the defining stmt holds the relevant defs. In case OP is an invariant or constant, a new stmt that creates a vector def needs to be introduced. VECTYPE may be used to specify a required type for vector invariant.
References cfun, create_tmp_var, dump_enabled_p(), dump_printf_loc(), dyn_cast(), gcc_assert, get_or_create_ssa_default_def(), get_vectype_for_scalar_type(), gimple_get_lhs(), i, MSG_NOTE, NULL, SSA_NAME_IS_DEFAULT_DEF, SSA_NAME_VAR, stmt_vectype(), STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, truth_type_for(), VAR_P, vect_constant_def, vect_external_def, vect_init_vector(), vect_is_simple_use(), vect_location, VECT_SCALAR_BOOLEAN_TYPE_P, vect_stmt_to_vectorize(), and VECTOR_BOOLEAN_TYPE_P.
Referenced by vect_get_gather_scatter_ops(), vect_get_vec_defs(), vectorizable_call(), vectorizable_load(), vectorizable_simd_clone_call(), and vectorizable_store().
|
extern |
Try to compute the vector types required to vectorize STMT_INFO, returning true on success and false if vectorization isn't possible. If GROUP_SIZE is nonzero and we're performing BB vectorization, make sure that the number of elements in the vectors is no bigger than GROUP_SIZE. On success: - Set *STMT_VECTYPE_OUT to: - NULL_TREE if the statement doesn't need to be vectorized; - the equivalent of STMT_VINFO_VECTYPE otherwise. - Set *NUNITS_VECTYPE_OUT to the vector type that contains the maximum number of units needed to vectorize STMT_INFO, or NULL_TREE if the statement does not help to determine the overall number of units.
References build_nonstandard_integer_type(), DR_REF, dump_dec(), dump_enabled_p(), dump_printf(), dump_printf_loc(), opt_result::failure_at(), gcc_assert, get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_get_lhs(), internal_store_fn_p(), is_a(), is_gimple_call(), MSG_NOTE, NULL_TREE, vec_info::slp_instances, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, opt_result::success(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), types_compatible_p(), vect_get_smallest_scalar_type(), vect_location, vect_use_mask_type_p(), VECTOR_BOOLEAN_TYPE_P, and VECTOR_MODE_P.
Referenced by vect_build_slp_tree_1(), and vect_determine_vf_for_stmt_1().
gimple * vect_gimple_build | ( | tree | lhs, |
code_helper | ch, | ||
tree | op0, | ||
tree | op1 ) |
Build a GIMPLE_ASSIGN or GIMPLE_CALL with the tree_code, or internal_fn contained in ch, respectively.
References as_internal_fn(), gcc_assert, gimple_build_assign(), gimple_build_call_internal(), gimple_call_set_lhs(), code_helper::is_internal_fn(), code_helper::is_tree_code(), and NULL_TREE.
Referenced by vect_create_half_widening_stmts(), vect_create_vectorized_demotion_stmts(), vect_gen_widened_results_half(), vect_recog_widen_op_pattern(), vectorizable_call(), and vectorizable_conversion().
|
extern |
Function vect_grouped_load_supported. COUNT is the size of the load group (the number of statements plus the number of gaps). SINGLE_ELEMENT_P is true if there is actually only one statement, with a gap of COUNT - 1. Returns true if a suitable permute exists.
References can_vec_perm_const_p(), count, dump_enabled_p(), dump_printf_loc(), exact_log2(), gcc_assert, GET_MODE_NUNITS(), i, maybe_gt, MSG_MISSED_OPTIMIZATION, pow2p_hwi(), TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vect_location, and VECTOR_MODE_P.
Referenced by get_group_load_store_type(), and vect_analyze_loop_2().
Function vect_grouped_store_supported. Returns TRUE if interleave high and interleave low permutations are supported, and FALSE otherwise.
References can_vec_perm_const_p(), count, dump_enabled_p(), dump_printf(), dump_printf_loc(), exact_log2(), gcc_assert, GET_MODE_NUNITS(), i, data_reference::indices, MSG_MISSED_OPTIMIZATION, pow2p_hwi(), TYPE_MODE, vect_location, and VECTOR_MODE_P.
Referenced by get_group_load_store_type(), and vect_analyze_loop_2().
Return a mask type with half the number of elements as OLD_TYPE, given that it should have mode NEW_MODE.
References build_truth_vector_type_for_mode(), new_mode(), and TYPE_VECTOR_SUBPARTS().
Referenced by supportable_widening_operation(), and vect_maybe_permute_loop_masks().
|
extern |
Function vect_init_vector. Insert a new stmt (INIT_STMT) that initializes a new variable of type TYPE with the value VAL. If TYPE is a vector type and VAL does not have vector type a vector with all elements equal to VAL is created first. Place the initialization at GSI if it is not NULL. Otherwise, place the initialization at the loop preheader. Return the DEF of INIT_STMT. It will be used in the vectorization of STMT_INFO.
References build_all_ones_cst(), build_vector_from_val(), build_zero_cst(), CONSTANT_CLASS_P, gcc_assert, gimple_build(), gimple_build_assign(), gimple_convert(), gsi_end_p(), gsi_remove(), gsi_start(), gsi_stmt(), integer_zerop(), INTEGRAL_TYPE_P, make_ssa_name(), NULL, TREE_TYPE, types_compatible_p(), useless_type_conversion_p(), vect_get_new_ssa_name(), vect_init_vector_1(), vect_simple_var, VECTOR_BOOLEAN_TYPE_P, and VECTOR_TYPE_P.
Referenced by vect_build_all_ones_mask(), vect_build_one_scatter_store_call(), vect_build_zero_merge_argument(), vect_create_nonlinear_iv_vec_step(), vect_get_vec_defs_for_operand(), vectorizable_load(), vectorizable_recurr(), vectorizable_scan_store(), and vectorizable_shift().
|
inline |
Return true if STMT_INFO extends the result of a load.
References CONVERT_EXPR_CODE_P, DR_IS_READ, dyn_cast(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), INTEGRAL_TYPE_P, vec_info::lookup_def(), STMT_VINFO_DATA_REF, TREE_TYPE, and TYPE_PRECISION.
|
inline |
Return true if STMT_INFO is an integer truncation.
References CONVERT_EXPR_CODE_P, dyn_cast(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), INTEGRAL_TYPE_P, TREE_TYPE, and TYPE_PRECISION.
|
inline |
Return true if STMT_INFO represents part of a reduction.
References STMT_VINFO_REDUC_IDX.
|
extern |
Function vect_is_simple_use. Input: VINFO - the vect info of the loop or basic block that is being vectorized. OPERAND - operand in the loop or bb. Output: DEF_STMT_INFO_OUT (optional) - information about the defining stmt in case OPERAND is an SSA_NAME that is defined in the vectorizable region DEF_STMT_OUT (optional) - the defining stmt in case OPERAND is an SSA_NAME; the definition could be anywhere in the function DT - the type of definition Returns whether a stmt with OPERAND can be vectorized. For loops, supportable operands are constants, loop invariants, and operands that are defined by the current iteration of the loop. Unsupportable operands are those that are defined by a previous iteration of the loop (as is the case in reduction/induction computations). For basic blocks, supportable operands are constants and bb invariants. For now, operands defined outside the basic block are not supported.
References CONSTANT_CLASS_P, dump_enabled_p(), dump_generic_expr(), dump_gimple_expr(), dump_printf(), dump_printf_loc(), is_gimple_min_invariant(), vec_info::lookup_def(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, SSA_NAME_DEF_STMT, SSA_NAME_IS_DEFAULT_DEF, STMT_VINFO_DEF_TYPE, TDF_SLIM, TREE_CODE, vect_condition_def, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_location, vect_nested_cycle, vect_reduction_def, vect_stmt_to_vectorize(), vect_uninitialized_def, and vect_unknown_def_type.
Referenced by get_group_load_store_type(), get_load_store_type(), is_simple_and_all_uses_invariant(), process_use(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_get_and_check_slp_defs(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_is_simple_use(), vect_is_simple_use(), vect_look_through_possible_promotion(), vect_recog_bool_pattern(), vect_recog_rotate_pattern(), vect_slp_linearize_chain(), vect_widened_op_tree(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_lane_reducing(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
|
extern |
Function vect_is_simple_use. Same as vect_is_simple_use but also determines the vector operand type of OPERAND and stores it to *VECTYPE. If the definition of OPERAND is vect_uninitialized_def, vect_constant_def or vect_external_def *VECTYPE will be set to NULL_TREE and the caller is responsible to compute the best suited vector type for the scalar operand.
References dump_enabled_p(), dump_printf_loc(), gcc_assert, gcc_unreachable, MSG_NOTE, NULL_TREE, STMT_VINFO_VECTYPE, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_is_simple_use(), vect_location, vect_nested_cycle, vect_reduction_def, and vect_uninitialized_def.
|
extern |
Function vect_is_simple_use. Same as vect_is_simple_use but determines the operand by operand position OPERAND from either STMT or SLP_NODE, filling in *OP and *SLP_DEF (when SLP_NODE is not NULL).
References COMPARISON_CLASS_P, dyn_cast(), error_mark_node, gcc_assert, gcc_unreachable, gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_get_lhs(), gimple_op(), NULL, SLP_TREE_CHILDREN, SLP_TREE_CODE, SLP_TREE_DEF_TYPE, SLP_TREE_REPRESENTATIVE, SLP_TREE_SCALAR_OPS, SLP_TREE_VECTYPE, TREE_OPERAND, vect_internal_def, and vect_is_simple_use().
|
inline |
---------------------------------------------------------------------- Target support routines ----------------------------------------------------------------------- The following routines are provided to simplify costing decisions in target code. Please add more as needed.
Return true if an operation of kind KIND for STMT_INFO represents the extraction of an element from a vector in preparation for storing the element to memory.
References DR_IS_WRITE, STMT_VINFO_DATA_REF, and vec_to_scalar.
|
extern |
Stores to BSI the standard position for an induction variable increment belonging to LOOP_EXIT (just before the exit condition of the given exit). INSERT_AFTER is set to true if the increment should be inserted after *BSI.
References gsi_last_bb().
Referenced by vect_create_epilog_for_reduction(), vect_set_loop_condition_normal(), vect_set_loop_condition_partial_vectors_avx512(), and vect_set_loop_controls_directly().
|
extern |
Decide whether it is possible to use a zero-based induction variable when vectorizing LOOP_VINFO with partial vectors. If it is, return the value that the induction variable must be able to hold in order to ensure that the rgroups eventually have no active vector elements. Return -1 otherwise.
References LOOP_VINFO_LOOP, LOOP_VINFO_MASK_SKIP_NITERS, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_VECT_FACTOR, max_loop_iterations(), wi::to_widest(), TREE_CODE, and vect_max_vf().
Referenced by vect_rgroup_iv_might_wrap_p(), vect_verify_full_masking(), and vect_verify_full_masking_avx512().
|
inline |
Return the minimum alignment in bytes that the vectorized version of DR_INFO is guaranteed to have.
References dr_info::dr, dr_misalignment(), DR_MISALIGNMENT_UNKNOWN, DR_REF, DR_TARGET_ALIGNMENT, TREE_TYPE, and TYPE_ALIGN_UNIT.
Referenced by get_group_load_store_type(), and vectorizable_load().
|
extern |
Return FN if vec_{masked_,mask_len_}load_lanes is available for COUNT vectors of type VECTYPE. MASKED_P says whether the masked form is needed. If it is available and ELSVALS is nonzero store the possible else values in the vector it points to.
References count, and vect_lanes_optab_supported_p().
Referenced by check_load_store_for_partial_vectors(), vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), vect_analyze_loop_2(), vect_analyze_slp(), and vect_lower_load_permutations().
If LOOP has been versioned during ifcvt, return the internal call guarding it.
References as_a(), g, gimple_call_arg(), gimple_call_internal_p(), gsi_end_p(), gsi_for_stmt(), gsi_last_bb(), gsi_prev(), gsi_stmt(), loop_preheader_edge(), NULL, loop::num, single_pred(), single_pred_p(), single_succ_p(), and tree_to_shwi().
Referenced by set_uid_loop_bbs(), try_vectorize_loop(), and vect_loop_versioning().
class loop * vect_loop_versioning | ( | loop_vec_info | loop_vinfo, |
gimple * | loop_vectorized_call ) |
Function vect_loop_versioning. If the loop has data references that may or may not be aligned or/and has data reference relations whose independence was not proven then two versions of the loop need to be generated, one which is vectorized and one which isn't. A test is then generated to control which of the loops is executed. The test checks for the alignment of all of the data references that may or may not be aligned. An additional sequence of runtime tests is generated for each pairs of DDRs whose independence was not proven. The vectorized version of loop is executed only if both alias and alignment tests are passed. The test generated to check which version of loop is executed is modified to also check for profitability as indicated by the cost model threshold TH. The versioning precondition(s) are placed in *COND_EXPR and *COND_EXPR_STMT_LIST.
References add_phi_arg(), adjust_phi_and_debug_stmts(), profile_probability::always(), profile_count::apply_probability(), as_a(), bb_loop_depth(), boolean_false_node, boolean_true_node, boolean_type_node, build_int_cst(), build_zero_cst(), CDI_DOMINATORS, copy_ssa_name(), basic_block_def::count, create_phi_node(), dom_info_available_p(), dominated_by_p(), dump_enabled_p(), dump_printf_loc(), EDGE_COMPLEX, EDGE_COUNT, EDGE_PRED, EDGE_SUCC, EXPR_P, extract_true_false_edges_from_block(), first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), flush_pending_stmts(), fold_build2, fold_loop_internal_call(), FOR_EACH_SSA_USE_OPERAND, force_gimple_operand_1(), free_original_copy_tables(), gcc_assert, get_current_def(), get_loop_copy(), gimple_bb(), gimple_build_cond(), gimple_call_lhs(), gimple_cond_set_condition_from_tree(), gimple_phi_arg_location_from_edge(), gimple_seq_add_seq(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_seq_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), GSI_SAME_STMT, gsi_start(), gsi_start_phis(), gsi_stmt(), loop::header, initialize_original_copy_tables(), loop::inner, profile_probability::invert(), is_gimple_condexpr_for_cond(), is_gimple_val(), profile_probability::likely(), LOCATION_LOCUS, LOOP_C_FINITE, loop_constraint_set_p(), loop_outer(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIAS, LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_REQUIRES_VERSIONING_FOR_NITERS, LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND, loop_version(), LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERSM1, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_SCALAR_LOOP_SCALING, LOOP_VINFO_VERSIONING_THRESHOLD, make_edge(), MSG_NOTE, MSG_OPTIMIZED_LOCATIONS, MSG_PRIORITY_USER_FACING, loop::next, next_dom_son(), NULL, NULL_TREE, loop::num, outermost_invariant_loop_for_expr(), gphi_iterator::phi(), PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, basic_block_def::preds, replace_uses_by(), vec_info::resync_stmt_addr(), 
scale_loop_frequencies(), set_immediate_dominator(), single_exit(), split_block(), split_edge(), profile_probability::sqrt(), SSA_NAME_DEF_STMT, SSA_OP_USE, ssa_redirect_edge(), superloop_at_depth(), TODO_update_ssa_no_phi, TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, unshare_expr(), update_ssa(), update_stmt(), USE_FROM_PTR, vect_apply_runtime_profitability_check_p(), vect_create_cond_for_alias_checks(), vect_create_cond_for_align_checks(), vect_create_cond_for_lower_bounds(), vect_create_cond_for_niters_checks(), vect_create_cond_for_unequal_addrs(), vect_free_loop_info_assumptions(), vect_location, and vect_loop_vectorized_call().
Referenced by vect_transform_loop().
|
extern |
For each possible SLP instance decide whether to SLP it and calculate overall unrolling factor needed to SLP the loop. Return TRUE if decided to SLP at least one instance.
References dump_dec(), dump_enabled_p(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, LOOP_VINFO_SLP_INSTANCES, LOOP_VINFO_SLP_UNROLLING_FACTOR, MSG_NOTE, SLP_INSTANCE_TREE, vect_location, vect_mark_slp_stmts(), vect_update_slp_vf_for_node(), and visited.
Referenced by vect_analyze_loop_2().
|
extern |
In tree-vect-patterns.cc.
Mark statements that are involved in a pattern.
References dump_enabled_p(), dump_printf_loc(), gcc_assert, gcc_unreachable, gimple_extract_op(), gimple_get_lhs(), gimple_set_lhs(), gsi_end_p(), gsi_for_stmt(), gsi_insert_before_without_update(), gsi_insert_seq_before_without_update(), gsi_next(), gsi_none(), gsi_remove(), GSI_SAME_STMT, gsi_start(), gsi_stmt(), i, is_a(), is_pattern_stmt_p(), vec_info::lookup_stmt(), MSG_NOTE, NULL, gimple_match_op::num_ops, gimple_match_op::ops, si, STMT_VINFO_DEF_TYPE, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_REDUC_IDX, STMT_VINFO_RELATED_STMT, vect_condition_def, vect_init_pattern_stmt(), vect_internal_def, vect_location, and vect_set_pattern_stmt().
Referenced by vect_pattern_recog_1().
|
extern |
Function vect_mark_stmts_to_be_vectorized. Not all stmts in the loop need to be vectorized. For example: for i... for j... 1. T0 = i + j 2. T1 = a[T0] 3. j = j + 1 Stmt 1 and 3 do not need to be vectorized, because loop control and addressing of vectorized data-refs are handled differently. This pass detects such stmts.
References COMPARISON_CLASS_P, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), opt_result::failure_at(), fatal(), FOR_EACH_PHI_OR_STMT_USE, gcc_assert, gcc_unreachable, gimple_assign_rhs1(), gimple_assign_rhs_code(), gimple_call_arg(), gimple_call_num_args(), gimple_cond_code(), gimple_cond_lhs(), gimple_cond_rhs(), gimple_num_ops(), gimple_op(), gimple_phi_result(), gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), i, is_gimple_debug(), is_pattern_stmt_p(), vec_info::lookup_stmt(), LOOP_VINFO_BBS, LOOP_VINFO_LOOP, MSG_NOTE, loop::num_nodes, gather_scatter_info::offset, process_use(), relevant, si, SSA_OP_USE, ssa_use_operand_t::stmt, STMT_VINFO_DEF_TYPE, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_RELEVANT, opt_result::success(), tcc_comparison, TREE_CODE, TREE_CODE_CLASS, TREE_OPERAND, USE_FROM_PTR, vect_check_gather_scatter(), vect_double_reduction_def, vect_location, vect_mark_relevant(), vect_nested_cycle, vect_reduction_def, vect_stmt_relevant_p(), vect_unused_in_scope, vect_used_by_reduction, vect_used_in_outer, vect_used_in_outer_by_reduction, vect_used_in_scope, vect_used_only_live, virtual_operand_p(), and worklist.
Referenced by vect_analyze_loop_2().
|
inline |
Return the maximum possible vectorization factor for LOOP_VINFO.
References LOOP_VINFO_VECT_FACTOR, and MAX_VECTORIZATION_FACTOR.
Referenced by vect_iv_limit_for_partial_vectors(), vect_truncate_gather_scatter_offset(), and vect_verify_full_masking_avx512().
If OP is not NULL and is external or constant update its vector type with VECTYPE. Returns true if successful or false if not, for example when conflicting vector types are present.
References SLP_TREE_DEF_TYPE, SLP_TREE_LANES, SLP_TREE_VECTYPE, types_compatible_p(), vect_external_def, vect_internal_def, and VECTOR_BOOLEAN_TYPE_P.
Referenced by vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), and vectorizable_store().
|
inline |
Returns the memory acccess type being used to vectorize the statement. If SLP this is read from NODE, otherwise it's read from the STMT_VINFO.
References SLP_TREE_MEMORY_ACCESS_TYPE, and STMT_VINFO_MEMORY_ACCESS_TYPE.
|
extern |
Return true if we can assume from the scalar form of STMT_INFO that neither the scalar nor the vector forms will generate code. STMT_INFO is known not to involve a data reference.
References CONVERT_EXPR_CODE_P, dyn_cast(), gimple_assign_lhs(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tree_nop_conversion_p(), and TREE_TYPE.
Referenced by vect_bb_slp_scalar_cost(), vect_compute_single_scalar_iteration_cost(), and vectorizable_assignment().
|
inline |
Estimate the number of elements in VEC_TYPE for costing purposes. Pick a reasonable estimate if the exact number isn't known at compile time.
References estimated_poly_value(), and TYPE_VECTOR_SUBPARTS().
Referenced by vect_model_reduction_cost(), vectorizable_bb_reduc_epilogue(), vectorizable_load(), and vectorizable_store().
|
extern |
Optimize the SLP graph of VINFO.
References release_scalar_stmts_to_slp_tree_map(), vect_optimize_slp_pass::run(), SLP_INSTANCE_TREE, vec_info::slp_instances, and vect_cse_slp_nodes().
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
inline |
If STMT_INFO is a pattern statement, return the statement that it replaces, otherwise return STMT_INFO itself.
References is_pattern_stmt_p(), and STMT_VINFO_RELATED_STMT.
Referenced by addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::build_vertices(), vect_optimize_slp_pass::containing_loop(), get_later_stmt(), info_for_reduction(), vect_optimize_slp_pass::is_cfg_latch_edge(), maybe_push_to_hybrid_worklist(), supportable_widening_operation(), vect_analyze_slp_instance(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_create_epilog_for_reduction(), vect_find_first_scalar_stmt_in_slp(), vect_find_last_scalar_stmt_in_slp(), vect_finish_replace_stmt(), vect_free_slp_tree(), vect_get_and_check_slp_defs(), vect_model_reduction_cost(), vect_remove_slp_scalar_calls(), vect_remove_stores(), vect_schedule_slp(), vect_slp_analyze_load_dependences(), vect_slp_analyze_store_dependences(), vect_slp_convert_to_external(), vect_slp_node_weight(), vect_slp_prune_covered_roots(), vect_transform_reduction(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorize_fold_left_reduction(), and vectorize_slp_instance_root_stmt().
void vect_pattern_recog | ( | vec_info * | vinfo | ) |
Pattern recognition functions. Additional pattern recognition functions can (and will) be added in the future.
Function vect_pattern_recog Input: LOOP_VINFO - a struct_loop_info of a loop in which we want to look for computation idioms. Output - for each computation idiom that is detected we create a new stmt that provides the same functionality and that can be vectorized. We also record some information in the struct_stmt_info of the relevant stmts, as explained below: At the entry to this function we have the following stmts, with the following initial value in the STMT_VINFO fields: stmt in_pattern_p related_stmt vec_stmt S1: a_i = .... - - - S2: a_2 = ..use(a_i).. - - - S3: a_1 = ..use(a_2).. - - - S4: a_0 = ..use(a_1).. - - - S5: ... = ..use(a_0).. - - - Say the sequence {S1,S2,S3,S4} was detected as a pattern that can be represented by a single stmt. We then: - create a new stmt S6 equivalent to the pattern (the stmt is not inserted into the code) - fill in the STMT_VINFO fields as follows: in_pattern_p related_stmt vec_stmt S1: a_i = .... - - - S2: a_2 = ..use(a_i).. - - - S3: a_1 = ..use(a_2).. - - - S4: a_0 = ..use(a_1).. true S6 - '---> S6: a_new = .... - S4 - S5: ... = ..use(a_0).. - - - (the last stmt in the pattern (S4) and the new pattern stmt (S6) point to each other through the RELATED_STMT field). S6 will be marked as relevant in vect_mark_stmts_to_be_vectorized instead of S4 because it will replace all its uses. Stmts {S1,S2,S3} will remain irrelevant unless used by stmts other than S4. If vectorization succeeds, vect_transform_stmt will skip over {S1,S2,S3} (because they are marked as irrelevant). It will vectorize S6, and record a pointer to the new vector stmt VS6 from S6 (as usual). S4 will be skipped, and S5 will be vectorized as usual: in_pattern_p related_stmt vec_stmt S1: a_i = .... - - - S2: a_2 = ..use(a_i).. - - - S3: a_1 = ..use(a_2).. - - - > VS6: va_new = .... - - - S4: a_0 = ..use(a_1).. true S6 VS6 '---> S6: a_new = .... - S4 VS6 > VS5: ... = ..vuse(va_new).. - - - S5: ... = ..use(a_0).. 
- - - DCE could then get rid of {S1,S2,S3,S4,S5} (if their defs are not used elsewhere), and we'll end up with: VS6: va_new = .... VS5: ... = ..vuse(va_new).. In case of more than one pattern statements, e.g., widen-mult with intermediate type: S1 a_t = ; S2 a_T = (TYPE) a_t; '--> S3: a_it = (interm_type) a_t; S4 prod_T = a_T * CONST; '--> S5: prod_T' = a_it w* CONST; there may be other users of a_T outside the pattern. In that case S2 will be marked as relevant (as well as S3), and both S2 and S3 will be analyzed and vectorized. The vector stmt VS2 will be recorded in S2, and VS3 will be recorded in S3.
References vec_info::bbs, DUMP_VECT_SCOPE, gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_stmt(), i, vec_info::lookup_stmt(), vec_info::nbbs, si, vec_info::stmt_vec_info_ro, STMT_VINFO_VECTORIZABLE, vect_determine_precisions(), vect_pattern_recog_1(), and vect_vect_recog_func_ptrs.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Nonlinear induction.
Peel init_expr by skip_niter for induction_type.
References begin(), build_zero_cst(), exp(), wi::from_mpz(), gcc_assert, gcc_unreachable, gimple_build(), gimple_convert(), init_expr(), wi::to_mpz(), wi::to_wide(), TREE_CODE, tree_fits_uhwi_p(), TREE_INT_CST_LOW, tree_to_uhwi(), TREE_TYPE, TYPE_PRECISION, TYPE_SIGN, TYPE_UNSIGNED, UNSIGNED, unsigned_type_for(), vect_step_op_mul, vect_step_op_neg, vect_step_op_shl, vect_step_op_shr, and wide_int_to_tree().
Referenced by vect_update_ivs_after_vectorizer(), and vectorizable_nonlinear_induction().
|
extern |
Function vect_permute_store_chain. Given a chain of interleaved stores in DR_CHAIN of LENGTH that must be a power of 2 or equal to 3, generate interleave_high/low stmts to reorder the data correctly for the stores. Return the final references for stores in RESULT_CHAIN. E.g., LENGTH is 4 and the scalar type is short, i.e., VF is 8. The input is 4 vectors each containing 8 elements. We assign a number to each element, the input sequence is: 1st vec: 0 1 2 3 4 5 6 7 2nd vec: 8 9 10 11 12 13 14 15 3rd vec: 16 17 18 19 20 21 22 23 4th vec: 24 25 26 27 28 29 30 31 The output sequence should be: 1st vec: 0 8 16 24 1 9 17 25 2nd vec: 2 10 18 26 3 11 19 27 3rd vec: 4 12 20 28 5 13 21 29 4th vec: 6 14 22 30 7 15 23 31 i.e., we interleave the contents of the four vectors in their order. We use interleave_high/low instructions to create such output. The input of each interleave_high/low operation is two vectors: 1st vec 2nd vec 0 1 2 3 4 5 6 7 the even elements of the result vector are obtained left-to-right from the high/low elements of the first vector. The odd elements of the result are obtained left-to-right from the high/low elements of the second vector. The output of interleave_high will be: 0 4 1 5 and of interleave_low: 2 6 3 7 The permutation is done in log LENGTH stages. In each stage interleave_high and interleave_low stmts are created for each pair of vectors in DR_CHAIN, where the first argument is taken from the first half of DR_CHAIN and the second argument from its second half. In our example, I1: interleave_high (1st vec, 3rd vec) I2: interleave_low (1st vec, 3rd vec) I3: interleave_high (2nd vec, 4th vec) I4: interleave_low (2nd vec, 4th vec) The output for the first stage is: I1: 0 16 1 17 2 18 3 19 I2: 4 20 5 21 6 22 7 23 I3: 8 24 9 25 10 26 11 27 I4: 12 28 13 29 14 30 15 31 The output of the second stage, i.e. the final result is: I1: 0 8 16 24 1 9 17 25 I2: 2 10 18 26 3 11 19 27 I3: 4 12 20 28 5 13 21 29 I4: 6 14 22 30 7 15 23 31.
References exact_log2(), gcc_assert, gimple_build_assign(), i, data_reference::indices, make_temp_ssa_name(), NULL, pow2p_hwi(), STMT_VINFO_VECTYPE, poly_int< N, C >::to_constant(), TYPE_VECTOR_SUBPARTS(), vect_finish_stmt_generation(), and vect_gen_perm_mask_checked().
Referenced by vectorizable_store().
PHI is either a scalar reduction phi or a scalar induction phi. Return the initial value of the variable on entry to the containing loop.
References gcc_assert, gimple_bb(), basic_block_def::loop_father, loop_preheader_edge(), and PHI_ARG_DEF_FROM_EDGE.
Referenced by info_for_reduction(), vect_transform_cycle_phi(), vectorizable_nonlinear_induction(), and vectorizable_reduction().
|
inline |
Return pow2 (X).
References i.
|
extern |
For the information recorded in LOOP_VINFO prepare the loop for peeling by masking. This involves calculating the number of iterations to be peeled and then aligning all memory references appropriately.
References build_int_cst(), dump_enabled_p(), dump_printf_loc(), fold_convert, force_gimple_operand(), gcc_assert, get_misalign_in_elems(), gimple_seq_add_seq(), gsi_insert_seq_on_edge_immediate(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_MASK_SKIP_NITERS, LOOP_VINFO_NITERS, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_VECT_FACTOR, MSG_NOTE, NULL, NULL_TREE, TREE_TYPE, vect_location, vect_update_inits_of_drs(), and vect_use_loop_mask_for_alignment_p().
Referenced by vect_transform_loop().
|
extern |
Function vect_prune_runtime_alias_test_list. Prune a list of ddrs to be tested at run-time by versioning for alias. Merge several alias checks into one if possible. Return FALSE if the resulting list of ddrs is longer than allowed by PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE.
References hash_set< KeyId, Lazy, Traits >::add(), count, DDR_A, DDR_B, DDR_OBJECT_A, DDR_OBJECT_B, dependence_distance_ge_vf(), dr_vec_info::dr, DR_BASE_ADDRESS, DR_GROUP_FIRST_ELEMENT, dr_known_forward_stride_p(), DR_OFFSET, DR_REF, DR_STEP, dump_dec(), dump_enabled_p(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, opt_result::failure_at(), FOR_EACH_VEC_ELT, get_later_stmt(), i, index_in_loop_nest(), known_eq, least_bit_hwi(), vec_info::lookup_dr(), loop_cost_model(), loop_depth(), LOOP_VINFO_CHECK_NONZERO, LOOP_VINFO_CHECK_UNEQUAL_ADDRS, LOOP_VINFO_COMP_ALIAS_DDRS, LOOP_VINFO_LOOP, LOOP_VINFO_LOOP_NEST, LOOP_VINFO_MAY_ALIAS_DDRS, LOOP_VINFO_NITERS, LOOP_VINFO_VECT_FACTOR, MSG_NOTE, loop::num, operand_equal_p(), poly_int_tree_p(), prune_runtime_alias_test_list(), dr_with_seg_len_pair_t::REORDERED, size_int, size_zero_node, dr_vec_info::stmt, STMT_VINFO_DR_INFO, opt_result::success(), TREE_CODE, vect_check_lower_bound(), vect_compile_time_alias(), VECT_COST_MODEL_CHEAP, VECT_COST_MODEL_VERY_CHEAP, vect_location, vect_preserves_scalar_order_p(), vect_small_gap_p(), vect_vfa_access_size(), vect_vfa_align(), vect_vfa_segment_size(), vectorizable_with_step_bound_p(), and dr_with_seg_len_pair_t::WELL_ORDERED.
Referenced by vect_analyze_loop_2().
|
extern |
If the region we're going to vectorize is reached, all unconditional data references occur at least once. We can therefore pool the base alignment guarantees from each unconditional reference. Do this by going through all the data references in VINFO and checking whether the containing statement makes the reference unconditionally. If so, record the alignment of the base address in VINFO so that it can be used for all other references with the same base.
References vec_info_shared::datarefs, DR_INNERMOST, DR_IS_CONDITIONAL_IN_STMT, dyn_cast(), vec_info::lookup_dr(), LOOP_VINFO_LOOP, nested_in_vect_loop_p(), NULL, vec_info::shared, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_VECTORIZABLE, and vect_record_base_alignment().
Referenced by vect_analyze_data_refs_alignment(), and vect_slp_analyze_bb_1().
|
extern |
RESULT_CHAIN contains the output of a group of grouped loads that were generated as part of the vectorization of STMT_INFO. Assign the statement for each vector to the associated scalar statement.
References DR_GROUP_FIRST_ELEMENT, DR_GROUP_GAP, DR_GROUP_NEXT_ELEMENT, FOR_EACH_VEC_ELT, i, SSA_NAME_DEF_STMT, and STMT_VINFO_VEC_STMTS.
Referenced by vect_transform_grouped_load(), and vectorizable_load().
|
extern |
Record that LOOP_VINFO would need LENS to contain a sequence of NVECTORS lengths for controlling an operation on VECTYPE. The operation splits each element of VECTYPE into FACTOR separate subelements, measuring the length as a number of these subelements.
References rgroup_controls::factor, gcc_assert, LOOP_VINFO_VECT_FACTOR, rgroup_controls::max_nscalars_per_iter, rgroup_controls::type, and TYPE_VECTOR_SUBPARTS().
Referenced by check_load_store_for_partial_vectors(), vect_reduction_update_partial_vector_usage(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), and vectorizable_operation().
|
extern |
Record that a fully-masked version of LOOP_VINFO would need MASKS to contain a sequence of NVECTORS masks that each control a vector of type VECTYPE. If SCALAR_MASK is nonnull, the fully-masked loop would AND these vector masks with the vector version of SCALAR_MASK.
References hash_set< KeyId, Lazy, Traits >::add(), gcc_assert, vec_loop_masks::mask_set, and _loop_vec_info::scalar_cond_masked_set.
Referenced by check_load_store_for_partial_vectors(), vect_reduction_update_partial_vector_usage(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_operation(), and vectorizable_simd_clone_call().
|
inline |
If STMT_INFO describes a reduction, return the vect_reduction_type of the reduction it describes, otherwise return -1.
References dyn_cast(), info_for_reduction(), STMT_VINFO_REDUC_DEF, and STMT_VINFO_REDUC_TYPE.
|
extern |
Remove a group of stores (for SLP or interleaving), free their stmt_vec_info.
References DR_GROUP_NEXT_ELEMENT, vec_info::remove_stmt(), and vect_orig_stmt().
bool vect_rgroup_iv_might_wrap_p | ( | loop_vec_info | loop_vinfo, |
rgroup_controls * | rgc ) |
For the given rgroup_controls RGC, check whether an induction variable would ever hit a value that produces a set of all-false masks or zero lengths before wrapping around. Return true if it's possible to wrap around before hitting the desirable value, otherwise return false.
References rgroup_controls::factor, LOOP_VINFO_RGROUP_COMPARE_TYPE, rgroup_controls::max_nscalars_per_iter, wi::min_precision(), TYPE_PRECISION, UNSIGNED, and vect_iv_limit_for_partial_vectors().
Referenced by vect_estimate_min_profitable_iters(), and vect_set_loop_condition_partial_vectors().
|
extern |
Generate vector code for SLP_INSTANCES in the loop/basic block.
References DR_IS_WRITE, dump_enabled_p(), dump_printf_loc(), FOR_EACH_VEC_ELT, i, is_a(), is_empty(), MSG_NOTE, NULL, vec_info::remove_stmt(), SLP_INSTANCE_ROOT_STMTS, SLP_INSTANCE_TREE, SLP_TREE_REPRESENTATIVE, SLP_TREE_SCALAR_STMTS, STMT_VINFO_DATA_REF, vect_location, vect_orig_stmt(), vect_print_slp_graph(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), and vectorize_slp_instance_root_stmt().
Referenced by vect_slp_region(), and vect_transform_loop().
|
extern |
Function prototypes.
Simple loop peeling and versioning utilities for vectorizer's purposes - in tree-vect-loop-manip.cc.
If we're using fully-masked loops, make LOOP iterate: N == (NITERS - 1) / STEP + 1 times. When NITERS is zero, this is equivalent to making the loop execute (1 << M) / STEP times, where M is the precision of NITERS. NITERS_MAYBE_ZERO is true if this last case might occur. If we're not using fully-masked loops, make LOOP iterate: N == (NITERS - STEP) / STEP + 1 times, where NITERS is known to be outside the range [1, STEP - 1]. This is equivalent to making the loop execute NITERS / STEP times when NITERS is nonzero and (1 << M) / STEP times otherwise. NITERS_MAYBE_ZERO again indicates whether this last case might occur. If FINAL_IV is nonnull, it is an SSA name that should be set to N * STEP on exit from the loop. Assumption: the exit-condition of LOOP is the last stmt in the loop.
References dump_enabled_p(), dump_printf_loc(), get_loop_exit_condition(), gsi_for_stmt(), gsi_remove(), vec_info::lookup_stmt(), LOOP_VINFO_PARTIAL_VECTORS_STYLE, LOOP_VINFO_USING_PARTIAL_VECTORS_P, MSG_NOTE, vec_info::remove_stmt(), vect_location, vect_partial_vectors_avx512, vect_set_loop_condition_normal(), vect_set_loop_condition_partial_vectors(), and vect_set_loop_condition_partial_vectors_avx512().
Referenced by vect_do_peeling(), and vect_transform_loop().
|
extern |
Function vect_setup_realignment This function is called when vectorizing an unaligned load using the dr_explicit_realign[_optimized] scheme. This function generates the following code at the loop prolog: p = initial_addr; x msq_init = *(floor(p)); # prolog load realignment_token = call target_builtin; loop: x msq = phi (msq_init, ---) The stmts marked with x are generated only for the case of dr_explicit_realign_optimized. The code above sets up a new (vector) pointer, pointing to the first location accessed by STMT_INFO, and a "floor-aligned" load using that pointer. It also generates code to compute the "realignment-token" (if the relevant target hook was defined), and creates a phi-node at the loop-header bb whose arguments are the result of the prolog-load (created by this function) and the result of a load that takes place in the loop (to be created by the caller to this function). For the case of dr_explicit_realign_optimized: The caller to this function uses the phi-result (msq) to create the realignment code inside the loop, and sets up the missing phi argument, as follows: loop: msq = phi (msq_init, lsq) lsq = *(floor(p')); # load in loop result = realign_load (msq, lsq, realignment_token); For the case of dr_explicit_realign: loop: msq = *(floor(p)); # load in loop p' = p + (VS-1); lsq = *(floor(p')); # load in loop result = realign_load (msq, lsq, realignment_token); Input: STMT_INFO - (scalar) load stmt to be vectorized. This load accesses a memory location that may be unaligned. BSI - place where new code is to be inserted. ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes is used. Output: REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load target hook, if defined. Return value - the result of the loop-header phi node.
References add_phi_arg(), build2(), build_int_cst(), copy_ssa_name(), create_phi_node(), dr_info::dr, dr_explicit_realign, dr_explicit_realign_optimized, DR_REF, DR_TARGET_ALIGNMENT, dyn_cast(), fold_build2, gcc_assert, get_virtual_phi(), gimple_assign_lhs(), gimple_assign_set_lhs(), gimple_bb(), gimple_build_assign(), gimple_build_call(), gimple_call_lhs(), gimple_call_return_type(), gimple_call_set_lhs(), gimple_set_vuse(), gimple_vuse(), gsi_insert_before(), gsi_insert_on_edge_immediate(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), loop::header, loop::inner, loop_preheader_edge(), LOOP_VINFO_LOOP, make_ssa_name(), nested_in_vect_loop_p(), NULL, NULL_TREE, PHI_ARG_DEF_FROM_EDGE, reference_alias_ptr_type(), size_zero_node, STMT_VINFO_DR_INFO, STMT_VINFO_DR_STEP, STMT_VINFO_VECTYPE, targetm, TREE_CODE, tree_int_cst_compare(), TREE_READONLY, TREE_TYPE, UNKNOWN_LOCATION, vect_copy_ref_info(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), and vect_create_destination_var().
Referenced by vectorizable_load().
|
extern |
Function vect_slp_analyze_instance_alignment Analyze the alignment of the data-references in the SLP instance. Return FALSE if a data reference is found that cannot be vectorized.
References DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, and vect_slp_analyze_node_alignment().
Referenced by vect_slp_analyze_bb_1().
|
extern |
Function vect_analyze_data_ref_dependences. Examine all the data references in the basic-block, and make sure there do not exist any data dependences between them. Set *MAX_VF according to the maximum vectorization factor the data dependences allow.
References absu_hwi(), create_data_ref(), DR_BASE_ADDRESS, DR_GROUP_FIRST_ELEMENT, DR_GROUP_SIZE, DR_INIT, DR_OFFSET, DR_REF, DR_STEP, DUMP_VECT_SCOPE, free_data_ref(), gimple_bb(), gimple_set_visited(), basic_block_def::loop_father, loop_outer(), loop_preheader_edge(), maybe_gt, NULL, operand_equal_p(), ranges_overlap_p(), slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_LOADS, SLP_INSTANCE_TREE, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, data_reference::stmt, STMT_VINFO_DATA_REF, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_STMT, TREE_CODE, TREE_INT_CST_LOW, tree_to_poly_uint64(), TREE_TYPE, TYPE_SIZE_UNIT, vect_find_last_scalar_stmt_in_slp(), vect_slp_analyze_load_dependences(), vect_slp_analyze_store_dependences(), vect_stmt_dominates_stmt_p(), and vNULL.
Referenced by vect_slp_analyze_bb_1().
Analyze statements in SLP instances of VINFO. Return true if the operations are supported.
References hash_set< KeyId, Lazy, Traits >::add(), add_stmt_costs(), hash_set< KeyId, Lazy, Traits >::contains(), dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), gimple_assign_rhs1(), i, is_a(), hash_set< KeyId, Lazy, Traits >::is_empty(), is_empty(), MSG_NOTE, NULL, slp_inst_kind_bb_reduc, slp_inst_kind_ctor, slp_inst_kind_gcond, SLP_INSTANCE_KIND, SLP_INSTANCE_ROOT_STMTS, SLP_INSTANCE_TREE, vec_info::slp_instances, SLP_TREE_DEF_TYPE, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, TREE_TYPE, useless_type_conversion_p(), vect_bb_slp_mark_live_stmts(), vect_free_slp_instance(), vect_internal_def, vect_location, vect_slp_analyze_node_operations(), vect_slp_prune_covered_roots(), vectorizable_bb_reduc_epilogue(), vectorizable_early_exit(), and visited.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
Return the SLP node child index for operand OP of STMT.
References gcc_unreachable, i, and vect_get_operand_map().
Referenced by vect_check_store_rhs(), vectorizable_load(), vectorizable_simd_clone_call(), and vectorizable_store().
|
extern |
References NULL, slp_first_node, and slp_tree_pool.
Main entry for the BB vectorizer. Analyze and transform BB, returns true if anything in the basic-block was vectorized.
References BASIC_BLOCK_FOR_FN, bitmap_set_bit, CDI_DOMINATORS, dominated_by_p(), loop::dont_vectorize, dump_enabled_p(), dump_printf_loc(), ECF_RETURNS_TWICE, ENTRY_BLOCK_PTR_FOR_FN, EXIT_BLOCK, first_stmt(), flow_loop_nested_p(), free(), gimple_call_flags(), gimple_get_lhs(), gsi_last_bb(), loop::header, i, basic_block_def::index, is_ctrl_altering_stmt(), last, basic_block_def::loop_father, MSG_MISSED_OPTIMIZATION, n_basic_blocks_for_fn, NULL, loop::num, r, rev_post_order_and_mark_dfs_back_seme(), safe_dyn_cast(), single_succ_edge(), vect_location, and vect_slp_bbs().
|
extern |
Special entry for the BB vectorizer. Analyze and transform a single if-converted BB, with ORIG_LOOP's body being the non-if-converted representation. Returns true if anything in the basic-block was vectorized.
References vect_slp_bbs().
Referenced by try_vectorize_loop_1().
|
extern |
In tree-vect-slp.cc.
References slp_tree_pool.
Returns true if S1 dominates S2.
References CDI_DOMINATORS, dominated_by_p(), gimple_bb(), gimple_uid(), gsi_end_p(), gsi_for_stmt(), gsi_next(), gsi_prev(), and gsi_stmt().
Referenced by vect_bb_slp_mark_live_stmts(), vect_phi_first_order_recurrence_p(), vect_schedule_slp_node(), vect_slp_analyze_instance_dependence(), and vectorizable_live_operation().
|
inline |
If STMT_INFO has been replaced by a pattern statement, return the replacement statement, otherwise return STMT_INFO itself.
References STMT_VINFO_IN_PATTERN_P, and STMT_VINFO_RELATED_STMT.
Referenced by maybe_push_to_hybrid_worklist(), update_epilogue_loop_vinfo(), vec_slp_has_scalar_use(), vect_analyze_early_break_dependences(), vect_analyze_loop_2(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_bb_slp_mark_live_stmts(), vect_build_slp_instance(), vect_compute_single_scalar_iteration_cost(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_dissolve_slp_only_groups(), vect_fixup_scalar_cycles_with_patterns(), vect_get_internal_def(), vect_get_vec_defs_for_operand(), vect_is_simple_use(), vect_mark_slp_stmts(), vect_recog_cond_store_pattern(), vect_slp_check_for_roots(), vect_transform_cycle_phi(), vect_update_vf_for_slp(), vectorizable_live_operation(), vectorizable_recurr(), and vectorizable_reduction().
|
extern |
Return FN if vec_{mask_,mask_len_}store_lanes is available for COUNT vectors of type VECTYPE. MASKED_P says whether the masked form is needed.
References count, and vect_lanes_optab_supported_p().
Referenced by check_load_store_for_partial_vectors(), get_group_load_store_type(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_slp_instance(), and vect_slp_prefer_store_lanes_p().
|
extern |
Return whether the data reference DR_INFO is supported with respect to its alignment. If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even if it is aligned, i.e., check if it is possible to vectorize it with different alignment.
References can_implement_p(), dr_info::dr, dr_aligned, dr_explicit_realign, dr_explicit_realign_optimized, DR_GROUP_FIRST_ELEMENT, DR_GROUP_SIZE, DR_IS_READ, DR_MISALIGNMENT_UNKNOWN, DR_REF, dr_safe_speculative_read_required(), DR_STEP, dr_unaligned_supported, dr_unaligned_unsupported, dyn_cast(), GET_MODE_SIZE(), gimple_call_internal_fn(), gimple_call_internal_p(), LOOP_VINFO_LOOP, LOOP_VINFO_VECT_FACTOR, nested_in_vect_loop_p(), not_size_aligned(), NULL, STMT_SLP_TYPE, STMT_VINFO_GROUPED_ACCESS, targetm, TREE_INT_CST_LOW, TREE_TYPE, TYPE_MODE, and TYPE_VECTOR_SUBPARTS().
Referenced by get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), vect_vfa_access_size(), vectorizable_load(), and vectorizable_store().
Return TRUE if CODE (a shift operation) is supported for SCALAR_TYPE either as shift by a scalar or by a vector.
References can_implement_p(), get_vectype_for_scalar_type(), optab_for_tree_code(), optab_scalar, optab_vector, and TYPE_MODE.
Referenced by vect_synth_mult_by_constant().
|
extern |
Transform phase of a cycle PHI.
References add_phi_arg(), as_a(), build_vector_from_val(), build_vector_type(), build_vector_type_for_mode(), create_phi_node(), EXTRACT_LAST_REDUCTION, FOLD_LEFT_REDUCTION, gcc_assert, get_initial_defs_for_reduction(), gimple_convert(), gimple_phi_result(), GSI_CONTINUE_LINKING, gsi_end_p(), gsi_insert_seq_after(), gsi_insert_seq_on_edge_immediate(), gsi_last_bb(), gsi_prev(), gsi_stmt(), loop::header, i, info_for_reduction(), loop::inner, INTEGER_INDUC_COND_REDUCTION, integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, _loop_vec_info::main_loop_edge, nested_in_vect_loop_p(), neutral_op_for_reduction(), NULL, NULL_TREE, num_phis(), operand_equal_p(), _slp_tree::push_vec_def(), REDUC_GROUP_FIRST_ELEMENT, _loop_vec_info::skip_this_loop_edge, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_SCALAR_STMTS, stmt_ends_bb_p(), STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_REDUC_CODE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT, STMT_VINFO_REDUC_TYPE, STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL, STMT_VINFO_VECTYPE, TREE_CODE, tree_int_cst_lt(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), UNKNOWN_LOCATION, useless_type_conversion_p(), vect_create_destination_var(), vect_create_partial_epilog(), vect_find_reusable_accumulator(), vect_get_main_loop_result(), vect_get_slp_defs(), vect_phi_initial_value(), vect_reduction_def, and vect_stmt_to_vectorize().
Referenced by vect_transform_stmt().
|
extern |
Function vect_transform_grouped_load. Given a chain of input interleaved data-refs (in DR_CHAIN), build statements to perform their permutation and ascribe the result vectorized statements to the scalar statements.
References pow2p_hwi(), STMT_VINFO_VECTYPE, targetm, TYPE_MODE, vect_permute_load_chain(), vect_record_grouped_load_vectors(), vect_shift_permute_load_chain(), and vNULL.
Referenced by vectorizable_load().
|
extern |
|
extern |
Drive for loop transformation stage.
Function vect_transform_loop. The analysis phase has determined that the loop is vectorizable. Vectorize the loop - created vectorized stmts to replace the scalar stmts in the loop, and update the loop exit condition. Returns scalar epilogue loop if any.
References advance(), loop::any_estimate, loop::any_likely_upper_bound, loop::any_upper_bound, profile_count::apply_probability(), build_int_cst(), build_one_cst(), build_zero_cst(), vec_info_shared::check_datarefs(), conditional_internal_fn_code(), basic_block_def::count, loop::dont_vectorize, dump_enabled_p(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), EDGE_COUNT, fold_build2, FOR_EACH_VEC_ELT, loop::force_vectorize, gcc_assert, GET_MODE_NAME, gimple_build_assign(), gimple_call_arg(), gimple_call_builtin_p(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_call_lhs(), gimple_call_num_args(), gimple_clobber_p(), gimple_get_lhs(), gimple_seq_empty_p(), gsi_after_labels(), GSI_CONTINUE_LINKING, gsi_end_p(), gsi_insert_seq_before(), gsi_next(), gsi_remove(), gsi_replace(), gsi_start_bb(), gsi_stmt(), loop::header, i, loop::inner, integer_onep(), poly_int< N, C >::is_constant(), known_eq, vec_info::lookup_stmt(), loop_niters_no_overflow(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING, LOOP_VINFO_BBS, LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_DRS_ADVANCED_BY, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_INT_NITERS, LOOP_VINFO_INV_PATTERN_DEF_SEQ, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERS_UNCHANGED, LOOP_VINFO_NITERSM1, LOOP_VINFO_ORIG_LOOP_INFO, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_SCALAR_LOOP_SCALING, LOOP_VINFO_SLP_INSTANCES, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, LOOP_VINFO_VERSIONING_THRESHOLD, maybe_flat_loop_profile(), move_early_exit_stmts(), MSG_NOTE, loop::nb_iterations_estimate, loop::nb_iterations_likely_upper_bound, loop::nb_iterations_upper_bound, NULL, NULL_TREE, loop::num_nodes, _loop_vec_info::peeling_for_alignment, _loop_vec_info::peeling_for_gaps, basic_block_def::preds, release_defs(), vec_info::remove_stmt(), loop::safelen, 
scale_loop_frequencies(), scale_profile_for_vect_loop(), vec_info::shared, si, loop::simduid, single_pred_p(), vec_info::slp_instances, split_edge(), split_loop_exit_edge(), STMT_VINFO_LIVE_P, STMT_VINFO_RELEVANT_P, TREE_TYPE, wi::udiv_ceil(), wi::udiv_floor(), wi::umin(), unlink_stmt_vdef(), loop::unroll, unshare_expr(), update_epilogue_loop_vinfo(), vect_apply_runtime_profitability_check_p(), vect_build_loop_niters(), vect_do_peeling(), vect_free_slp_instance(), vect_gen_vector_loop_niters(), vect_location, vect_loop_versioning(), vect_prepare_for_masked_peels(), vect_schedule_slp(), vect_set_loop_condition(), vect_use_loop_mask_for_alignment_p(), vect_vf_for_cost(), vec_info::vector_mode, and VECTOR_TYPE_P.
Referenced by vect_transform_loops().
|
extern |
Transform the definition stmt STMT_INFO of a reduction PHI backedge value.
References as_a(), build_vect_cond_expr(), canonicalize_code(), gimple_match_op::code, commutative_binary_op_p(), conditional_internal_fn_code(), count, dump_enabled_p(), dump_printf_loc(), FOLD_LEFT_REDUCTION, gcc_assert, gcc_unreachable, get_conditional_internal_fn(), gimple_build_assign(), gimple_build_call_internal(), gimple_call_set_lhs(), gimple_call_set_nothrow(), gimple_extract_op(), gimple_get_lhs(), gimple_set_lhs(), i, info_for_reduction(), loop::inner, internal_fn_else_index(), code_helper::is_internal_fn(), code_helper::is_tree_code(), lane_reducing_op_p(), LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASKS, make_ssa_name(), MSG_NOTE, nested_in_vect_loop_p(), NULL_TREE, loop::num, gimple_match_op::num_ops, gimple_match_op::ops, _slp_tree::push_vec_def(), SSA_NAME_DEF_STMT, SSA_NAME_IS_DEFAULT_DEF, STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_FN, STMT_VINFO_REDUC_IDX, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_VECTYPE, TREE_CODE, truth_type_for(), gimple_match_op::type, use_mask_by_cond_expr_p(), vect_create_destination_var(), vect_double_reduction_def, vect_emulate_mixed_dot_prod(), vect_finish_stmt_generation(), vect_get_loop_mask(), vect_get_num_copies(), vect_get_vec_defs(), vect_is_emulated_mixed_dot_prod(), vect_location, vect_orig_stmt(), and vectorize_fold_left_reduction().
Referenced by vect_transform_stmt().
|
extern |
References NULL.
|
extern |
Function vect_transform_stmt. Create a vectorized stmt to replace STMT_INFO, and insert it at GSI.
References as_a(), assignment_vec_info_type, call_simd_clone_vec_info_type, call_vec_info_type, can_vectorize_live_stmts(), comparison_vec_info_type, condition_vec_info_type, cycle_phi_info_type, DR_GROUP_FIRST_ELEMENT, DR_GROUP_SIZE, DR_GROUP_STORE_COUNT, dump_enabled_p(), dump_printf_loc(), gcc_assert, gcc_unreachable, induc_vec_info_type, lc_phi_info_type, _slp_tree::ldst_lanes, load_vec_info_type, loop_exit_ctrl_vec_info_type, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, op_vec_info_type, phi_info_type, PURE_SLP_STMT, recurr_info_type, reduc_vec_info_type, shift_vec_info_type, SLP_TREE_CODE, SLP_TREE_VECTYPE, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_LIVE_P, STMT_VINFO_TYPE, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, store_vec_info_type, type_conversion_vec_info_type, type_demotion_vec_info_type, type_promotion_vec_info_type, vect_location, vect_transform_cycle_phi(), vect_transform_lc_phi(), vect_transform_reduction(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
Referenced by vect_schedule_slp_node().
|
extern |
Function vect_update_inits_of_drs Apply vect_update_inits_of_dr to all accesses in LOOP_VINFO. CODE and NITERS are as for vect_update_inits_of_dr.
References DUMP_VECT_SCOPE, fold_convert, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, sizetype, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, TREE_TYPE, types_compatible_p(), and vect_update_init_of_dr().
Referenced by update_epilogue_loop_vinfo(), vect_do_peeling(), and vect_prepare_for_masked_peels().
|
inline |
Update maximum unit count *MAX_NUNITS so that it accounts for NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything.
Referenced by vect_build_slp_instance(), vect_build_slp_tree(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_record_max_nunits(), and vect_update_max_nunits().
|
inline |
Update maximum unit count *MAX_NUNITS so that it accounts for the number of units in vector type VECTYPE. *MAX_NUNITS can be 1 if we haven't yet recorded any vector types.
References TYPE_VECTOR_SUBPARTS(), and vect_update_max_nunits().
|
inline |
Return true if the loop described by LOOP_VINFO is fully-masked and if the first iteration should use a partial mask in order to achieve alignment.
References LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_NON_LINEAR_IV, and LOOP_VINFO_PEELING_FOR_ALIGNMENT.
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_prepare_for_masked_peels(), and vect_transform_loop().
|
inline |
Return true if STMT_INFO should produce a vector mask type rather than a normal nonmask type.
Referenced by integer_type_for_mask(), vect_get_vector_types_for_stmt(), and vect_init_pattern_stmt().
|
inline |
Return the vectorization factor that should be used for costing purposes while vectorizing the loop described by LOOP_VINFO. Pick a reasonable estimate if the vectorization factor isn't known at compile time.
References estimated_poly_value(), and LOOP_VINFO_VECT_FACTOR.
Referenced by vect_analyze_loop_costing(), vect_apply_runtime_profitability_check_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), and vect_transform_loop().
|
extern |
Check to see if the current early break given in STMT_INFO is valid for vectorization.
References as_a(), build_zero_cst(), direct_internal_fn_supported_p(), direct_optab_handler(), dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), EDGE_SUCC, flow_bb_inside_loop_p(), gcc_assert, gcc_unreachable, gimple_assign_lhs(), gimple_bb(), gimple_build_assign(), gimple_cond_code(), gimple_cond_set_condition(), gsi_last_bb(), i, is_a(), is_empty(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_FULLY_WITH_LENGTH_P, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASKS, make_temp_ssa_name(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, NULL_TREE, OPTIMIZE_FOR_SPEED, prepare_vec_mask(), SLP_TREE_CHILDREN, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_RELEVANT_P, STMT_VINFO_STMT, STMT_VINFO_VEC_STMTS, TREE_TYPE, TYPE_MODE, update_stmt(), vect_condition_def, vect_finish_stmt_generation(), vect_gen_loop_len_mask(), vect_get_loop_mask(), vect_get_num_copies(), vect_is_simple_use(), vect_location, vect_orig_stmt(), vect_record_loop_len(), vect_record_loop_mask(), and vectorizable_comparison_1().
Referenced by vect_analyze_stmt(), vect_slp_analyze_operations(), vect_transform_stmt(), and vectorize_slp_instance_root_stmt().
|
extern |
Function vectorizable_induction. Check if STMT_INFO performs an induction computation that can be vectorized. If VEC_STMT is also passed, vectorize the induction PHI: create a vectorized phi to replace it, put it in VEC_STMT, and add it to the same basic block. Return true if STMT_INFO is vectorizable in this way.
References add_phi_arg(), as_a(), build_index_vector(), build_int_cst(), build_int_cstu(), build_nonstandard_integer_type(), build_real_from_wide(), build_vector_from_val(), build_vector_type(), build_zero_cst(), can_float_p(), CONSTANT_CLASS_P, create_phi_node(), dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), FLOAT_TYPE_P, flow_bb_inside_loop_p(), FOR_EACH_IMM_USE_FAST, FOR_EACH_VEC_ELT, gcc_assert, GET_MODE_BITSIZE(), get_same_sized_vectype(), gimple_bb(), gimple_build(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_convert(), gimple_get_lhs(), gimple_phi_arg_def(), gsi_after_labels(), GSI_CONTINUE_LINKING, gsi_for_stmt(), gsi_insert_seq_after(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, loop::header, i, induc_vec_info_type, loop::inner, insert_iv_increment(), integer_type_node, integer_zerop(), INTEGRAL_TYPE_P, poly_int< N, C >::is_constant(), is_gimple_debug(), least_common_multiple(), vec_info::lookup_stmt(), loop_latch_edge(), loop_preheader_edge(), LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASK_NITERS_PFA_OFFSET, LOOP_VINFO_MASK_SKIP_NITERS, LOOP_VINFO_USING_SELECT_VL_P, LOOP_VINFO_VECT_FACTOR, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, optab_default, PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, _slp_tree::push_vec_def(), record_stmt_cost(), SCALAR_FLOAT_TYPE_P, scalar_to_vec, SCALAR_TYPE_MODE, si, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_SCALAR_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, standard_iv_increment_position(), STMT_VINFO_DEF_TYPE, STMT_VINFO_LIVE_P, STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, STMT_VINFO_RELEVANT_P, STMT_VINFO_TYPE, target_supports_op_p(), TREE_CODE, TREE_TYPE, type_has_mode_precision_p(), TYPE_MODE, TYPE_VECTOR_SUBPARTS(), UNKNOWN_LOCATION, unshare_expr(), UNSIGNED, USE_STMT, useless_type_conversion_p(), vect_body, 
vect_get_loop_len(), vect_get_new_vect_var(), vect_get_slp_vect_def(), vect_induction_def, vect_location, vect_maybe_update_slp_op_vectype(), vect_prologue, vect_scalar_var, vect_simple_var, vect_step_op_add, vector_stmt, and vectorizable_nonlinear_induction().
Referenced by vect_analyze_stmt(), and vect_transform_stmt().
|
extern |
Check if STMT_INFO is a lane-reducing operation that can be vectorized in the context of LOOP_VINFO, and vector cost will be recorded in COST_VEC, and the analysis is for slp if SLP_NODE is not NULL. For a lane-reducing operation, the loop reduction path that it lies in may contain a normal operation, or another lane-reducing operation of a different input type size; for example: int sum = 0; for (i) { ... sum += d0[i] * d1[i]; // dot-prod <vector(16) char> sum += w[i]; // widen-sum <vector(16) char> sum += abs(s0[i] - s1[i]); // sad <vector(8) short> sum += n[i]; // normal <vector(4) int> ... } Vectorization factor is essentially determined by the operation whose input vectype has the most lanes ("vector(16) char" in the example), while we need to choose the input vectype with the least lanes ("vector(4) int" in the example) to determine the effective number of vector reduction PHIs.
References dump_enabled_p(), dump_printf(), dump_printf_loc(), gcc_assert, get_vectype_for_scalar_type(), gimple_assign_lhs(), gimple_assign_rhs_code(), gimple_num_ops(), i, INTEGRAL_TYPE_P, lane_reducing_stmt_p(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, MSG_MISSED_OPTIMIZATION, MSG_NOTE, record_stmt_cost(), reduc_vec_info_type, scalar_to_vec, STMT_VINFO_DEF_TYPE, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_IDX, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_TYPE, TREE_CODE_REDUCTION, TREE_TYPE, type_has_mode_precision_p(), vect_body, vect_get_num_copies(), vect_is_emulated_mixed_dot_prod(), vect_is_simple_use(), vect_location, vect_maybe_update_slp_op_vectype(), vect_orig_stmt(), vect_prologue, vect_reduction_def, vect_reduction_update_partial_vector_usage(), vector_stmt, and VECTORIZABLE_CYCLE_DEF.
Referenced by vect_analyze_stmt().
|
extern |
Vectorizes LC PHIs.
References dump_enabled_p(), dump_printf_loc(), gimple_phi_num_args(), is_a(), lc_phi_info_type, MSG_MISSED_OPTIMIZATION, SLP_TREE_CHILDREN, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, vect_double_reduction_def, vect_internal_def, vect_location, and vect_maybe_update_slp_op_vectype().
Referenced by vect_analyze_stmt().
|
extern |
Function vectorizable_live_operation. STMT_INFO computes a value that is used outside the loop. Check if it can be supported.
References as_a(), bitsize_int, build3(), build_nonstandard_integer_type(), build_zero_cst(), can_vec_extract_var_idx_p(), direct_internal_fn_supported_p(), dump_enabled_p(), dump_printf_loc(), dyn_cast(), EXTRACT_LAST_REDUCTION, flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_convert, FOLD_LEFT_REDUCTION, FOR_EACH_IMM_USE_ON_STMT, FOR_EACH_IMM_USE_STMT, force_gimple_operand(), gcc_assert, get_loop_exit_edges(), gimple_bb(), gimple_build(), gimple_build_assign(), gimple_convert(), gimple_get_lhs(), gimple_phi_arg_edge(), gimple_phi_result(), gsi_after_labels(), gsi_for_stmt(), gsi_insert_before(), gsi_insert_seq_after(), gsi_insert_seq_before(), GSI_SAME_STMT, info_for_reduction(), int_const_binop(), is_a(), is_gimple_debug(), is_simple_and_all_uses_invariant(), vec_info::lookup_stmt(), loop_exit_edge_p(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_FULLY_MASKED_P, LOOP_VINFO_FULLY_WITH_LENGTH_P, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LENS, LOOP_VINFO_LOOP, LOOP_VINFO_MASK_NITERS_PFA_OFFSET, LOOP_VINFO_MASKS, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, NULL_TREE, OPTIMIZE_FOR_SPEED, phi_arg_index_from_use(), POINTER_TYPE_P, PURE_SLP_STMT, record_stmt_cost(), REDUC_GROUP_FIRST_ELEMENT, remove_phi_node(), SET_USE, si, sizetype, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, SSA_NAME_IS_DEFAULT_DEF, SSA_NAME_OCCURS_IN_ABNORMAL_PHI, STMT_VINFO_DEF_TYPE, STMT_VINFO_LIVE_P, STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_TYPE, STMT_VINFO_RELEVANT_P, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, tree_to_uhwi(), TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), update_stmt(), vec_to_scalar, vect_create_epilog_for_reduction(), vect_epilogue, vect_get_num_copies(), vect_induction_def, vect_location, vect_orig_stmt(), vect_record_loop_len(), vect_record_loop_mask(), vect_stmt_dominates_stmt_p(), vect_stmt_to_vectorize(), 
VECTOR_BOOLEAN_TYPE_P, vector_element_bits_tree(), and vectorizable_live_operation_1().
Referenced by can_vectorize_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_schedule_slp_node(), and vect_slp_analyze_node_operations_1().
|
extern |
Vectorizes PHIs.
References add_phi_arg(), as_a(), create_phi_node(), dump_enabled_p(), dump_printf_loc(), FOR_EACH_VEC_ELT, gcc_assert, gimple_bb(), gimple_phi_arg_edge(), gimple_phi_num_args(), gimple_phi_result(), i, is_a(), is_empty(), MSG_MISSED_OPTIMIZATION, phi_info_type, _slp_tree::push_vec_def(), record_stmt_cost(), SLP_TREE_CHILDREN, SLP_TREE_DEF_TYPE, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VEC_DEFS, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, UNKNOWN_LOCATION, useless_type_conversion_p(), vect_body, vect_create_destination_var(), vect_get_slp_defs(), vect_internal_def, vect_location, vect_maybe_update_slp_op_vectype(), and vector_stmt.
Referenced by vect_analyze_stmt(), and vect_transform_stmt().
|
extern |
Vectorizes first order recurrences. An overview of the transformation is described below. Suppose we have the following loop. int t = 0; for (int i = 0; i < n; ++i) { b[i] = a[i] - t; t = a[i]; } There is a first-order recurrence on 'a'. For this loop, the scalar IR looks (simplified) like: scalar.preheader: init = 0; scalar.body: i = PHI <0(scalar.preheader), i+1(scalar.body)> _2 = PHI <init(scalar.preheader), _1(scalar.body)> _1 = a[i] b[i] = _1 - _2 if (i < n) goto scalar.body In this example, _2 is a recurrence because its value depends on the previous iteration. We vectorize this as (VF = 4) vector.preheader: vect_init = vect_cst(..., ..., ..., 0) vector.body i = PHI <0(vector.preheader), i+4(vector.body)> vect_1 = PHI <vect_init(vector.preheader), v2(vector.body)> vect_2 = a[i, i+1, i+2, i+3]; vect_3 = vec_perm (vect_1, vect_2, { 3, 4, 5, 6 }) b[i, i+1, i+2, i+3] = vect_2 - vect_3 if (..) goto vector.body In this function, vectorizable_recurr, we code generate both the vector PHI node and the permute since those together compute the vectorized value of the scalar PHI. We do not yet have the backedge value to fill in there nor into the vec_perm. Those are filled in vect_schedule_scc. TODO: Since the scalar loop does not have a use of the recurrence outside of the loop the natural way to implement peeling via vectorizing the live value doesn't work. For now peeling of loops with a recurrence is not implemented. For SLP the supported cases are restricted to those requiring a single vector recurrence PHI.
References add_phi_arg(), as_a(), build_vector_from_val(), can_vec_perm_const_p(), create_phi_node(), dump_enabled_p(), dump_printf_loc(), FOR_EACH_VEC_ELT, gimple_bb(), gimple_build_assign(), gimple_convert(), gimple_phi_result(), gsi_for_stmt(), gsi_insert_seq_on_edge_immediate(), gsi_next(), i, is_a(), vec_info::lookup_def(), loop_latch_edge(), loop_preheader_edge(), LOOP_VINFO_LOOP, make_ssa_name(), maybe_gt, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, NULL_TREE, PHI_ARG_DEF_FROM_EDGE, _slp_tree::push_vec_def(), record_stmt_cost(), recurr_info_type, scalar_to_vec, SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_VECTYPE, SSA_NAME_DEF_STMT, STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, STMT_VINFO_VEC_STMTS, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), types_compatible_p(), UNKNOWN_LOCATION, useless_type_conversion_p(), vect_body, vect_finish_stmt_generation(), vect_first_order_recurrence, vect_gen_perm_mask_checked(), vect_get_new_vect_var(), vect_get_num_copies(), vect_init_vector(), vect_location, vect_maybe_update_slp_op_vectype(), vect_prologue, vect_simple_var, vect_stmt_to_vectorize(), and vector_stmt.
Referenced by vect_analyze_stmt(), and vect_transform_stmt().
|
extern |
Function vectorizable_reduction. Check if STMT_INFO performs a reduction operation that can be vectorized. If VEC_STMT is also passed, vectorize STMT_INFO: create a vectorized stmt to replace it, put it in VEC_STMT, and insert it at GSI. Return true if STMT_INFO is vectorizable in this way. This function also handles reduction idioms (patterns) that have been recognized in advance during vect_pattern_recog. In this case, STMT_INFO may be of this form: X = pattern_expr (arg0, arg1, ..., X) and its STMT_VINFO_RELATED_STMT points to the last stmt in the original sequence that had been detected and replaced by the pattern-stmt (STMT_INFO). This function also handles reduction of condition expressions, for example: for (int i = 0; i < N; i++) if (a[i] < value) last = a[i]; This is handled by vectorising the loop and creating an additional vector containing the loop indexes for which "a[i] < value" was true. In the function epilogue this is reduced to a single max value and then used to index into the vector of results. In some cases of reduction patterns, the type of the reduction variable X is different than the type of the other arguments of STMT_INFO. In such cases, the vectype that is used when transforming STMT_INFO into a vector stmt is different than the vectype that is used to determine the vectorization factor, because it consists of a different number of elements than the actual number of elements that are being operated upon in parallel. For example, consider an accumulation of shorts into an int accumulator. On some targets it's possible to vectorize this pattern operating on 8 shorts at a time (hence, the vectype for purposes of determining the vectorization factor should be V8HI); on the other hand, the vectype that is used to create the vector form is actually V4SI (the type of the result). 
Upon entry to this function, STMT_VINFO_VECTYPE records the vectype that indicates what is the actual level of parallelism (V8HI in the example), so that the right vectorization factor would be derived. This vectype corresponds to the type of arguments to the reduction stmt, and should *NOT* be used to create the vectorized stmt. The right vectype for the vectorized stmt is obtained from the type of the result X: get_vectype_for_scalar_type (vinfo, TREE_TYPE (X)) This means that, contrary to "regular" reductions (or "regular" stmts in general), the following equation: STMT_VINFO_VECTYPE == get_vectype_for_scalar_type (vinfo, TREE_TYPE (X)) does *NOT* necessarily hold for reduction patterns.
References as_a(), associative_binary_op_p(), boolean_type_node, build_int_cst(), can_duplicate_and_interleave_p(), gimple_match_op::code, commutative_binary_op_p(), COMPARISON_CLASS_P, COND_REDUCTION, conditional_internal_fn_code(), CONST_COND_REDUCTION, CONVERT_EXPR_CODE_P, cycle_phi_info_type, direct_internal_fn_supported_p(), directly_supported_p(), dump_enabled_p(), dump_printf(), dump_printf_loc(), EXTRACT_LAST_REDUCTION, fold_binary, FOLD_LEFT_REDUCTION, fold_left_reduction_fn(), FOR_EACH_VEC_ELT, gcc_assert, gcc_unreachable, GET_MODE_PRECISION(), GET_MODE_SIZE(), get_same_sized_vectype(), get_vectype_for_scalar_type(), wi::geu_p(), gimple_bb(), gimple_extract_op(), gimple_phi_num_args(), gimple_phi_result(), loop::header, i, info_for_reduction(), loop::inner, int_const_binop(), INTEGER_INDUC_COND_REDUCTION, integer_one_node, integer_onep(), integer_zerop(), INTEGRAL_TYPE_P, is_a(), poly_int< N, C >::is_constant(), code_helper::is_internal_fn(), is_nonwrapping_integer_induction(), known_eq, lane_reducing_op_p(), vec_info::lookup_def(), vec_info::lookup_stmt(), loop_latch_edge(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_LOOP, LOOP_VINFO_VECT_FACTOR, make_unsigned_type(), max_loop_iterations(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, needs_fold_left_reduction_p(), nested_in_vect_loop_p(), neutral_op_for_reduction(), NULL, NULL_TREE, gimple_match_op::num_ops, gimple_match_op::ops, optab_vector, OPTIMIZE_FOR_SPEED, PHI_ARG_DEF_FROM_EDGE, PHI_RESULT, POINTER_TYPE_P, record_stmt_cost(), REDUC_GROUP_FIRST_ELEMENT, REDUC_GROUP_NEXT_ELEMENT, reduc_vec_info_type, reduction_fn_for_scalar_code(), SCALAR_FLOAT_TYPE_P, SCALAR_TYPE_MODE, single_imm_use(), SLP_TREE_CHILDREN, SLP_TREE_LANES, SLP_TREE_NUMBER_OF_VEC_STMTS, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_FORCE_SINGLE_CYCLE, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_LIVE_P, STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED, STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_REDUC_CODE, 
STMT_VINFO_REDUC_DEF, STMT_VINFO_REDUC_FN, STMT_VINFO_REDUC_IDX, STMT_VINFO_REDUC_TYPE, STMT_VINFO_REDUC_VECTYPE, STMT_VINFO_REDUC_VECTYPE_IN, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, STMT_VINFO_STMT, STMT_VINFO_TYPE, STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL, STMT_VINFO_VECTYPE, _loop_vec_info::suggested_unroll_factor, wi::to_widest(), TREE_CODE, TREE_CODE_REDUCTION, tree_int_cst_lt(), tree_int_cst_sgn(), tree_nop_conversion_p(), TREE_TYPE, gimple_match_op::type, type_has_mode_precision_p(), TYPE_MAX_VALUE, TYPE_MIN_VALUE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), types_compatible_p(), vect_body, vect_can_vectorize_without_simd_p(), vect_constant_def, vect_double_reduction_def, vect_emulated_vector_p(), vect_induction_def, vect_internal_def, vect_is_simple_use(), vect_location, vect_maybe_update_slp_op_vectype(), vect_model_reduction_cost(), vect_nested_cycle, vect_orig_stmt(), vect_phi_initial_value(), vect_reduction_def, vect_reduction_update_partial_vector_usage(), vect_stmt_to_vectorize(), vect_unknown_def_type, vect_unused_in_scope, vect_used_in_outer, vect_used_only_live, vector_stmt, and VECTORIZABLE_CYCLE_DEF.
Referenced by vect_analyze_stmt().
unsigned vectorize_loops | ( | void | ) |
In tree-vectorizer.cc.
References NULL.
|
extern |
Number of supported pattern matchers.
Set the number of SLP pattern matchers available.
Referenced by vect_match_slp_patterns_2().
|
extern |
List of supported pattern matchers.
Referenced by vect_match_slp_patterns_2().
|
extern |
Source location + hotness information.
Vectorizer Copyright (C) 2003-2025 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>.
Loop and basic block vectorizer. This file contains drivers for the three vectorizers: (1) loop vectorizer (inter-iteration parallelism), (2) loop-aware SLP (intra-iteration parallelism) (invoked by the loop vectorizer) (3) BB vectorizer (out-of-loops), aka SLP The rest of the vectorizer's code is organized as follows: - tree-vect-loop.cc - loop specific parts such as reductions, etc. These are used by drivers (1) and (2). - tree-vect-loop-manip.cc - vectorizer's loop control-flow utilities, used by drivers (1) and (2). - tree-vect-slp.cc - BB vectorization specific analysis and transformation, used by drivers (2) and (3). - tree-vect-stmts.cc - statements analysis and transformation (used by all). - tree-vect-data-refs.cc - vectorizer specific data-refs analysis and manipulations (used by all). - tree-vect-patterns.cc - vectorizable code patterns detector (used by all) Here's a poor attempt at illustrating that: tree-vectorizer.cc: loop_vect() loop_aware_slp() slp_vect() | / \ / | / \ / tree-vect-loop.cc tree-vect-slp.cc | \ \ / / | | \ \/ / | | \ /\ / | | \ / \ / | tree-vect-stmts.cc tree-vect-data-refs.cc \ / tree-vect-patterns.cc
Loop or bb location, with hotness information.
Referenced by check_load_store_for_partial_vectors(), check_scan_store(), dependence_distance_ge_vf(), vect_optimize_slp_pass::dump(), get_group_alias_ptr_type(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), increase_alignment(), is_simple_and_all_uses_invariant(), vect_optimize_slp_pass::materialize(), maybe_push_to_hybrid_worklist(), move_early_exit_stmts(), optimize_load_redistribution_1(), optimize_mask_stores(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), process_use(), report_ploop_op(), report_vect_op(), try_vectorize_loop_1(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_group_access_1(), vect_analyze_loop(), vect_analyze_loop_1(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_bb_partition_graph(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_build_slp_tree(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_check_lower_bound(), vect_check_nonzero_value(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_compute_data_ref_alignment(), vect_create_addr_base_for_vector_ref(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_cse_slp_nodes(), vect_detect_hybrid_slp(), vect_determine_mask_precision(), vect_determine_min_output_precision_1(), vect_determine_partial_vectors_and_peeling(), vect_determine_precisions_from_range(), vect_determine_precisions_from_users(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt(), 
vect_determine_vf_for_stmt_1(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_finish_stmt_generation_1(), vect_gen_prolog_loop_niters(), vect_get_and_check_slp_defs(), vect_get_data_access_cost(), vect_get_load_cost(), vect_get_loop_niters(), vect_get_peel_iters_epilogue(), vect_get_range_info(), vect_get_store_cost(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_grouped_load_supported(), vect_grouped_store_supported(), vect_init_vector_1(), vect_is_simple_iv_evolution(), vect_is_simple_reduction(), vect_is_simple_use(), vect_is_simple_use(), vect_joust_loop_vinfos(), vect_lanes_optab_supported_p(), vect_loop_versioning(), vect_make_slp_decision(), vect_mark_for_runtime_alias_test(), vect_mark_pattern_stmts(), vect_mark_relevant(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_model_promotion_demotion_cost(), vect_model_simple_cost(), vect_pattern_detected(), vect_pattern_recog_1(), vect_pattern_validate_optab(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_recog_average_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_mulhs_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_record_base_alignment(), vect_record_max_nunits(), vect_reduction_update_partial_vector_usage(), vect_schedule_slp(), vect_schedule_slp_node(), vect_set_loop_condition(), vect_shift_permute_load_chain(), vect_slp_analyze_bb_1(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_node_operations(), vect_slp_analyze_operations(), vect_slp_bbs(), vect_slp_convert_to_external(), vect_slp_function(), vect_slp_region(), vect_split_slp_store_group(), vect_split_statement(), vect_stmt_relevant_p(), vect_transform_loop(), vect_transform_loops(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), 
vect_update_ivs_after_vectorizer(), vect_update_misalignment_for_peel(), vect_update_vf_for_slp(), vect_use_strided_gather_scatters_p(), vect_verify_loop_lens(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bb_reduc_epilogue(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), and auto_purge_vect_location::~auto_purge_vect_location().