GCC Middle and Back End API Reference
tree-vectorizer.h File Reference
#include "tree-data-ref.h"
#include "tree-hash-traits.h"
#include "target.h"
#include "internal-fn.h"
#include "tree-ssa-operands.h"
#include "gimple-match.h"
Go to the source code of this file.
Data Structures
struct | stmt_info_for_cost |
struct | vect_scalar_ops_slice |
struct | vect_scalar_ops_slice_hash |
struct | _slp_tree |
class | _slp_instance |
struct | scalar_cond_masked_key |
struct | default_hash_traits< scalar_cond_masked_key > |
class | vec_lower_bound |
class | vec_info_shared |
class | vec_info |
struct | rgroup_controls |
struct | vec_loop_masks |
struct | vect_reusable_accumulator |
class | _loop_vec_info |
struct | slp_root |
class | _bb_vec_info |
class | dr_vec_info |
class | _stmt_vec_info |
struct | gather_scatter_info |
class | vector_costs |
class | auto_purge_vect_location |
struct | vect_loop_form_info |
class | vect_pattern |
Variables
dump_user_location_t | vect_location |
vect_pattern_decl_t | slp_patterns [] |
size_t | num__slp_patterns |
#define BB_VINFO_BBS(B)
#define BB_VINFO_DATAREFS(B)
Referenced by vect_slp_region().
#define BB_VINFO_DDRS(B)
#define BB_VINFO_GROUPED_STORES(B)
Referenced by vect_analyze_group_access_1().
#define BB_VINFO_NBBS(B)
#define BB_VINFO_SLP_INSTANCES(B)
Referenced by vect_slp_analyze_bb_1(), and vect_slp_region().
#define DR_GROUP_FIRST_ELEMENT(S)
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), dr_misalignment(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs_alignment(), vect_analyze_group_access(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_prune_runtime_alias_test_list(), vect_record_grouped_load_vectors(), vect_relevant_for_alignment_p(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_load_dependences(), vect_slp_analyze_node_alignment(), vect_small_gap_p(), vect_split_slp_store_group(), vect_supportable_dr_alignment(), vect_transform_loop(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), vectorizable_store(), and vllp_cmp().
#define DR_GROUP_GAP(S)
Referenced by get_group_load_store_type(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_group_access_1(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_record_grouped_load_vectors(), vect_split_slp_store_group(), vect_vfa_access_size(), and vectorizable_load().
#define DR_GROUP_NEXT_ELEMENT(S)
Referenced by get_group_alias_ptr_type(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_group_access(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp_instance(), vect_build_slp_tree_2(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_fixup_store_groups_with_patterns(), vect_get_place_in_interleaving_chain(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_record_grouped_load_vectors(), vect_remove_stores(), vect_slp_analyze_load_dependences(), vect_split_slp_store_group(), vectorizable_load(), and vectorizable_store().
#define DR_GROUP_SIZE(S)
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_group_access_1(), vect_analyze_loop_2(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_fixup_store_groups_with_patterns(), vect_lower_load_permutations(), vect_small_gap_p(), vect_split_slp_store_group(), vect_supportable_dr_alignment(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), and vectorizable_store().
#define DR_GROUP_STORE_COUNT(S)
Referenced by vect_transform_stmt().
#define DR_MISALIGNMENT_UNINITIALIZED (-2)
Referenced by dr_misalignment(), ensure_base_align(), vec_info::new_stmt_vec_info(), and vect_slp_analyze_node_alignment().
#define DR_MISALIGNMENT_UNKNOWN (-1)
Info on data references alignment.
Referenced by dr_misalignment(), get_group_load_store_type(), get_load_store_type(), known_alignment_for_access_p(), vect_compute_data_ref_alignment(), vect_dissolve_slp_only_groups(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_slp_analyze_node_alignment(), vect_supportable_dr_alignment(), vect_update_misalignment_for_peel(), vectorizable_load(), and vectorizable_store().
#define DR_TARGET_ALIGNMENT(DR)
Referenced by ensure_base_align(), get_misalign_in_elems(), vect_dr_aligned_if_related_peeled_dr_is(), vect_enhance_data_refs_alignment(), vect_gen_prolog_loop_niters(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_setup_realignment(), vect_update_misalignment_for_peel(), vectorizable_load(), and vectorizable_store().
#define DUMP_VECT_SCOPE(MSG)
A macro for calling: dump_begin_scope (MSG, vect_location); via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc, and then calling dump_end_scope (); once the object goes out of scope, thus capturing the nesting of the scopes. These scopes affect dump messages within them: dump messages at the top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those in a nested scope implicitly default to MSG_PRIORITY_INTERNALS.
Referenced by move_early_exit_stmts(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_early_break_dependences(), vect_analyze_loop(), vect_analyze_loop_form(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_bb_partition_graph(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_precisions(), vect_determine_vectorization_factor(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_loop_niters(), vect_make_slp_decision(), vect_mark_stmts_to_be_vectorized(), vect_match_slp_patterns(), vect_pattern_recog(), vect_prune_runtime_alias_test_list(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_transform_loop(), vect_update_inits_of_drs(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_shift(), and vectorizable_simd_clone_call().
#define HYBRID_SLP_STMT(S)
#define LOOP_REQUIRES_VERSIONING(L)
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_estimate_min_profitable_iters(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L)
Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L)
Referenced by vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L)
Referenced by vect_estimate_min_profitable_iters(), and vect_loop_versioning().
#define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L)
Referenced by vect_loop_versioning().
#define LOOP_VINFO_BBS(L)
Referenced by update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_compute_single_scalar_iteration_cost(), vect_detect_hybrid_slp(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_mark_stmts_to_be_vectorized(), vect_transform_loop(), and vect_update_vf_for_slp().
#define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L)
Referenced by check_load_store_for_partial_vectors(), get_group_load_store_type(), vect_analyze_loop_2(), vect_determine_partial_vectors_and_peeling(), vect_reduction_update_partial_vector_usage(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_reduction(), vectorizable_simd_clone_call(), and vectorizable_store().
#define LOOP_VINFO_CHECK_NONZERO(L)
Referenced by vect_check_nonzero_value(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L)
Referenced by vect_analyze_loop_2(), vect_create_cond_for_unequal_addrs(), vect_estimate_min_profitable_iters(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_COMP_ALIAS_DDRS(L)
Referenced by vect_analyze_loop_2(), vect_create_cond_for_alias_checks(), vect_estimate_min_profitable_iters(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_COST_MODEL_THRESHOLD(L)
#define LOOP_VINFO_DATAREFS(L)
Referenced by update_epilogue_loop_vinfo(), vect_analyze_data_ref_dependences(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), and vect_update_inits_of_drs().
#define LOOP_VINFO_DDRS(L)
Referenced by vect_analyze_data_ref_dependences().
#define LOOP_VINFO_DRS_ADVANCED_BY(L)
Referenced by update_epilogue_loop_vinfo(), and vect_transform_loop().
#define LOOP_VINFO_EARLY_BREAKS(L)
Referenced by can_vectorize_live_stmts(), vect_analyze_data_ref_dependences(), vect_analyze_loop(), vect_analyze_loop_2(), vect_create_loop_vinfo(), vect_do_peeling(), vect_recog_gcond_pattern(), vect_stmt_relevant_p(), vect_transform_loop(), vectorizable_live_operation(), and vectorizable_live_operation_1().
#define LOOP_VINFO_EARLY_BREAKS_LIVE_IVS(L)
Referenced by vect_analyze_slp(), and vect_stmt_relevant_p().
#define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED(L)
Referenced by vect_analyze_early_break_dependences(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_gen_vector_loop_niters_mult_vf(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), and vectorizable_live_operation().
#define LOOP_VINFO_EARLY_BRK_DEST_BB(L)
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EARLY_BRK_STORES(L)
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EARLY_BRK_VUSES(L)
Referenced by move_early_exit_stmts(), and vect_analyze_early_break_dependences().
#define LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P(L)
Referenced by vect_determine_partial_vectors_and_peeling().
#define LOOP_VINFO_EPILOGUE_IV_EXIT(L)
Referenced by vect_do_peeling().
#define LOOP_VINFO_EPILOGUE_P(L)
Referenced by vect_analyze_data_ref_dependences(), vect_analyze_loop_1(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_check_gather_scatter(), vect_create_loop_vinfo(), vect_determine_partial_vectors_and_peeling(), vect_estimate_min_profitable_iters(), and vect_transform_loop().
#define LOOP_VINFO_FULLY_MASKED_P(L)
Referenced by check_scan_store(), vect_estimate_min_profitable_iters(), vect_schedule_slp_node(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vect_transform_reduction(), vect_use_loop_mask_for_alignment_p(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_FULLY_WITH_LENGTH_P(L)
Referenced by vect_estimate_min_profitable_iters(), vect_schedule_slp_node(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_GROUPED_STORES(L)
Referenced by vect_analyze_group_access_1().
#define LOOP_VINFO_HAS_MASK_STORE(L)
Referenced by vectorizable_store().
#define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L)
Referenced by vector_costs::adjust_cost_for_freq(), vect_compute_single_scalar_iteration_cost(), and vect_create_loop_vinfo().
#define LOOP_VINFO_INT_NITERS(L)
Referenced by vector_costs::better_epilogue_loop_than_p(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_INV_PATTERN_DEF_SEQ(L)
Referenced by vect_transform_loop().
#define LOOP_VINFO_IV_EXIT(L)
Referenced by vect_analyze_loop_2(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_gen_vector_loop_niters_mult_vf(), vect_loop_versioning(), vect_set_loop_controls_directly(), vect_transform_loop(), vect_update_ivs_after_vectorizer(), and vectorizable_live_operation().
#define LOOP_VINFO_LENS(L)
Referenced by check_load_store_for_partial_vectors(), vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_reduction_update_partial_vector_usage(), vect_set_loop_condition_partial_vectors(), vect_transform_reduction(), vect_verify_loop_lens(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), and vectorizable_store().
#define LOOP_VINFO_LOOP(L)
Access Functions.
Referenced by vector_costs::compare_inside_loop_cost(), cse_and_gimplify_to_preheader(), get_group_load_store_type(), get_initial_def_for_reduction(), vec_info::insert_seq_on_entry(), loop_niters_no_overflow(), move_early_exit_stmts(), parloops_is_simple_reduction(), parloops_is_slp_reduction(), stmt_in_inner_loop_p(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_analyze_loop_operations(), vect_analyze_possibly_independent_ddr(), vect_analyze_scalar_cycles(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_better_loop_vinfo_p(), vect_build_loop_niters(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_can_advance_ivs_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_compute_single_scalar_iteration_cost(), vect_create_cond_for_alias_checks(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_dr_behavior(), vect_emit_reduction_init_stmts(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_is_simple_reduction(), vect_iv_limit_for_partial_vectors(), vect_known_niters_smaller_than_vf(), vect_loop_versioning(), vect_mark_for_runtime_alias_test(), vect_mark_stmts_to_be_vectorized(), vect_min_prec_for_max_niters(), vect_model_reduction_cost(), vect_need_peeling_or_partial_vectors_p(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), vect_phi_first_order_recurrence_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_schedule_slp_node(), vect_setup_realignment(), vect_stmt_relevant_p(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), vect_transform_loop(), 
vect_transform_loop_stmt(), vect_transform_reduction(), vect_truncate_gather_scatter_offset(), vect_update_ivs_after_vectorizer(), vect_update_vf_for_slp(), vectorizable_call(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_LOOP_CONDS(L)
Referenced by vect_analyze_slp(), and vect_create_loop_vinfo().
#define LOOP_VINFO_LOOP_IV_COND(L)
Referenced by vect_create_loop_vinfo(), and vect_stmt_relevant_p().
#define LOOP_VINFO_LOOP_NEST(L)
Referenced by vect_analyze_data_ref_dependences(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_LOWER_BOUNDS(L)
Referenced by vect_analyze_loop_2(), vect_check_lower_bound(), vect_create_cond_for_lower_bounds(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MAIN_LOOP_INFO(L)
Referenced by vect_analyze_loop_costing(), and vect_create_loop_vinfo().
#define LOOP_VINFO_MASK_SKIP_NITERS(L)
Referenced by vect_can_peel_nonlinear_iv_p(), vect_iv_limit_for_partial_vectors(), vect_prepare_for_masked_peels(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vectorizable_induction(), and vectorizable_nonlinear_induction().
#define LOOP_VINFO_MASKS(L)
Referenced by can_produce_all_loop_masks_p(), check_load_store_for_partial_vectors(), vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_max_nscalars_per_iter(), vect_reduction_update_partial_vector_usage(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_transform_reduction(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), vectorizable_call(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), and vectorizable_store().
#define LOOP_VINFO_MAX_VECT_FACTOR(L)
Referenced by vect_analyze_loop_2(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MAY_ALIAS_DDRS(L)
Referenced by vect_mark_for_runtime_alias_test(), and vect_prune_runtime_alias_test_list().
#define LOOP_VINFO_MAY_MISALIGN_STMTS(L)
Referenced by vect_create_cond_for_align_checks(), vect_enhance_data_refs_alignment(), and vect_estimate_min_profitable_iters().
#define LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P(L)
Referenced by get_group_load_store_type(), vect_analyze_loop_2(), and vect_determine_partial_vectors_and_peeling().
#define LOOP_VINFO_NBBS(L)
Referenced by update_epilogue_loop_vinfo().
#define LOOP_VINFO_NITERS(L)
Referenced by loop_niters_no_overflow(), vect_build_loop_niters(), vect_create_loop_vinfo(), vect_do_peeling(), vect_gen_prolog_loop_niters(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_transform_loop(), vect_verify_loop_lens(), and vectorizable_simd_clone_call().
#define LOOP_VINFO_NITERS_ASSUMPTIONS(L)
Referenced by vect_create_cond_for_niters_checks(), and vect_create_loop_vinfo().
#define LOOP_VINFO_NITERS_KNOWN_P(L)
Referenced by vector_costs::better_epilogue_loop_than_p(), loop_niters_no_overflow(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_apply_runtime_profitability_check_p(), vect_can_peel_nonlinear_iv_p(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_get_known_peeling_cost(), vect_get_peel_iters_epilogue(), vect_known_niters_smaller_than_vf(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_NITERS_UNCHANGED(L)
Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after prologue peeling retain total unchanged scalar loop iterations for cost model.
Referenced by vect_create_loop_vinfo(), vect_transform_loop(), and vectorizable_simd_clone_call().
#define LOOP_VINFO_NITERSM1(L)
Referenced by loop_niters_no_overflow(), vect_analyze_loop_costing(), vect_create_loop_vinfo(), vect_do_peeling(), vect_loop_versioning(), vect_min_prec_for_max_niters(), and vect_transform_loop().
#define LOOP_VINFO_NO_DATA_DEPENDENCIES(L)
Referenced by vect_analyze_data_ref_dependence(), vect_analyze_data_ref_dependences(), vect_analyze_possibly_independent_ddr(), and vectorizable_load().
#define LOOP_VINFO_ORIG_LOOP_INFO(L)
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_better_loop_vinfo_p(), vect_check_gather_scatter(), vect_compute_data_ref_alignment(), vect_create_loop_vinfo(), vect_find_reusable_accumulator(), vect_need_peeling_or_partial_vectors_p(), and vect_transform_loop().
#define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L)
Referenced by vect_analyze_data_ref_dependences().
#define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L)
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_gen_loop_len_mask(), vect_get_loop_len(), vect_set_loop_controls_directly(), vect_verify_loop_lens(), vectorizable_call(), vectorizable_condition(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_operation(), vectorizable_store(), and vectorize_fold_left_reduction().
#define LOOP_VINFO_PARTIAL_VECTORS_STYLE(L)
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_loop_mask(), vect_set_loop_condition(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L)
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_compute_data_ref_alignment(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_prolog_loop_niters(), vect_iv_limit_for_partial_vectors(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_transform_loop(), and vect_use_loop_mask_for_alignment_p().
#define LOOP_VINFO_PEELING_FOR_GAPS(L)
Referenced by get_group_load_store_type(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_get_peel_iters_epilogue(), vect_need_peeling_or_partial_vectors_p(), vect_transform_loop(), and vectorizable_load().
#define LOOP_VINFO_PEELING_FOR_NITER(L)
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_determine_partial_vectors_and_peeling(), and vect_do_peeling().
#define LOOP_VINFO_PTR_MASK(L)
Referenced by vect_create_cond_for_align_checks(), and vect_enhance_data_refs_alignment().
#define LOOP_VINFO_REDUCTION_CHAINS(L)
Referenced by parloops_is_slp_reduction(), vect_analyze_loop_2(), vect_fixup_scalar_cycles_with_patterns(), and vect_is_simple_reduction().
#define LOOP_VINFO_REDUCTIONS(L)
Referenced by vect_analyze_scalar_cycles_1().
#define LOOP_VINFO_RGROUP_COMPARE_TYPE(L)
Referenced by vect_get_loop_len(), vect_rgroup_iv_might_wrap_p(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_RGROUP_IV_TYPE(L)
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_get_loop_len(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_set_loop_controls_directly(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), and vect_verify_loop_lens().
#define LOOP_VINFO_SCALAR_ITERATION_COST(L)
Referenced by vect_compute_single_scalar_iteration_cost(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), and vect_peeling_hash_get_lowest_cost().
#define LOOP_VINFO_SCALAR_IV_EXIT(L)
Referenced by set_uid_loop_bbs(), vect_do_peeling(), and vect_transform_loop().
#define LOOP_VINFO_SCALAR_LOOP(L)
Referenced by set_uid_loop_bbs(), vect_do_peeling(), vect_loop_versioning(), and vect_transform_loop().
#define LOOP_VINFO_SCALAR_LOOP_SCALING(L)
Referenced by vect_loop_versioning(), and vect_transform_loop().
#define LOOP_VINFO_SIMD_IF_COND(L)
Referenced by vect_analyze_loop_2().
#define LOOP_VINFO_SLP_INSTANCES(L)
Referenced by vect_analyze_loop_2(), vect_analyze_slp(), vect_make_slp_decision(), and vect_transform_loop().
#define LOOP_VINFO_SLP_UNROLLING_FACTOR(L)
Referenced by vect_make_slp_decision(), and vect_update_vf_for_slp().
#define LOOP_VINFO_UNALIGNED_DR(L)
Referenced by get_misalign_in_elems(), vect_analyze_loop_2(), vect_enhance_data_refs_alignment(), and vect_gen_prolog_loop_niters().
#define LOOP_VINFO_USING_DECREMENTING_IV_P(L)
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), vect_set_loop_condition_partial_vectors(), and vect_set_loop_controls_directly().
#define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L)
Referenced by vector_costs::better_epilogue_loop_than_p(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_can_peel_nonlinear_iv_p(), vect_determine_partial_vectors_and_peeling(), vect_do_peeling(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_set_loop_condition(), vect_transform_loop(), and vectorizable_load().
#define LOOP_VINFO_USING_SELECT_VL_P(L)
Referenced by vect_analyze_loop_2(), vect_determine_partial_vectors_and_peeling(), vect_get_data_ptr_increment(), vect_get_strided_load_store_ops(), vect_set_loop_condition_partial_vectors(), vect_set_loop_controls_directly(), vectorizable_induction(), vectorizable_load(), and vectorizable_store().
#define LOOP_VINFO_VECT_FACTOR(L)
Referenced by vector_costs::better_epilogue_loop_than_p(), check_load_store_for_partial_vectors(), vector_costs::compare_inside_loop_cost(), get_group_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_loop(), vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_better_loop_vinfo_p(), vect_can_peel_nonlinear_iv_p(), vect_compute_data_ref_alignment(), vect_determine_vectorization_factor(), vect_do_peeling(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_gen_vector_loop_niters(), vect_gen_vector_loop_niters_mult_vf(), vect_get_loop_mask(), vect_get_num_copies(), vect_iv_limit_for_partial_vectors(), vect_max_vf(), vect_need_peeling_or_partial_vectors_p(), vect_prepare_for_masked_peels(), vect_prune_runtime_alias_test_list(), vect_record_loop_len(), vect_set_loop_condition_partial_vectors(), vect_set_loop_condition_partial_vectors_avx512(), vect_set_loop_controls_directly(), vect_shift_permute_load_chain(), vect_small_gap_p(), vect_supportable_dr_alignment(), vect_transform_loop(), vect_transform_loop_stmt(), vect_update_vf_for_slp(), vect_verify_full_masking(), vect_verify_full_masking_avx512(), vect_vf_for_cost(), vectorizable_induction(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), and vectorizable_store().
#define LOOP_VINFO_VECTORIZABLE_P(L)
Referenced by try_vectorize_loop_1(), vect_analyze_loop(), and vect_analyze_loop_2().
#define LOOP_VINFO_VERSIONING_THRESHOLD(L)
Referenced by vect_analyze_loop(), vect_analyze_loop_2(), vect_loop_versioning(), and vect_transform_loop().
#define MAX_INTERM_CVT_STEPS 3
The maximum number of intermediate steps required in multi-step type conversion.
Referenced by supportable_narrowing_operation(), and supportable_widening_operation().
#define MAX_VECTORIZATION_FACTOR INT_MAX
Referenced by vect_analyze_loop_2(), vect_estimate_min_profitable_iters(), and vect_max_vf().
#define PURE_SLP_STMT(S)
Referenced by vec_slp_has_scalar_use(), vect_analyze_loop_operations(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_detect_hybrid_slp(), vect_get_data_access_cost(), vect_remove_slp_scalar_calls(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_stmt(), vect_update_vf_for_slp(), vectorizable_live_operation(), and vectorizable_store().
#define REDUC_GROUP_FIRST_ELEMENT(S)
Referenced by parloops_is_slp_reduction(), vect_analyze_slp(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_create_epilog_for_reduction(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_and_check_slp_defs(), vect_is_simple_reduction(), vect_reassociating_reduction_p(), vect_transform_cycle_phi(), vectorizable_live_operation(), and vectorizable_reduction().
#define REDUC_GROUP_NEXT_ELEMENT(S)
#define REDUC_GROUP_SIZE(S)
Referenced by parloops_is_slp_reduction(), vect_analyze_slp_instance(), vect_fixup_reduc_chain(), and vect_is_simple_reduction().
#define SET_DR_MISALIGNMENT(DR, VAL)
Referenced by vect_compute_data_ref_alignment(), vect_enhance_data_refs_alignment(), and vect_update_misalignment_for_peel().
#define SET_DR_TARGET_ALIGNMENT(DR, VAL)
Referenced by vect_compute_data_ref_alignment().
#define SLP_INSTANCE_KIND(S)
#define SLP_INSTANCE_LOADS(S)
Referenced by vect_analyze_loop_2(), vect_build_slp_instance(), vect_free_slp_instance(), vect_gather_slp_loads(), vect_slp_analyze_instance_alignment(), and vect_slp_analyze_instance_dependence().
#define SLP_INSTANCE_REMAIN_DEFS(S)
Referenced by vect_bb_slp_mark_live_stmts(), vect_build_slp_instance(), vect_free_slp_instance(), and vectorize_slp_instance_root_stmt().
#define SLP_INSTANCE_ROOT_STMTS(S)
Referenced by vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_free_slp_instance(), vect_schedule_slp(), vect_slp_analyze_bb_1(), and vect_slp_analyze_operations().
#define SLP_INSTANCE_TREE(S)
Access Functions.
Referenced by vect_optimize_slp_pass::build_vertices(), debug(), dot_slp_tree(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_partition_graph(), vect_bb_slp_mark_live_stmts(), vect_bb_vectorization_profitable_p(), vect_build_slp_instance(), vect_free_slp_instance(), vect_gather_slp_loads(), vect_lower_load_permutations(), vect_make_slp_decision(), vect_match_slp_patterns(), vect_optimize_slp(), vect_schedule_slp(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_alignment(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_operations(), vect_slp_convert_to_external(), vect_slp_region(), and vectorizable_bb_reduc_epilogue().
#define SLP_TREE_CHILDREN | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_add_pattern::build(), complex_fms_pattern::build(), complex_mul_pattern::build(), vect_optimize_slp_pass::build_graph(), vect_optimize_slp_pass::build_vertices(), vect_optimize_slp_pass::change_vec_perm_layout(), compatible_complex_nodes_p(), vect_optimize_slp_pass::decide_masked_load_lanes(), dot_slp_tree(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), complex_add_pattern::matches(), complex_fms_pattern::matches(), complex_mul_pattern::matches(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_detect_pair_op(), vect_detect_pair_op(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_get_gather_scatter_ops(), vect_get_slp_defs(), vect_get_vec_defs(), vect_is_simple_use(), vect_lower_load_permutations(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_match_slp_patterns_2(), vect_print_slp_graph(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_build_two_operator_nodes(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_update_slp_vf_for_node(), vect_validate_multiplication(), vectorizable_condition(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_phi(), vectorizable_recurr(), 
vectorizable_reduction(), vectorizable_scan_store(), vectorizable_slp_permutation(), vectorizable_store(), vectorize_fold_left_reduction(), and _slp_tree::~_slp_tree().
#define SLP_TREE_CODE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_optimize_slp_pass::backward_cost(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::dump(), vect_optimize_slp_pass::forward_pass(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_stmt(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_tree_2(), vect_create_new_slp_node(), vect_detect_pair_op(), vect_gather_slp_loads(), vect_is_simple_use(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), vect_transform_stmt(), and vect_update_slp_vf_for_node().
#define SLP_TREE_DEF_TYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), compatible_complex_nodes_p(), vect_optimize_slp_pass::create_partitions(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), optimize_load_redistribution_1(), vect_analyze_loop_2(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_check_scalar_mask(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_gather_slp_loads(), vect_get_slp_scalar_def(), vect_is_simple_use(), vect_is_slp_load_node(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_maybe_update_slp_op_vectype(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_remove_slp_scalar_calls(), vect_schedule_scc(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_operations(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_slp_tree_uniform_p(), vect_update_slp_vf_for_node(), vectorizable_phi(), vectorizable_shift(), and vectorizable_slp_permutation_1().
#define SLP_TREE_LANE_PERMUTATION | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_detect_pair_op(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_slp_build_two_operator_nodes(), vectorizable_slp_permutation(), vectorizable_slp_permutation_1(), and _slp_tree::~_slp_tree().
#define SLP_TREE_LANES | ( | S | ) |
Referenced by vect_optimize_slp_pass::change_layout_cost(), check_scan_store(), vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), get_vectype_for_scalar_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_compatible_layout(), optimize_load_redistribution_1(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), supportable_indirect_convert_operation(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_slp_scalar_cost(), vect_bb_vectorization_profitable_p(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_get_num_copies(), vect_lower_load_permutations(), vect_maybe_update_slp_op_vectype(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_transform_cycle_phi(), vect_update_slp_vf_for_node(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), and vllp_cmp().
#define SLP_TREE_LOAD_PERMUTATION | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), get_group_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), vect_optimize_slp_pass::materialize(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_slp(), vect_build_slp_tree_2(), vect_lower_load_permutations(), vect_print_slp_tree(), vect_slp_convert_to_external(), vect_transform_slp_perm_load(), vectorizable_load(), vllp_cmp(), and _slp_tree::~_slp_tree().
#define SLP_TREE_MEMORY_ACCESS_TYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_mem_access_type(), vectorizable_load(), and vectorizable_store().
#define SLP_TREE_NUMBER_OF_VEC_STMTS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_create_constant_vectors(), vect_get_slp_defs(), vect_model_simple_cost(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_transform_cycle_phi(), vect_transform_slp_perm_load_1(), vectorizable_bswap(), vectorizable_call(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_slp_permutation_1(), vectorizable_store(), and vectorize_slp_instance_root_stmt().
#define SLP_TREE_REF_COUNT | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_add_pattern::build(), complex_fms_pattern::build(), complex_mul_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), optimize_load_redistribution(), optimize_load_redistribution_1(), vect_build_combine_node(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_free_slp_tree(), vect_print_slp_tree(), and vect_slp_build_two_operator_nodes().
#define SLP_TREE_REPRESENTATIVE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::build_vertices(), compatible_complex_nodes_p(), vect_optimize_slp_pass::containing_loop(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::dump(), vect_optimize_slp_pass::get_result_with_layout(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::is_cfg_latch_edge(), linear_loads_p(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_combine_node(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_free_slp_tree(), vect_gather_slp_loads(), vect_is_simple_use(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_match_expression_p(), vect_pattern_validate_optab(), vect_print_slp_tree(), vect_schedule_scc(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_build_two_operator_nodes(), and vect_slp_node_weight().
#define SLP_TREE_SCALAR_OPS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), compatible_complex_nodes_p(), vect_optimize_slp_pass::get_result_with_layout(), vect_bb_slp_mark_live_stmts(), vect_create_new_slp_node(), vect_get_slp_scalar_def(), vect_is_simple_use(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_tree_uniform_p(), vectorizable_shift(), vectorizable_slp_permutation_1(), and _slp_tree::~_slp_tree().
#define SLP_TREE_SCALAR_STMTS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), can_vectorize_live_stmts(), get_group_load_store_type(), vect_optimize_slp_pass::get_result_with_layout(), _slp_instance::location(), vect_optimize_slp_pass::materialize(), optimize_load_redistribution_1(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_analyze_loop_2(), vect_analyze_slp(), vect_bb_partition_graph_r(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree(), vect_build_slp_tree_2(), vect_create_epilog_for_reduction(), vect_create_new_slp_node(), vect_create_new_slp_node(), vect_cse_slp_nodes(), vect_find_first_scalar_stmt_in_slp(), vect_find_last_scalar_stmt_in_slp(), vect_get_slp_scalar_def(), vect_lower_load_permutations(), vect_lower_load_permutations(), vect_mark_slp_stmts(), vect_mark_slp_stmts_relevant(), vect_print_slp_tree(), vect_remove_slp_scalar_calls(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_bb_1(), vect_slp_analyze_instance_dependence(), vect_slp_analyze_load_dependences(), vect_slp_analyze_node_alignment(), vect_slp_analyze_node_operations(), vect_slp_analyze_node_operations_1(), vect_slp_analyze_operations(), vect_slp_analyze_store_dependences(), vect_slp_convert_to_external(), vect_slp_gather_vectorized_scalar_stmts(), vect_slp_prune_covered_roots(), vect_transform_cycle_phi(), vect_transform_slp_perm_load_1(), vectorizable_induction(), vectorizable_load(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_store(), vectorize_fold_left_reduction(), vllp_cmp(), and _slp_tree::~_slp_tree().
#define SLP_TREE_SIMD_CLONE_INFO | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vectorizable_simd_clone_call(), and _slp_tree::~_slp_tree().
#define SLP_TREE_VEC_DEFS | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), vect_optimize_slp_pass::create_partitions(), vect_optimize_slp_pass::get_result_with_layout(), vect_build_slp_tree_2(), vect_create_constant_vectors(), vect_create_epilog_for_reduction(), vect_get_slp_defs(), vect_get_slp_vect_def(), vect_prologue_cost_for_slp(), vect_schedule_scc(), vect_schedule_slp_node(), vect_transform_slp_perm_load_1(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_phi(), vectorizable_simd_clone_call(), vectorizable_slp_permutation_1(), vectorizable_store(), vectorize_slp_instance_root_stmt(), and _slp_tree::~_slp_tree().
#define SLP_TREE_VECTYPE | ( | S | ) |
Referenced by _slp_tree::_slp_tree(), addsub_pattern::build(), complex_pattern::build(), vect_optimize_slp_pass::decide_masked_load_lanes(), vect_optimize_slp_pass::get_result_with_layout(), complex_mul_pattern::matches(), addsub_pattern::recognize(), vect_optimize_slp_pass::start_choosing_layouts(), vect_add_slp_permutation(), vect_analyze_slp(), vect_analyze_stmt(), vect_bb_slp_scalar_cost(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_constant_vectors(), vect_get_num_copies(), vect_is_simple_use(), vect_lower_load_permutations(), vect_maybe_update_slp_op_vectype(), vect_pattern_validate_optab(), vect_print_slp_tree(), vect_prologue_cost_for_slp(), vect_schedule_slp_node(), vect_slp_analyze_node_alignment(), vect_slp_analyze_node_operations(), vect_slp_analyze_operations(), vect_slp_build_two_operator_nodes(), vect_slp_convert_to_external(), vect_slp_region(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vectorizable_bb_reduc_epilogue(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and vectorizable_slp_permutation_1().
#define STMT_SLP_TYPE | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), maybe_push_to_hybrid_worklist(), vec_info::new_stmt_vec_info(), vect_analyze_loop_2(), vect_detect_hybrid_slp(), vect_detect_hybrid_slp(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_free_slp_tree(), vect_mark_slp_stmts(), vect_slp_analyze_bb_1(), vect_supportable_dr_alignment(), and vect_transform_loop_stmt().
#define STMT_VINFO_DATA_REF | ( | S | ) |
Referenced by bump_vector_ptr(), compatible_complex_nodes_p(), exist_non_indexing_operands_for_use_p(), get_group_alias_ptr_type(), vect_optimize_slp_pass::internal_node_cost(), linear_loads_p(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_early_break_dependences(), vect_analyze_group_access_1(), vect_bb_slp_scalar_cost(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_check_gather_scatter(), vect_compute_single_scalar_iteration_cost(), vect_cond_store_pattern_same_ref(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), vect_describe_gather_scatter_call(), vect_determine_vf_for_stmt_1(), vect_gather_slp_loads(), vect_get_and_check_slp_defs(), vect_get_strided_load_store_ops(), vect_get_vector_types_for_stmt(), vect_is_extending_load(), vect_is_slp_load_node(), vect_is_store_elt_extraction(), vect_preserves_scalar_order_p(), vect_recog_bool_pattern(), vect_recog_cond_store_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_mask_conversion_pattern(), vect_schedule_slp(), vect_schedule_slp_node(), vect_slp_analyze_load_dependences(), vect_slp_analyze_store_dependences(), vect_slp_prefer_store_lanes_p(), vectorizable_assignment(), vectorizable_load(), vectorizable_operation(), and vectorizable_store().
#define STMT_VINFO_DEF_TYPE | ( | S | ) |
Referenced by can_vectorize_live_stmts(), info_for_reduction(), iv_phi_p(), maybe_push_to_hybrid_worklist(), maybe_set_vectorized_backedge_value(), vec_info::new_stmt_vec_info(), parloops_is_simple_reduction(), parloops_valid_reduction_input_p(), process_use(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_loop_operations(), vect_analyze_scalar_cycles_1(), vect_analyze_slp(), vect_analyze_slp_instance(), vect_analyze_stmt(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_compute_single_scalar_iteration_cost(), vect_create_epilog_for_reduction(), vect_create_loop_vinfo(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_get_internal_def(), vect_init_pattern_stmt(), vect_inner_phi_in_double_reduction_p(), vect_is_simple_reduction(), vect_is_simple_use(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_reassociating_reduction_p(), vect_recog_over_widening_pattern(), vect_schedule_scc(), vect_stmt_relevant_p(), vect_transform_cycle_phi(), vect_transform_loop(), vect_transform_reduction(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_DR_BASE_ADDRESS | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_BASE_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_BASE_MISALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_INFO | ( | S | ) |
Referenced by check_scan_store(), compare_step_with_zero(), dr_misalignment(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), vec_info::lookup_dr(), vec_info::move_dr(), vect_analyze_data_ref_accesses(), vect_analyze_early_break_dependences(), vect_create_addr_base_for_vector_ref(), vect_create_data_ref_ptr(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_prune_runtime_alias_test_list(), vect_setup_realignment(), vect_slp_analyze_node_alignment(), vect_truncate_gather_scatter_offset(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
#define STMT_VINFO_DR_INIT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_OFFSET | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_OFFSET_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_STEP | ( | S | ) |
Referenced by vect_analyze_data_ref_access(), vect_analyze_data_refs(), and vect_setup_realignment().
#define STMT_VINFO_DR_STEP_ALIGNMENT | ( | S | ) |
Referenced by vect_analyze_data_refs().
#define STMT_VINFO_DR_WRT_VEC_LOOP | ( | S | ) |
Referenced by vec_info::move_dr(), vect_analyze_data_refs(), vect_dr_behavior(), and vect_record_base_alignments().
#define STMT_VINFO_FORCE_SINGLE_CYCLE | ( | S | ) |
Referenced by vect_transform_cycle_phi(), vect_transform_reduction(), and vectorizable_reduction().
#define STMT_VINFO_GATHER_SCATTER_P | ( | S | ) |
Referenced by get_load_store_type(), vec_info::move_dr(), record_stmt_cost(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_access(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_possibly_independent_ddr(), vect_build_slp_instance(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_check_store_rhs(), vect_compute_data_ref_alignment(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), vect_record_base_alignments(), vect_relevant_for_alignment_p(), vect_update_inits_of_drs(), vectorizable_load(), and vectorizable_store().
#define STMT_VINFO_GROUPED_ACCESS | ( | S | ) |
Referenced by check_scan_store(), vect_optimize_slp_pass::decide_masked_load_lanes(), dr_misalignment(), dr_target_alignment(), ensure_base_align(), get_group_load_store_type(), get_load_store_type(), vect_optimize_slp_pass::internal_node_cost(), vect_optimize_slp_pass::remove_redundant_permutations(), vect_optimize_slp_pass::start_choosing_layouts(), vect_analyze_data_ref_access(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_fixup_store_groups_with_patterns(), vect_is_slp_load_node(), vect_lower_load_permutations(), vect_preserves_scalar_order_p(), vect_relevant_for_alignment_p(), vect_slp_analyze_data_ref_dependence(), vect_slp_analyze_load_dependences(), vect_supportable_dr_alignment(), vect_transform_loop(), vect_transform_slp_perm_load_1(), vect_transform_stmt(), vector_alignment_reachable_p(), vectorizable_load(), vectorizable_store(), and vllp_cmp().
#define STMT_VINFO_IN_PATTERN_P | ( | S | ) |
Referenced by vect_analyze_loop_2(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_fixup_scalar_cycles_with_patterns(), vect_fixup_store_groups_with_patterns(), vect_free_slp_tree(), vect_mark_relevant(), vect_pattern_recog_1(), vect_set_pattern_stmt(), vect_stmt_to_vectorize(), vect_transform_loop(), and vectorizable_reduction().
#define STMT_VINFO_LIVE_P | ( | S | ) |
Referenced by can_vectorize_live_stmts(), process_use(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_bb_slp_scalar_cost(), vect_compute_single_scalar_iteration_cost(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_mark_relevant(), vect_print_slp_tree(), vect_schedule_slp_node(), vect_slp_analyze_node_operations_1(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_stmt(), vectorizable_induction(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED | ( | S | ) |
Referenced by is_nonwrapping_integer_induction(), vect_analyze_scalar_cycles_1(), vect_is_nonlinear_iv_evolution(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_PART | ( | S | ) |
Referenced by is_nonwrapping_integer_induction(), vect_analyze_scalar_cycles_1(), vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_is_nonlinear_iv_evolution(), vect_update_ivs_after_vectorizer(), vectorizable_induction(), vectorizable_nonlinear_induction(), and vectorizable_reduction().
#define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE | ( | S | ) |
Referenced by vect_can_advance_ivs_p(), vect_can_peel_nonlinear_iv_p(), vect_is_nonlinear_iv_evolution(), vect_update_ivs_after_vectorizer(), vectorizable_induction(), and vectorizable_nonlinear_induction().
#define STMT_VINFO_MEMORY_ACCESS_TYPE | ( | S | ) |
Referenced by update_epilogue_loop_vinfo(), vect_mem_access_type(), vectorizable_load(), and vectorizable_store().
#define STMT_VINFO_MIN_NEG_DIST | ( | S | ) |
Referenced by vect_analyze_data_ref_dependence(), and vectorizable_load().
#define STMT_VINFO_PATTERN_DEF_SEQ | ( | S | ) |
Referenced by append_pattern_def_seq(), update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_mark_pattern_stmts(), vect_pattern_recog_1(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_split_statement(), and vect_transform_loop().
#define STMT_VINFO_REDUC_CODE | ( | S | ) |
#define STMT_VINFO_REDUC_DEF | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), info_for_reduction(), parloops_force_simple_reduction(), supportable_widening_operation(), vect_active_double_reduction_p(), vect_analyze_loop_2(), vect_analyze_scalar_cycles_1(), vect_analyze_slp_instance(), vect_create_epilog_for_reduction(), vect_reduc_type(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_EPILOGUE_ADJUSTMENT | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), vect_find_reusable_accumulator(), and vect_transform_cycle_phi().
#define STMT_VINFO_REDUC_FN | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_create_epilog_for_reduction(), vect_reduction_update_partial_vector_usage(), vect_transform_reduction(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_IDX | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_slp(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_create_epilog_for_reduction(), vect_fixup_scalar_cycles_with_patterns(), vect_get_and_check_slp_defs(), vect_is_reduction(), vect_is_simple_reduction(), vect_mark_pattern_stmts(), vect_reassociating_reduction_p(), vect_transform_reduction(), vectorizable_call(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_TYPE | ( | S | ) |
Referenced by maybe_set_vectorized_backedge_value(), vec_info::new_stmt_vec_info(), parloops_force_simple_reduction(), valid_reduction_p(), vect_create_epilog_for_reduction(), vect_find_reusable_accumulator(), vect_is_simple_reduction(), vect_reduc_type(), vect_reduction_update_partial_vector_usage(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_lane_reducing(), vectorizable_live_operation(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_VECTYPE | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), and vectorizable_reduction().
#define STMT_VINFO_REDUC_VECTYPE_IN | ( | S | ) |
Referenced by vect_is_emulated_mixed_dot_prod(), vect_transform_reduction(), vectorizable_lane_reducing(), and vectorizable_reduction().
#define STMT_VINFO_RELATED_STMT | ( | S | ) |
Referenced by vec_info::add_pattern_stmt(), update_epilogue_loop_vinfo(), vect_analyze_loop_2(), vect_analyze_stmt(), vect_bb_slp_mark_live_stmts(), vect_create_epilog_for_reduction(), vect_detect_hybrid_slp(), vect_determine_vf_for_stmt(), vect_fixup_reduc_chain(), vect_fixup_scalar_cycles_with_patterns(), vect_fixup_store_groups_with_patterns(), vect_get_and_check_slp_defs(), vect_init_pattern_stmt(), vect_look_through_possible_promotion(), vect_mark_pattern_stmts(), vect_mark_relevant(), vect_orig_stmt(), vect_set_pattern_stmt(), vect_split_statement(), vect_stmt_to_vectorize(), vect_transform_loop(), and vectorizable_reduction().
#define STMT_VINFO_RELEVANT | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), vec_info::new_stmt_vec_info(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_detect_hybrid_slp(), vect_mark_relevant(), vect_mark_slp_stmts_relevant(), vect_mark_stmts_to_be_vectorized(), and vectorizable_reduction().
#define STMT_VINFO_RELEVANT_P | ( | S | ) |
Referenced by maybe_set_vectorized_backedge_value(), vect_active_double_reduction_p(), vect_analyze_loop_operations(), vect_analyze_slp(), vect_analyze_stmt(), vect_compute_single_scalar_iteration_cost(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_relevant_for_alignment_p(), vect_transform_loop(), vect_transform_loop_stmt(), vect_update_vf_for_slp(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_live_operation(), vectorizable_load(), vectorizable_operation(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_SIMD_CLONE_INFO | ( | S | ) |
Referenced by vec_info::free_stmt_vec_info(), and vectorizable_simd_clone_call().
#define STMT_VINFO_SIMD_LANE_ACCESS_P | ( | S | ) |
Referenced by check_scan_store(), vec_info::move_dr(), vect_analyze_data_ref_accesses(), vect_analyze_data_refs(), vect_update_inits_of_drs(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
#define STMT_VINFO_SLP_VECT_ONLY | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), vec_info::new_stmt_vec_info(), vect_analyze_data_ref_accesses(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), and vectorizable_load().
#define STMT_VINFO_SLP_VECT_ONLY_PATTERN | ( | S | ) |
Referenced by addsub_pattern::build(), complex_pattern::build(), vec_info::new_stmt_vec_info(), vect_analyze_loop_2(), and vect_free_slp_tree().
#define STMT_VINFO_STMT | ( | S | ) |
Referenced by complex_pattern::build(), check_scan_store(), compatible_complex_nodes_p(), vect_optimize_slp_pass::decide_masked_load_lanes(), dump_stmt_cost(), vect_optimize_slp_pass::start_choosing_layouts(), stmt_in_inner_loop_p(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_accesses(), vect_analyze_slp(), vect_build_slp_tree_2(), vect_determine_mask_precision(), vect_get_slp_scalar_def(), vect_match_expression_p(), vect_pattern_validate_optab(), vect_recog_abd_pattern(), vect_recog_absolute_difference(), vect_recog_bitfield_ref_pattern(), vect_recog_build_binary_gimple_stmt(), vect_recog_cond_store_pattern(), vect_recog_gcond_pattern(), vect_recog_mod_var_pattern(), vect_recog_sat_add_pattern(), vect_recog_sat_sub_pattern(), vect_recog_sat_trunc_pattern(), vect_recog_widen_abd_pattern(), vect_stmt_relevant_p(), vectorizable_comparison_1(), vectorizable_early_exit(), vectorizable_reduction(), vectorizable_scan_store(), vectorize_slp_instance_root_stmt(), and vllp_cmp().
#define STMT_VINFO_STRIDED_P | ( | S | ) |
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vec_info::move_dr(), update_epilogue_loop_vinfo(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_early_break_dependences(), vect_analyze_group_access_1(), vect_analyze_slp(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_dissolve_slp_only_groups(), vect_enhance_data_refs_alignment(), vect_lower_load_permutations(), and vect_relevant_for_alignment_p().
#define STMT_VINFO_TYPE | ( | S | ) |
Access Functions.
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_stmt(), vect_create_loop_vinfo(), vect_init_pattern_stmt(), vect_schedule_slp_node(), vect_slp_analyze_node_operations(), vect_transform_stmt(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
#define STMT_VINFO_VEC_INDUC_COND_INITIAL_VAL | ( | S | ) |
Referenced by vect_create_epilog_for_reduction(), vect_transform_cycle_phi(), and vectorizable_reduction().
#define STMT_VINFO_VEC_STMTS | ( | S | ) |
Referenced by vec_info::free_stmt_vec_info(), maybe_set_vectorized_backedge_value(), vec_info::new_stmt_vec_info(), vect_create_epilog_for_reduction(), vect_create_vectorized_demotion_stmts(), vect_get_vec_defs_for_operand(), vect_record_grouped_load_vectors(), vect_transform_cycle_phi(), vect_transform_reduction(), vect_transform_stmt(), vect_vfa_access_size(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_recurr(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define STMT_VINFO_VECTORIZABLE | ( | S | ) |
Referenced by vec_info::new_stmt_vec_info(), vect_analyze_data_ref_accesses(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_group_access_1(), vect_build_slp_tree_1(), vect_build_slp_tree_2(), vect_determine_precisions(), vect_pattern_recog(), and vect_record_base_alignments().
#define STMT_VINFO_VECTYPE | ( | S | ) |
Referenced by append_pattern_def_seq(), addsub_pattern::build(), complex_pattern::build(), bump_vector_ptr(), get_initial_defs_for_reduction(), get_misalign_in_elems(), record_stmt_cost(), record_stmt_cost(), stmt_vectype(), vect_analyze_data_refs(), vect_analyze_data_refs_alignment(), vect_analyze_loop_2(), vect_analyze_slp(), vect_analyze_stmt(), vect_build_one_gather_load_call(), vect_build_slp_instance(), vect_check_gather_scatter(), vect_check_scalar_mask(), vect_check_store_rhs(), vect_create_cond_for_align_checks(), vect_describe_gather_scatter_call(), vect_determine_vectorization_factor(), vect_determine_vf_for_stmt_1(), vect_dr_misalign_for_aligned_access(), vect_enhance_data_refs_alignment(), vect_find_reusable_accumulator(), vect_gen_prolog_loop_niters(), vect_get_data_access_cost(), vect_get_peeling_costs_all_drs(), vect_get_strided_load_store_ops(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_init_pattern_stmt(), vect_is_emulated_mixed_dot_prod(), vect_is_simple_use(), vect_model_reduction_cost(), vect_peeling_supportable(), vect_permute_load_chain(), vect_permute_store_chain(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_setup_realignment(), vect_shift_permute_load_chain(), vect_transform_cycle_phi(), vect_transform_grouped_load(), vect_transform_loop(), vect_transform_loop_stmt(), vect_transform_reduction(), vect_transform_stmt(), vect_truncate_gather_scatter_offset(), vect_update_misalignment_for_peel(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_assignment(), vectorizable_bswap(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_induction(), vectorizable_lc_phi(), vectorizable_live_operation(), vectorizable_live_operation_1(), vectorizable_load(), vectorizable_nonlinear_induction(), 
vectorizable_operation(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_scan_store(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
#define VECT_MAX_COST 1000 |
Referenced by vect_get_load_cost(), vect_get_store_cost(), and vect_peeling_hash_insert().
#define VECT_SCALAR_BOOLEAN_TYPE_P | ( | TYPE | ) |
Nonzero if TYPE represents a (scalar) boolean type or type in the middle-end compatible with it (unsigned precision 1 integral types). Used to determine which types should be vectorized as VECTOR_BOOLEAN_TYPE_P.
Referenced by get_same_sized_vectype(), integer_type_for_mask(), possible_vector_mask_operation_p(), vect_check_scalar_mask(), vect_determine_mask_precision(), vect_get_vec_defs_for_operand(), vect_is_simple_cond(), vect_narrowable_type_p(), vect_recog_bool_pattern(), vect_recog_cast_forwprop_pattern(), vect_recog_gcond_pattern(), vect_recog_mask_conversion_pattern(), and vectorizable_operation().
#define VECTORIZABLE_CYCLE_DEF | ( | D | ) |
Referenced by info_for_reduction(), maybe_set_vectorized_backedge_value(), vect_compute_single_scalar_iteration_cost(), vect_update_vf_for_slp(), vectorizable_lane_reducing(), and vectorizable_reduction().
typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t |
typedef auto_vec<unsigned, 16> auto_load_permutation_t |
typedef _bb_vec_info * bb_vec_info |
typedef enum _complex_perm_kinds complex_perm_kinds_t |
All possible load permute values that could result from the partial data-flow analysis.
typedef struct data_reference* dr_p |
typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec |
typedef vec<std::pair<unsigned, unsigned> > lane_permutation_t |
typedef vec<unsigned> load_permutation_t |
typedef _loop_vec_info * loop_vec_info |
Info on vectorized loops.
Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL value signifies success, and a NULL value signifies failure, supporting propagating an opt_problem * describing the failure back up the call stack.
typedef hash_map<slp_node_hash, bool> slp_compat_nodes_map_t |
typedef class _slp_instance * slp_instance |
SLP instance is a sequence of stmts in a loop that can be packed into SIMD stmts.
typedef pair_hash<nofree_ptr_hash <_slp_tree>, nofree_ptr_hash <_slp_tree> > slp_node_hash |
Cache from nodes pair to being compatible or not.
Cache from nodes to the load permutation they represent.
typedef class _stmt_vec_info* stmt_vec_info |
Vectorizer Copyright (C) 2003-2025 Free Software Foundation, Inc. Contributed by Dorit Naishlos <dorit@il.ibm.com> This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>.
typedef vec<stmt_info_for_cost> stmt_vector_for_cost |
Key and map that records association between vector conditions and corresponding loop mask, and is populated by prepare_vec_mask.
typedef hash_map<tree_operand_hash, std::pair<stmt_vec_info, innermost_loop_behavior *> > vec_base_alignments |
Maps base addresses to an innermost_loop_behavior and the stmt it was derived from that gives the maximum known alignment for that base.
typedef auto_vec<rgroup_controls> vec_loop_lens |
typedef std::pair<tree, tree> vec_object_pair |
Describes two objects whose addresses must be unequal for the vectorized loop to be valid.
typedef vect_pattern *(* vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *, slp_compat_nodes_map_t *, slp_tree *) |
Function pointer to create a new pattern matcher from a generic type.
enum _complex_perm_kinds |
enum dr_alignment_support |
enum operation_type |
enum slp_instance_kind |
enum slp_vect_type |
The type of vectorization that can be applied to the stmt: regular loop-based vectorization; pure SLP - the stmt is a part of SLP instances and does not have uses outside SLP instances; or hybrid SLP and loop-based - the stmt is a part of SLP instance and also must be loop-based vectorized, since it has uses outside SLP sequences. In the loop context the meanings of pure and hybrid SLP are slightly different. By saying that pure SLP is applied to the loop, we mean that we exploit only intra-iteration parallelism in the loop; i.e., the loop can be vectorized without doing any conceptual unrolling, because we don't pack together stmts from different iterations, only within a single iteration. Loop hybrid SLP means that we exploit both intra-iteration and inter-iteration parallelism (e.g., number of elements in the vector is 4 and the slp-group-size is 2, in which case we don't have enough parallelism within an iteration, so we obtain the rest of the parallelism from subsequent iterations by unrolling the loop by 2).
Enumerator | |
---|---|
loop_vect | |
pure_slp | |
hybrid |
enum stmt_vec_info_type |
Info on vectorized defs.
enum vec_load_store_type |
enum vect_def_type |
enum vect_reduction_type |
enum vect_relevant |
enum vect_var_kind |
|
inline |
References add_stmt_cost(), cond_branch_not_taken, cond_branch_taken, count, gcc_assert, NULL, NULL_TREE, and scalar_stmt.
|
inline |
Dump and add costs.
References count, dump_file, dump_flags, dump_stmt_cost(), and TDF_DETAILS.
Referenced by add_stmt_cost(), add_stmt_cost(), add_stmt_costs(), vect_bb_vectorization_profitable_p(), and vect_estimate_min_profitable_iters().
|
inline |
References add_stmt_cost(), and i.
|
inline |
References add_stmt_cost(), stmt_info_for_cost::count, FOR_EACH_VEC_ELT, i, stmt_info_for_cost::kind, stmt_info_for_cost::misalign, stmt_info_for_cost::node, stmt_info_for_cost::stmt_info, stmt_info_for_cost::vectype, and stmt_info_for_cost::where.
Referenced by vect_analyze_loop_operations(), vect_compute_single_scalar_iteration_cost(), and vect_slp_analyze_operations().
|
inline |
Return true if data access DR_INFO is aligned to the targets preferred alignment for VECTYPE (which may be less than a full vector).
References dr_misalignment().
Referenced by vect_enhance_data_refs_alignment(), and vector_alignment_reachable_p().
|
inline |
Alias targetm.vectorize.builtin_vectorization_cost.
References targetm.
Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), and vect_get_stmt_cost().
|
extern |
Function bump_vector_ptr Increment a pointer (to a vector type) by vector-size. If requested, i.e. if PTR-INCR is given, then also connect the new increment stmt to the existing def-use update-chain of the pointer, by modifying the PTR_INCR as illustrated below: The pointer def-use update-chain before this function: DATAREF_PTR = phi (p_0, p_2) .... PTR_INCR: p_2 = DATAREF_PTR + step The pointer def-use update-chain after this function: DATAREF_PTR = phi (p_0, p_2) .... NEW_DATAREF_PTR = DATAREF_PTR + BUMP .... PTR_INCR: p_2 = NEW_DATAREF_PTR + step Input: DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated in the loop. PTR_INCR - optional. The stmt that updates the pointer in each iteration of the loop. The increment amount across iterations is expected to be vector_size. BSI - location where the new update stmt is to be placed. STMT_INFO - the original scalar memory-access stmt that is being vectorized. BUMP - optional. The offset by which to bump the pointer. If not given, the offset is assumed to be vector_size. Output: Return NEW_DATAREF_PTR as illustrated above.
References build1(), copy_ssa_name(), DR_PTR_INFO, duplicate_ssa_name_ptr_info(), fold_build2, fold_convert, fold_stmt(), follow_all_ssa_edges(), FOR_EACH_SSA_USE_OPERAND, gcc_assert, gimple_build_assign(), gsi_for_stmt(), gsi_stmt(), is_gimple_min_invariant(), make_ssa_name(), mark_ptr_info_alignment_unknown(), operand_equal_p(), ptr_type_node, SET_USE, SSA_NAME_PTR_INFO, SSA_OP_USE, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, TREE_CODE, TREE_TYPE, TYPE_SIZE_UNIT, update_stmt(), USE_FROM_PTR, and vect_finish_stmt_generation().
Referenced by vectorizable_load(), and vectorizable_store().
|
extern |
Check whether it is possible to load COUNT elements of type ELT_TYPE using the method implemented by duplicate_and_interleave. Return true if so, returning the number of intermediate vectors in *NVECTORS_OUT (if nonnull) and the type of each intermediate vector in *VECTOR_TYPE_OUT (if nonnull).
References build_nonstandard_integer_type(), can_vec_perm_const_p(), count, GET_MODE_BITSIZE(), GET_MODE_NUNITS(), GET_MODE_SIZE(), GET_MODE_UNIT_SIZE, get_vectype_for_scalar_type(), i, int_mode_for_size(), known_eq, TYPE_MODE, vect_gen_perm_mask_checked(), and VECTOR_MODE_P.
Referenced by duplicate_and_interleave(), vect_build_slp_tree_2(), and vectorizable_reduction().
|
extern |
Used in gimple-loop-interchange.c and tree-parloops.cc.
References check_reduction_path(), and path.
|
extern |
STMT_INFO is a non-strided load or store, meaning that it accesses elements with a known constant step. Return -1 if that step is negative, 0 if it is zero, and 1 if it is greater than zero.
References size_zero_node, STMT_VINFO_DR_INFO, tree_int_cst_compare(), and vect_dr_behavior().
Referenced by vect_optimize_slp_pass::decide_masked_load_lanes(), get_group_load_store_type(), get_load_store_type(), vect_analyze_slp(), vect_build_slp_instance(), and vect_lower_load_permutations().
Return true if call statements CALL1 and CALL2 are similar enough to be combined into the same SLP group.
References gimple_call_arg(), gimple_call_combined_fn(), gimple_call_fn(), gimple_call_fntype(), gimple_call_internal_p(), gimple_call_lhs(), gimple_call_num_args(), i, map, operand_equal_p(), TREE_TYPE, types_compatible_p(), and vect_get_operand_map().
Referenced by compatible_complex_nodes_p(), and vect_build_slp_tree_1().
|
extern |
Return an invariant or register for EXPR and emit necessary computations in the LOOP_VINFO loop preheader.
References force_gimple_operand(), hash_map< KeyId, Value, Traits >::get_or_insert(), gsi_insert_seq_on_edge_immediate(), is_gimple_min_invariant(), is_gimple_reg(), _loop_vec_info::ivexpr_map, loop_preheader_edge(), LOOP_VINFO_LOOP, NULL, NULL_TREE, and unshare_expr().
Referenced by vect_get_strided_load_store_ops(), vectorizable_load(), and vectorizable_store().
|
extern |
Return the misalignment of DR_INFO accessed in VECTYPE with OFFSET applied.
References dr_info::dr, dr_vec_info::dr, DR_GROUP_FIRST_ELEMENT, DR_INIT, DR_MISALIGNMENT_UNINITIALIZED, DR_MISALIGNMENT_UNKNOWN, gcc_assert, known_eq, STMT_VINFO_DR_INFO, STMT_VINFO_GROUPED_ACCESS, targetm, and TREE_INT_CST_LOW.
Referenced by aligned_access_p(), get_group_load_store_type(), get_load_store_type(), get_negative_load_store_type(), known_alignment_for_access_p(), vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_known_alignment_in_bytes(), vect_peeling_supportable(), vect_vfa_access_size(), vector_alignment_reachable_p(), vectorizable_load(), and vectorizable_store().
|
inline |
Only defined once DR_MISALIGNMENT is defined.
References DR_GROUP_FIRST_ELEMENT, STMT_VINFO_DR_INFO, and STMT_VINFO_GROUPED_ACCESS.
Referenced by vector_alignment_reachable_p().
|
extern |
Dump a cost entry according to args to F.
References cond_branch_not_taken, cond_branch_taken, count, print_gimple_expr(), scalar_load, scalar_stmt, scalar_store, scalar_to_vec, STMT_VINFO_STMT, TDF_SLIM, unaligned_load, unaligned_store, vec_construct, vec_perm, vec_promote_demote, vec_to_scalar, vect_body, vect_epilogue, vect_prologue, vector_gather_load, vector_load, vector_scatter_store, vector_stmt, and vector_store.
Referenced by add_stmt_cost().
|
extern |
Build a variable-length vector in which the elements in ELTS are repeated to fill NRESULTS vectors of type VECTOR_TYPE. Store the vectors in RESULTS and add any new instructions to SEQ. The approach we use is: (1) Find a vector mode VM with integer elements of mode IM. (2) Replace ELTS[0:NELTS] with ELTS'[0:NELTS'], where each element of ELTS' has mode IM. This involves creating NELTS' VIEW_CONVERT_EXPRs from small vectors to IM. (3) Duplicate each ELTS'[I] into a vector of mode VM. (4) Use a tree of interleaving VEC_PERM_EXPRs to create VMs with the correct byte contents. (5) Use VIEW_CONVERT_EXPR to cast the final VMs to the required type. We try to find the largest IM for which this sequence works, in order to cut down on the number of interleaves.
References build_vector_type(), can_duplicate_and_interleave_p(), gcc_unreachable, gimple_build(), gimple_build_assign(), gimple_build_vector(), gimple_build_vector_from_val(), gimple_seq_add_stmt(), i, make_ssa_name(), tree_vector_builder::new_vector(), TREE_TYPE, and TYPE_VECTOR_SUBPARTS().
Referenced by get_initial_defs_for_reduction(), and vect_create_constant_vectors().
|
extern |
Function find_loop_location. Extract the location of the loop in the source code. If the loop is not well formed for vectorization, an estimated location is calculated. Return the loop location if it succeeds and NULL if not.
References BUILTINS_LOCATION, cfun, dump_user_location_t::from_function_decl(), get_loop_exit_condition(), get_loop_exit_edges(), gimple_location(), gsi_end_p(), gsi_next(), gsi_start_bb(), gsi_stmt(), loop::header, LOCATION_LOCUS, loop_outer(), LOOPS_HAVE_RECORDED_EXITS, loops_state_satisfies_p(), NULL, and si.
Referenced by canonicalize_loop_induction_variables(), loop_distribution::execute(), find_loop_guard(), hoist_guard(), tree_loop_interchange::interchange(), optimize_mask_stores(), parallelize_loops(), tree_loop_unroll_and_jam(), tree_ssa_iv_optimize_loop(), tree_ssa_unswitch_loops(), tree_unswitch_outer_loop(), and try_vectorize_loop_1().
|
inline |
Return the offset calculated by adding the offset of this DR_INFO to the corresponding data_reference's offset. If CHECK_OUTER then use vect_dr_behavior to select the appropriate data_reference to use.
References dr_info::dr, fold_build2, fold_convert, data_reference::innermost, innermost_loop_behavior::offset, sizetype, TREE_TYPE, and vect_dr_behavior().
Referenced by check_scan_store(), vect_create_addr_base_for_vector_ref(), vectorizable_load(), and vectorizable_store().
|
inline |
Return the later statement between STMT1_INFO and STMT2_INFO.
References gimple_uid(), and vect_orig_stmt().
Referenced by vect_create_constant_vectors(), vect_find_first_scalar_stmt_in_slp(), vect_find_last_scalar_stmt_in_slp(), vect_preserves_scalar_order_p(), vect_prune_runtime_alias_test_list(), and vect_slp_analyze_load_dependences().
Function get_mask_type_for_scalar_type. Returns the mask type corresponding to a result of comparison of vectors of specified SCALAR_TYPE as supported by target. NODE, if nonnull, is the SLP tree node that will use the returned vector type.
References get_vectype_for_scalar_type(), NULL, and truth_type_for().
|
extern |
Function get_mask_type_for_scalar_type. Returns the mask type corresponding to a result of comparison of vectors of specified SCALAR_TYPE as supported by target. If GROUP_SIZE is nonzero and we're performing BB vectorization, make sure that the number of elements in the vector is no bigger than GROUP_SIZE.
References get_vectype_for_scalar_type(), NULL, and truth_type_for().
Referenced by vect_check_scalar_mask(), vect_convert_mask_for_vectype(), vect_determine_mask_precision(), vect_get_vector_types_for_stmt(), vect_recog_bool_pattern(), vect_recog_cond_store_pattern(), vect_recog_gcond_pattern(), and vect_recog_mask_conversion_pattern().
|
extern |
In tree-vect-stmts.cc.
If NUNITS is nonzero, return a vector type that contains NUNITS elements of type SCALAR_TYPE, or null if the target doesn't support such a type. If NUNITS is zero, return a vector type that contains elements of type SCALAR_TYPE, choosing whichever vector size the target prefers. If PREVAILING_MODE is VOIDmode, we have not yet chosen a vector mode for this vectorization region and want to "autodetect" the best choice. Otherwise, PREVAILING_MODE is a previously-chosen vector TYPE_MODE and we want the new type to be interoperable with it. PREVAILING_MODE in this case can be a scalar integer mode or a vector mode; when it is a vector mode, the function acts like a tree-level version of related_vector_mode.
References build_nonstandard_integer_type(), build_qualified_type(), build_vector_type_for_mode(), gcc_assert, GET_MODE_BITSIZE(), GET_MODE_SIZE(), INTEGRAL_MODE_P, INTEGRAL_TYPE_P, is_float_mode(), is_int_mode(), KEEP_QUAL_ADDR_SPACE, known_eq, mode_for_vector(), NULL_TREE, POINTER_TYPE_P, related_vector_mode(), SCALAR_FLOAT_TYPE_P, SCALAR_INT_MODE_P, targetm, TREE_CODE, TYPE_ADDR_SPACE, TYPE_ALIGN_UNIT, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_PRECISION, TYPE_QUALS, TYPE_UNSIGNED, lang_hooks::types, and VECTOR_MODE_P.
Referenced by get_same_sized_vectype(), get_vec_alignment_for_array_type(), get_vectype_for_scalar_type(), supportable_indirect_convert_operation(), vect_create_epilog_for_reduction(), vect_create_partial_epilog(), and vect_find_reusable_accumulator().
Function get_same_sized_vectype Returns a vector type corresponding to SCALAR_TYPE of size VECTOR_TYPE if supported by the target.
References GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), NULL_TREE, truth_type_for(), TYPE_MODE, and VECT_SCALAR_BOOLEAN_TYPE_P.
Referenced by vect_create_epilog_for_reduction(), vect_recog_rotate_pattern(), vectorizable_bswap(), vectorizable_conversion(), vectorizable_induction(), and vectorizable_reduction().
Return the vector type corresponding to SCALAR_TYPE as supported by the target. NODE, if nonnull, is the SLP tree node that will use the returned vector type.
References get_vectype_for_scalar_type(), and SLP_TREE_LANES.
|
extern |
Function get_vectype_for_scalar_type. Returns the vector type corresponding to SCALAR_TYPE as supported by the target. If GROUP_SIZE is nonzero and we're performing BB vectorization, make sure that the number of elements in the vector is no bigger than GROUP_SIZE.
References hash_set< KeyId, Lazy, Traits >::add(), floor_log2(), gcc_assert, get_related_vectype_for_scalar_type(), is_a(), maybe_ge, vec_info::slp_instances, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), vec_info::used_vector_modes, and vec_info::vector_mode.
Referenced by can_duplicate_and_interleave_p(), get_initial_def_for_reduction(), get_mask_type_for_scalar_type(), get_mask_type_for_scalar_type(), get_vectype_for_scalar_type(), vect_add_conversion_to_pattern(), vect_analyze_data_refs(), vect_build_slp_instance(), vect_build_slp_tree_2(), vect_convert_input(), vect_determine_mask_precision(), vect_determine_vectorization_factor(), vect_gather_scatter_fn_p(), vect_get_vec_defs_for_operand(), vect_get_vector_types_for_stmt(), vect_is_simple_cond(), vect_phi_first_order_recurrence_p(), vect_recog_abd_pattern(), vect_recog_average_pattern(), vect_recog_bit_insert_pattern(), vect_recog_bitfield_ref_pattern(), vect_recog_bool_pattern(), vect_recog_build_binary_gimple_stmt(), vect_recog_cast_forwprop_pattern(), vect_recog_cond_expr_convert_pattern(), vect_recog_cond_store_pattern(), vect_recog_ctz_ffs_pattern(), vect_recog_divmod_pattern(), vect_recog_gather_scatter_pattern(), vect_recog_mask_conversion_pattern(), vect_recog_mod_var_pattern(), vect_recog_mulhs_pattern(), vect_recog_mult_pattern(), vect_recog_over_widening_pattern(), vect_recog_popcount_clz_ctz_ffs_pattern(), vect_recog_pow_pattern(), vect_recog_rotate_pattern(), vect_recog_sat_sub_pattern_transform(), vect_recog_sat_trunc_pattern(), vect_recog_vector_vector_shift_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), vect_slp_prefer_store_lanes_p(), vect_split_statement(), vect_supportable_conv_optab_p(), vect_supportable_direct_optab_p(), vect_supportable_shift(), vect_synth_mult_by_constant(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_conversion(), vectorizable_lane_reducing(), vectorizable_operation(), vectorizable_reduction(), vectorizable_shift(), and vectorizable_simd_clone_call().
|
extern |
For a statement STMT_INFO taking part in a reduction operation return the stmt_vec_info the meta information is stored on.
References as_a(), gcc_assert, gimple_phi_num_args(), is_a(), vec_info::lookup_def(), STMT_VINFO_DEF_TYPE, STMT_VINFO_REDUC_DEF, vect_double_reduction_def, vect_nested_cycle, vect_orig_stmt(), vect_phi_initial_value(), and VECTORIZABLE_CYCLE_DEF.
Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_create_epilog_for_reduction(), vect_reduc_type(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_condition(), vectorizable_live_operation(), and vectorizable_reduction().
|
inline |
Alias targetm.vectorize.init_cost.
References targetm.
|
inline |
Return true if BB is a loop header.
References basic_block_def::loop_father.
Referenced by vec_info::new_stmt_vec_info(), parloops_valid_reduction_input_p(), and vect_analyze_loop_operations().
|
inline |
Return TRUE if a statement represented by STMT_INFO is a part of a pattern.
Referenced by vec_info::lookup_dr(), vec_info::move_dr(), vect_build_slp_tree_2(), vect_contains_pattern_stmt_p(), vect_get_and_check_slp_defs(), vect_mark_pattern_stmts(), vect_mark_stmts_to_be_vectorized(), vect_orig_stmt(), vect_recog_bitfield_ref_pattern(), vect_remove_slp_scalar_calls(), vect_slp_linearize_chain(), vect_split_statement(), vectorizable_shift(), and vectorizable_store().
|
extern |
Function is_simple_and_all_uses_invariant Return true if STMT_INFO is simple and all uses of it are invariant.
References dump_enabled_p(), dump_printf_loc(), dyn_cast(), FOR_EACH_SSA_TREE_OPERAND, MSG_MISSED_OPTIMIZATION, SSA_OP_USE, vect_constant_def, vect_external_def, vect_is_simple_use(), vect_location, and vect_uninitialized_def.
Referenced by vect_stmt_relevant_p(), and vectorizable_live_operation().
|
inline |
Return TRUE if the (mis-)alignment of the data access is known with respect to the targets preferred alignment for VECTYPE, and FALSE otherwise.
References dr_misalignment(), and DR_MISALIGNMENT_UNKNOWN.
Referenced by vect_enhance_data_refs_alignment(), vect_get_peeling_costs_all_drs(), vect_peeling_supportable(), vect_update_misalignment_for_peel(), and vector_alignment_reachable_p().
|
inline |
Return true if CODE is a lane-reducing opcode.
Referenced by lane_reducing_stmt_p(), vect_transform_reduction(), and vectorizable_reduction().
Return true if STMT is a lane-reducing statement.
References dyn_cast(), gimple_assign_rhs_code(), and lane_reducing_op_p().
Referenced by vect_analyze_slp(), and vectorizable_lane_reducing().
|
inline |
Return the vect cost model for LOOP.
References loop::force_vectorize, NULL, and VECT_COST_MODEL_DEFAULT.
Referenced by unlimited_cost_model(), vect_analyze_loop(), vect_analyze_loop_costing(), vect_enhance_data_refs_alignment(), and vect_prune_runtime_alias_test_list().
|
inline |
References loop::aux.
Referenced by update_epilogue_loop_vinfo(), vect_analyze_loop(), and vect_transform_loops().
|
extern |
Return true if we need an in-order reduction for operation CODE on type TYPE. NEED_WRAPPING_INTEGRAL_OVERFLOW is true if integer overflow must wrap.
References INTEGRAL_TYPE_P, code_helper::is_tree_code(), operation_no_trapping_overflow(), SAT_FIXED_POINT_TYPE_P, and SCALAR_FLOAT_TYPE_P.
Referenced by vect_optimize_slp_pass::start_choosing_layouts(), vect_reassociating_reduction_p(), vect_slp_check_for_roots(), and vectorizable_reduction().
|
inline |
References gimple_bb(), and loop::inner.
Referenced by get_initial_def_for_reduction(), vec_info::insert_seq_on_entry(), supportable_widening_operation(), vect_analyze_data_ref_access(), vect_analyze_data_ref_dependence(), vect_analyze_data_refs(), vect_build_slp_tree_2(), vect_compute_data_ref_alignment(), vect_create_data_ref_ptr(), vect_create_epilog_for_reduction(), vect_dr_behavior(), vect_model_reduction_cost(), vect_reassociating_reduction_p(), vect_record_base_alignments(), vect_setup_realignment(), vect_supportable_dr_alignment(), vect_transform_cycle_phi(), vect_transform_reduction(), vectorizable_induction(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_reduction(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
In tree-vect-loop.cc.
If there is a neutral value X such that a reduction would not be affected by the introduction of additional X elements, return that X, otherwise return null. CODE is the code of the reduction and SCALAR_TYPE is type of the scalar elements. If the reduction has just a single initial value then INITIAL_VALUE is that value, otherwise it is null. If AS_INITIAL is TRUE the value is supposed to be used as initial value. In that case no signed zero is returned.
References build_all_ones_cst(), build_one_cst(), build_real(), build_zero_cst(), dconstm0, HONOR_SIGNED_ZEROS(), code_helper::is_tree_code(), and NULL_TREE.
Referenced by convert_scalar_cond_reduction(), vect_create_epilog_for_reduction(), vect_expand_fold_left(), vect_find_reusable_accumulator(), vect_transform_cycle_phi(), and vectorizable_reduction().
|
extern |
The code below is trying to perform simple optimization - revert if-conversion for masked stores, i.e. if the mask of a store is zero do not perform it and all stored value producers also if possible. For example, for (i=0; i<n; i++) if (c[i]) { p1[i] += 1; p2[i] = p3[i] +2; } this transformation will produce the following semi-hammock: if (!mask__ifc__42.18_165 == { 0, 0, 0, 0, 0, 0, 0, 0 }) { vect__11.19_170 = MASK_LOAD (vectp_p1.20_168, 0B, mask__ifc__42.18_165); vect__12.22_172 = vect__11.19_170 + vect_cst__171; MASK_STORE (vectp_p1.23_175, 0B, mask__ifc__42.18_165, vect__12.22_172); vect__18.25_182 = MASK_LOAD (vectp_p3.26_180, 0B, mask__ifc__42.18_165); vect__19.28_184 = vect__18.25_182 + vect_cst__183; MASK_STORE (vectp_p2.29_187, 0B, mask__ifc__42.18_165, vect__19.28_184); }
References add_bb_to_loop(), add_phi_arg(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_empty_bb(), create_phi_node(), dom_info_available_p(), dump_enabled_p(), dump_printf_loc(), EDGE_SUCC, find_loop_location(), flow_loop_nested_p(), FOR_EACH_IMM_USE_FAST, free(), gcc_assert, get_loop_body(), gimple_bb(), gimple_build_cond(), gimple_call_arg(), gimple_call_internal_p(), gimple_get_lhs(), gimple_has_volatile_ops(), gimple_set_vdef(), gimple_vdef(), gimple_vop(), gimple_vuse(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_last_bb(), gsi_move_before(), gsi_next(), gsi_prev(), gsi_remove(), GSI_SAME_STMT, gsi_start_bb(), gsi_stmt(), has_zero_uses(), i, basic_block_def::index, is_gimple_debug(), last, profile_probability::likely(), basic_block_def::loop_father, make_edge(), make_single_succ_edge(), make_ssa_name(), MSG_NOTE, NULL, NULL_TREE, loop::num_nodes, release_defs(), set_immediate_dominator(), split_block(), TREE_CODE, TREE_TYPE, UNKNOWN_LOCATION, USE_STMT, vect_location, VECTOR_TYPE_P, and worklist.
If the target supports a permute mask that reverses the elements in a vector of type VECTYPE, return that mask, otherwise return null.
References can_vec_perm_const_p(), i, NULL_TREE, TYPE_MODE, TYPE_VECTOR_SUBPARTS(), and vect_gen_perm_mask_checked().
Referenced by get_negative_load_store_type(), vectorizable_load(), and vectorizable_store().
|
extern |
Return the mask input to a masked load or store. VEC_MASK is the vectorized form of the scalar mask condition and LOOP_MASK, if nonnull, is the mask that needs to be applied to all loads and stores in a vectorized loop. Return VEC_MASK if LOOP_MASK is null or if VEC_MASK is already masked, otherwise return VEC_MASK & LOOP_MASK. MASK_TYPE is the type of both masks. If new statements are needed, insert them before GSI.
References hash_set< KeyId, Lazy, Traits >::contains(), gcc_assert, gimple_build_assign(), gsi_insert_before(), GSI_SAME_STMT, make_temp_ssa_name(), NULL, TREE_TYPE, useless_type_conversion_p(), and _loop_vec_info::vec_cond_masked_set.
Referenced by vectorizable_call(), vectorizable_early_exit(), vectorizable_load(), vectorizable_operation(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
References cond_branch_not_taken, cond_branch_taken, count, gcc_assert, NULL, NULL_TREE, record_stmt_cost(), and scalar_stmt.
|
extern |
References count, NULL, and record_stmt_cost().
|
extern |
Record the cost of a statement, either by directly informing the target model or by saving it in a vector for later processing. Return a preliminary estimate of the statement's cost.
References builtin_vectorization_cost(), count, si, STMT_VINFO_GATHER_SCATTER_P, unaligned_load, unaligned_store, vector_gather_load, vector_load, vector_scatter_store, and vector_store.
Referenced by vector_costs::add_stmt_cost(), record_stmt_cost(), record_stmt_cost(), record_stmt_cost(), vect_bb_slp_scalar_cost(), vect_bb_vectorization_profitable_p(), vect_compute_single_scalar_iteration_cost(), vect_get_known_peeling_cost(), vect_get_load_cost(), vect_get_store_cost(), vect_model_promotion_demotion_cost(), vect_model_reduction_cost(), vect_model_simple_cost(), vect_prologue_cost_for_slp(), vectorizable_bb_reduc_epilogue(), vectorizable_bswap(), vectorizable_call(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_live_operation(), vectorizable_load(), vectorizable_nonlinear_induction(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_slp_permutation(), and vectorizable_store().
|
extern |
References count, NULL, and record_stmt_cost().
Referenced by record_stmt_cost(), and record_stmt_cost().
|
inline |
Overload of record_stmt_cost with VECTYPE derived from STMT_INFO.
References count, record_stmt_cost(), and STMT_VINFO_VECTYPE.
|
inline |
Overload of record_stmt_cost with VECTYPE derived from STMT_INFO and SLP node specified.
References count, record_stmt_cost(), and STMT_VINFO_VECTYPE.
|
extern |
Function reduction_fn_for_scalar_code Input: CODE - tree_code of a reduction operations. Output: REDUC_FN - the corresponding internal function to be used to reduce the vector of partial results into a single scalar result, or IFN_LAST if the operation is a supported reduction operation, but does not have such an internal function. Return FALSE if CODE currently cannot be vectorized as reduction.
References code_helper::is_tree_code().
Referenced by vect_slp_check_for_roots(), vectorizable_bb_reduc_epilogue(), vectorizable_reduction(), and vectorize_slp_instance_root_stmt().
In tree-if-conv.cc.
Return TRUE if REF is a within-bound array reference.
References for_each_index(), gcc_assert, idx_within_array_bound(), loop_containing_stmt(), and NULL.
Referenced by ifcvt_memrefs_wont_trap(), and vect_analyze_early_break_dependences().
|
inline |
Referenced by vect_update_misalignment_for_peel().
|
inline |
|
extern |
This function verifies that the following restrictions apply to LOOP: (1) it consists of exactly 2 basic blocks - header, and an empty latch for innermost loop and 5 basic blocks for outer-loop. (2) it is single entry, single exit (3) its exit condition is the last stmt in the header (4) E is the entry/exit edge of LOOP.
References can_copy_bbs_p(), empty_block_p(), free(), get_loop_body_with_size(), get_loop_exit_condition(), gsi_last_bb(), gsi_stmt(), loop::latch, loop_outer(), loop_preheader_edge(), and loop::num_nodes.
Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().
class loop * slpeel_tree_duplicate_loop_to_edge_cfg | ( | class loop * | loop, |
edge | loop_exit, | ||
class loop * | scalar_loop, | ||
edge | scalar_exit, | ||
edge | e, | ||
edge * | new_e, | ||
bool | flow_loops, | ||
vec< basic_block > * | updated_doms ) |
Given LOOP this function generates a new copy of it and puts it on E which is either the entry or exit of LOOP. If SCALAR_LOOP is non-NULL, assume LOOP and SCALAR_LOOP are equivalent and copy the basic blocks from SCALAR_LOOP instead of LOOP, but to either the entry or exit of LOOP. If FLOW_LOOPS then connect LOOP to SCALAR_LOOP as a continuation. This is correct for cases where one loop continues from the other like in the vectorizer, but not true for uses in e.g. loop distribution where the contents of the loop body are split but the iteration space of both copies remains the same. If UPDATED_DOMS is not NULL it is updated with the list of basic blocks whose dominators were updated during the peeling. When doing early break vectorization then LOOP_VINFO needs to be provided and is used to keep track of any newly created memory references that need to be updated should we decide to vectorize.
References add_phi_arg(), add_phi_args_after_copy(), adjust_debug_stmts(), adjust_phi_and_debug_stmts(), CDI_DOMINATORS, checking_verify_dominators(), copy_bbs(), copy_ssa_name(), create_phi_node(), delete_basic_block(), duplicate_loop(), duplicate_subloops(), EDGE_COUNT, EDGE_PRED, first_dom_son(), flow_bb_inside_loop_p(), flush_pending_stmts(), FOR_EACH_EDGE, free(), gcc_assert, hash_map< KeyId, Value, Traits >::get(), get_all_dominated_blocks(), get_bb_copy(), get_immediate_dominator(), get_live_virtual_operand_on_edge(), get_loop_body_with_size(), get_loop_copy(), get_loop_exit_edges(), get_virtual_phi(), gimple_phi_arg_def_from_edge(), gimple_phi_num_args(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_next(), gsi_start_phis(), gsi_stmt(), loop::header, i, loop::inner, iterate_fix_dominators(), loop::latch, loop_latch_edge(), loop_outer(), loop_preheader_edge(), MAY_HAVE_DEBUG_BIND_STMTS, next_dom_son(), NULL, NULL_TREE, loop::num_nodes, PHI_ARG_DEF_FROM_EDGE, PHI_ARG_DEF_PTR_FROM_EDGE, PHI_RESULT, basic_block_def::preds, hash_map< KeyId, Value, Traits >::put(), queue, redirect_edge_and_branch(), redirect_edge_and_branch_force(), redirect_edge_pred(), redirect_edge_var_map_clear(), remove_phi_node(), rename_use_op(), rename_variables_in_bb(), set_immediate_dominator(), SET_PHI_ARG_DEF, SET_PHI_ARG_DEF_ON_EDGE, single_pred(), single_pred_edge(), single_succ_edge(), single_succ_p(), split_edge(), TREE_CODE, true, UNKNOWN_LOCATION, and virtual_operand_p().
Referenced by copy_loop_before(), and vect_do_peeling().
|
extern |
Function supportable_indirect_convert_operation Check whether an operation represented by the code CODE is a single operation, or a sequence of operations, supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN producing a result of type VECTYPE_OUT). Convert operations we currently support directly are FIX_TRUNC and FLOAT. This function checks if these operations are supported by the target platform directly (via vector tree-codes). Output: - converts contains some pairs to perform the convert operation, the pair's first is the intermediate type, and its second is the code of a vector operation to be used when converting the operation from the previous type to the intermediate type.
References build_nonstandard_integer_type(), FOR_EACH_2XWIDER_MODE, GET_MODE_BITSIZE(), GET_MODE_INNER, GET_MODE_SIZE(), get_related_vectype_for_scalar_type(), int_mode_for_size(), wi::min_precision(), NULL_TREE, opt_mode< T >::require(), SIGNED, SLP_TREE_LANES, SSA_NAME_RANGE_INFO, supportable_convert_operation(), TREE_CODE, TYPE_MODE, TYPE_PRECISION, TYPE_VECTOR_SUBPARTS(), vect_get_range_info(), and vect_get_slp_scalar_def().
Referenced by expand_vector_conversion(), and vectorizable_conversion().
|
extern |
Function supportable_narrowing_operation Check whether an operation represented by the code CODE is a narrowing operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN and producing a result of type VECTYPE_OUT). Narrowing operations we currently support are NOP (CONVERT), FIX_TRUNC and FLOAT. This function checks if these operations are supported by the target platform directly via vector tree-codes. Output: - CODE1 is the code of a vector operation to be used when vectorizing the operation, if available. - MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like int->short->char - in that case MULTI_STEP_CVT will be 1). - INTERM_TYPES contains the intermediate type required to perform the narrowing operation (short in the above example).
References CASE_CONVERT, gcc_unreachable, i, insn_data, code_helper::is_tree_code(), known_eq, MAX_INTERM_CVT_STEPS, optab_default, optab_for_tree_code(), optab_handler(), SCALAR_INT_MODE_P, lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_double_mask_nunits(), and VECTOR_BOOLEAN_TYPE_P.
Referenced by simple_integer_narrowing(), and vectorizable_conversion().
|
extern |
Function supportable_widening_operation Check whether an operation represented by the code CODE is a widening operation that is supported by the target platform in vector form (i.e., when operating on arguments of type VECTYPE_IN producing a result of type VECTYPE_OUT). Widening operations we currently support are NOP (CONVERT), FLOAT, FIX_TRUNC and WIDEN_MULT. This function checks if these operations are supported by the target platform either directly (via vector tree-codes), or via target builtins. Output: - CODE1 and CODE2 are codes of vector operations to be used when vectorizing the operation, if available. - MULTI_STEP_CVT determines the number of required intermediate steps in case of multi-step conversion (like char->short->int - in that case MULTI_STEP_CVT will be 1). - INTERM_TYPES contains the intermediate type required to perform the widening operation (short in the above example).
References as_combined_fn(), as_internal_fn(), build_vector_type_for_mode(), CASE_CONVERT, CONVERT_EXPR_CODE_P, direct_internal_fn_optab(), dyn_cast(), gcc_unreachable, GET_MODE_INNER, gimple_assign_lhs(), i, insn_data, code_helper::is_tree_code(), known_eq, lookup_evenodd_internal_fn(), lookup_hilo_internal_fn(), LOOP_VINFO_LOOP, MAX_INTERM_CVT_STEPS, MAX_TREE_CODES, nested_in_vect_loop_p(), NULL, optab_default, optab_for_tree_code(), optab_handler(), code_helper::safe_as_tree_code(), SCALAR_INT_MODE_P, STMT_VINFO_REDUC_DEF, supportable_widening_operation(), lang_hooks_for_types::type_for_mode, TYPE_MODE, TYPE_UNSIGNED, TYPE_VECTOR_SUBPARTS(), lang_hooks::types, unknown_optab, vect_halve_mask_nunits(), vect_orig_stmt(), VECTOR_BOOLEAN_TYPE_P, VECTOR_MODE_P, and widening_fn_p().
Referenced by supportable_widening_operation(), vect_recog_abd_pattern(), vect_recog_widen_abd_pattern(), vect_recog_widen_op_pattern(), and vectorizable_conversion().
Return true if the vect cost model is unlimited.
References loop_cost_model(), and VECT_COST_MODEL_UNLIMITED.
Referenced by vect_analyze_loop(), vect_enhance_data_refs_alignment(), vect_estimate_min_profitable_iters(), vect_peeling_hash_choose_best_peeling(), vect_peeling_hash_insert(), and vect_slp_region().
Determine the main loop exit for the vectorizer.
References candidate(), CDI_DOMINATORS, chrec_contains_undetermined(), COMPARISON_CLASS_P, dominated_by_p(), get_loop_exit_condition(), get_loop_exit_edges(), integer_nonzerop(), integer_zerop(), loop::latch, tree_niter_desc::may_be_zero, niter_desc::niter, NULL, number_of_iterations_exit_assumptions(), single_pred(), and single_pred_p().
Referenced by set_uid_loop_bbs(), and vect_analyze_loop_form().
|
extern |
Function vect_analyze_data_ref_accesses. Analyze the access pattern of all the data references in the loop. FORNOW: the only access pattern that is considered vectorizable is a simple step 1 (consecutive) access. FORNOW: handle only arrays and pointer accesses.
References absu_hwi(), hash_set< KeyId, Lazy, Traits >::add(), hash_set< KeyId, Lazy, Traits >::begin(), can_group_stmts_p(), data_ref_compare_tree(), vec_info_shared::datarefs, dr_vec_info::dr, DR_BASE_ADDRESS, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, dr_group_sort_cmp(), DR_INIT, DR_IS_READ, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, hash_set< KeyId, Lazy, Traits >::end(), opt_result::failure_at(), FOR_EACH_VEC_ELT, g, gcc_assert, gimple_bb(), gimple_uid(), dr_vec_info::group, i, basic_block_def::index, is_a(), vec_info::lookup_dr(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, loop::next, NULL, hash_set< KeyId, Lazy, Traits >::remove(), vec_info::shared, dr_vec_info::stmt, STMT_VINFO_DR_INFO, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_SLP_VECT_ONLY, STMT_VINFO_STMT, STMT_VINFO_VECTORIZABLE, opt_result::success(), tree_fits_shwi_p(), tree_fits_uhwi_p(), tree_int_cst_equal(), TREE_INT_CST_LOW, tree_to_shwi(), tree_to_uhwi(), TREE_TYPE, TYPE_SIZE_UNIT, types_compatible_p(), vect_analyze_data_ref_access(), and vect_location.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Function vect_analyze_data_ref_dependences. Examine all the data references in the loop, and make sure there do not exist any data dependences between them. Set *MAX_VF according to the maximum vectorization factor the data dependences allow.
References compute_all_dependences(), DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, gcc_assert, i, LOOP_VINFO_DATAREFS, LOOP_VINFO_DDRS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP_NEST, LOOP_VINFO_NO_DATA_DEPENDENCIES, LOOP_VINFO_ORIG_MAX_VECT_FACTOR, opt_result::success(), vect_analyze_data_ref_dependence(), and vect_analyze_early_break_dependences().
Referenced by vect_analyze_loop_2().
|
extern |
Function vect_analyze_data_refs. Find all the data references in the loop or basic block. The general structure of the analysis of data refs in the vectorizer is as follows: 1- vect_analyze_data_refs(loop/bb): call compute_data_dependences_for_loop/bb to find and analyze all data-refs in the loop/bb and their dependences. 2- vect_analyze_dependences(): apply dependence testing using ddrs. 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok. 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
References as_a(), data_reference::aux, build_fold_indirect_ref, vec_info_shared::datarefs, DECL_NONALIASED, dr_analyze_innermost(), DR_BASE_ADDRESS, DR_INIT, DR_IS_READ, DR_IS_WRITE, DR_OFFSET, DR_REF, DR_STEP, DR_STMT, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), opt_result::failure_at(), fatal(), fold_build2, fold_build_pointer_plus, FOR_EACH_VEC_ELT, gcc_assert, get_base_address(), get_vectype_for_scalar_type(), i, is_a(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, nested_in_vect_loop_p(), NULL, gather_scatter_info::offset, vec_info::shared, data_reference::stmt, STMT_VINFO_DR_BASE_ADDRESS, STMT_VINFO_DR_BASE_ALIGNMENT, STMT_VINFO_DR_BASE_MISALIGNMENT, STMT_VINFO_DR_INIT, STMT_VINFO_DR_OFFSET, STMT_VINFO_DR_OFFSET_ALIGNMENT, STMT_VINFO_DR_STEP, STMT_VINFO_DR_STEP_ALIGNMENT, STMT_VINFO_DR_WRT_VEC_LOOP, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_SIMD_LANE_ACCESS_P, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), TDF_DETAILS, TREE_CODE, TREE_THIS_VOLATILE, TREE_TYPE, TYPE_VECTOR_SUBPARTS(), unshare_expr(), VAR_P, vect_check_gather_scatter(), and vect_location.
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Function vect_analyze_data_refs_alignment Analyze the alignment of the data-references in the loop. Return FALSE if a data reference is found that cannot be vectorized.
References DR_GROUP_FIRST_ELEMENT, DUMP_VECT_SCOPE, FOR_EACH_VEC_ELT, i, vec_info::lookup_dr(), LOOP_VINFO_DATAREFS, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_VECTORIZABLE, STMT_VINFO_VECTYPE, opt_result::success(), vect_compute_data_ref_alignment(), and vect_record_base_alignments().
Referenced by vect_analyze_loop_2().
|
extern |
Drive for loop analysis stage.
Function vect_analyze_loop. Apply a set of analyses on LOOP, and create a loop_vec_info struct for it. The different analyses will record information in the loop_vec_info struct.
References vect_loop_form_info::assumptions, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, _loop_vec_info::epilogue_vinfo, opt_pointer_wrapper< loop_vec_info >::failure_at(), fatal(), find_loop_nest(), free_numbers_of_iterations_estimates(), gcc_assert, GET_MODE_NAME, i, loop::inner, integer_onep(), known_eq, LOOP_C_FINITE, loop_constraint_set(), loop_cost_model(), vec_info_shared::loop_nest, loop_outer(), LOOP_REQUIRES_VERSIONING, loop_vec_info_for_loop(), LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_VECT_FACTOR, LOOP_VINFO_VECTORIZABLE_P, LOOP_VINFO_VERSIONING_THRESHOLD, maybe_ge, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, opt_pointer_wrapper< loop_vec_info >::propagate_failure(), scev_reset_htab(), loop::simdlen, loop::simduid, opt_pointer_wrapper< loop_vec_info >::success(), vector_costs::suggested_epilogue_mode(), _loop_vec_info::suggested_unroll_factor, targetm, unlimited_cost_model(), vect_analyze_loop_1(), vect_analyze_loop_form(), VECT_COMPARE_COSTS, VECT_COST_MODEL_VERY_CHEAP, vect_joust_loop_vinfos(), vect_location, _loop_vec_info::vector_costs, and vec_info::vector_mode.
Referenced by try_vectorize_loop_1().
|
extern |
Function vect_analyze_loop_form. Verify that certain CFG restrictions hold, including: - the loop has a pre-header - the loop has a single entry - nested loops can have only a single exit. - the loop exit condition is simple enough - the number of iterations can be analyzed, i.e, a countable loop. The niter could be analyzed under some assumptions.
References vect_loop_form_info::assumptions, cfun, chrec_contains_undetermined(), vect_loop_form_info::conds, dump_enabled_p(), dump_generic_expr(), dump_printf(), dump_printf_loc(), DUMP_VECT_SCOPE, EDGE_COUNT, EDGE_PRED, empty_block_p(), loop::exits, expr_invariant_in_loop_p(), opt_result::failure_at(), free(), get_loop(), get_loop_body(), get_loop_exit_edges(), gimple_bb(), gimple_call_arg(), gimple_seq_empty_p(), loop::header, i, loop::inner, vect_loop_form_info::inner_loop_cond, integer_onep(), integer_zerop(), loop::latch, vect_loop_form_info::loop_exit, loop_exits_from_bb_p(), loop_preheader_edge(), MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL, loop::num_nodes, vect_loop_form_info::number_of_iterations, vect_loop_form_info::number_of_iterationsm1, phi_nodes(), basic_block_def::preds, single_exit(), single_pred(), single_succ_p(), opt_result::success(), TDF_DETAILS, tree_fits_shwi_p(), tree_to_shwi(), vec_init_loop_exit_info(), vect_analyze_loop_form(), vect_get_loop_niters(), and vect_location.
Referenced by gather_scalar_reductions(), vect_analyze_loop(), and vect_analyze_loop_form().
|
extern |
Check if there are stmts in the loop can be vectorized using SLP. Build SLP trees of packed scalar stmts if SLP is possible.
References as_a(), compare_step_with_zero(), vec_info_shared::datarefs, DR_GROUP_FIRST_ELEMENT, DR_GROUP_SIZE, DR_IS_WRITE, dump_enabled_p(), dump_printf_loc(), DUMP_VECT_SCOPE, dyn_cast(), FOR_EACH_VEC_ELT, gcc_assert, get_loop_exit_edges(), gimple_call_internal_fn(), gimple_call_internal_p(), gimple_cond_code(), gimple_cond_lhs(), gimple_cond_rhs(), gimple_phi_arg_def_from_edge(), vec_info::grouped_stores, gsi_end_p(), gsi_next(), gsi_start_phis(), i, internal_fn_mask_index(), is_a(), is_gimple_call(), lane_reducing_stmt_p(), last, vec_info::lookup_def(), vec_info::lookup_dr(), vec_info::lookup_stmt(), loop_latch_edge(), LOOP_VINFO_EARLY_BREAKS_LIVE_IVS, LOOP_VINFO_LOOP, LOOP_VINFO_LOOP_CONDS, LOOP_VINFO_SLP_INSTANCES, MSG_NOTE, loop::next, NULL, optimize_load_redistribution(), REDUC_GROUP_FIRST_ELEMENT, REDUC_GROUP_NEXT_ELEMENT, _loop_vec_info::reduction_chains, _loop_vec_info::reductions, release_scalar_stmts_to_slp_tree_map(), vec_info::shared, slp_inst_kind_gcond, slp_inst_kind_reduc_chain, slp_inst_kind_reduc_group, slp_inst_kind_store, SLP_INSTANCE_KIND, SLP_INSTANCE_TREE, SLP_TREE_LANES, SLP_TREE_LOAD_PERMUTATION, SLP_TREE_REPRESENTATIVE, SLP_TREE_SCALAR_STMTS, SLP_TREE_VECTYPE, dr_vec_info::stmt, STMT_VINFO_DEF_TYPE, STMT_VINFO_GROUPED_ACCESS, STMT_VINFO_LIVE_P, STMT_VINFO_REDUC_IDX, STMT_VINFO_RELEVANT, STMT_VINFO_RELEVANT_P, STMT_VINFO_STMT, STMT_VINFO_STRIDED_P, STMT_VINFO_VECTYPE, opt_result::success(), TREE_CODE, vect_analyze_slp_instance(), vect_build_slp_instance(), vect_double_reduction_def, vect_free_slp_instance(), vect_gather_slp_loads(), vect_induction_def, vect_internal_def, vect_load_lanes_supported(), vect_location, vect_lower_load_permutations(), vect_match_slp_patterns(), vect_print_slp_graph(), vect_reduction_def, vect_stmt_to_vectorize(), vect_store_lanes_supported(), vect_used_only_live, virtual_operand_p(), visited, vNULL, and zerop().
Referenced by vect_analyze_loop_2(), and vect_slp_analyze_bb_1().
|
extern |
Make sure the statement is vectorizable.
References as_a(), can_vectorize_live_stmts(), dump_enabled_p(), dump_printf_loc(), dyn_cast(), opt_result::failure_at(), gcc_assert, gcc_unreachable, gimple_call_lhs(), gimple_has_volatile_ops(), gsi_end_p(), gsi_next(), gsi_start(), gsi_stmt(), lc_phi_info_type, _slp_tree::ldst_lanes, vec_info::lookup_stmt(), MSG_NOTE, NULL, NULL_TREE, PURE_SLP_STMT, reduc_vec_info_type, si, SLP_TREE_CODE, SLP_TREE_VECTYPE, STMT_VINFO_DEF_TYPE, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_LIVE_P, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, STMT_VINFO_RELEVANT_P, STMT_VINFO_TYPE, STMT_VINFO_VECTYPE, opt_result::success(), vect_analyze_stmt(), vect_condition_def, vect_constant_def, vect_double_reduction_def, vect_external_def, vect_first_order_recurrence, vect_induction_def, vect_internal_def, vect_location, vect_nested_cycle, vect_reduction_def, vect_unknown_def_type, vect_unused_in_scope, vect_used_by_reduction, vect_used_in_outer, vect_used_in_outer_by_reduction, vect_used_only_live, vectorizable_assignment(), vectorizable_call(), vectorizable_comparison(), vectorizable_condition(), vectorizable_conversion(), vectorizable_early_exit(), vectorizable_induction(), vectorizable_lane_reducing(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_recurr(), vectorizable_reduction(), vectorizable_shift(), vectorizable_simd_clone_call(), and vectorizable_store().
Referenced by vect_analyze_loop_operations(), vect_analyze_stmt(), and vect_slp_analyze_node_operations_1().
|
inline |
Return true if LOOP_VINFO requires a runtime check for whether the vector loop is profitable.
References LOOP_VINFO_COST_MODEL_THRESHOLD, LOOP_VINFO_NITERS_KNOWN_P, and vect_vf_for_cost().
Referenced by vect_analyze_loop_2(), vect_analyze_loop_costing(), vect_loop_versioning(), and vect_transform_loop().
|
extern |
This function builds ni_name = number of iterations. Statements are emitted on the loop preheader edge. If NEW_VAR_P is not NULL, set it to TRUE if a new ssa_var is generated.
References create_tmp_var, force_gimple_operand(), gsi_insert_seq_on_edge_immediate(), loop_preheader_edge(), LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, NULL, TREE_CODE, TREE_TYPE, and unshare_expr().
Referenced by vect_do_peeling(), and vect_transform_loop().
|
extern |
Function vect_can_advance_ivs_p In case the number of iterations that LOOP iterates is unknown at compile time, an epilog loop will be generated, and the loop induction variables (IVs) will be "advanced" to the value they are supposed to take just before the epilog loop. Here we check that the access function of the loop IVs and the expression that represents the loop bound are simple enough. These restrictions will be relaxed in the future.
References dump_enabled_p(), dump_printf(), dump_printf_loc(), expr_invariant_in_loop_p(), gsi_end_p(), gsi_next(), gsi_start_phis(), loop::header, iv_phi_p(), vec_info::lookup_stmt(), LOOP_VINFO_LOOP, MSG_MISSED_OPTIMIZATION, MSG_NOTE, NULL_TREE, gphi_iterator::phi(), STMT_VINFO_LOOP_PHI_EVOLUTION_PART, STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE, tree_is_chrec(), vect_can_peel_nonlinear_iv_p(), vect_location, and vect_step_op_add.
Referenced by vect_analyze_loop_2(), vect_do_peeling(), and vect_enhance_data_refs_alignment().
|
extern |
In tree-vect-data-refs.cc.
Function vect_force_dr_alignment_p. Returns whether the alignment of a DECL can be forced to be aligned on ALIGNMENT bit boundary.
References decl_in_symtab_p(), symtab_node::get(), known_le, MAX_OFILE_ALIGNMENT, MAX_STACK_ALIGNMENT, TREE_STATIC, and VAR_P.
Referenced by increase_alignment(), and vect_compute_data_ref_alignment().
|
extern |
Likewise, but taking a code_helper.
References code_helper::is_tree_code(), and vect_can_vectorize_without_simd_p().
Return true if we can emulate CODE on an integer mode representation of a vector.
Referenced by vect_can_vectorize_without_simd_p(), vectorizable_operation(), and vectorizable_reduction().
|
extern |
Return true if a non-affine read or write in STMT_INFO is suitable for a gather load or scatter store. Describe the operation in *INFO if so. If it is suitable and ELSVALS is nonzero store the supported else values in the vector it points to.
References gather_scatter_info::base, build_fold_addr_expr, CASE_CONVERT, gather_scatter_info::decl, do_add(), DR_IS_READ, DR_REF, dyn_cast(), gather_scatter_info::element_type, expr_invariant_in_loop_p(), extract_ops_from_tree(), fold_convert, get_gimple_rhs_class(), get_inner_reference(), gimple_assign_rhs1(), gimple_assign_rhs2(), gimple_assign_rhs_code(), gimple_call_internal_fn(), gimple_call_internal_p(), GIMPLE_TERNARY_RHS, gather_scatter_info::ifn, integer_zerop(), INTEGRAL_TYPE_P, internal_fn_mask_index(), internal_gather_scatter_fn_p(), is_gimple_assign(), LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_LOOP, LOOP_VINFO_ORIG_LOOP_INFO, may_be_nonaddressable_p(), mem_ref_offset(), gather_scatter_info::memory_type, NULL_TREE, gather_scatter_info::offset, gather_scatter_info::offset_dt, gather_scatter_info::offset_vectype, operand_equal_p(), POINTER_TYPE_P, gather_scatter_info::scale, signed_char_type_node, size_binop, size_int, size_zero_node, sizetype, SSA_NAME_DEF_STMT, STMT_VINFO_DATA_REF, STMT_VINFO_VECTYPE, STRIP_NOPS, supports_vec_gather_load_p(), supports_vec_scatter_store_p(), targetm, TREE_CODE, tree_fits_shwi_p(), TREE_OPERAND, tree_to_shwi(), TREE_TYPE, TYPE_MODE, TYPE_PRECISION, TYPE_SIZE, unsigned_char_type_node, vect_describe_gather_scatter_call(), vect_gather_scatter_fn_p(), vect_unknown_def_type, and wide_int_to_tree().
Referenced by get_load_store_type(), vect_analyze_data_refs(), vect_detect_hybrid_slp(), vect_get_and_check_slp_defs(), vect_mark_stmts_to_be_vectorized(), vect_recog_gather_scatter_pattern(), and vect_use_strided_gather_scatters_p().
Return true if replacing LOOP_VINFO->vector_mode with VECTOR_MODE would not change the chosen vector modes.
References hash_set< KeyId, Lazy, Traits >::begin(), hash_set< KeyId, Lazy, Traits >::end(), GET_MODE_INNER, i, related_vector_mode(), vec_info::used_vector_modes, and VECTOR_MODE_P.
Referenced by vect_analyze_loop_1(), and vect_slp_region().
|
inline |
If STMT_INFO is a comparison or contains an embedded comparison, return the scalar type of the values being compared. Return null otherwise.
References dyn_cast(), gimple_assign_rhs1(), gimple_assign_rhs_code(), tcc_comparison, TREE_CODE_CLASS, TREE_TYPE, and vect_embedded_comparison_type().
Copy memory reference info such as base/clique from the SRC reference to the DEST MEM_REF.
References handled_component_p(), MR_DEPENDENCE_BASE, MR_DEPENDENCE_CLIQUE, TREE_CODE, and TREE_OPERAND.
Referenced by vect_setup_realignment(), vectorizable_load(), vectorizable_scan_store(), and vectorizable_store().
|
extern |
Function vect_create_addr_base_for_vector_ref. Create an expression that computes the address of the first memory location that will be accessed for a data reference. Input: STMT_INFO: The statement containing the data reference. NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list. OFFSET: Optional. If supplied, it is added to the initial address. LOOP: Specify relative to which loop-nest should the address be computed. For example, when the dataref is in an inner-loop nested in an outer-loop that is now being vectorized, LOOP can be either the outer-loop, or the inner-loop. The first memory location accessed by the following dataref ('in' points to short): for (i=0; i<N; i++) for (j=0; j<M; j++) s += in[i+j] is as follows: if LOOP=i_loop: &in (relative to i_loop) if LOOP=j_loop: &in+i*2B (relative to j_loop) Output: 1. Return an SSA_NAME whose value is the address of the memory location of the first vector of the data reference. 2. If new_stmt_list is not NULL_TREE after return then the caller must insert these statement(s) which define the returned SSA_NAME. FORNOW: We are only handling array accesses with step 1.
References innermost_loop_behavior::base_address, build1(), build_pointer_type(), dr_info::dr, DR_PTR_INFO, DR_REF, dump_enabled_p(), dump_printf_loc(), dyn_cast(), fold_build2, fold_build_pointer_plus, fold_convert, force_gimple_operand(), gcc_assert, get_dr_vinfo_offset(), get_name(), gimple_seq_add_seq(), innermost_loop_behavior::init, MSG_NOTE, NULL, size_binop, sizetype, SSA_NAME_PTR_INFO, SSA_NAME_VAR, ssize_int, STMT_VINFO_DR_INFO, strip_zero_offset_components(), TREE_CODE, TREE_TYPE, unshare_expr(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.
Referenced by get_misalign_in_elems(), vect_create_cond_for_align_checks(), vect_create_data_ref_ptr(), and vect_setup_realignment().
|
extern |
Function vect_create_data_ref_ptr. Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first location accessed in the loop by STMT_INFO, along with the def-use update chain to appropriately advance the pointer through the loop iterations. Also set aliasing information for the pointer. This pointer is used by the callers to this function to create a memory reference expression for vector load/store access. Input: 1. STMT_INFO: a stmt that references memory. Expected to be of the form GIMPLE_ASSIGN <name, data-ref> or GIMPLE_ASSIGN <data-ref, name>. 2. AGGR_TYPE: the type of the reference, which should be either a vector or an array. 3. AT_LOOP: the loop where the vector memref is to be created. 4. OFFSET (optional): a byte offset to be added to the initial address accessed by the data-ref in STMT_INFO. 5. BSI: location where the new stmts are to be placed if there is no loop. 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain pointing to the initial address. 7. IV_STEP (optional, defaults to NULL): the amount that should be added to the IV during each iteration of the loop. NULL says to move by one copy of AGGR_TYPE up or down, depending on the step of the data reference. Output: 1. Declare a new ptr to vector_type, and have it point to the base of the data reference (initial address accessed by the data reference). For example, for vector of type V8HI, the following code is generated: v8hi *ap; ap = (v8hi *)initial_address; if OFFSET is not supplied: initial_address = &a[init]; if OFFSET is supplied: initial_address = &a[init] + OFFSET; if BYTE_OFFSET is supplied: initial_address = &a[init] + BYTE_OFFSET; Return the initial_address in INITIAL_ADDRESS. 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also update the pointer in each iteration of the loop. Return the increment stmt that updates the pointer in PTR_INCR. 3. Return the pointer.
References alias_sets_conflict_p(), build_pointer_type_for_mode(), create_iv(), dr_info::dr, DR_BASE_ADDRESS, DR_BASE_OBJECT, DR_GROUP_FIRST_ELEMENT, DR_GROUP_NEXT_ELEMENT, DR_GROUP_SIZE, DR_PTR_INFO, DR_REF, DR_STEP, dump_enabled_p(), dump_printf(), dump_printf_loc(), dyn_cast(), fold_build1, fold_convert, gcc_assert, gcc_unreachable, get_alias_set(), get_name(), get_tree_code_name(), gimple_bb(), gsi_insert_seq_before(), gsi_insert_seq_on_edge_immediate(), GSI_SAME_STMT, gsi_stmt(), integer_zerop(), loop_preheader_edge(), LOOP_VINFO_LOOP, MSG_NOTE, nested_in_vect_loop_p(), NULL, NULL_TREE, standard_iv_increment_position(), innermost_loop_behavior::step, STMT_VINFO_DATA_REF, STMT_VINFO_DR_INFO, TREE_CODE, tree_int_cst_sgn(), TREE_TYPE, TYPE_SIZE_UNIT, vect_create_addr_base_for_vector_ref(), vect_dr_behavior(), vect_duplicate_ssa_name_ptr_info(), vect_get_new_vect_var(), vect_location, and vect_pointer_var.
Referenced by vect_setup_realignment(), vectorizable_load(), and vectorizable_store().
Function vect_create_destination_var. Create a new temporary of type VECTYPE.
References free(), gcc_assert, get_name(), SSA_NAME_VERSION, TREE_CODE, TREE_TYPE, vect_get_new_vect_var(), vect_mask_var, vect_scalar_var, vect_simple_var, and VECTOR_BOOLEAN_TYPE_P.
Referenced by permute_vec_elements(), read_vector_array(), vect_create_epilog_for_reduction(), vect_setup_realignment(), vect_transform_cycle_phi(), vect_transform_reduction(), vect_transform_slp_perm_load_1(), vectorizable_assignment(), vectorizable_call(), vectorizable_comparison_1(), vectorizable_condition(), vectorizable_conversion(), vectorizable_lc_phi(), vectorizable_load(), vectorizable_operation(), vectorizable_phi(), vectorizable_shift(), vectorizable_simd_clone_call(), vectorizable_store(), and vectorize_fold_left_reduction().
|
extern |
Create a loop_vec_info for LOOP with SHARED and the vect_analyze_loop_form result.
References vect_loop_form_info::assumptions, vect_loop_form_info::conds, estimated_stmt_executions(), i, loop::inner, vect_loop_form_info::inner_loop_cond, integer_onep(), vec_info::lookup_stmt(), vect_loop_form_info::loop_exit, loop_exit_ctrl_vec_info_type, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_INNER_LOOP_COST_FACTOR, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP_CONDS, LOOP_VINFO_LOOP_IV_COND, LOOP_VINFO_MAIN_LOOP_INFO, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_ASSUMPTIONS, LOOP_VINFO_NITERS_UNCHANGED, LOOP_VINFO_NITERSM1, LOOP_VINFO_ORIG_LOOP_INFO, vect_loop_form_info::number_of_iterations, vect_loop_form_info::number_of_iterationsm1, wi::smin(), STMT_VINFO_DEF_TYPE, STMT_VINFO_TYPE, and vect_condition_def.
Referenced by gather_scalar_reductions(), and vect_analyze_loop_1().
Create an SLP node for SCALAR_STMTS.
References SLP_TREE_CHILDREN, SLP_TREE_CODE, SLP_TREE_DEF_TYPE, SLP_TREE_SCALAR_STMTS, vect_internal_def, and vNULL.
Referenced by vect_optimize_slp_pass::get_result_with_layout(), vect_build_combine_node(), vect_build_slp_instance(), vect_build_slp_store_interleaving(), vect_build_slp_tree_2(), vect_build_swap_evenodd_node(), vect_create_new_slp_node(), vect_create_new_slp_node(), and vect_lower_load_permutations().
|
extern |
Find stmts that must be both vectorized and SLPed.
References DUMP_VECT_SCOPE, gsi_end_p(), gsi_last_bb(), gsi_next(), gsi_prev(), gsi_start(), gsi_start_phis(), gsi_stmt(), i, is_gimple_debug(), vec_info::lookup_stmt(), vdhs_data::loop_vinfo, LOOP_VINFO_BBS, LOOP_VINFO_LOOP, maybe_push_to_hybrid_worklist(), gather_scatter_info::offset, STMT_SLP_TYPE, STMT_VINFO_GATHER_SCATTER_P, STMT_VINFO_IN_PATTERN_P, STMT_VINFO_PATTERN_DEF_SEQ, STMT_VINFO_RELATED_STMT, STMT_VINFO_RELEVANT, vect_check_gather_scatter(), vect_detect_hybrid_slp(), walk_gimple_op(), vdhs_data::worklist, and worklist.
|
extern |
Used in tree-vect-loop-manip.cc
Determine if operating on full vectors for LOOP_VINFO might leave some scalar iterations still to do. If so, decide how we should handle those scalar iterations. The possibilities are: (1) Make LOOP_VINFO operate on partial vectors instead of full vectors. In this case: LOOP_VINFO_USING_PARTIAL_VECTORS_P == true LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false LOOP_VINFO_PEELING_FOR_NITER == false (2) Make LOOP_VINFO operate on full vectors and use an epilogue loop to handle the remaining scalar iterations. In this case: LOOP_VINFO_USING_PARTIAL_VECTORS_P == false LOOP_VINFO_PEELING_FOR_NITER == true There are two choices: (2a) Consider vectorizing the epilogue loop at the same VF as the main loop, but using partial vectors instead of full vectors. In this case: LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == true (2b) Consider vectorizing the epilogue loop at lower VFs only. In this case: LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P == false
References dump_enabled_p(), dump_printf_loc(), opt_result::failure_at(), LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P, LOOP_VINFO_EPIL_USING_PARTIAL_VECTORS_P, LOOP_VINFO_EPILOGUE_P, LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_USING_SELECT_VL_P, MSG_NOTE, opt_result::success(), _loop_vec_info::suggested_unroll_factor, vect_known_niters_smaller_than_vf(), vect_location, and vect_need_peeling_or_partial_vectors_p().
Referenced by vect_analyze_loop_2(), and vect_do_peeling().
|
extern |
Function vect_do_peeling.

Input:
- LOOP_VINFO: Represent a loop to be vectorized, which looks like:

      preheader:
    LOOP:
      header_bb:
        loop_body
        if (exit_loop_cond) goto exit_bb
        else                goto header_bb
      exit_bb:

- NITERS: The number of iterations of the loop.
- NITERSM1: The number of iterations of the loop's latch.
- NITERS_NO_OVERFLOW: No overflow in computing NITERS.
- TH, CHECK_PROFITABILITY: Threshold of niters to vectorize loop if CHECK_PROFITABILITY is true.

Output:
- *NITERS_VECTOR and *STEP_VECTOR describe how the main loop should iterate after vectorization; see vect_set_loop_condition for details.
- *NITERS_VECTOR_MULT_VF_VAR is either null or an SSA name that should be set to the number of scalar iterations handled by the vector loop. The SSA name is only used on exit from the loop.

This function peels prolog and epilog from the loop, adds guards skipping PROLOG and EPILOG for various conditions. As a result, the changed CFG would look like:

      guard_bb_1:
        if (prefer_scalar_loop) goto merge_bb_1
        else                    goto guard_bb_2

      guard_bb_2:
        if (skip_prolog) goto merge_bb_2
        else             goto prolog_preheader

      prolog_preheader:
    PROLOG:
      prolog_header_bb:
        prolog_body
        if (exit_prolog_cond) goto prolog_exit_bb
        else                  goto prolog_header_bb
      prolog_exit_bb:

      merge_bb_2:

      vector_preheader:
    VECTOR LOOP:
      vector_header_bb:
        vector_body
        if (exit_vector_cond) goto vector_exit_bb
        else                  goto vector_header_bb
      vector_exit_bb:

      guard_bb_3:
        if (skip_epilog) goto merge_bb_3
        else             goto epilog_preheader

      merge_bb_1:

      epilog_preheader:
    EPILOG:
      epilog_header_bb:
        epilog_body
        if (exit_epilog_cond) goto merge_bb_3
        else                  goto epilog_header_bb

      merge_bb_3:

Note this function peels prolog and epilog only if it's necessary, as well as guards. This function returns the epilogue loop if a decision was made to vectorize it, otherwise NULL. The analysis resulting in this epilogue loop's loop_vec_info was performed in the same vect_analyze_loop call as the main loop's.
At that time vect_analyze_loop constructs a list of accepted loop_vec_info's for lower vectorization factors than the main loop. This list is chained in the loop's loop_vec_info in the 'epilogue_vinfo' member. When we decide to vectorize the epilogue loop for a lower vectorization factor, the loop_vec_info in epilogue_vinfo is updated and linked to the epilogue loop. This is later used to vectorize the epilogue. The reason the loop_vec_info needs updating is that it was constructed based on the original main loop, and the epilogue loop is a copy of this loop, so all links pointing to statements in the original loop need updating. Furthermore, these loop_vec_infos share the data_reference's records, which will also need to be updated. TODO: Guard for prefer_scalar_loop should be emitted along with versioning conditions if loop versioning is needed.
References add_phi_arg(), adjust_vec, adjust_vec_debug_stmts(), advance(), profile_probability::always(), profile_probability::apply_scale(), boolean_type_node, build_int_cst(), build_one_cst(), build_zero_cst(), CDI_DOMINATORS, cfun, basic_block_def::count, create_phi_node(), DEF_FROM_PTR, delete_update_ssa(), EDGE_PRED, EDGE_SUCC, _loop_vec_info::epilogue_vinfo, first_dom_son(), flow_bb_inside_loop_p(), flow_loop_nested_p(), fold_build2, FOR_EACH_IMM_USE_STMT, FOR_EACH_SSA_DEF_OPERAND, loop::force_vectorize, free(), free_original_copy_tables(), gcc_assert, gcc_checking_assert, get_bb_original(), get_dominated_by(), get_immediate_dominator(), get_loop_body(), get_loop_copy(), get_loop_exit_edges(), gimple_bb(), gimple_build_assign(), gimple_build_nop(), gimple_debug_bind_p(), gimple_debug_bind_reset_value(), gimple_phi_arg_def_from_edge(), gimple_phi_result(), gsi_end_p(), gsi_for_stmt(), gsi_insert_after(), gsi_insert_before(), gsi_last_bb(), GSI_NEW_STMT, gsi_next(), gsi_start_bb(), gsi_start_phis(), gsi_stmt(), profile_probability::guessed_always(), i, initialize_original_copy_tables(), profile_probability::initialized_p(), integer_onep(), profile_probability::invert(), poly_int< N, C >::is_constant(), iterate_fix_dominators(), LOOP_C_INFINITE, loop_constraint_clear(), loop_preheader_edge(), LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT, LOOP_VINFO_BBS, LOOP_VINFO_EARLY_BREAKS, LOOP_VINFO_EARLY_BREAKS_VECT_PEELED, LOOP_VINFO_EPILOGUE_IV_EXIT, LOOP_VINFO_INT_NITERS, LOOP_VINFO_IV_EXIT, LOOP_VINFO_LOOP, LOOP_VINFO_NITERS, LOOP_VINFO_NITERS_KNOWN_P, LOOP_VINFO_NITERSM1, LOOP_VINFO_PEELING_FOR_ALIGNMENT, LOOP_VINFO_PEELING_FOR_GAPS, LOOP_VINFO_PEELING_FOR_NITER, LOOP_VINFO_SCALAR_IV_EXIT, LOOP_VINFO_SCALAR_LOOP, LOOP_VINFO_USING_PARTIAL_VECTORS_P, LOOP_VINFO_VECT_FACTOR, _loop_vec_info::main_loop_edge, make_ssa_name(), MAY_HAVE_DEBUG_BIND_STMTS, need_ssa_update_p(), next_dom_son(), profile_count::nonzero_p(), NULL, NULL_TREE, loop::num_nodes, PHI_RESULT, 
profile_count::probability_in(), queue, record_niter_bound(), reset_original_copy_tables(), scale_bbs_frequencies(), scale_loop_profile(), scev_reset(), set_immediate_dominator(), set_range_info(),