Line data Source code
1 : /* Vectorizer
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #ifndef GCC_TREE_VECTORIZER_H
22 : #define GCC_TREE_VECTORIZER_H
23 :
24 : typedef class _stmt_vec_info *stmt_vec_info;
25 : typedef struct _slp_tree *slp_tree;
26 :
27 : #include "tree-data-ref.h"
28 : #include "tree-hash-traits.h"
29 : #include "target.h"
30 : #include "internal-fn.h"
31 : #include "tree-ssa-operands.h"
32 : #include "gimple-match.h"
33 : #include "dominance.h"
34 :
/* Used for naming of new temporaries; the kind selects the name used
   for the generated SSA temporaries.  */
enum vect_var_kind {
  vect_simple_var,		/* Plain vector value.  */
  vect_pointer_var,		/* Pointer used to address vector memory.  */
  vect_scalar_var,		/* Scalar value.  */
  vect_mask_var			/* Vector mask value.  */
};
42 :
/* Defines type of operation.  The enumerator value equals the number
   of operands the operation takes.  */
enum operation_type {
  unary_op = 1,
  binary_op,
  ternary_op
};
49 :
/* Define type of available alignment support.  NOTE(review): the values
   appear ordered from "not vectorizable" to "known aligned" -- confirm
   against users before relying on the ordering.  */
enum dr_alignment_support {
  /* The misaligned access cannot be vectorized.  */
  dr_unaligned_unsupported,
  /* The target supports the misaligned access directly.  */
  dr_unaligned_supported,
  /* Explicit realignment scheme and its optimized variant
     (NOTE(review): exact scheme semantics inferred from the names --
     confirm at uses).  */
  dr_explicit_realign,
  dr_explicit_realign_optimized,
  /* The access is known to be aligned.  */
  dr_aligned
};
58 :
/* Define type of peeling support to indicate how peeling for alignment can help
   make vectorization supported.  */
enum peeling_support {
  /* Peeling is known to make the accesses vectorizable.  */
  peeling_known_supported,
  /* Peeling may help, but that is not known for sure.  */
  peeling_maybe_supported,
  /* Peeling cannot make vectorization supported.  */
  peeling_unsupported
};
66 :
/* Define type of def-use cross-iteration cycle, i.e. how an SSA def is
   classified relative to the region being vectorized.  */
enum vect_def_type {
  vect_uninitialized_def = 0,
  vect_constant_def = 1,	/* Constant operand.  */
  vect_external_def,		/* Defined outside the vectorized region.  */
  vect_internal_def,		/* Defined by a stmt inside the region.  */
  vect_induction_def,		/* Induction.  */
  vect_reduction_def,		/* Reduction.  */
  vect_double_reduction_def,	/* Double (outer-loop) reduction.  */
  vect_nested_cycle,		/* Cycle nested in an outer loop.  */
  vect_first_order_recurrence,	/* First-order recurrence.  */
  vect_condition_def,		/* Condition operand.  */
  vect_unknown_def_type
};
81 :
/* Define operation type of linear/non-linear induction variable,
   i.e. how the IV evolves each iteration.  */
enum vect_induction_op_type {
  vect_step_op_add = 0,		/* IV += step (linear).  */
  vect_step_op_neg,		/* IV = -IV.  */
  vect_step_op_mul,		/* IV *= step.  */
  vect_step_op_shl,		/* IV <<= step.  */
  vect_step_op_shr		/* IV >>= step.  */
};
90 :
/* Define type of reduction.  */
enum vect_reduction_type {
  TREE_CODE_REDUCTION,
  COND_REDUCTION,
  INTEGER_INDUC_COND_REDUCTION,
  CONST_COND_REDUCTION,

  /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
     to implement:

       for (int i = 0; i < VF; ++i)
         res = cond[i] ? val[i] : res;  */
  EXTRACT_LAST_REDUCTION,

  /* Use a folding reduction within the loop to implement:

       for (int i = 0; i < VF; ++i)
         res = res OP val[i];

     (with no reassociation).  */
  FOLD_LEFT_REDUCTION
};
113 :
/* Return true if the vect_def_type D describes a cross-iteration cycle
   handled by reduction vectorization: a plain, double or nested
   reduction.  */
#define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def)           \
                                   || ((D) == vect_double_reduction_def) \
                                   || ((D) == vect_nested_cycle))
117 :
/* Structure to encapsulate information about a group of like
   instructions to be presented to the target cost model.  */
struct stmt_info_for_cost {
  /* Number of statements of this kind.  */
  int count;
  /* The kind of cost being recorded.  */
  enum vect_cost_for_stmt kind;
  /* Where the cost is incurred (prologue, body or epilogue).  */
  enum vect_cost_model_location where;
  /* The statement the cost applies to, if any.  */
  stmt_vec_info stmt_info;
  /* The SLP node the cost applies to, if any.  */
  slp_tree node;
  /* The vector type involved, if any.  */
  tree vectype;
  /* Misalignment to charge for; NOTE(review): exact encoding inferred
     from the name -- confirm against the cost hooks.  */
  int misalign;
};

typedef vec<stmt_info_for_cost> stmt_vector_for_cost;

/* Maps base addresses to an innermost_loop_behavior and the stmt it was
   derived from that gives the maximum known alignment for that base.  */
typedef hash_map<tree_operand_hash,
		 std::pair<stmt_vec_info, innermost_loop_behavior *> >
  vec_base_alignments;
137 :
/* Represents elements [START, START + LENGTH) of cyclical array OPS*
   (i.e. OPS repeated to give at least START + LENGTH elements).  */
struct vect_scalar_ops_slice
{
  tree op (unsigned int i) const;
  bool all_same_p () const;

  /* The underlying operand array, indexed modulo its length.  */
  vec<tree> *ops;
  /* Index of the first element of the slice.  */
  unsigned int start;
  /* Number of elements in the slice.  */
  unsigned int length;
};
149 :
150 : /* Return element I of the slice. */
151 : inline tree
152 2617763 : vect_scalar_ops_slice::op (unsigned int i) const
153 : {
154 5235526 : return (*ops)[(i + start) % ops->length ()];
155 : }
156 :
/* Hash traits for vect_scalar_ops_slice.  A LENGTH of zero marks an
   empty hash entry and a LENGTH of ~0U a deleted one.  */
struct vect_scalar_ops_slice_hash : typed_noop_remove<vect_scalar_ops_slice>
{
  typedef vect_scalar_ops_slice value_type;
  typedef vect_scalar_ops_slice compare_type;

  /* Zero-initialized entries count as empty (see is_empty below).  */
  static const bool empty_zero_p = true;

  static void mark_deleted (value_type &s) { s.length = ~0U; }
  static void mark_empty (value_type &s) { s.length = 0; }
  static bool is_deleted (const value_type &s) { return s.length == ~0U; }
  static bool is_empty (const value_type &s) { return s.length == 0; }
  static hashval_t hash (const value_type &);
  static bool equal (const value_type &, const compare_type &);
};
172 :
/* Describes how we're going to vectorize an individual load or store,
   or a group of loads or stores.  */
enum vect_memory_access_type {
  VMAT_UNINITIALIZED,

  /* An access to an invariant address.  This is used only for loads.  */
  VMAT_INVARIANT,

  /* A simple contiguous access.  */
  VMAT_CONTIGUOUS,

  /* A contiguous access that goes down in memory rather than up,
     with no additional permutation.  This is used only for stores
     of invariants.  */
  VMAT_CONTIGUOUS_DOWN,

  /* A simple contiguous access in which the elements need to be reversed
     after loading or before storing.  */
  VMAT_CONTIGUOUS_REVERSE,

  /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES.  */
  VMAT_LOAD_STORE_LANES,

  /* An access in which each scalar element is loaded or stored
     individually.  */
  VMAT_ELEMENTWISE,

  /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
     SLP accesses.  Each unrolled iteration uses a contiguous load
     or store for the whole group, but the groups from separate iterations
     are combined in the same way as for VMAT_ELEMENTWISE.  */
  VMAT_STRIDED_SLP,

  /* The access uses gather loads or scatter stores: via target builtin
     decls (LEGACY, see vect_load_store_data::gs.decl), via internal
     functions (IFN) or emulated element by element (EMULATED).  */
  VMAT_GATHER_SCATTER_LEGACY,
  VMAT_GATHER_SCATTER_IFN,
  VMAT_GATHER_SCATTER_EMULATED
};
211 :
212 : /* Returns whether MAT is any of the VMAT_GATHER_SCATTER_* kinds. */
213 :
214 : inline bool
215 6010138 : mat_gather_scatter_p (vect_memory_access_type mat)
216 : {
217 6010138 : return (mat == VMAT_GATHER_SCATTER_LEGACY
218 : || mat == VMAT_GATHER_SCATTER_IFN
219 6010138 : || mat == VMAT_GATHER_SCATTER_EMULATED);
220 : }
221 :
/*-----------------------------------------------------------------*/
/* Info on vectorized defs.                                        */
/*-----------------------------------------------------------------*/
/* The kind of vectorizable operation a statement performs, as
   classified by analysis.  Also used as the operation kind of an SLP
   node (see _slp_tree::type).  */
enum stmt_vec_info_type {
  undef_vec_info_type = 0,
  load_vec_info_type,
  store_vec_info_type,
  shift_vec_info_type,
  op_vec_info_type,
  call_vec_info_type,
  call_simd_clone_vec_info_type,
  assignment_vec_info_type,
  condition_vec_info_type,
  comparison_vec_info_type,
  reduc_vec_info_type,
  induc_vec_info_type,
  type_promotion_vec_info_type,
  type_demotion_vec_info_type,
  type_conversion_vec_info_type,
  cycle_phi_info_type,
  lc_phi_info_type,
  phi_info_type,
  recurr_info_type,
  loop_exit_ctrl_vec_info_type,
  permute_info_type
};
248 :
/************************************************************************
  SLP
 ************************************************************************/
/* A lane permutation, encoded as pairs of { operand number, lane number }.  */
typedef vec<std::pair<unsigned, unsigned> > lane_permutation_t;
typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t;
/* A load permutation, as a vector of lane indices.  */
typedef vec<unsigned> load_permutation_t;
typedef auto_vec<unsigned, 16> auto_load_permutation_t;

/* Base class for operation-kind specific analysis data attached to an
   SLP node (_slp_tree::data).  The virtual destructor allows derived
   data to be deleted through the base pointer.  */
struct vect_data {
  virtual ~vect_data () = default;
};
260 :
/* Analysis data from vectorizable_simd_clone_call for
   call_simd_clone_vec_info_type.  */
struct vect_simd_clone_data : vect_data {
  virtual ~vect_simd_clone_data () = default;
  vect_simd_clone_data () = default;
  /* Move-constructible so it can be transferred into the SLP node.  */
  vect_simd_clone_data (vect_simd_clone_data &&other) = default;

  /* Selected SIMD clone and clone for in-branch.  */
  cgraph_node *clone;
  cgraph_node *clone_inbranch;

  /* Selected SIMD clone's function info.  First vector element
     is NULL_TREE, followed by a pair of trees (base + step)
     for linear arguments (pair of NULLs for other arguments).  */
  auto_vec<tree> simd_clone_info;
};
277 :
/* Analysis data from vectorizable_load and vectorizable_store for
   load_vec_info_type and store_vec_info_type.  */
struct vect_load_store_data : vect_data {
  vect_load_store_data (vect_load_store_data &&other) = default;
  vect_load_store_data () = default;
  virtual ~vect_load_store_data () = default;

  /* How the load or store will be vectorized.  */
  vect_memory_access_type memory_access_type;
  /* The alignment scheme and the misalignment of the access.  */
  dr_alignment_support alignment_support_scheme;
  int misalignment;
  internal_fn lanes_ifn;	// VMAT_LOAD_STORE_LANES
  poly_int64 poffset;
  union {
    internal_fn ifn;	// VMAT_GATHER_SCATTER_IFN
    tree decl;		// VMAT_GATHER_SCATTER_LEGACY
  } gs;
  tree strided_offset_vectype;	// VMAT_GATHER_SCATTER_IFN, originally strided
  /* Load/store type with larger element mode used for punning the vectype.  */
  tree ls_type;		// VMAT_GATHER_SCATTER_IFN
  /* This is set to a supported offset vector type if we don't support the
     originally requested offset type, otherwise NULL.
     If nonzero there will be an additional offset conversion before
     the gather/scatter.  */
  tree supported_offset_vectype;	// VMAT_GATHER_SCATTER_IFN
  /* Similar for scale.  Only nonzero if we don't support the requested
     scale.  Then we need to multiply the offset vector before the
     gather/scatter.  */
  int supported_scale;	// VMAT_GATHER_SCATTER_IFN
  /* NOTE(review): purpose of ELSVALS is not evident from this header --
     presumably ELSE values for masked accesses; confirm at uses.  */
  auto_vec<int> elsvals;
  /* True if the load requires a load permutation.  */
  bool slp_perm;	// SLP_TREE_LOAD_PERMUTATION
  unsigned n_perms;	// SLP_TREE_LOAD_PERMUTATION
  unsigned n_loads;	// SLP_TREE_LOAD_PERMUTATION
  /* Whether the load permutation is consecutive and simple.  */
  bool subchain_p;	// VMAT_STRIDED_SLP and VMAT_GATHER_SCATTER
};
314 :
/* A computation tree of an SLP instance.  Each node corresponds to a group of
   stmts to be packed in a SIMD stmt.  */
struct _slp_tree {
  _slp_tree ();
  ~_slp_tree ();

  /* Append a vectorized def; the gimple * overload records its def.  */
  void push_vec_def (gimple *def);
  void push_vec_def (tree def) { vec_defs.quick_push (def); }

  /* Nodes that contain def-stmts of this node statements operands.  */
  vec<slp_tree> children;

  /* A group of scalar stmts to be vectorized together.  */
  vec<stmt_vec_info> stmts;
  /* A group of scalar operands to be vectorized together.  */
  vec<tree> ops;
  /* The representative that should be used for analysis and
     code generation.  */
  stmt_vec_info representative;

  struct {
    /* SLP cycle the node resides in, or -1.  */
    int id;
    /* The SLP operand index with the edge on the SLP cycle, or -1.  */
    int reduc_idx;
  } cycle_info;

  /* Load permutation relative to the stores, NULL if there is no
     permutation.  */
  load_permutation_t load_permutation;
  /* Lane permutation of the operands scalar lanes encoded as pairs
     of { operand number, lane number }.  The number of elements
     denotes the number of output lanes.  */
  lane_permutation_t lane_permutation;

  /* The vector type of this node.  */
  tree vectype;
  /* Vectorized defs.  */
  vec<tree> vec_defs;

  /* Reference count in the SLP graph.  */
  unsigned int refcnt;
  /* The maximum number of vector elements for the subtree rooted
     at this node.  */
  poly_uint64 max_nunits;
  /* The DEF type of this node.  */
  enum vect_def_type def_type;
  /* The number of scalar lanes produced by this node.  */
  unsigned int lanes;
  /* The operation of this node.  */
  enum tree_code code;
  /* For gather/scatter memory operations the scale each offset element
     should be multiplied by before being added to the base.  */
  int gs_scale;
  /* For gather/scatter memory operations the loop-invariant base value.  */
  tree gs_base;
  /* Whether uses of this load or feeders of this store are suitable
     for load/store-lanes.  */
  bool ldst_lanes;
  /* For BB vect, flag to indicate this load node should be vectorized
     as to avoid STLF fails because of related stores.  */
  bool avoid_stlf_fail;

  /* Vertex number of this node in the SLP graph.  NOTE(review): set and
     used by graph algorithms elsewhere -- confirm at uses.  */
  int vertex;

  /* The kind of operation as determined by analysis and optional
     kind specific data.  */
  enum stmt_vec_info_type type;
  vect_data *data;

  /* Return the kind-specific analysis data cast to T, or ELSE_ when no
     data has been attached to this node.  */
  template <class T>
  T& get_data (T& else_) { return data ? *static_cast <T *> (data) : else_; }

  /* If not NULL this is a cached failed SLP discovery attempt with
     the lanes that failed during SLP discovery as 'false'.  This is
     a copy of the matches array.  */
  bool *failed;

  /* Allocate from slp_tree_pool.  */
  static void *operator new (size_t);

  /* Return memory to slp_tree_pool.  */
  static void operator delete (void *, size_t);

  /* Linked list of nodes to release when we free the slp_tree_pool.  */
  slp_tree next_node;
  slp_tree prev_node;
};
402 :
/* The enum describes the type of operations that an SLP instance
   can perform, i.e. what its root is.  */
enum slp_instance_kind {
  slp_inst_kind_store,		/* Rooted at a group of stores.  */
  slp_inst_kind_reduc_group,	/* A reduction group.  */
  slp_inst_kind_reduc_chain,	/* A reduction chain.  */
  slp_inst_kind_bb_reduc,	/* A basic-block reduction.  */
  slp_inst_kind_ctor,		/* Rooted at a vector constructor.  */
  slp_inst_kind_gcond		/* Rooted at a GIMPLE condition.  */
};
414 :
/* SLP instance is a sequence of stmts in a loop that can be packed into
   SIMD stmts.  */
typedef class _slp_instance {
public:
  /* The root of SLP tree.  */
  slp_tree root;

  /* For vector constructors, the constructor stmt that the SLP tree is built
     from, NULL otherwise.  */
  vec<stmt_vec_info> root_stmts;

  /* For slp_inst_kind_bb_reduc the defs that were not vectorized, NULL
     otherwise.  */
  vec<tree> remain_defs;

  /* The group of nodes that contain loads of this SLP instance.  */
  vec<slp_tree> loads;

  /* The SLP node containing the reduction PHIs.  */
  slp_tree reduc_phis;

  /* Vector cost of this entry to the SLP graph.  */
  stmt_vector_for_cost cost_vec;

  /* If this instance is the main entry of a subgraph the set of
     entries into the same subgraph, including itself.  */
  vec<_slp_instance *> subgraph_entries;

  /* The type of operation the SLP instance is performing.  */
  slp_instance_kind kind;

  /* Return a location usable for dump messages about this instance.  */
  dump_user_location_t location () const;
} *slp_instance;
448 :
449 :
/* Access Functions.  */
/* Accessors for _slp_instance fields.  */
#define SLP_INSTANCE_TREE(S) (S)->root
#define SLP_INSTANCE_LOADS(S) (S)->loads
#define SLP_INSTANCE_ROOT_STMTS(S) (S)->root_stmts
#define SLP_INSTANCE_REMAIN_DEFS(S) (S)->remain_defs
#define SLP_INSTANCE_KIND(S) (S)->kind

/* Accessors for _slp_tree fields.  */
#define SLP_TREE_CHILDREN(S) (S)->children
#define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
#define SLP_TREE_SCALAR_OPS(S) (S)->ops
#define SLP_TREE_REF_COUNT(S) (S)->refcnt
#define SLP_TREE_VEC_DEFS(S) (S)->vec_defs
#define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
#define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
#define SLP_TREE_DEF_TYPE(S) (S)->def_type
#define SLP_TREE_VECTYPE(S) (S)->vectype
#define SLP_TREE_REPRESENTATIVE(S) (S)->representative
#define SLP_TREE_LANES(S) (S)->lanes
#define SLP_TREE_CODE(S) (S)->code
#define SLP_TREE_TYPE(S) (S)->type
#define SLP_TREE_GS_SCALE(S) (S)->gs_scale
#define SLP_TREE_GS_BASE(S) (S)->gs_base
#define SLP_TREE_REDUC_IDX(S) (S)->cycle_info.reduc_idx
/* Whether the node represents a VEC_PERM_EXPR permutation.  */
#define SLP_TREE_PERMUTE_P(S) ((S)->code == VEC_PERM_EXPR)
474 :
475 : inline vect_memory_access_type
476 1243267 : SLP_TREE_MEMORY_ACCESS_TYPE (slp_tree node)
477 : {
478 487311 : if (SLP_TREE_TYPE (node) == load_vec_info_type
479 424883 : || SLP_TREE_TYPE (node) == store_vec_info_type)
480 250048 : return static_cast<vect_load_store_data *> (node->data)->memory_access_type;
481 : return VMAT_UNINITIALIZED;
482 : }
483 :
/* The scheme used for generating partial-vector loop controls.  */
enum vect_partial_vector_style {
  vect_partial_vectors_none,		/* No partial vectors.  */
  vect_partial_vectors_while_ult,	/* WHILE_ULT-style masks.  */
  vect_partial_vectors_avx512,		/* AVX512-style masks.  */
  vect_partial_vectors_len		/* Length-based controls.  */
};
490 :
/* Key for map that records association between
   scalar conditions and corresponding loop mask, and
   is populated by vect_record_loop_mask.  */

struct scalar_cond_masked_key
{
  scalar_cond_masked_key (tree t, unsigned ncopies_)
    : ncopies (ncopies_)
  {
    get_cond_ops_from_tree (t);
  }

  /* Extract CODE, OP0 and OP1 (and INVERTED_P) from condition T.  */
  void get_cond_ops_from_tree (tree);

  /* Number of vector copies the mask was recorded for.  */
  unsigned ncopies;
  /* Whether the recorded condition is the inverted one.  */
  bool inverted_p;
  /* Comparison code and operands of the scalar condition.  */
  tree_code code;
  tree op0;
  tree op1;
};
511 :
/* Hash traits for scalar_cond_masked_key.  An NCOPIES of zero marks an
   empty hash entry (empty_zero_p); deletion is never flagged.  */
template<>
struct default_hash_traits<scalar_cond_masked_key>
{
  typedef scalar_cond_masked_key compare_type;
  typedef scalar_cond_masked_key value_type;

  /* Combine everything EQUAL below compares: code, both operands,
     the copy count and the inverted flag.  */
  static inline hashval_t
  hash (value_type v)
  {
    inchash::hash h;
    h.add_int (v.code);
    inchash::add_expr (v.op0, h, 0);
    inchash::add_expr (v.op1, h, 0);
    h.add_int (v.ncopies);
    h.add_flag (v.inverted_p);
    return h.end ();
  }

  static inline bool
  equal (value_type existing, value_type candidate)
  {
    return (existing.ncopies == candidate.ncopies
	    && existing.code == candidate.code
	    && existing.inverted_p == candidate.inverted_p
	    && operand_equal_p (existing.op0, candidate.op0, 0)
	    && operand_equal_p (existing.op1, candidate.op1, 0));
  }

  static const bool empty_zero_p = true;

  static inline void
  mark_empty (value_type &v)
  {
    v.ncopies = 0;
    v.inverted_p = false;
  }

  static inline bool
  is_empty (value_type v)
  {
    return v.ncopies == 0;
  }

  /* Entries are never individually deleted.  */
  static inline void mark_deleted (value_type &) {}

  static inline bool is_deleted (const value_type &)
  {
    return false;
  }

  static inline void remove (value_type &) {}
};
564 :
/* Set of scalar conditions that have a loop mask recorded for them
   (see scalar_cond_masked_key).  */
typedef hash_set<scalar_cond_masked_key> scalar_cond_masked_set_type;

/* Key and map that records association between vector conditions and
   corresponding loop mask, and is populated by prepare_vec_mask.  */

typedef pair_hash<tree_operand_hash, tree_operand_hash> tree_cond_mask_hash;
typedef hash_set<tree_cond_mask_hash> vec_cond_masked_set_type;

/* Describes two objects whose addresses must be unequal for the vectorized
   loop to be valid.  */
typedef std::pair<tree, tree> vec_object_pair;
576 :
/* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
   UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR.  */
class vec_lower_bound {
public:
  vec_lower_bound () {}
  vec_lower_bound (tree e, bool u, poly_uint64 m)
    : expr (e), unsigned_p (u), min_value (m) {}

  /* The expression being bounded.  */
  tree expr;
  /* Whether abs (EXPR) == EXPR can be assumed.  */
  bool unsigned_p;
  /* The minimum value of abs (EXPR).  */
  poly_uint64 min_value;
};
589 :
/* Vectorizer state shared between different analyses like vector sizes
   of the same CFG region.  */
class vec_info_shared {
public:
  vec_info_shared();
  ~vec_info_shared();

  /* Snapshot the datarefs into DATAREFS_COPY and later verify they
     have not changed.  */
  void save_datarefs();
  void check_datarefs();

  /* All data references.  Freed by free_data_refs, so not an auto_vec.  */
  vec<data_reference_p> datarefs;
  vec<data_reference> datarefs_copy;

  /* The loop nest in which the data dependences are computed.  */
  auto_vec<loop_p> loop_nest;

  /* All data dependences.  Freed by free_dependence_relations, so not
     an auto_vec.  */
  vec<ddr_p> ddrs;
};
611 :
/* Vectorizer state common between loop and basic-block vectorization.  */
class vec_info {
public:
  typedef hash_set<int_hash<machine_mode, E_VOIDmode, E_BLKmode> > mode_set;
  enum vec_kind { bb, loop };

  vec_info (vec_kind, vec_info_shared *);
  ~vec_info ();

  /* stmt_vec_info creation, lookup and maintenance.  */
  stmt_vec_info add_stmt (gimple *);
  stmt_vec_info add_pattern_stmt (gimple *, stmt_vec_info);
  stmt_vec_info resync_stmt_addr (gimple *);
  stmt_vec_info lookup_stmt (gimple *);
  stmt_vec_info lookup_def (tree);
  stmt_vec_info lookup_single_use (tree);
  class dr_vec_info *lookup_dr (data_reference *);
  void move_dr (stmt_vec_info, stmt_vec_info);
  void remove_stmt (stmt_vec_info);
  void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
  /* Insert a stmt or stmt sequence on region entry.  */
  void insert_on_entry (stmt_vec_info, gimple *);
  void insert_seq_on_entry (stmt_vec_info, gimple_seq);

  /* The type of vectorization.  */
  vec_kind kind;

  /* Shared vectorizer state.  */
  vec_info_shared *shared;

  /* The mapping of GIMPLE UID to stmt_vec_info.  */
  vec<stmt_vec_info> stmt_vec_infos;
  /* Whether the above mapping is complete.  */
  bool stmt_vec_info_ro;

  /* Whether we've done a transform we think OK to not update virtual
     SSA form.  */
  bool any_known_not_updated_vssa;

  /* The SLP graph.  */
  auto_vec<slp_instance> slp_instances;

  /* Maps base addresses to an innermost_loop_behavior that gives the maximum
     known alignment for that base.  */
  vec_base_alignments base_alignments;

  /* All interleaving chains of stores, represented by the first
     stmt in the chain.  */
  auto_vec<stmt_vec_info> grouped_stores;

  /* The set of vector modes used in the vectorized region.  */
  mode_set used_vector_modes;

  /* The argument we should pass to related_vector_mode when looking up
     the vector mode for a scalar mode, or VOIDmode if we haven't yet
     made any decisions about which vector modes to use.  */
  machine_mode vector_mode;

  /* The basic blocks in the vectorization region.  For _loop_vec_info,
     the memory is internally managed, while for _bb_vec_info, it points
     to element space of an external auto_vec<>.  This inconsistency is
     not a good class design pattern.  TODO: improve it with an unified
     auto_vec<> whose lifetime is confined to vec_info object.  */
  basic_block *bbs;

  /* The count of the basic blocks in the vectorization region.  */
  unsigned int nbbs;

  /* Used to keep a sequence of def stmts of a pattern stmt that are loop
     invariant if they exist.
     The sequence is emitted in the loop preheader should the loop be vectorized
     and are reset when undoing patterns.  */
  gimple_seq inv_pattern_def_seq;

private:
  stmt_vec_info new_stmt_vec_info (gimple *stmt);
  void set_vinfo_for_stmt (gimple *, stmt_vec_info, bool = true);
  void free_stmt_vec_infos ();
  void free_stmt_vec_info (stmt_vec_info);
};
690 :
class _loop_vec_info;
class _bb_vec_info;

/* is_a/dyn_cast support: a vec_info is a _loop_vec_info iff its kind
   is vec_info::loop.  */
template<>
template<>
inline bool
is_a_helper <_loop_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::loop;
}

/* Likewise, a vec_info is a _bb_vec_info iff its kind is vec_info::bb.  */
template<>
template<>
inline bool
is_a_helper <_bb_vec_info *>::test (vec_info *i)
{
  return i->kind == vec_info::bb;
}
709 :
710 : /* In general, we can divide the vector statements in a vectorized loop
711 : into related groups ("rgroups") and say that for each rgroup there is
712 : some nS such that the rgroup operates on nS values from one scalar
713 : iteration followed by nS values from the next. That is, if VF is the
714 : vectorization factor of the loop, the rgroup operates on a sequence:
715 :
716 : (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)
717 :
718 : where (i,j) represents a scalar value with index j in a scalar
719 : iteration with index i.
720 :
721 : [ We use the term "rgroup" to emphasise that this grouping isn't
722 : necessarily the same as the grouping of statements used elsewhere.
723 : For example, if we implement a group of scalar loads using gather
724 : loads, we'll use a separate gather load for each scalar load, and
725 : thus each gather load will belong to its own rgroup. ]
726 :
727 : In general this sequence will occupy nV vectors concatenated
728 : together. If these vectors have nL lanes each, the total number
729 : of scalar values N is given by:
730 :
731 : N = nS * VF = nV * nL
732 :
733 : None of nS, VF, nV and nL are required to be a power of 2. nS and nV
734 : are compile-time constants but VF and nL can be variable (if the target
735 : supports variable-length vectors).
736 :
737 : In classical vectorization, each iteration of the vector loop would
738 : handle exactly VF iterations of the original scalar loop. However,
739 : in vector loops that are able to operate on partial vectors, a
740 : particular iteration of the vector loop might handle fewer than VF
741 : iterations of the scalar loop. The vector lanes that correspond to
742 : iterations of the scalar loop are said to be "active" and the other
743 : lanes are said to be "inactive".
744 :
745 : In such vector loops, many rgroups need to be controlled to ensure
746 : that they have no effect for the inactive lanes. Conceptually, each
747 : such rgroup needs a sequence of booleans in the same order as above,
748 : but with each (i,j) replaced by a boolean that indicates whether
749 : iteration i is active. This sequence occupies nV vector controls
750 : that again have nL lanes each. Thus the control sequence as a whole
751 : consists of VF independent booleans that are each repeated nS times.
752 :
753 : Taking mask-based approach as a partially-populated vectors example.
754 : We make the simplifying assumption that if a sequence of nV masks is
755 : suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
756 : VIEW_CONVERTing it. This holds for all current targets that support
757 : fully-masked loops. For example, suppose the scalar loop is:
758 :
759 : float *f;
760 : double *d;
761 : for (int i = 0; i < n; ++i)
762 : {
763 : f[i * 2 + 0] += 1.0f;
764 : f[i * 2 + 1] += 2.0f;
765 : d[i] += 3.0;
766 : }
767 :
768 : and suppose that vectors have 256 bits. The vectorized f accesses
769 : will belong to one rgroup and the vectorized d access to another:
770 :
771 : f rgroup: nS = 2, nV = 1, nL = 8
772 : d rgroup: nS = 1, nV = 1, nL = 4
773 : VF = 4
774 :
775 : [ In this simple example the rgroups do correspond to the normal
776 : SLP grouping scheme. ]
777 :
778 : If only the first three lanes are active, the masks we need are:
779 :
780 : f rgroup: 1 1 | 1 1 | 1 1 | 0 0
781 : d rgroup: 1 | 1 | 1 | 0
782 :
783 : Here we can use a mask calculated for f's rgroup for d's, but not
784 : vice versa.
785 :
786 : Thus for each value of nV, it is enough to provide nV masks, with the
787 : mask being calculated based on the highest nL (or, equivalently, based
788 : on the highest nS) required by any rgroup with that nV. We therefore
789 : represent the entire collection of masks as a two-level table, with the
790 : first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
791 : the second being indexed by the mask index 0 <= i < nV. */
792 :
/* The controls (like masks or lengths) needed by rgroups with nV vectors,
   according to the description above.  */
struct rgroup_controls {
  /* The largest nS for all rgroups that use these controls.
     For vect_partial_vectors_avx512 this is the constant nscalars_per_iter
     for all members of the group.  */
  unsigned int max_nscalars_per_iter;

  /* For the largest nS recorded above, the loop controls divide each scalar
     into FACTOR equal-sized pieces.  This is useful if we need to split
     element-based accesses into byte-based accesses.
     For vect_partial_vectors_avx512 this records nV instead.  */
  unsigned int factor;

  /* This is a vector type with MAX_NSCALARS_PER_ITER * VF / nV elements.
     For mask-based controls, it is the type of the masks in CONTROLS.
     For length-based controls, it can be any vector type that has the
     specified number of elements; the type of the elements doesn't matter.  */
  tree type;

  /* When there is no uniformly used LOOP_VINFO_RGROUP_COMPARE_TYPE this
     is the rgroup specific type used.  */
  tree compare_type;

  /* A vector of nV controls, in iteration order.  */
  vec<tree> controls;

  /* In case of len_load and len_store with a bias there is only one
     rgroup.  This holds the adjusted loop length for this rgroup.  */
  tree bias_adjusted_ctrl;
};
824 :
/* The loop masks recorded for a vectorized loop.  */
struct vec_loop_masks
{
  /* Return true if no masks have been recorded.  */
  bool is_empty () const { return mask_set.is_empty (); }

  /* Set to record vectype, nvector pairs.  */
  hash_set<pair_hash <nofree_ptr_hash <tree_node>,
		      int_hash<unsigned, 0>>> mask_set;

  /* rgroup_controls used for the partial vector scheme.  */
  auto_vec<rgroup_controls> rgc_vec;
};
836 :
/* Length-based loop controls reuse the rgroup_controls representation.  */
typedef auto_vec<rgroup_controls> vec_loop_lens;

/* Pairs of a data reference and an associated tree; presumably the DR's
   initial/seed value -- NOTE(review): confirm against users.  */
typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec;
840 :
/* Abstraction around info on reductions which is still in stmt_vec_info
   but will be duplicated or moved elsewhere.  */
class vect_reduc_info_s
{
public:
  /* The def type of the main reduction PHI, vect_reduction_def or
     vect_double_reduction_def.  */
  enum vect_def_type def_type;

  /* The reduction type as detected by
     vect_is_simple_reduction and vectorizable_reduction.  */
  enum vect_reduction_type reduc_type;

  /* The original scalar reduction code, to be used in the epilogue.  */
  code_helper reduc_code;

  /* A vector internal function we should use in the epilogue.  */
  internal_fn reduc_fn;

  /* For loop reduction with multiple vectorized results (ncopies > 1), a
     lane-reducing operation participating in it may not use all of those
     results, this field specifies result index starting from which any
     following lane-reducing operation would be assigned to.  */
  unsigned int reduc_result_pos;

  /* Whether this represents a reduction chain.  */
  bool is_reduc_chain;

  /* Whether we force a single cycle PHI during reduction vectorization.  */
  bool force_single_cycle;

  /* The vector type for performing the actual reduction operation.  */
  tree reduc_vectype;

  /* The vector type we should use for the final reduction in the epilogue
     when we reduce a mask.  */
  tree reduc_vectype_for_mask;

  /* The neutral operand to use, if any.  */
  tree neutral_op;

  /* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used.  */
  tree induc_cond_initial_val;

  /* If not NULL the value to be added to compute final reduction value.  */
  tree reduc_epilogue_adjustment;

  /* If non-null, the reduction is being performed by an epilogue loop
     and we have decided to reuse this accumulator from the main loop.  */
  struct vect_reusable_accumulator *reused_accumulator;

  /* If the vector code is performing N scalar reductions in parallel,
     this variable gives the initial scalar values of those N reductions.  */
  auto_vec<tree> reduc_initial_values;

  /* If the vector code is performing N scalar reductions in parallel, this
     variable gives the vectorized code's final (scalar) result for each of
     those N reductions.  In other words, REDUC_SCALAR_RESULTS[I] replaces
     the original scalar code's loop-closed SSA PHI for reduction number I.  */
  auto_vec<tree> reduc_scalar_results;
};
902 :
903 : typedef class vect_reduc_info_s *vect_reduc_info;
904 :
905 : #define VECT_REDUC_INFO_DEF_TYPE(I) ((I)->def_type)
906 : #define VECT_REDUC_INFO_TYPE(I) ((I)->reduc_type)
907 : #define VECT_REDUC_INFO_CODE(I) ((I)->reduc_code)
908 : #define VECT_REDUC_INFO_FN(I) ((I)->reduc_fn)
909 : #define VECT_REDUC_INFO_SCALAR_RESULTS(I) ((I)->reduc_scalar_results)
910 : #define VECT_REDUC_INFO_INITIAL_VALUES(I) ((I)->reduc_initial_values)
911 : #define VECT_REDUC_INFO_REUSED_ACCUMULATOR(I) ((I)->reused_accumulator)
912 : #define VECT_REDUC_INFO_INDUC_COND_INITIAL_VAL(I) ((I)->induc_cond_initial_val)
913 : #define VECT_REDUC_INFO_EPILOGUE_ADJUSTMENT(I) ((I)->reduc_epilogue_adjustment)
914 : #define VECT_REDUC_INFO_VECTYPE(I) ((I)->reduc_vectype)
915 : #define VECT_REDUC_INFO_VECTYPE_FOR_MASK(I) ((I)->reduc_vectype_for_mask)
916 : #define VECT_REDUC_INFO_FORCE_SINGLE_CYCLE(I) ((I)->force_single_cycle)
917 : #define VECT_REDUC_INFO_RESULT_POS(I) ((I)->reduc_result_pos)
918 : #define VECT_REDUC_INFO_NEUTRAL_OP(I) ((I)->neutral_op)
919 :
920 : /* Information about a reduction accumulator from the main loop that could
921 : conceivably be reused as the input to a reduction in an epilogue loop. */
922 : struct vect_reusable_accumulator {
923 : /* The final value of the accumulator, which forms the input to the
924 : reduction operation. */
925 : tree reduc_input;
926 :
927 : /* The vect_reduc_info that describes the reduction which produced
928 : this accumulator. */
929 : vect_reduc_info reduc_info;
930 : };
931 :
932 : /*-----------------------------------------------------------------*/
933 : /* Info on vectorized loops. */
934 : /*-----------------------------------------------------------------*/
935 : typedef class _loop_vec_info : public vec_info {
936 : public:
937 : _loop_vec_info (class loop *, vec_info_shared *);
938 : ~_loop_vec_info ();
939 :
940 : /* The loop to which this info struct refers to. */
941 : class loop *loop;
942 :
943 : /* Number of latch executions. */
944 : tree num_itersm1;
945 : /* Number of iterations. */
946 : tree num_iters;
947 : /* Number of iterations of the original loop. */
948 : tree num_iters_unchanged;
949 : /* Condition under which this loop is analyzed and versioned. */
950 : tree num_iters_assumptions;
951 :
952 : /* The cost of the vector code. */
953 : class vector_costs *vector_costs;
954 :
955 : /* The cost of the scalar code. */
956 : class vector_costs *scalar_costs;
957 :
958 : /* Threshold of number of iterations below which vectorization will not be
959 : performed. It is calculated from MIN_PROFITABLE_ITERS and
960 : param_min_vect_loop_bound. */
961 : unsigned int th;
962 :
963 : /* When applying loop versioning, the vector form should only be used
964 : if the number of scalar iterations is >= this value, on top of all
965 : the other requirements. Ignored when loop versioning is not being
966 : used. */
967 : poly_uint64 versioning_threshold;
968 :
969 : /* Unrolling factor. In case of suitable super-word parallelism
970 : it can be that no unrolling is needed, and thus this is 1. */
971 : poly_uint64 vectorization_factor;
972 :
973 : /* If this loop is an epilogue loop whose main loop can be skipped,
974 : MAIN_LOOP_EDGE is the edge from the main loop to this loop's
975 : preheader. SKIP_MAIN_LOOP_EDGE is then the edge that skips the
976 : main loop and goes straight to this loop's preheader.
977 :
978 : Both fields are null otherwise. */
979 : edge main_loop_edge;
980 : edge skip_main_loop_edge;
981 :
982 : /* If this loop is an epilogue loop that might be skipped after executing
983 : the main loop, this edge is the one that skips the epilogue. */
984 : edge skip_this_loop_edge;
985 :
986 : /* Reduction descriptors of this loop. Referenced to from SLP nodes
987 : by index. */
988 : auto_vec<vect_reduc_info> reduc_infos;
989 :
990 : /* The vectorized form of a standard reduction replaces the original
991 : scalar code's final result (a loop-closed SSA PHI) with the result
992 : of a vector-to-scalar reduction operation. After vectorization,
993 : this variable maps these vector-to-scalar results to information
994 : about the reductions that generated them. */
995 : hash_map<tree, vect_reusable_accumulator> reusable_accumulators;
996 :
997 : /* The number of times that the target suggested we unroll the vector loop
998 : in order to promote more ILP. This value will be used to re-analyze the
999 : loop for vectorization and if successful the value will be folded into
1000 : vectorization_factor (and therefore exactly divides
1001 : vectorization_factor). */
1002 : unsigned int suggested_unroll_factor;
1003 :
1004 : /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
1005 : if there is no particular limit. */
1006 : unsigned HOST_WIDE_INT max_vectorization_factor;
1007 :
1008 : /* The masks that a fully-masked loop should use to avoid operating
1009 : on inactive scalars. */
1010 : vec_loop_masks masks;
1011 :
1012 : /* The lengths that a loop with length should use to avoid operating
1013 : on inactive scalars. */
1014 : vec_loop_lens lens;
1015 :
1016 : /* Set of scalar conditions that have loop mask applied. */
1017 : scalar_cond_masked_set_type scalar_cond_masked_set;
1018 :
1019 : /* Set of vector conditions that have loop mask applied. */
1020 : vec_cond_masked_set_type vec_cond_masked_set;
1021 :
1022 : /* If we are using a loop mask to align memory addresses, this variable
1023 : contains the number of vector elements that we should skip in the
1024 : first iteration of the vector loop (i.e. the number of leading
1025 : elements that should be false in the first mask). */
1026 : tree mask_skip_niters;
1027 :
1028 : /* If we are using a loop mask to align memory addresses and we're in an
1029 : early break loop then this variable contains the number of elements that
1030 : were skipped during the initial iteration of the loop. */
1031 : tree mask_skip_niters_pfa_offset;
1032 :
1033 : /* The type that the loop control IV should be converted to before
1034 : testing which of the VF scalars are active and inactive.
1035 : Only meaningful if LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
1036 : tree rgroup_compare_type;
1037 :
1038 : /* For #pragma omp simd if (x) loops the x expression. If constant 0,
1039 : the loop should not be vectorized, if constant non-zero, simd_if_cond
1040 : shouldn't be set and loop vectorized normally, if SSA_NAME, the loop
1041 : should be versioned on that condition, using scalar loop if the condition
1042 : is false and vectorized loop otherwise. */
1043 : tree simd_if_cond;
1044 :
1045 : /* The type that the vector loop control IV should have when
1046 : LOOP_VINFO_USING_PARTIAL_VECTORS_P is true. */
1047 : tree rgroup_iv_type;
1048 :
1049 : /* The style used for implementing partial vectors when
1050 : LOOP_VINFO_USING_PARTIAL_VECTORS_P is true. */
1051 : vect_partial_vector_style partial_vector_style;
1052 :
1053 : /* The DR with unknown misalignment according to which the loop was peeled. */
1054 : class dr_vec_info *unaligned_dr;
1055 :
1056 : /* peeling_for_alignment indicates whether peeling for alignment will take
1057 : place, and what the peeling factor should be:
1058 : peeling_for_alignment = X means:
1059 : If X=0: Peeling for alignment will not be applied.
1060 : If X>0: Peel first X iterations.
1061 : If X=-1: Generate a runtime test to calculate the number of iterations
1062 : to be peeled, using the dataref recorded in the field
1063 : unaligned_dr. */
1064 : int peeling_for_alignment;
1065 :
1066 : /* The mask used to check the alignment of pointers or arrays. */
1067 : poly_uint64 ptr_mask;
1068 :
1069 : /* The maximum speculative read amount in VLA modes for runtime check. */
1070 : poly_uint64 max_spec_read_amount;
1071 :
1072 : /* Indicates whether the loop has any non-linear IV. */
1073 : bool nonlinear_iv;
1074 :
1075 : /* Data Dependence Relations defining address ranges that are candidates
1076 : for a run-time aliasing check. */
1077 : auto_vec<ddr_p> may_alias_ddrs;
1078 :
1079 : /* Data Dependence Relations defining address ranges together with segment
1080 : lengths from which the run-time aliasing check is built. */
1081 : auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
1082 :
1083 : /* Check that the addresses of each pair of objects is unequal. */
1084 : auto_vec<vec_object_pair> check_unequal_addrs;
1085 :
1086 : /* List of values that are required to be nonzero. This is used to check
1087 : whether things like "x[i * n] += 1;" are safe and eventually gets added
1088 : to the checks for lower bounds below. */
1089 : auto_vec<tree> check_nonzero;
1090 :
1091 : /* List of values that need to be checked for a minimum value. */
1092 : auto_vec<vec_lower_bound> lower_bounds;
1093 :
1094 : /* Statements in the loop that have data references that are candidates for a
1095 : runtime (loop versioning) misalignment check. */
1096 : auto_vec<stmt_vec_info> may_misalign_stmts;
1097 :
1098 : /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
1099 : auto_vec<stmt_vec_info> reductions;
1100 :
1101 : /* Defs that could not be analyzed such as OMP SIMD calls without
1102 : a LHS. */
1103 : auto_vec<stmt_vec_info> alternate_defs;
1104 :
1105 : /* Cost vector for a single scalar iteration. */
1106 : auto_vec<stmt_info_for_cost> scalar_cost_vec;
1107 :
1108 : /* Map of IV base/step expressions to inserted name in the preheader. */
1109 : hash_map<tree_operand_hash, tree> *ivexpr_map;
1110 :
1111 : /* Map of OpenMP "omp simd array" scan variables to corresponding
1112 : rhs of the store of the initializer. */
1113 : hash_map<tree, tree> *scan_map;
1114 :
1115 : /* The factor used to over weight those statements in an inner loop
1116 : relative to the loop being vectorized. */
1117 : unsigned int inner_loop_cost_factor;
1118 :
1119 : /* Is the loop vectorizable? */
1120 : bool vectorizable;
1121 :
1122 : /* Records whether we still have the option of vectorizing this loop
1123 : using partially-populated vectors; in other words, whether it is
1124 : still possible for one iteration of the vector loop to handle
1125 : fewer than VF scalars. */
1126 : bool can_use_partial_vectors_p;
1127 :
1128 : /* Records whether we must use niter masking for correctness reasons. */
1129 : bool must_use_partial_vectors_p;
1130 :
1131 : /* True if we've decided to use partially-populated vectors, so that
1132 : the vector loop can handle fewer than VF scalars. */
1133 : bool using_partial_vectors_p;
1134 :
1135 : /* True if we've decided to use a decrementing loop control IV that counts
1136 : scalars. This can be done for any loop that:
1137 :
1138 : (a) uses length "controls"; and
1139 : (b) can iterate more than once. */
1140 : bool using_decrementing_iv_p;
1141 :
1142 : /* True if we've decided to use output of select_vl to adjust IV of
1143 : both loop control and data reference pointer. This is only true
1144 : for single-rgroup control. */
1145 : bool using_select_vl_p;
1146 :
1147 : /* True if we've decided to use peeling with versioning together, which allows
1148 : unaligned unsupported data refs to be uniformly aligned after a certain
1149 : amount of peeling (mutual alignment). Otherwise, we use versioning alone
1150 : so these data refs must be already aligned to a power-of-two boundary
1151 : without peeling. */
1152 : bool allow_mutual_alignment;
1153 :
1154 : /* The bias for len_load and len_store. For now, only 0 and -1 are
1155 : supported. -1 must be used when a backend does not support
1156 : len_load/len_store with a length of zero. */
1157 : signed char partial_load_store_bias;
1158 :
1159 : /* When we have grouped data accesses with gaps, we may introduce invalid
1160 : memory accesses. We peel the last iteration of the loop to prevent
1161 : this. */
1162 : bool peeling_for_gaps;
1163 :
1164 : /* When the number of iterations is not a multiple of the vector size
1165 : we need to peel off iterations at the end to form an epilogue loop. */
1166 : bool peeling_for_niter;
1167 :
1168 : /* When the loop has early breaks that we can vectorize we need to peel
1169 : the loop for the break finding loop. */
1170 : bool early_breaks;
1171 :
1172 : /* List of loop additional IV conditionals found in the loop. */
1173 : auto_vec<gcond *> conds;
1174 :
1175 : /* Main loop IV cond. */
1176 : gcond* loop_iv_cond;
1177 :
1178 : /* True if we have an unroll factor requested by the user through pragma GCC
1179 : unroll. */
1180 : bool user_unroll;
1181 :
1182 : /* True if there are no loop carried data dependencies in the loop.
1183 : If loop->safelen <= 1, then this is always true, either the loop
1184 : didn't have any loop carried data dependencies, or the loop is being
1185 : vectorized guarded with some runtime alias checks, or couldn't
1186 : be vectorized at all, but then this field shouldn't be used.
1187 : For loop->safelen >= 2, the user has asserted that there are no
1188 : backward dependencies, but there still could be loop carried forward
1189 : dependencies in such loops. This flag will be false if normal
1190 : vectorizer data dependency analysis would fail or require versioning
1191 : for alias, but because of loop->safelen >= 2 it has been vectorized
1192 : even without versioning for alias. E.g. in:
1193 : #pragma omp simd
1194 : for (int i = 0; i < m; i++)
1195 : a[i] = a[i + k] * c;
1196 : (or #pragma simd or #pragma ivdep) we can vectorize this and it will
1197 : DTRT even for k > 0 && k < m, but without safelen we would not
1198 : vectorize this, so this field would be false. */
1199 : bool no_data_dependencies;
1200 :
1201 : /* Mark loops having masked stores. */
1202 : bool has_mask_store;
1203 :
1204 : /* Queued scaling factor for the scalar loop. */
1205 : profile_probability scalar_loop_scaling;
1206 :
1207 : /* If if-conversion versioned this loop before conversion, this is the
1208 : loop version without if-conversion. */
1209 : class loop *scalar_loop;
1210 :
1211 : /* For loops being epilogues of already vectorized loops
1212 : this points to the main vectorized loop. Otherwise NULL. */
1213 : _loop_vec_info *main_loop_info;
1214 :
1215 : /* For loops being epilogues of already vectorized loops
1216 : this points to the preceding vectorized (possibly epilogue) loop.
1217 : Otherwise NULL. */
1218 : _loop_vec_info *orig_loop_info;
1219 :
1220 : /* Used to store loop_vec_infos of the epilogue of this loop during
1221 : analysis. */
1222 : _loop_vec_info *epilogue_vinfo;
1223 :
1224 : /* If this is an epilogue loop the DR advancement applied. */
1225 : tree drs_advanced_by;
1226 :
1227 : /* The controlling loop exit for the current loop when vectorizing.
1228 : For counted loops, this IV controls the natural exits of the loop. */
1229 : edge vec_loop_main_exit;
1230 :
1231 : /* The controlling loop exit for the epilogue loop when vectorizing.
1232 : For counted loops, this IV controls the natural exits of the loop. */
1233 : edge vec_epilogue_loop_main_exit;
1234 :
1235 : /* The controlling loop exit for the scalar loop being vectorized.
1236 : For counted loops, this IV controls the natural exits of the loop. */
1237 : edge scalar_loop_main_exit;
1238 :
1239 : /* Used to store the list of stores needing to be moved if doing early
1240 : break vectorization as they would violate the scalar loop semantics if
1241 : vectorized in their current location. These are stored in order that they
1242 : need to be moved. */
1243 : auto_vec<gimple *> early_break_stores;
1244 :
1245 : /* The final basic block where to move statements to. In the case of
1246 : multiple exits this could be pretty far away. */
1247 : basic_block early_break_dest_bb;
1248 :
1249 : /* Statements whose VUSES need updating if early break vectorization is to
1250 : happen. */
1251 : auto_vec<gimple*> early_break_vuses;
1252 :
1253 : /* The IV adjustment value for inductions that needs to be materialized
1254 : inside the relevant exit blocks in order to adjust for early break. */
1255 : tree early_break_niters_var;
1256 :
1257 : /* The type of the variable to be used to create the scalar IV for early break
1258 : loops. */
1259 : tree early_break_iv_type;
1260 :
1261 : /* Record statements that are needed to be live for early break vectorization
1262 : but may not have an LC PHI node materialized yet in the exits. */
1263 : auto_vec<stmt_vec_info> early_break_live_ivs;
1264 : } *loop_vec_info;
1265 :
1266 : /* Access Functions. */
1267 : #define LOOP_VINFO_LOOP(L) (L)->loop
1268 : #define LOOP_VINFO_MAIN_EXIT(L) (L)->vec_loop_main_exit
1269 : #define LOOP_VINFO_EPILOGUE_MAIN_EXIT(L) (L)->vec_epilogue_loop_main_exit
1270 : #define LOOP_VINFO_SCALAR_MAIN_EXIT(L) (L)->scalar_loop_main_exit
1271 : #define LOOP_VINFO_BBS(L) (L)->bbs
1272 : #define LOOP_VINFO_NBBS(L) (L)->nbbs
1273 : #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
1274 : #define LOOP_VINFO_NITERS(L) (L)->num_iters
1275 : #define LOOP_VINFO_NITERS_UNCOUNTED_P(L) (LOOP_VINFO_NITERS (L) \
1276 : == chrec_dont_know)
1277 : /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
1278 : prologue peeling retain total unchanged scalar loop iterations for
1279 : cost model. */
1280 : #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
1281 : #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions
1282 : #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
1283 : #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
1284 : #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
1285 : #define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L) (L)->can_use_partial_vectors_p
1286 : #define LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P(L) (L)->must_use_partial_vectors_p
1287 : #define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L) (L)->using_partial_vectors_p
1288 : #define LOOP_VINFO_USING_DECREMENTING_IV_P(L) (L)->using_decrementing_iv_p
1289 : #define LOOP_VINFO_USING_SELECT_VL_P(L) (L)->using_select_vl_p
1290 : #define LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT(L) (L)->allow_mutual_alignment
1291 : #define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L) (L)->partial_load_store_bias
1292 : #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
1293 : #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor
1294 : #define LOOP_VINFO_MASKS(L) (L)->masks
1295 : #define LOOP_VINFO_LENS(L) (L)->lens
1296 : #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters
1297 : #define LOOP_VINFO_MASK_NITERS_PFA_OFFSET(L) (L)->mask_skip_niters_pfa_offset
1298 : #define LOOP_VINFO_RGROUP_COMPARE_TYPE(L) (L)->rgroup_compare_type
1299 : #define LOOP_VINFO_RGROUP_IV_TYPE(L) (L)->rgroup_iv_type
1300 : #define LOOP_VINFO_PARTIAL_VECTORS_STYLE(L) (L)->partial_vector_style
1301 : #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
1302 : #define LOOP_VINFO_MAX_SPEC_READ_AMOUNT(L) (L)->max_spec_read_amount
1303 : #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest
1304 : #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs
1305 : #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs
1306 : #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
1307 : #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
1308 : #define LOOP_VINFO_NON_LINEAR_IV(L) (L)->nonlinear_iv
1309 : #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
1310 : #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
1311 : #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
1312 : #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
1313 : #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs
1314 : #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero
1315 : #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds
1316 : #define LOOP_VINFO_USER_UNROLL(L) (L)->user_unroll
1317 : #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
1318 : #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
1319 : #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
1320 : #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
1321 : #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
1322 : #define LOOP_VINFO_EARLY_BREAKS(L) (L)->early_breaks
1323 : #define LOOP_VINFO_EARLY_BRK_STORES(L) (L)->early_break_stores
1324 : #define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED(L) \
1325 : ((single_pred ((L)->loop->latch) != (L)->vec_loop_main_exit->src) \
1326 : || LOOP_VINFO_NITERS_UNCOUNTED_P (L))
1327 : #define LOOP_VINFO_EARLY_BREAKS_LIVE_IVS(L) \
1328 : (L)->early_break_live_ivs
1329 : #define LOOP_VINFO_EARLY_BRK_DEST_BB(L) (L)->early_break_dest_bb
1330 : #define LOOP_VINFO_EARLY_BRK_VUSES(L) (L)->early_break_vuses
1331 : #define LOOP_VINFO_EARLY_BRK_NITERS_VAR(L) (L)->early_break_niters_var
1332 : #define LOOP_VINFO_EARLY_BRK_IV_TYPE(L) (L)->early_break_iv_type
1333 : #define LOOP_VINFO_LOOP_CONDS(L) (L)->conds
1334 : #define LOOP_VINFO_LOOP_IV_COND(L) (L)->loop_iv_cond
1335 : #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
1336 : #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
1337 : #define LOOP_VINFO_SCALAR_LOOP_SCALING(L) (L)->scalar_loop_scaling
1338 : #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store
1339 : #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
1340 : #define LOOP_VINFO_MAIN_LOOP_INFO(L) (L)->main_loop_info
1341 : #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info
1342 : #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond
1343 : #define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L) (L)->inner_loop_cost_factor
1344 : #define LOOP_VINFO_INV_PATTERN_DEF_SEQ(L) (L)->inv_pattern_def_seq
1345 : #define LOOP_VINFO_DRS_ADVANCED_BY(L) (L)->drs_advanced_by
1346 : #define LOOP_VINFO_ALTERNATE_DEFS(L) (L)->alternate_defs
1347 :
1348 : #define LOOP_VINFO_FULLY_MASKED_P(L) \
1349 : (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
1350 : && !LOOP_VINFO_MASKS (L).is_empty ())
1351 :
1352 : #define LOOP_VINFO_FULLY_WITH_LENGTH_P(L) \
1353 : (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
1354 : && !LOOP_VINFO_LENS (L).is_empty ())
1355 :
1356 : #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
1357 : ((L)->may_misalign_stmts.length () > 0)
1358 : #define LOOP_REQUIRES_VERSIONING_FOR_SPEC_READ(L) \
1359 : (maybe_gt ((L)->max_spec_read_amount, 0U))
1360 : #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
1361 : ((L)->comp_alias_ddrs.length () > 0 \
1362 : || (L)->check_unequal_addrs.length () > 0 \
1363 : || (L)->lower_bounds.length () > 0)
1364 : #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \
1365 : (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
1366 : #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \
1367 : (LOOP_VINFO_SIMD_IF_COND (L))
1368 : #define LOOP_REQUIRES_VERSIONING(L) \
1369 : (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \
1370 : || LOOP_REQUIRES_VERSIONING_FOR_SPEC_READ (L) \
1371 : || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \
1372 : || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \
1373 : || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))
1374 :
1375 : #define LOOP_VINFO_USE_VERSIONING_WITHOUT_PEELING(L) \
1376 : ((L)->may_misalign_stmts.length () > 0 \
1377 : && !LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (L))
1378 :
1379 : #define LOOP_VINFO_NITERS_KNOWN_P(L) \
1380 : (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
1381 :
1382 : #define LOOP_VINFO_EPILOGUE_P(L) \
1383 : (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
1384 :
1385 : #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
1386 : (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
1387 :
1388 : /* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
1389 : value signifies success, and a NULL value signifies failure, supporting
1390 : propagating an opt_problem * describing the failure back up the call
1391 : stack. */
1392 : typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
1393 :
1394 : inline loop_vec_info
1395 537893 : loop_vec_info_for_loop (class loop *loop)
1396 : {
     : /* The vectorizer stashes the loop_vec_info in the loop's generic AUX
     :    pointer; return it (NULL when none has been attached).  */
1397 537893 : return (loop_vec_info) loop->aux;
1398 : }
1399 :
1400 : struct slp_root
1401 : {
     : /* Construct a root of kind KIND_ over scalar stmts STMTS_ with root
     :    stmts ROOTS_ and trailing operands REMAIN_ (defaults to vNULL).
     :    NOTE(review): the precise roles of stmts/roots/remain are inferred
     :    from the field names — confirm against the SLP discovery code.  */
1402 1200089 : slp_root (slp_instance_kind kind_, vec<stmt_vec_info> stmts_,
1403 12453 : vec<stmt_vec_info> roots_, vec<tree> remain_ = vNULL)
1404 1200089 : : kind(kind_), stmts(stmts_), roots(roots_), remain(remain_) {}
     : /* What kind of SLP instance this root describes.  */
1405 : slp_instance_kind kind;
     : /* The scalar stmts the instance is built from.  */
1406 : vec<stmt_vec_info> stmts;
     : /* The root stmts of the instance, possibly empty.  */
1407 : vec<stmt_vec_info> roots;
     : /* Remaining scalar operands; vNULL when there are none.  */
1408 : vec<tree> remain;
1409 : };
1410 :
1411 : typedef class _bb_vec_info : public vec_info
1412 : {
1413 : public:
1414 : _bb_vec_info (vec<basic_block> bbs, vec_info_shared *);
1415 : ~_bb_vec_info ();
1416 :
     : /* SLP instance candidates discovered in the region; see slp_root.  */
1417 : vec<slp_root> roots;
1418 : } *bb_vec_info;
1419 :
1420 : #define BB_VINFO_BBS(B) (B)->bbs
1421 : #define BB_VINFO_NBBS(B) (B)->nbbs
1422 : #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
1423 : #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
1424 : #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs
1425 : #define BB_VINFO_DDRS(B) (B)->shared->ddrs
1426 :
1427 : /* Indicates whether/how a variable is used in the scope of loop/basic
1428 : block. */
1429 : enum vect_relevant {
1430 : vect_unused_in_scope = 0,
1431 :
1432 : /* The def is only used outside the loop. */
1433 : vect_used_only_live,
1434 : /* The def is in the inner loop, and the use is in the outer loop, and the
1435 : use is a reduction stmt. */
1436 : vect_used_in_outer_by_reduction,
1437 : /* The def is in the inner loop, and the use is in the outer loop (and is
1438 : not part of reduction). */
1439 : vect_used_in_outer,
1440 :
1441 : /* defs that feed computations that end up (only) in a reduction. These
1442 : defs may be used by non-reduction stmts, but eventually, any
1443 : computations/values that are affected by these defs are used to compute
1444 : a reduction (i.e. don't get stored to memory, for example). We use this
1445 : to identify computations that we can change the order in which they are
1446 : computed. */
1447 : vect_used_by_reduction,
1448 :
1449 : vect_used_in_scope
1450 : };
1451 :
1452 : /* The type of vectorization. pure_slp means the stmt is covered by the
1453 : SLP graph, not_vect means it is not. This is mostly used by BB
1454 : vectorization. */
1455 : enum slp_vect_type {
1456 : not_vect = 0,
1457 : pure_slp,
1458 : };
1459 :
1460 : /* Says whether a statement is a load, a store of a vectorized statement
1461 : result, or a store of an invariant value. */
1462 : enum vec_load_store_type {
1463 : VLS_LOAD,
1464 : VLS_STORE,
1465 : VLS_STORE_INVARIANT
1466 : };
1467 :
1468 : class dr_vec_info {
1469 : public:
1470 : /* The data reference itself. */
1471 : data_reference *dr;
1472 : /* The statement that contains the data reference. */
1473 : stmt_vec_info stmt;
1474 : /* The analysis group this DR belongs to when doing BB vectorization.
1475 : DRs of the same group belong to the same conditional execution context. */
1476 : unsigned group;
1477 : /* The misalignment in bytes of the reference, or -1 if not known. */
1478 : int misalignment;
1479 : /* The byte alignment that we'd ideally like the reference to have,
1480 : and the value that misalignment is measured against. */
1481 : poly_uint64 target_alignment;
1482 : /* If true the alignment of base_decl needs to be increased. */
1483 : bool base_misaligned;
1484 :
1485 : /* Set by early break vectorization when this DR needs peeling for alignment
1486 : for correctness. */
1487 : bool safe_speculative_read_required;
1488 :
1489 : /* Set by early break vectorization when this DR's scalar accesses are known
1490 : to be inbounds of a known bounds loop. */
1491 : bool scalar_access_known_in_bounds;
1492 :
     : /* The decl whose alignment is raised when BASE_MISALIGNED is set.  */
1493 : tree base_decl;
1494 :
1495 : /* Stores current vectorized loop's offset. To be added to the DR's
1496 : offset to calculate current offset of data reference. */
1497 : tree offset;
1498 : };
1499 :
1500 : typedef struct data_reference *dr_p;
1501 :
1502 : class _stmt_vec_info {
1503 : public:
1504 :
1505 : /* Indicates whether this stmts is part of a computation whose result is
1506 : used outside the loop. */
1507 : bool live;
1508 :
1509 : /* Stmt is part of some pattern (computation idiom) */
1510 : bool in_pattern_p;
1511 :
1512 : /* True if the statement was created during pattern recognition as
1513 : part of the replacement for RELATED_STMT. This implies that the
1514 : statement isn't part of any basic block, although for convenience
1515 : its gimple_bb is the same as for RELATED_STMT. */
1516 : bool pattern_stmt_p;
1517 :
1518 : /* Is this statement vectorizable or should it be skipped in (partial)
1519 : vectorization. */
1520 : bool vectorizable;
1521 :
1522 : /* The stmt to which this info struct refers to. */
1523 : gimple *stmt;
1524 :
1525 : /* The vector type to be used for the LHS of this statement. */
1526 : tree vectype;
1527 :
1528 : /* The following is relevant only for stmts that contain a non-scalar
1529 : data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
1530 : at most one such data-ref. */
1531 :
1532 : dr_vec_info dr_aux;
1533 :
1534 : /* Information about the data-ref relative to this loop
1535 : nest (the loop that is being considered for vectorization). */
1536 : innermost_loop_behavior dr_wrt_vec_loop;
1537 :
1538 : /* For loop PHI nodes, the base and evolution part of it. This makes sure
1539 : this information is still available in vect_update_ivs_after_vectorizer
1540 : where we may not be able to re-analyze the PHI nodes evolution as
1541 : peeling for the prologue loop can make it unanalyzable. The evolution
1542 : part is still correct after peeling, but the base may have changed from
1543 : the version here. */
1544 : tree loop_phi_evolution_base_unchanged;
1545 : tree loop_phi_evolution_part;
1546 : enum vect_induction_op_type loop_phi_evolution_type;
1547 :
1548 : /* Used for various bookkeeping purposes, generally holding a pointer to
1549 : some other stmt S that is in some way "related" to this stmt.
1550 : Current use of this field is:
1551 : If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
1552 : true): S is the "pattern stmt" that represents (and replaces) the
1553 : sequence of stmts that constitutes the pattern. Similarly, the
1554 : related_stmt of the "pattern stmt" points back to this stmt (which is
1555 : the last stmt in the original sequence of stmts that constitutes the
1556 : pattern). */
1557 : stmt_vec_info related_stmt;
1558 :
1559 : /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
1560 : The sequence is attached to the original statement rather than the
1561 : pattern statement. */
1562 : gimple_seq pattern_def_seq;
1563 :
1564 : /* Classify the def of this stmt. */
1565 : enum vect_def_type def_type;
1566 :
1567 : /* Whether the stmt is SLPed, loop-based vectorized, or both. */
1568 : enum slp_vect_type slp_type;
1569 :
1570 : /* Interleaving chains info. */
1571 : /* First element in the group. */
1572 : stmt_vec_info first_element;
1573 : /* Pointer to the next element in the group. */
1574 : stmt_vec_info next_element;
1575 : /* The size of the group. */
1576 : unsigned int size;
1577 : /* For loads only, the gap from the previous load. For consecutive loads, GAP
1578 : is 1. */
1579 : unsigned int gap;
1580 :
1581 : /* The minimum negative dependence distance this stmt participates in
1582 : or zero if none. */
1583 : unsigned int min_neg_dist;
1584 :
1585 : /* Not all stmts in the loop need to be vectorized. e.g, the increment
1586 : of the loop induction variable and computation of array indexes. relevant
1587 : indicates whether the stmt needs to be vectorized. */
1588 : enum vect_relevant relevant;
1589 :
1590 : /* For loads if this is a gather, for stores if this is a scatter. */
1591 : bool gather_scatter_p;
1592 :
1593 : /* True if this is an access with loop-invariant stride. */
1594 : bool strided_p;
1595 :
1596 : /* For both loads and stores. */
1597 : unsigned simd_lane_access_p : 3;
1598 :
1599 : /* On a reduction PHI the reduction type as detected by
1600 : vect_is_simple_reduction. */
1601 : enum vect_reduction_type reduc_type;
1602 :
1603 : /* On a reduction PHI, the original reduction code as detected by
1604 : vect_is_simple_reduction. */
1605 : code_helper reduc_code;
1606 :
1607 : /* On a stmt participating in a reduction the index of the operand
1608 : on the reduction SSA cycle. */
1609 : int reduc_idx;
1610 :
1611 : /* On a reduction PHI the def returned by vect_is_simple_reduction.
1612 : On the def returned by vect_is_simple_reduction the corresponding PHI. */
1613 : stmt_vec_info reduc_def;
1614 :
1615 : /* If nonzero, the lhs of the statement could be truncated to this
1616 : many bits without affecting any users of the result. */
1617 : unsigned int min_output_precision;
1618 :
1619 : /* If nonzero, all non-boolean input operands have the same precision,
1620 : and they could each be truncated to this many bits without changing
1621 : the result. */
1622 : unsigned int min_input_precision;
1623 :
1624 : /* If OPERATION_BITS is nonzero, the statement could be performed on
1625 : an integer with the sign and number of bits given by OPERATION_SIGN
1626 : and OPERATION_BITS without changing the result. */
1627 : unsigned int operation_precision;
1628 : signop operation_sign;
1629 :
1630 : /* If the statement produces a boolean result, this value describes
1631 : how we should choose the associated vector type. The possible
1632 : values are:
1633 :
1634 : - an integer precision N if we should use the vector mask type
1635 : associated with N-bit integers. This is only used if all relevant
1636 : input booleans also want the vector mask type for N-bit integers,
1637 : or if we can convert them into that form by pattern-matching.
1638 :
1639 : - ~0U if we considered choosing a vector mask type but decided
1640 : to treat the boolean as a normal integer type instead.
1641 :
1642 : - 0 otherwise. This means either that the operation isn't one that
1643 : could have a vector mask type (and so should have a normal vector
1644 : type instead) or that we simply haven't made a choice either way. */
1645 : unsigned int mask_precision;
1646 :
1647 : /* True if this is only suitable for SLP vectorization. */
1648 : bool slp_vect_only_p;
1649 :
1650 : /* True if this is a pattern that can only be handled by SLP
1651 : vectorization. */
1652 : bool slp_vect_pattern_only_p;
1653 : };
1654 :
1655 : /* Information about a gather/scatter call.  The scalar access for
     :    element I is at BASE + OFFSET[I] * SCALE; the operation is carried
     :    out either by internal function IFN or by built-in DECL (the
     :    unused alternative is IFN_LAST resp. null).  */
1656 : struct gather_scatter_info {
1657 : /* The internal function to use for the gather/scatter operation,
1658 : or IFN_LAST if a built-in function should be used instead. */
1659 : internal_fn ifn;
1660 :
1661 : /* The FUNCTION_DECL for the built-in gather/scatter function,
1662 : or null if an internal function should be used instead. */
1663 : tree decl;
1664 :
1665 : /* The loop-invariant base value. */
1666 : tree base;
1667 :
1668 : /* The TBAA alias pointer the value of which determines the alignment
1669 : of the scalar accesses. */
1670 : tree alias_ptr;
1671 :
1672 : /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */
1673 : tree offset;
1674 :
1675 : /* Each offset element should be multiplied by this amount before
1676 : being added to the base. */
1677 : int scale;
1678 :
1679 : /* The type of the vectorized offset. */
1680 : tree offset_vectype;
1681 :
1682 : /* The type of the scalar elements after loading or before storing. */
1683 : tree element_type;
1684 :
1685 : /* The type of the scalar elements being loaded or stored. */
1686 : tree memory_type;
1687 : };
1688 :
1689 : /* Access Functions. */
1690 : #define STMT_VINFO_STMT(S) (S)->stmt
1691 : #define STMT_VINFO_RELEVANT(S) (S)->relevant
1692 : #define STMT_VINFO_LIVE_P(S) (S)->live
1693 : #define STMT_VINFO_VECTYPE(S) (S)->vectype
1694 : #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
1695 : #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0)
1696 : #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
1697 : #define STMT_VINFO_STRIDED_P(S) (S)->strided_p
1698 : #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
1699 : #define STMT_VINFO_REDUC_IDX(S) (S)->reduc_idx
1700 :
1701 : #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop
1702 : #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address
1703 : #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init
1704 : #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset
1705 : #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step
1706 : #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment
1707 : #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
1708 : (S)->dr_wrt_vec_loop.base_misalignment
1709 : #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
1710 : (S)->dr_wrt_vec_loop.offset_alignment
1711 : #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
1712 : (S)->dr_wrt_vec_loop.step_alignment
1713 :
1714 : #define STMT_VINFO_DR_INFO(S) \
1715 : (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
1716 :
1717 : #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
1718 : #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
1719 : #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
1720 : #define STMT_VINFO_DEF_TYPE(S) (S)->def_type
1721 : #define STMT_VINFO_GROUPED_ACCESS(S) \
1722 : ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
1723 : #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
1724 : #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
1725 : #define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE(S) (S)->loop_phi_evolution_type
1726 : #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
1727 : #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
1728 : #define STMT_VINFO_REDUC_CODE(S) (S)->reduc_code
1729 : #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def
1730 : #define STMT_VINFO_SLP_VECT_ONLY(S) (S)->slp_vect_only_p
1731 : #define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S) (S)->slp_vect_pattern_only_p
1732 : #define STMT_VINFO_REDUC_VECTYPE_IN(S) (S)->reduc_vectype_in
1733 :
1734 : #define DR_GROUP_FIRST_ELEMENT(S) \
1735 : (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
1736 : #define DR_GROUP_NEXT_ELEMENT(S) \
1737 : (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
1738 : #define DR_GROUP_SIZE(S) \
1739 : (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
1740 : #define DR_GROUP_GAP(S) \
1741 : (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
1742 :
1743 : #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
1744 :
1745 : #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
1746 : #define STMT_SLP_TYPE(S) (S)->slp_type
1747 :
1748 :
1749 : /* Contains the scalar or vector costs for a vec_info. */
1750 : class vector_costs
1751 : {
1752 : public:
1753 : vector_costs (vec_info *, bool);
1754 0 : virtual ~vector_costs () {}
1755 :
1756 : /* Update the costs in response to adding COUNT copies of a statement.
1757 :
1758 : - WHERE specifies whether the cost occurs in the loop prologue,
1759 : the loop body, or the loop epilogue.
1760 : - KIND is the kind of statement, which is always meaningful.
1761 : - STMT_INFO or NODE, if nonnull, describe the statement that will be
1762 : vectorized.
1763 : - VECTYPE, if nonnull, is the vector type that the vectorized
1764 : statement will operate on. Note that this should be used in
1765 : preference to STMT_VINFO_VECTYPE (STMT_INFO) since the latter
1766 : is not correct for SLP.
1767 : - for unaligned_load and unaligned_store statements, MISALIGN is
1768 : the byte misalignment of the load or store relative to the target's
1769 : preferred alignment for VECTYPE, or DR_MISALIGNMENT_UNKNOWN
1770 : if the misalignment is not known.
1771 :
1772 : Return the calculated cost as well as recording it. The return
1773 : value is used for dumping purposes. */
1774 : virtual unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind,
1775 : stmt_vec_info stmt_info,
1776 : slp_tree node,
1777 : tree vectype, int misalign,
1778 : vect_cost_model_location where);
1779 :
1780 : /* Finish calculating the cost of the code. The results can be
1781 : read back using the functions below.
1782 :
1783 : If the costs describe vector code, SCALAR_COSTS gives the costs
1784 : of the corresponding scalar code, otherwise it is null. */
1785 : virtual void finish_cost (const vector_costs *scalar_costs);
1786 :
1787 : /* The costs in THIS and OTHER both describe ways of vectorizing
1788 : a main loop. Return true if the costs described by THIS are
1789 : cheaper than the costs described by OTHER. Return false if any
1790 : of the following are true:
1791 :
1792 : - THIS and OTHER are of equal cost
1793 : - OTHER is better than THIS
1794 : - we can't be sure about the relative costs of THIS and OTHER. */
1795 : virtual bool better_main_loop_than_p (const vector_costs *other) const;
1796 :
1797 : /* Likewise, but the costs in THIS and OTHER both describe ways of
1798 : vectorizing an epilogue loop of MAIN_LOOP. */
1799 : virtual bool better_epilogue_loop_than_p (const vector_costs *other,
1800 : loop_vec_info main_loop) const;
1801 :
1802 : unsigned int prologue_cost () const;
1803 : unsigned int body_cost () const;
1804 : unsigned int epilogue_cost () const;
1805 : unsigned int outside_cost () const;
1806 : unsigned int total_cost () const;
1807 : unsigned int suggested_unroll_factor () const;
1808 : machine_mode suggested_epilogue_mode (int &masked) const;
1809 6656078 : bool costing_for_scalar () const { return m_costing_for_scalar; }
1810 :
1811 : protected:
1812 : unsigned int record_stmt_cost (stmt_vec_info, vect_cost_model_location,
1813 : unsigned int);
1814 : unsigned int adjust_cost_for_freq (stmt_vec_info, vect_cost_model_location,
1815 : unsigned int);
1816 : int compare_inside_loop_cost (const vector_costs *) const;
1817 : int compare_outside_loop_cost (const vector_costs *) const;
1818 :
1819 : /* The region of code that we're considering vectorizing. */
1820 : vec_info *m_vinfo;
1821 :
1822 : /* True if we're costing the scalar code, false if we're costing
1823 : the vector code. */
1824 : bool m_costing_for_scalar;
1825 :
1826 : /* The costs of the three regions, indexed by vect_cost_model_location. */
1827 : unsigned int m_costs[3];
1828 :
1829 : /* The suggested unrolling factor determined at finish_cost. */
1830 : unsigned int m_suggested_unroll_factor;
1831 :
1832 : /* The suggested mode to be used for a vectorized epilogue or VOIDmode,
1833 : determined at finish_cost. m_masked_epilogue specifies whether the
1834 : epilogue should use masked vectorization, regardless of the
1835 : --param vect-partial-vector-usage default. If -1 then the
1836 : --param setting takes precedence. If the user explicitly specified
1837 : --param vect-partial-vector-usage then that takes precedence. */
1838 : machine_mode m_suggested_epilogue_mode;
1839 : int m_masked_epilogue;
1840 :
1841 : /* True if finish_cost has been called. */
1842 : bool m_finished;
1843 : };
1844 :
1845 : /* Create costs for VINFO. COSTING_FOR_SCALAR is true if the costs
1846 : are for scalar code, false if they are for vector code. */
1847 :
1848 : inline
1849 1966356 : vector_costs::vector_costs (vec_info *vinfo, bool costing_for_scalar)
1850 1966356 : : m_vinfo (vinfo),
1851 1966356 : m_costing_for_scalar (costing_for_scalar),
1852 1966356 : m_costs (),
1853 1966356 : m_suggested_unroll_factor(1),
1854 1966356 : m_suggested_epilogue_mode(VOIDmode),
1855 1966356 : m_masked_epilogue (-1),
1856 1966356 : m_finished (false)
1857 : {
1858 : }
1859 :
1860 : /* Return the cost of the prologue code (in abstract units). */
1861 :
1862 : inline unsigned int
1863 819954 : vector_costs::prologue_cost () const
1864 : {
1865 819954 : gcc_checking_assert (m_finished);
1866 819954 : return m_costs[vect_prologue];
1867 : }
1868 :
1869 : /* Return the cost of the body code (in abstract units). */
1870 :
1871 : inline unsigned int
1872 1475086 : vector_costs::body_cost () const
1873 : {
1874 1475086 : gcc_checking_assert (m_finished);
1875 1475086 : return m_costs[vect_body];
1876 : }
1877 :
1878 : /* Return the cost of the epilogue code (in abstract units). */
1879 :
1880 : inline unsigned int
1881 819954 : vector_costs::epilogue_cost () const
1882 : {
1883 819954 : gcc_checking_assert (m_finished);
1884 819954 : return m_costs[vect_epilogue];
1885 : }
1886 :
1887 : /* Return the cost of the prologue and epilogue code (in abstract units). */
1888 :
1889 : inline unsigned int
1890 82416 : vector_costs::outside_cost () const
1891 : {
1892 82416 : return prologue_cost () + epilogue_cost ();
1893 : }
1894 :
1895 : /* Return the cost of the prologue, body and epilogue code
1896 : (in abstract units). */
1897 :
1898 : inline unsigned int
1899 82416 : vector_costs::total_cost () const
1900 : {
1901 82416 : return body_cost () + outside_cost ();
1902 : }
1903 :
1904 : /* Return the suggested unroll factor. */
1905 :
1906 : inline unsigned int
1907 82229 : vector_costs::suggested_unroll_factor () const
1908 : {
1909 82229 : gcc_checking_assert (m_finished);
1910 82229 : return m_suggested_unroll_factor;
1911 : }
1912 :
1913 : /* Return the suggested epilogue mode. */
1914 :
1915 : inline machine_mode
1916 13117 : vector_costs::suggested_epilogue_mode (int &masked_p) const
1917 : {
1918 13117 : gcc_checking_assert (m_finished);
1919 13117 : masked_p = m_masked_epilogue;
1920 13117 : return m_suggested_epilogue_mode;
1921 : }
1922 :
1923 : #define VECT_MAX_COST 1000
1924 :
1925 : /* The maximum number of intermediate steps required in multi-step type
1926 : conversion. */
1927 : #define MAX_INTERM_CVT_STEPS 3
1928 :
1929 : #define MAX_VECTORIZATION_FACTOR INT_MAX
1930 :
1931 : /* Nonzero if TYPE represents a (scalar) boolean type or type
1932 : in the middle-end compatible with it (unsigned precision 1 integral
1933 : types). Used to determine which types should be vectorized as
1934 : VECTOR_BOOLEAN_TYPE_P. */
1935 :
1936 : #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
1937 : (TREE_CODE (TYPE) == BOOLEAN_TYPE \
1938 : || ((TREE_CODE (TYPE) == INTEGER_TYPE \
1939 : || TREE_CODE (TYPE) == ENUMERAL_TYPE) \
1940 : && TYPE_PRECISION (TYPE) == 1 \
1941 : && TYPE_UNSIGNED (TYPE)))
1942 :
     : /* Return true if the basic block of STMT_INFO's stmt belongs directly
     :    to LOOP's inner loop, i.e. STMT_INFO sits in the nested loop of the
     :    outer loop LOOP (outer-loop vectorization).  */
1943 : inline bool
1944 9723918 : nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
1945 : {
1946 9723918 : return (loop->inner
1947 7799587 : && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
1948 : }
1949 :
1950 : /* PHI is either a scalar reduction phi or a scalar induction phi.
1951 : Return the initial value of the variable on entry to the containing
1952 : loop. */
1953 :
1954 : inline tree
1955 33717 : vect_phi_initial_value (gphi *phi)
1956 : {
1957 33717 : basic_block bb = gimple_bb (phi);
1958 33717 : edge pe = loop_preheader_edge (bb->loop_father);
1959 33717 : gcc_assert (pe->dest == bb);
1960 33717 : return PHI_ARG_DEF_FROM_EDGE (phi, pe);
1961 : }
1962 :
1963 : /* Return true if STMT_INFO should produce a vector mask type rather than
1964 : a normal nonmask type. */
1965 :
1966 : inline bool
1967 6747897 : vect_use_mask_type_p (stmt_vec_info stmt_info)
1968 : {
1969 6747897 : return stmt_info->mask_precision && stmt_info->mask_precision != ~0U;
1970 : }
1971 :
1972 : /* Return TRUE if a statement represented by STMT_INFO is a part of a
1973 : pattern. */
1974 :
1975 : inline bool
1976 119569779 : is_pattern_stmt_p (stmt_vec_info stmt_info)
1977 : {
1978 77333097 : return stmt_info->pattern_stmt_p;
1979 : }
1980 :
1981 : /* If STMT_INFO is a pattern statement, return the statement that it
1982 : replaces, otherwise return STMT_INFO itself. */
1983 :
1984 : inline stmt_vec_info
1985 46151147 : vect_orig_stmt (stmt_vec_info stmt_info)
1986 : {
1987 34171540 : if (is_pattern_stmt_p (stmt_info))
1988 3155006 : return STMT_VINFO_RELATED_STMT (stmt_info);
1989 : return stmt_info;
1990 : }
1991 :
1992 : /* Return the later statement between STMT1_INFO and STMT2_INFO.
     :    Pattern statements are compared through the original statements
     :    they replace (see vect_orig_stmt), so the position queries below
     :    only ever see statements actually present in the IL.  */
1993 :
1994 : inline stmt_vec_info
1995 5866148 : get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
1996 : {
1997 5866148 : gimple *stmt1 = vect_orig_stmt (stmt1_info)->stmt;
1998 5866148 : gimple *stmt2 = vect_orig_stmt (stmt2_info)->stmt;
1999 5866148 : if (gimple_bb (stmt1) == gimple_bb (stmt2))
2000 : {
     : /* Within one block the UIDs give the relative stmt positions.
     :    NOTE(review): assumes UIDs were assigned in statement order by
     :    the caller's renumbering -- confirm.  */
2001 5840459 : if (gimple_uid (stmt1) > gimple_uid (stmt2))
2002 : return stmt1_info;
2003 : else
2004 : return stmt2_info;
2005 : }
2006 : /* ??? We should be really calling this function only with stmts
2007 : in the same BB but we can recover if there's a domination
2008 : relationship between them. */
2009 25689 : else if (dominated_by_p (CDI_DOMINATORS,
2010 25689 : gimple_bb (stmt1), gimple_bb (stmt2)))
2011 : return stmt1_info;
2012 8846 : else if (dominated_by_p (CDI_DOMINATORS,
2013 8846 : gimple_bb (stmt2), gimple_bb (stmt1)))
2014 : return stmt2_info;
     : /* Distinct blocks with no domination relation either way: callers
     :    must not reach here.  */
2015 0 : gcc_unreachable ();
2016 : }
2017 :
2018 : /* If STMT_INFO has been replaced by a pattern statement, return the
2019 : replacement statement, otherwise return STMT_INFO itself. */
2020 :
2021 : inline stmt_vec_info
2022 47665162 : vect_stmt_to_vectorize (stmt_vec_info stmt_info)
2023 : {
2024 47665162 : if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2025 1454344 : return STMT_VINFO_RELATED_STMT (stmt_info);
2026 : return stmt_info;
2027 : }
2028 :
2029 : /* Return true if BB is a loop header. */
2030 :
2031 : inline bool
2032 1294681 : is_loop_header_bb_p (basic_block bb)
2033 : {
2034 1294681 : if (bb == (bb->loop_father)->header)
2035 1284366 : return true;
2036 :
2037 : return false;
2038 : }
2039 :
2040 : /* Return pow2 (X). */
2041 :
2042 : inline int
2043 : vect_pow2 (int x)
2044 : {
2045 : int i, res = 1;
2046 :
2047 : for (i = 0; i < x; i++)
2048 : res *= 2;
2049 :
2050 : return res;
2051 : }
2052 :
2053 : /* Alias targetm.vectorize.builtin_vectorization_cost. */
2054 :
2055 : inline int
2056 9353319 : builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
2057 : tree vectype, int misalign)
2058 : {
2059 9353319 : return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
2060 : vectype, misalign);
2061 : }
2062 :
2063 : /* Get cost by calling cost target builtin. */
2064 :
2065 : inline
2066 150 : int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
2067 : {
2068 49880 : return builtin_vectorization_cost (type_of_cost, NULL, 0);
2069 : }
2070 :
2071 : /* Alias targetm.vectorize.init_cost. */
2072 :
2073 : inline vector_costs *
2074 1966356 : init_cost (vec_info *vinfo, bool costing_for_scalar)
2075 : {
2076 1966356 : return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
2077 : }
2078 :
2079 : extern void dump_stmt_cost (FILE *, int, enum vect_cost_for_stmt,
2080 : stmt_vec_info, slp_tree, tree, int, unsigned,
2081 : enum vect_cost_model_location);
2082 :
2083 : /* Dump and add costs. */
2084 :
2085 : inline unsigned
2086 6656078 : add_stmt_cost (vector_costs *costs, int count,
2087 : enum vect_cost_for_stmt kind,
2088 : stmt_vec_info stmt_info, slp_tree node,
2089 : tree vectype, int misalign,
2090 : enum vect_cost_model_location where)
2091 : {
2092 : /* Even though a vector type might be set on stmt do not pass that on when
2093 : costing the scalar IL. A SLP node shouldn't have been recorded. */
2094 6656078 : if (costs->costing_for_scalar ())
2095 : {
2096 3510173 : vectype = NULL_TREE;
2097 3510173 : gcc_checking_assert (node == NULL);
2098 : }
2099 6656078 : unsigned cost = costs->add_stmt_cost (count, kind, stmt_info, node, vectype,
2100 : misalign, where);
2101 6656078 : if (dump_file && (dump_flags & TDF_DETAILS))
2102 214045 : dump_stmt_cost (dump_file, count, kind, stmt_info, node, vectype, misalign,
2103 : cost, where);
2104 6656078 : return cost;
2105 : }
2106 :
2107 : inline unsigned
2108 59597 : add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind,
2109 : enum vect_cost_model_location where)
2110 : {
2111 59597 : gcc_assert (kind == cond_branch_taken || kind == cond_branch_not_taken
2112 : || kind == scalar_stmt);
2113 59597 : return add_stmt_cost (costs, count, kind, NULL, NULL, NULL_TREE, 0, where);
2114 : }
2115 :
2116 : inline unsigned
2117 3825714 : add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
2118 : {
2119 3825714 : return add_stmt_cost (costs, i->count, i->kind, i->stmt_info, i->node,
2120 3825714 : i->vectype, i->misalign, i->where);
2121 : }
2122 :
2123 : inline void
2124 519515 : add_stmt_costs (vector_costs *costs, stmt_vector_for_cost *cost_vec)
2125 : {
2126 519515 : stmt_info_for_cost *cost;
2127 519515 : unsigned i;
2128 3064703 : FOR_EACH_VEC_ELT (*cost_vec, i, cost)
2129 2545188 : add_stmt_cost (costs, cost->count, cost->kind, cost->stmt_info,
2130 : cost->node, cost->vectype, cost->misalign, cost->where);
2131 519515 : }
2132 :
2133 : /*-----------------------------------------------------------------*/
2134 : /* Info on data references alignment. */
2135 : /*-----------------------------------------------------------------*/
2136 : #define DR_MISALIGNMENT_UNKNOWN (-1)
2137 : #define DR_MISALIGNMENT_UNINITIALIZED (-2)
2138 :
2139 : inline void
2140 2372885 : set_dr_misalignment (dr_vec_info *dr_info, int val)
2141 : {
2142 2372885 : dr_info->misalignment = val;
2143 : }
2144 :
2145 : extern int dr_misalignment (dr_vec_info *dr_info, tree vectype,
2146 : poly_int64 offset = 0);
2147 :
2148 : #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
2149 :
2150 : /* Only defined once DR_MISALIGNMENT is defined. */
2151 : inline const poly_uint64
2152 6234613 : dr_target_alignment (dr_vec_info *dr_info)
2153 : {
2154 6234613 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
2155 4555712 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
2156 6234613 : return dr_info->target_alignment;
2157 : }
2158 : #define DR_TARGET_ALIGNMENT(DR) dr_target_alignment (DR)
2159 : #define DR_SCALAR_KNOWN_BOUNDS(DR) (DR)->scalar_access_known_in_bounds
2160 :
2161 : /* Return if the stmt_vec_info requires peeling for alignment. */
2162 : inline bool
2163 3878846 : dr_safe_speculative_read_required (stmt_vec_info stmt_info)
2164 : {
2165 3878846 : dr_vec_info *dr_info;
2166 3878846 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2167 1630713 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (stmt_info));
2168 : else
2169 2248133 : dr_info = STMT_VINFO_DR_INFO (stmt_info);
2170 :
2171 3878846 : return dr_info->safe_speculative_read_required;
2172 : }
2173 :
2174 : /* Set the safe_speculative_read_required for the stmt_vec_info: if it is
2175 : a group access then set it on the first element, otherwise set it on the DR directly. */
2176 : inline void
2177 221622 : dr_set_safe_speculative_read_required (stmt_vec_info stmt_info,
2178 : bool requires_alignment)
2179 : {
2180 221622 : dr_vec_info *dr_info;
2181 221622 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2182 67568 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (stmt_info));
2183 : else
2184 154054 : dr_info = STMT_VINFO_DR_INFO (stmt_info);
2185 :
2186 221622 : dr_info->safe_speculative_read_required = requires_alignment;
2187 221622 : }
2188 :
2189 : inline void
2190 1459055 : set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
2191 : {
2192 1459055 : dr_info->target_alignment = val;
2193 : }
2194 : #define SET_DR_TARGET_ALIGNMENT(DR, VAL) set_dr_target_alignment (DR, VAL)
2195 :
2196 : /* Return true if data access DR_INFO is aligned to the targets
2197 : preferred alignment for VECTYPE (which may be less than a full vector). */
2198 :
2199 : inline bool
2200 309301 : aligned_access_p (dr_vec_info *dr_info, tree vectype)
2201 : {
2202 309301 : return (dr_misalignment (dr_info, vectype) == 0);
2203 : }
2204 :
2205 : /* Return TRUE if the (mis-)alignment of the data access is known with
2206 : respect to the targets preferred alignment for VECTYPE, and FALSE
2207 : otherwise. */
2208 :
2209 : inline bool
2210 1858158 : known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
2211 : {
2212 1663462 : return (dr_misalignment (dr_info, vectype) != DR_MISALIGNMENT_UNKNOWN);
2213 : }
2214 :
2215 : /* Return the minimum alignment in bytes that the vectorized version
2216 : of DR_INFO is guaranteed to have. */
2217 :
2218 : inline unsigned int
2219 269312 : vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype,
2220 : poly_int64 offset = 0)
2221 : {
2222 269312 : int misalignment = dr_misalignment (dr_info, vectype, offset);
2223 269312 : if (misalignment == DR_MISALIGNMENT_UNKNOWN)
2224 132123 : return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
2225 137189 : else if (misalignment == 0)
2226 94746 : return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
2227 42443 : return misalignment & -misalignment;
2228 : }
2229 :
2230 : /* Return the behavior of DR_INFO with respect to the vectorization context
2231 : (which for outer loop vectorization might not be the behavior recorded
2232 : in DR_INFO itself). */
2233 :
2234 : inline innermost_loop_behavior *
2235 5357437 : vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
2236 : {
2237 5357437 : stmt_vec_info stmt_info = dr_info->stmt;
2238 5357437 : loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo);
2239 1931064 : if (loop_vinfo == NULL
2240 1931064 : || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
2241 5353722 : return &DR_INNERMOST (dr_info->dr);
2242 : else
2243 3715 : return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
2244 : }
2245 :
2246 : /* Return the offset calculated by adding the offset of this DR_INFO to the
2247 : corresponding data_reference's offset. If CHECK_OUTER then use
2248 : vect_dr_behavior to select the appropriate data_reference to use. */
2249 :
2250 : inline tree
2251 728131 : get_dr_vinfo_offset (vec_info *vinfo,
2252 : dr_vec_info *dr_info, bool check_outer = false)
2253 : {
2254 728131 : innermost_loop_behavior *base;
2255 728131 : if (check_outer)
2256 689490 : base = vect_dr_behavior (vinfo, dr_info);
2257 : else
2258 38641 : base = &dr_info->dr->innermost;
2259 :
2260 728131 : tree offset = base->offset;
2261 :
2262 728131 : if (!dr_info->offset)
2263 : return offset;
2264 :
2265 19828 : offset = fold_convert (sizetype, offset);
2266 19828 : return fold_build2 (PLUS_EXPR, TREE_TYPE (dr_info->offset), offset,
2267 : dr_info->offset);
2268 : }
2269 :
2270 :
2271 : /* Return the vect cost model for LOOP: a loop the user forced to be
     :    vectorized (loop->force_vectorize) uses the simd cost model when
     :    that is set to something other than the default; all other loops
     :    use the generic vector cost model.  */
2272 : inline enum vect_cost_model
2273 : loop_cost_model (loop_p loop)
2274 : {
2275 1826088 : if (loop != NULL
2276 1171213 : && loop->force_vectorize
2277 69952 : && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
2278 : return flag_simd_cost_model;
2279 1756136 : return flag_vect_cost_model;
2280 : }
2281 :
2282 : /* Return true if the vect cost model is unlimited. */
2283 : inline bool
2284 1239677 : unlimited_cost_model (loop_p loop)
2285 : {
2286 1239677 : return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
2287 : }
2288 :
2289 : /* Return true if the loop described by LOOP_VINFO is fully-masked and
2290 : if the first iteration should use a partial mask in order to achieve
2291 : alignment. */
2292 :
2293 : inline bool
2294 217836 : vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
2295 : {
2296 : /* With early break vectorization we don't know whether the accesses will stay
2297 : inside the loop or not. TODO: The early break adjustment code can be
2298 : implemented the same way as vectorizable_linear_induction. However we
2299 : can't test this today so reject it. */
2300 81 : return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2301 81 : && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2302 217840 : && !(LOOP_VINFO_NON_LINEAR_IV (loop_vinfo)
2303 0 : && LOOP_VINFO_EARLY_BREAKS (loop_vinfo)));
2304 : }
2305 :
2306 : /* Return the number of vectors of type VECTYPE that are needed to get
2307 : NUNITS elements. NUNITS should be based on the vectorization factor,
2308 : so it is always a known multiple of the number of elements in VECTYPE. */
2309 :
2310 : inline unsigned int
2311 6683578 : vect_get_num_vectors (poly_uint64 nunits, tree vectype)
2312 : {
2313 6683578 : return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
2314 : }
2315 :
2316 : /* Return the number of vectors in the context of vectorization region VINFO,
2317 : needed for a group of statements and a vector type as specified by NODE. */
2318 :
2319 : inline unsigned int
2320 6682769 : vect_get_num_copies (vec_info *vinfo, slp_tree node)
2321 : {
2322 6682769 : poly_uint64 vf;
2323 :
2324 6682769 : if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
2325 2850804 : vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2326 : else
2327 : vf = 1;
2328 :
2329 6682769 : vf *= SLP_TREE_LANES (node);
2330 6682769 : tree vectype = SLP_TREE_VECTYPE (node);
2331 :
2332 6682769 : return vect_get_num_vectors (vf, vectype);
2333 : }
2334 :
2335 : /* Update maximum unit count *MAX_NUNITS so that it accounts for
2336 : NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */
2337 :
2338 : inline void
2339 8493221 : vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
2340 : {
2341 : /* All unit counts have the form vec_info::vector_size * X for some
2342 : rational X, so two unit sizes must have a common multiple.
2343 : Everything is a multiple of the initial value of 1. */
2344 3684112 : *max_nunits = force_common_multiple (*max_nunits, nunits);
2345 : }
2346 :
2347 : /* Update maximum unit count *MAX_NUNITS so that it accounts for
2348 : the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
2349 : if we haven't yet recorded any vector types. */
2350 :
2351 : inline void
2352 4809109 : vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
2353 : {
2354 4809109 : vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
2355 4809109 : }
2356 :
2357 : /* Return the vectorization factor that should be used for costing
2358 : purposes while vectorizing the loop described by LOOP_VINFO.
2359 : Pick a reasonable estimate if the vectorization factor isn't
2360 : known at compile time. */
2361 :
2362 : inline unsigned int
2363 976737 : vect_vf_for_cost (loop_vec_info loop_vinfo)
2364 : {
2365 976737 : return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
2366 : }
2367 :
2368 : /* Estimate the number of elements in VEC_TYPE for costing purposes.
2369 : Pick a reasonable estimate if the exact number isn't known at
2370 : compile time. */
2371 :
2372 : inline unsigned int
2373 30326 : vect_nunits_for_cost (tree vec_type)
2374 : {
2375 30326 : return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
2376 : }
2377 :
2378 : /* Return the maximum possible vectorization factor for LOOP_VINFO. */
2379 :
2380 : inline unsigned HOST_WIDE_INT
2381 91516 : vect_max_vf (loop_vec_info loop_vinfo)
2382 : {
2383 91516 : unsigned HOST_WIDE_INT vf;
2384 91516 : if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
2385 91516 : return vf;
2386 : return MAX_VECTORIZATION_FACTOR;
2387 : }
2388 :
2389 : /* Return the size of the value accessed by unvectorized data reference
2390 : DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated
2391 : for the associated gimple statement, since that guarantees that DR_INFO
2392 : accesses either a scalar or a scalar equivalent. ("Scalar equivalent"
2393 : here includes things like V1SI, which can be vectorized in the same way
2394 : as a plain SI.) */
2395 :
2396 : inline unsigned int
2397 1787691 : vect_get_scalar_dr_size (dr_vec_info *dr_info)
2398 : {
2399 1787691 : return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
2400 : }
2401 :
2402 : /* Return true if LOOP_VINFO requires a runtime check for whether the
2403 : vector loop is profitable. */
2404 :
2405 : inline bool
2406 67519 : vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
2407 : {
2408 67519 : unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2409 37051 : return (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2410 67519 : && th >= vect_vf_for_cost (loop_vinfo));
2411 : }
2412 :
2413 : /* Return true if CODE is a lane-reducing opcode. */
2414 :
inline bool
lane_reducing_op_p (code_helper code)
{
  /* Lane-reducing operations combine multiple input lanes into fewer
     output lanes: dot-product, widening sum and sum of absolute
     differences.  */
  return code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR;
}
2420 :
2421 : /* Return true if STMT is a lane-reducing statement. */
2422 :
2423 : inline bool
2424 384608 : lane_reducing_stmt_p (gimple *stmt)
2425 : {
2426 384608 : if (auto *assign = dyn_cast <gassign *> (stmt))
2427 278197 : return lane_reducing_op_p (gimple_assign_rhs_code (assign));
2428 : return false;
2429 : }
2430 :
2431 : /* Source location + hotness information. */
2432 : extern dump_user_location_t vect_location;
2433 :
2434 : /* A macro for calling:
2435 : dump_begin_scope (MSG, vect_location);
2436 : via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
2437 : and then calling
2438 : dump_end_scope ();
2439 : once the object goes out of scope, thus capturing the nesting of
2440 : the scopes.
2441 :
2442 : These scopes affect dump messages within them: dump messages at the
2443 : top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
2444 : in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */
2445 :
2446 : #define DUMP_VECT_SCOPE(MSG) \
2447 : AUTO_DUMP_SCOPE (MSG, vect_location)
2448 :
2449 : /* A sentinel class for ensuring that the "vect_location" global gets
2450 : reset at the end of a scope.
2451 :
2452 : The "vect_location" global is used during dumping and contains a
2453 : location_t, which could contain references to a tree block via the
2454 : ad-hoc data. This data is used for tracking inlining information,
2455 : but it's not a GC root; it's simply assumed that such locations never
2456 : get accessed if the blocks are optimized away.
2457 :
2458 : Hence we need to ensure that such locations are purged at the end
2459 : of any operations using them (e.g. via this class). */
2460 :
class auto_purge_vect_location
{
public:
  /* Purges the global "vect_location" on scope exit (see the comment
     above); defined out of line in tree-vectorizer.cc.  */
  ~auto_purge_vect_location ();
};
2466 :
2467 : /*-----------------------------------------------------------------*/
2468 : /* Function prototypes. */
2469 : /*-----------------------------------------------------------------*/
2470 :
2471 : /* Simple loop peeling and versioning utilities for vectorizer's purposes -
2472 : in tree-vect-loop-manip.cc. */
2473 : extern void vect_set_loop_condition (class loop *, edge, loop_vec_info,
2474 : tree, tree, tree, bool);
2475 : extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge,
2476 : const_edge);
2477 : class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *, edge,
2478 : class loop *, edge,
2479 : edge, edge *, bool = true,
2480 : vec<basic_block> * = NULL,
2481 : bool = false, bool = false);
2482 : class loop *vect_loop_versioning (loop_vec_info, gimple *);
2483 : extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
2484 : tree *, tree *, tree *, int, bool, bool,
2485 : tree *);
2486 : extern tree vect_get_main_loop_result (loop_vec_info, tree, tree);
2487 : extern void vect_prepare_for_masked_peels (loop_vec_info);
2488 : extern dump_user_location_t find_loop_location (class loop *);
2489 : extern bool vect_can_advance_ivs_p (loop_vec_info);
2490 : extern void vect_update_inits_of_drs (loop_vec_info, tree, tree_code);
2491 : extern edge vec_init_loop_exit_info (class loop *);
2492 : extern void vect_iv_increment_position (edge, gimple_stmt_iterator *, bool *);
2493 :
2494 : /* In tree-vect-stmts.cc. */
2495 : extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
2496 : poly_uint64 = 0);
2497 : extern tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int = 0);
2498 : extern tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree);
2499 : extern tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int = 0);
2500 : extern tree get_mask_type_for_scalar_type (vec_info *, tree, slp_tree);
2501 : extern tree get_same_sized_vectype (tree, tree);
2502 : extern bool vect_chooses_same_modes_p (vec_info *, machine_mode);
2503 : extern bool vect_chooses_same_modes_p (machine_mode, machine_mode);
2504 : extern bool vect_get_loop_mask_type (loop_vec_info);
2505 : extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
2506 : stmt_vec_info * = NULL, gimple ** = NULL);
2507 : extern bool vect_is_simple_use (vec_info *, slp_tree,
2508 : unsigned, tree *, slp_tree *,
2509 : enum vect_def_type *,
2510 : tree *, stmt_vec_info * = NULL);
2511 : extern bool vect_maybe_update_slp_op_vectype (slp_tree, tree);
2512 : extern tree perm_mask_for_reverse (tree);
2513 : extern bool supportable_widening_operation (code_helper, tree, tree, bool,
2514 : code_helper*, code_helper*,
2515 : int*, vec<tree> *);
2516 : extern bool supportable_narrowing_operation (code_helper, tree, tree,
2517 : code_helper *, int *,
2518 : vec<tree> *);
2519 : extern bool supportable_indirect_convert_operation (code_helper,
2520 : tree, tree,
2521 : vec<std::pair<tree, tree_code> > &,
2522 : tree = NULL_TREE,
2523 : slp_tree = NULL);
2524 : extern int compare_step_with_zero (vec_info *, stmt_vec_info);
2525 :
2526 : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2527 : enum vect_cost_for_stmt, stmt_vec_info,
2528 : tree, int, enum vect_cost_model_location);
2529 : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2530 : enum vect_cost_for_stmt, slp_tree,
2531 : tree, int, enum vect_cost_model_location);
2532 : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2533 : enum vect_cost_for_stmt,
2534 : enum vect_cost_model_location);
2535 : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2536 : enum vect_cost_for_stmt, stmt_vec_info,
2537 : slp_tree, tree, int,
2538 : enum vect_cost_model_location);
2539 :
2540 : /* Overload of record_stmt_cost with VECTYPE derived from STMT_INFO. */
2541 :
inline unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
		  int misalign, enum vect_cost_model_location where)
{
  /* Forward to the main overload, supplying STMT_INFO's vector type.  */
  return record_stmt_cost (body_cost_vec, count, kind, stmt_info,
			   STMT_VINFO_VECTYPE (stmt_info), misalign, where);
}
2550 :
2551 : /* Overload of record_stmt_cost with VECTYPE derived from SLP node. */
2552 :
inline unsigned
record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
		  enum vect_cost_for_stmt kind, slp_tree node,
		  int misalign, enum vect_cost_model_location where)
{
  /* Forward to the main overload, supplying NODE's vector type.  */
  return record_stmt_cost (body_cost_vec, count, kind, node,
			   SLP_TREE_VECTYPE (node), misalign, where);
}
2561 :
2562 : extern void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *);
2563 : extern void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *,
2564 : gimple_stmt_iterator *);
2565 : extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
2566 : extern tree vect_get_store_rhs (stmt_vec_info);
2567 : void vect_get_vec_defs (vec_info *, slp_tree,
2568 : tree, vec<tree> *,
2569 : tree = NULL, vec<tree> * = NULL,
2570 : tree = NULL, vec<tree> * = NULL,
2571 : tree = NULL, vec<tree> * = NULL);
2572 : extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
2573 : gimple_stmt_iterator *);
2574 : extern tree vect_get_slp_vect_def (slp_tree, unsigned);
2575 : extern bool vect_transform_stmt (vec_info *, stmt_vec_info,
2576 : gimple_stmt_iterator *,
2577 : slp_tree, slp_instance);
2578 : extern void vect_remove_stores (vec_info *, stmt_vec_info);
2579 : extern bool vect_nop_conversion_p (stmt_vec_info);
2580 : extern opt_result vect_analyze_stmt (vec_info *, slp_tree,
2581 : slp_instance, stmt_vector_for_cost *);
2582 : extern void vect_get_load_cost (vec_info *, stmt_vec_info, slp_tree, int,
2583 : dr_alignment_support, int, bool,
2584 : unsigned int *, unsigned int *,
2585 : stmt_vector_for_cost *,
2586 : stmt_vector_for_cost *, bool);
2587 : extern void vect_get_store_cost (vec_info *, stmt_vec_info, slp_tree, int,
2588 : dr_alignment_support, int,
2589 : unsigned int *, stmt_vector_for_cost *);
2590 : extern bool vect_supportable_shift (vec_info *, enum tree_code, tree);
2591 : extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
2592 : extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
2593 : extern void optimize_mask_stores (class loop*);
2594 : extern tree vect_gen_while (gimple_seq *, tree, tree, tree,
2595 : const char * = nullptr);
2596 : extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
2597 : extern opt_result vect_get_vector_types_for_stmt (vec_info *,
2598 : stmt_vec_info, tree *,
2599 : tree *, unsigned int = 0);
2600 : extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
2601 :
2602 : /* In tree-if-conv.cc. */
2603 : extern bool ref_within_array_bound (gimple *, tree);
2604 :
2605 : /* In tree-vect-data-refs.cc. */
2606 : extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
2607 : extern enum dr_alignment_support vect_supportable_dr_alignment
2608 : (vec_info *, dr_vec_info *, tree, int,
2609 : bool = false);
2610 : extern tree vect_get_smallest_scalar_type (stmt_vec_info, tree);
2611 : extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
2612 : extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
2613 : extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
2614 : extern void vect_analyze_data_refs_alignment (loop_vec_info);
2615 : extern bool vect_slp_analyze_instance_alignment (vec_info *, slp_instance);
2616 : extern opt_result vect_analyze_data_ref_accesses (vec_info *, vec<int> *);
2617 : extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
2618 : extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree,
2619 : tree, int, int *, internal_fn *, tree *,
2620 : tree *, vec<int> * = nullptr);
2621 : extern bool vect_check_gather_scatter (stmt_vec_info, tree,
2622 : loop_vec_info, gather_scatter_info *,
2623 : vec<int> * = nullptr);
2624 : extern void vect_describe_gather_scatter_call (stmt_vec_info,
2625 : gather_scatter_info *);
2626 : extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
2627 : vec<data_reference_p> *,
2628 : vec<int> *, int);
2629 : extern opt_result vect_analyze_data_refs (vec_info *, bool *);
2630 : extern void vect_record_base_alignments (vec_info *);
2631 : extern tree vect_create_data_ref_ptr (vec_info *,
2632 : stmt_vec_info, tree, class loop *, tree,
2633 : tree *, gimple_stmt_iterator *,
2634 : gimple **, bool,
2635 : tree = NULL_TREE);
2636 : extern tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *,
2637 : stmt_vec_info, tree);
2638 : extern void vect_copy_ref_info (tree, tree);
2639 : extern tree vect_create_destination_var (tree, tree);
2640 : extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
2641 : extern internal_fn vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
2642 : extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
2643 : extern internal_fn vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT,
2644 : bool, vec<int> * = nullptr);
2645 : extern tree vect_setup_realignment (vec_info *,
2646 : stmt_vec_info, tree, gimple_stmt_iterator *,
2647 : tree *, enum dr_alignment_support, tree,
2648 : class loop **);
2649 : extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
2650 : extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
2651 : const char * = NULL);
2652 : extern tree vect_create_addr_base_for_vector_ref (vec_info *,
2653 : stmt_vec_info, gimple_seq *,
2654 : tree);
2655 :
2656 : /* In tree-vect-loop.cc. */
2657 : extern tree neutral_op_for_reduction (tree, code_helper, tree, bool = true);
2658 : extern widest_int vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo);
2659 : bool vect_rgroup_iv_might_wrap_p (loop_vec_info, rgroup_controls *);
 2660              : /* Used in gimple-loop-interchange.cc and tree-parloops.cc.  */
2661 : extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
2662 : enum tree_code);
2663 : extern bool needs_fold_left_reduction_p (tree, code_helper);
2664 : /* Drive for loop analysis stage. */
2665 : extern opt_loop_vec_info vect_analyze_loop (class loop *, gimple *,
2666 : vec_info_shared *);
2667 : extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
2668 : extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
2669 : tree *, bool);
2670 : extern tree vect_halve_mask_nunits (tree, machine_mode);
2671 : extern tree vect_double_mask_nunits (tree, machine_mode);
2672 : extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
2673 : unsigned int, tree, tree);
2674 : extern tree vect_get_loop_mask (loop_vec_info, gimple_stmt_iterator *,
2675 : vec_loop_masks *,
2676 : unsigned int, tree, unsigned int);
2677 : extern void vect_record_loop_len (loop_vec_info, vec_loop_lens *, unsigned int,
2678 : tree, unsigned int);
2679 : extern tree vect_get_loop_len (loop_vec_info, gimple_stmt_iterator *,
2680 : vec_loop_lens *, unsigned int, tree,
2681 : unsigned int, unsigned int, bool);
2682 : extern tree vect_gen_loop_len_mask (loop_vec_info, gimple_stmt_iterator *,
2683 : gimple_stmt_iterator *, vec_loop_lens *,
2684 : unsigned int, tree, tree, unsigned int,
2685 : unsigned int);
2686 : extern gimple_seq vect_gen_len (tree, tree, tree, tree);
2687 : extern vect_reduc_info info_for_reduction (loop_vec_info, slp_tree);
2688 : extern bool reduction_fn_for_scalar_code (code_helper, internal_fn *);
2689 : extern unsigned vect_min_prec_for_max_niters (loop_vec_info, unsigned int);
2690 : /* Drive for loop transformation stage. */
2691 : extern class loop *vect_transform_loop (loop_vec_info, gimple *);
struct vect_loop_form_info
{
  /* The loop's number of iterations, as analyzed from its form.  */
  tree number_of_iterations;
  /* Presumably NUMBER_OF_ITERATIONS minus one (latch count) -- TODO
     confirm against vect_analyze_loop_form.  */
  tree number_of_iterationsm1;
  /* Assumptions under which the iteration-count analysis is valid.  */
  tree assumptions;
  /* The exit conditions of the loop.  */
  auto_vec<gcond *> conds;
  /* The exit condition of the inner loop, if any.  */
  gcond *inner_loop_cond;
  /* The loop exit edge chosen for vectorization.  */
  edge loop_exit;
};
2701 : extern opt_result vect_analyze_loop_form (class loop *, gimple *,
2702 : vect_loop_form_info *);
2703 : extern loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *,
2704 : const vect_loop_form_info *,
2705 : loop_vec_info = nullptr);
2706 : extern bool vectorizable_live_operation (vec_info *, stmt_vec_info,
2707 : slp_tree, slp_instance, int,
2708 : bool, stmt_vector_for_cost *);
2709 : extern bool vectorizable_lane_reducing (loop_vec_info, stmt_vec_info,
2710 : slp_tree, stmt_vector_for_cost *);
2711 : extern bool vectorizable_reduction (loop_vec_info, stmt_vec_info,
2712 : slp_tree, slp_instance,
2713 : stmt_vector_for_cost *);
2714 : extern bool vectorizable_induction (loop_vec_info, stmt_vec_info,
2715 : slp_tree, stmt_vector_for_cost *);
2716 : extern bool vect_transform_reduction (loop_vec_info, stmt_vec_info,
2717 : gimple_stmt_iterator *,
2718 : slp_tree);
2719 : extern bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info,
2720 : slp_tree, slp_instance);
2721 : extern bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info, slp_tree);
2722 : extern bool vect_transform_lc_phi (loop_vec_info, stmt_vec_info, slp_tree);
2723 : extern bool vectorizable_phi (bb_vec_info, stmt_vec_info, slp_tree,
2724 : stmt_vector_for_cost *);
2725 : extern bool vectorizable_recurr (loop_vec_info, stmt_vec_info,
2726 : slp_tree, stmt_vector_for_cost *);
2727 : extern bool vectorizable_early_exit (loop_vec_info, stmt_vec_info,
2728 : gimple_stmt_iterator *,
2729 : slp_tree, stmt_vector_for_cost *);
2730 : extern bool vect_emulated_vector_p (tree);
2731 : extern bool vect_can_vectorize_without_simd_p (tree_code);
2732 : extern bool vect_can_vectorize_without_simd_p (code_helper);
2733 : extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
2734 : stmt_vector_for_cost *,
2735 : stmt_vector_for_cost *,
2736 : stmt_vector_for_cost *);
2737 : extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
2738 :
2739 : /* Nonlinear induction. */
2740 : extern tree vect_peel_nonlinear_iv_init (gimple_seq*, tree, tree,
2741 : tree, enum vect_induction_op_type,
2742 : bool);
2743 :
2744 : /* In tree-vect-slp.cc. */
2745 : extern void vect_slp_init (void);
2746 : extern void vect_slp_fini (void);
2747 : extern void vect_free_slp_instance (slp_instance);
2748 : extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec<tree> &,
2749 : gimple_stmt_iterator *, poly_uint64,
2750 : bool, unsigned *,
2751 : unsigned * = nullptr, bool = false);
2752 : extern bool vectorizable_slp_permutation (vec_info *, gimple_stmt_iterator *,
2753 : slp_tree, stmt_vector_for_cost *);
2754 : extern bool vect_slp_analyze_operations (vec_info *);
2755 : extern void vect_schedule_slp (vec_info *, const vec<slp_instance> &);
2756 : extern opt_result vect_analyze_slp (vec_info *, unsigned, bool);
2757 : extern bool vect_make_slp_decision (loop_vec_info);
2758 : extern bool vect_detect_hybrid_slp (loop_vec_info);
2759 : extern void vect_optimize_slp (vec_info *);
2760 : extern void vect_gather_slp_loads (vec_info *);
2761 : extern tree vect_get_slp_scalar_def (slp_tree, unsigned);
2762 : extern void vect_get_slp_defs (slp_tree, vec<tree> *);
2763 : extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *,
2764 : unsigned n = -1U);
2765 : extern bool vect_slp_if_converted_bb (basic_block bb, loop_p orig_loop);
2766 : extern bool vect_slp_function (function *);
2767 : extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
2768 : extern stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree);
2769 : extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
2770 : extern bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree,
2771 : unsigned int * = NULL,
2772 : tree * = NULL, tree * = NULL);
2773 : extern void duplicate_and_interleave (vec_info *, gimple_seq *, tree,
2774 : const vec<tree> &, unsigned int, vec<tree> &);
2775 : extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
2776 : extern slp_tree vect_create_new_slp_node (unsigned, tree_code);
2777 : extern void vect_free_slp_tree (slp_tree);
2778 : extern bool compatible_calls_p (gcall *, gcall *, bool);
2779 : extern int vect_slp_child_index_for_operand (const gimple *, int op, bool);
2780 :
2781 : extern tree prepare_vec_mask (loop_vec_info, tree, tree, tree,
2782 : gimple_stmt_iterator *);
2783 : extern tree vect_get_mask_load_else (int, tree);
2784 : extern bool vect_load_perm_consecutive_p (slp_tree, unsigned = UINT_MAX);
2785 :
2786 : /* In tree-vect-patterns.cc. */
2787 : extern void
2788 : vect_mark_pattern_stmts (vec_info *, stmt_vec_info, gimple *, tree);
2789 : extern bool vect_get_range_info (tree, wide_int*, wide_int*);
2790 :
2791 : /* Pattern recognition functions.
2792 : Additional pattern recognition functions can (and will) be added
2793 : in the future. */
2794 : void vect_pattern_recog (vec_info *);
2795 :
2796 : /* In tree-vectorizer.cc. */
2797 : unsigned vectorize_loops (void);
2798 : void vect_free_loop_info_assumptions (class loop *);
2799 : gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
2800 : bool vect_stmt_dominates_stmt_p (gimple *, gimple *);
2801 :
2802 : /* SLP Pattern matcher types, tree-vect-slp-patterns.cc. */
2803 :
2804 : /* Forward declaration of possible two operands operation that can be matched
2805 : by the complex numbers pattern matchers. */
2806 : enum _complex_operation : unsigned;
2807 :
2808 : /* All possible load permute values that could result from the partial data-flow
2809 : analysis. */
2810 : typedef enum _complex_perm_kinds {
2811 : PERM_UNKNOWN,
2812 : PERM_EVENODD,
2813 : PERM_ODDEVEN,
2814 : PERM_ODDODD,
2815 : PERM_EVENEVEN,
2816 : /* Can be combined with any other PERM values. */
2817 : PERM_TOP
2818 : } complex_perm_kinds_t;
2819 :
2820 : /* Cache from nodes to the load permutation they represent. */
2821 : typedef hash_map <slp_tree, complex_perm_kinds_t>
2822 : slp_tree_to_load_perm_map_t;
2823 :
2824 : /* Cache from nodes pair to being compatible or not. */
2825 : typedef pair_hash <nofree_ptr_hash <_slp_tree>,
2826 : nofree_ptr_hash <_slp_tree>> slp_node_hash;
2827 : typedef hash_map <slp_node_hash, bool> slp_compat_nodes_map_t;
2828 :
2829 :
2830 : /* Vector pattern matcher base class. All SLP pattern matchers must inherit
2831 : from this type. */
2832 :
class vect_pattern
{
  protected:
    /* The number of arguments that the IFN requires.  */
    unsigned m_num_args;

    /* The internal function that will be used when a pattern is created.  */
    internal_fn m_ifn;

    /* The current node being inspected.  */
    slp_tree *m_node;

    /* The list of operands to be the children for the node produced when the
       internal function is created.  */
    vec<slp_tree> m_ops;

    /* Constructor where NODE is the root of the tree to inspect, IFN is the
       internal function the pattern will emit and M_OPS, if non-NULL, holds
       initial operands spliced into this->m_ops.  NB: the M_OPS parameter
       intentionally shadows the member of the same name.  */
    vect_pattern (slp_tree *node, vec<slp_tree> *m_ops, internal_fn ifn)
    {
      this->m_ifn = ifn;
      this->m_node = node;
      this->m_ops.create (0);
      if (m_ops)
	this->m_ops.safe_splice (*m_ops);
    }

  public:

    /* Create a new instance of the pattern matcher class of the given type.
       Returns NULL if the pattern does not match -- TODO confirm against
       tree-vect-slp-patterns.cc.  */
    static vect_pattern* recognize (slp_tree_to_load_perm_map_t *,
				    slp_compat_nodes_map_t *, slp_tree *);

    /* Build the pattern from the data collected so far.  */
    virtual void build (vec_info *) = 0;

    /* Default destructor; releases the owned operand vector.  */
    virtual ~vect_pattern ()
    {
      this->m_ops.release ();
    }
};
2874 :
2875 : /* Function pointer to create a new pattern matcher from a generic type. */
2876 : typedef vect_pattern* (*vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *,
2877 : slp_compat_nodes_map_t *,
2878 : slp_tree *);
2879 :
2880 : /* List of supported pattern matchers. */
2881 : extern vect_pattern_decl_t slp_patterns[];
2882 :
2883 : /* Number of supported pattern matchers. */
2884 : extern size_t num__slp_patterns;
2885 :
2886 : /* ----------------------------------------------------------------------
2887 : Target support routines
2888 : -----------------------------------------------------------------------
2889 : The following routines are provided to simplify costing decisions in
2890 : target code. Please add more as needed. */
2891 :
 2892              : /* Return true if an operation of kind KIND for STMT_INFO represents
2893 : the extraction of an element from a vector in preparation for
2894 : storing the element to memory. */
2895 : inline bool
2896 : vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
2897 : {
2898 : return (kind == vec_to_scalar
2899 : && STMT_VINFO_DATA_REF (stmt_info)
2900 : && DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)));
2901 : }
2902 :
2903 : /* Return true if STMT_INFO represents part of a reduction. */
inline bool
vect_is_reduction (stmt_vec_info stmt_info)
{
  /* Reduction participants carry a valid (non -1) reduction index.  */
  return STMT_VINFO_REDUC_IDX (stmt_info) != -1;
}
2909 :
2910 : /* Return true if SLP_NODE represents part of a reduction. */
inline bool
vect_is_reduction (slp_tree slp_node)
{
  /* Reduction participants carry a valid (non -1) reduction index.  */
  return SLP_TREE_REDUC_IDX (slp_node) != -1;
}
2916 :
/* If NODE, in the loop described by VINFO, describes a reduction, return
   the vect_reduction_type of the reduction it describes, otherwise
   return -1.  */
inline int
vect_reduc_type (vec_info *vinfo, slp_tree node)
{
  /* Reductions only exist in loop vectorization; for BB vectorization
     fall through to the -1 result.  */
  if (loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo))
    {
      vect_reduc_info reduc_info = info_for_reduction (loop_vinfo, node);
      if (reduc_info)
	return int (VECT_REDUC_INFO_TYPE (reduc_info));
    }
  return -1;
}
2930 :
2931 : /* If STMT_INFO is a COND_EXPR that includes an embedded comparison, return the
2932 : scalar type of the values being compared. Return null otherwise. */
2933 : inline tree
2934 : vect_embedded_comparison_type (stmt_vec_info stmt_info)
2935 : {
2936 : if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
2937 : if (gimple_assign_rhs_code (assign) == COND_EXPR)
2938 : {
2939 : tree cond = gimple_assign_rhs1 (assign);
2940 : if (COMPARISON_CLASS_P (cond))
2941 : return TREE_TYPE (TREE_OPERAND (cond, 0));
2942 : }
2943 : return NULL_TREE;
2944 : }
2945 :
2946 : /* If STMT_INFO is a comparison or contains an embedded comparison, return the
2947 : scalar type of the values being compared. Return null otherwise. */
2948 : inline tree
2949 : vect_comparison_type (stmt_vec_info stmt_info)
2950 : {
2951 : if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
2952 : if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
2953 : return TREE_TYPE (gimple_assign_rhs1 (assign));
2954 : return vect_embedded_comparison_type (stmt_info);
2955 : }
2956 :
2957 : /* Return true if STMT_INFO extends the result of a load. */
2958 : inline bool
2959 : vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
2960 : {
2961 : /* Although this is quite large for an inline function, this part
2962 : at least should be inline. */
2963 : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
2964 : if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
2965 : return false;
2966 :
2967 : tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
2968 : tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
2969 : tree rhs_type = TREE_TYPE (rhs);
2970 : if (!INTEGRAL_TYPE_P (lhs_type)
2971 : || !INTEGRAL_TYPE_P (rhs_type)
2972 : || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
2973 : return false;
2974 :
2975 : stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
2976 : return (def_stmt_info
2977 : && STMT_VINFO_DATA_REF (def_stmt_info)
2978 : && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
2979 : }
2980 :
2981 : /* Return true if STMT_INFO is an integer truncation. */
2982 : inline bool
2983 : vect_is_integer_truncation (stmt_vec_info stmt_info)
2984 : {
2985 : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
2986 : if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
2987 : return false;
2988 :
2989 : tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
2990 : tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
2991 : return (INTEGRAL_TYPE_P (lhs_type)
2992 : && INTEGRAL_TYPE_P (rhs_type)
2993 : && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
2994 : }
2995 :
2996 : /* Build a GIMPLE_ASSIGN or GIMPLE_CALL with the tree_code,
2997 : or internal_fn contained in ch, respectively. */
2998 : gimple * vect_gimple_build (tree, code_helper, tree, tree = NULL_TREE);
2999 : #endif /* GCC_TREE_VECTORIZER_H */
|