Branch data Line data Source code
1 : : /* Vectorizer
2 : : Copyright (C) 2003-2025 Free Software Foundation, Inc.
3 : : Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 : :
5 : : This file is part of GCC.
6 : :
7 : : GCC is free software; you can redistribute it and/or modify it under
8 : : the terms of the GNU General Public License as published by the Free
9 : : Software Foundation; either version 3, or (at your option) any later
10 : : version.
11 : :
12 : : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : : for more details.
16 : :
17 : : You should have received a copy of the GNU General Public License
18 : : along with GCC; see the file COPYING3. If not see
19 : : <http://www.gnu.org/licenses/>. */
20 : :
21 : : #ifndef GCC_TREE_VECTORIZER_H
22 : : #define GCC_TREE_VECTORIZER_H
23 : :
24 : : typedef class _stmt_vec_info *stmt_vec_info;
25 : : typedef struct _slp_tree *slp_tree;
26 : :
27 : : #include "tree-data-ref.h"
28 : : #include "tree-hash-traits.h"
29 : : #include "target.h"
30 : : #include "internal-fn.h"
31 : : #include "tree-ssa-operands.h"
32 : : #include "gimple-match.h"
33 : : #include "dominance.h"
34 : :
35 : : /* Used for naming of new temporaries. */
36 : : enum vect_var_kind {
37 : : vect_simple_var,
38 : : vect_pointer_var,
39 : : vect_scalar_var,
40 : : vect_mask_var
41 : : };
42 : :
43 : : /* Defines type of operation. */
44 : : enum operation_type {
45 : : unary_op = 1,
46 : : binary_op,
47 : : ternary_op
48 : : };
49 : :
50 : : /* Define type of available alignment support. */
51 : : enum dr_alignment_support {
52 : : dr_unaligned_unsupported,
53 : : dr_unaligned_supported,
54 : : dr_explicit_realign,
55 : : dr_explicit_realign_optimized,
56 : : dr_aligned
57 : : };
58 : :
   59              : : /* Define type of peeling support, indicating how peeling for alignment can
   60              : :    help make vectorization possible.  */
61 : : enum peeling_support {
62 : : peeling_known_supported,
63 : : peeling_maybe_supported,
64 : : peeling_unsupported
65 : : };
66 : :
67 : : /* Define type of def-use cross-iteration cycle. */
68 : : enum vect_def_type {
69 : : vect_uninitialized_def = 0,
70 : : vect_constant_def = 1,
71 : : vect_external_def,
72 : : vect_internal_def,
73 : : vect_induction_def,
74 : : vect_reduction_def,
75 : : vect_double_reduction_def,
76 : : vect_nested_cycle,
77 : : vect_first_order_recurrence,
78 : : vect_condition_def,
79 : : vect_unknown_def_type
80 : : };
81 : :
82 : : /* Define operation type of linear/non-linear induction variable. */
83 : : enum vect_induction_op_type {
84 : : vect_step_op_add = 0,
85 : : vect_step_op_neg,
86 : : vect_step_op_mul,
87 : : vect_step_op_shl,
88 : : vect_step_op_shr
89 : : };
90 : :
91 : : /* Define type of reduction. */
92 : : enum vect_reduction_type {
93 : : TREE_CODE_REDUCTION,
94 : : COND_REDUCTION,
95 : : INTEGER_INDUC_COND_REDUCTION,
96 : : CONST_COND_REDUCTION,
97 : :
98 : : /* Retain a scalar phi and use a FOLD_EXTRACT_LAST within the loop
99 : : to implement:
100 : :
101 : : for (int i = 0; i < VF; ++i)
102 : : res = cond[i] ? val[i] : res; */
103 : : EXTRACT_LAST_REDUCTION,
104 : :
105 : : /* Use a folding reduction within the loop to implement:
106 : :
107 : : for (int i = 0; i < VF; ++i)
108 : : res = res OP val[i];
109 : :
  110              : :    (with no reassociation).  */
111 : : FOLD_LEFT_REDUCTION
112 : : };
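                  : :
                  : : /* A minimal standalone sketch (not GCC code) of why FOLD_LEFT_REDUCTION
                  : :    must not reassociate: for floating point, the strictly in-order
                  : :    fold-left sum can differ from a reassociated (tree) reduction.
                  : :    Everything below is illustrative only:
                  : :
                  : :      #include <cstdio>
                  : :
                  : :      int
                  : :      main ()
                  : :      {
                  : :        float val[4] = {1e8f, 1.0f, -1e8f, 1.0f};
                  : :        float fold_left = 0.0f;
                  : :        // In-order: ((((0 + v0) + v1) + v2) + v3).
                  : :        for (int i = 0; i < 4; ++i)
                  : :          fold_left += val[i];
                  : :        // Reassociated, as a tree reduction might compute it.
                  : :        float reassoc = (val[0] + val[2]) + (val[1] + val[3]);
                  : :        printf ("%f vs %f\n", fold_left, reassoc); // 1.000000 vs 2.000000
                  : :        return 0;
                  : :      }
                  : : */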
113 : :
114 : : #define VECTORIZABLE_CYCLE_DEF(D) (((D) == vect_reduction_def) \
115 : : || ((D) == vect_double_reduction_def) \
116 : : || ((D) == vect_nested_cycle))
117 : :
118 : : /* Structure to encapsulate information about a group of like
119 : : instructions to be presented to the target cost model. */
120 : : struct stmt_info_for_cost {
121 : : int count;
122 : : enum vect_cost_for_stmt kind;
123 : : enum vect_cost_model_location where;
124 : : stmt_vec_info stmt_info;
125 : : slp_tree node;
126 : : tree vectype;
127 : : int misalign;
128 : : };
129 : :
130 : : typedef vec<stmt_info_for_cost> stmt_vector_for_cost;
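                  : :
                  : : /* Illustrative only: assuming `stmt_info', `node' and `vectype' are
                  : :    already known, four vector stores in the loop body could be recorded
                  : :    in a cost vector as:
                  : :
                  : :      stmt_info_for_cost entry
                  : :        = { 4, vector_store, vect_body, stmt_info, node, vectype, 0 };
                  : :      cost_vec.safe_push (entry);  */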
131 : :
  132              : : /* Maps base addresses to an innermost_loop_behavior (and the stmt it was
  133              : :    derived from) that gives the maximum known alignment for that base.  */
134 : : typedef hash_map<tree_operand_hash,
135 : : std::pair<stmt_vec_info, innermost_loop_behavior *> >
136 : : vec_base_alignments;
137 : :
138 : : /* Represents elements [START, START + LENGTH) of cyclical array OPS*
139 : : (i.e. OPS repeated to give at least START + LENGTH elements) */
140 : : struct vect_scalar_ops_slice
141 : : {
142 : : tree op (unsigned int i) const;
143 : : bool all_same_p () const;
144 : :
145 : : vec<tree> *ops;
146 : : unsigned int start;
147 : : unsigned int length;
148 : : };
149 : :
150 : : /* Return element I of the slice. */
151 : : inline tree
152 : 2658672 : vect_scalar_ops_slice::op (unsigned int i) const
153 : : {
154 : 5317344 : return (*ops)[(i + start) % ops->length ()];
155 : : }
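                  : :
                  : : /* A standalone illustration (not GCC code) of the cyclic indexing above;
                  : :    OPS is treated as if repeated, so the slice can extend past its end:
                  : :
                  : :      #include <vector>
                  : :      #include <cassert>
                  : :
                  : :      int
                  : :      main ()
                  : :      {
                  : :        std::vector<int> ops = {10, 20, 30};
                  : :        unsigned start = 2;
                  : :        auto op = [&] (unsigned i) { return ops[(i + start) % ops.size ()]; };
                  : :        assert (op (0) == 30);  // element START of OPS
                  : :        assert (op (1) == 10);  // wraps to the front
                  : :        assert (op (3) == 30);  // second time around
                  : :        return 0;
                  : :      }
                  : : */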
156 : :
157 : : /* Hash traits for vect_scalar_ops_slice. */
158 : : struct vect_scalar_ops_slice_hash : typed_noop_remove<vect_scalar_ops_slice>
159 : : {
160 : : typedef vect_scalar_ops_slice value_type;
161 : : typedef vect_scalar_ops_slice compare_type;
162 : :
163 : : static const bool empty_zero_p = true;
164 : :
165 : : static void mark_deleted (value_type &s) { s.length = ~0U; }
166 : 0 : static void mark_empty (value_type &s) { s.length = 0; }
167 : 417874 : static bool is_deleted (const value_type &s) { return s.length == ~0U; }
168 : 3978207 : static bool is_empty (const value_type &s) { return s.length == 0; }
169 : : static hashval_t hash (const value_type &);
170 : : static bool equal (const value_type &, const compare_type &);
171 : : };
172 : :
173 : : /* Describes how we're going to vectorize an individual load or store,
174 : : or a group of loads or stores. */
175 : : enum vect_memory_access_type {
176 : : VMAT_UNINITIALIZED,
177 : :
178 : : /* An access to an invariant address. This is used only for loads. */
179 : : VMAT_INVARIANT,
180 : :
181 : : /* A simple contiguous access. */
182 : : VMAT_CONTIGUOUS,
183 : :
184 : : /* A contiguous access that goes down in memory rather than up,
185 : : with no additional permutation. This is used only for stores
186 : : of invariants. */
187 : : VMAT_CONTIGUOUS_DOWN,
188 : :
189 : : /* A simple contiguous access in which the elements need to be reversed
190 : : after loading or before storing. */
191 : : VMAT_CONTIGUOUS_REVERSE,
192 : :
193 : : /* An access that uses IFN_LOAD_LANES or IFN_STORE_LANES. */
194 : : VMAT_LOAD_STORE_LANES,
195 : :
196 : : /* An access in which each scalar element is loaded or stored
197 : : individually. */
198 : : VMAT_ELEMENTWISE,
199 : :
200 : : /* A hybrid of VMAT_CONTIGUOUS and VMAT_ELEMENTWISE, used for grouped
201 : : SLP accesses. Each unrolled iteration uses a contiguous load
202 : : or store for the whole group, but the groups from separate iterations
203 : : are combined in the same way as for VMAT_ELEMENTWISE. */
204 : : VMAT_STRIDED_SLP,
205 : :
206 : : /* The access uses gather loads or scatter stores. */
207 : : VMAT_GATHER_SCATTER_LEGACY,
208 : : VMAT_GATHER_SCATTER_IFN,
209 : : VMAT_GATHER_SCATTER_EMULATED
210 : : };
211 : :
212 : : /* Returns whether MAT is any of the VMAT_GATHER_SCATTER_* kinds. */
213 : :
214 : : inline bool
215 : 6039993 : mat_gather_scatter_p (vect_memory_access_type mat)
216 : : {
217 : 6039993 : return (mat == VMAT_GATHER_SCATTER_LEGACY
218 : : || mat == VMAT_GATHER_SCATTER_IFN
219 : 6039993 : || mat == VMAT_GATHER_SCATTER_EMULATED);
220 : : }
221 : :
222 : : /*-----------------------------------------------------------------*/
223 : : /* Info on vectorized defs. */
224 : : /*-----------------------------------------------------------------*/
225 : : enum stmt_vec_info_type {
226 : : undef_vec_info_type = 0,
227 : : load_vec_info_type,
228 : : store_vec_info_type,
229 : : shift_vec_info_type,
230 : : op_vec_info_type,
231 : : call_vec_info_type,
232 : : call_simd_clone_vec_info_type,
233 : : assignment_vec_info_type,
234 : : condition_vec_info_type,
235 : : comparison_vec_info_type,
236 : : reduc_vec_info_type,
237 : : induc_vec_info_type,
238 : : type_promotion_vec_info_type,
239 : : type_demotion_vec_info_type,
240 : : type_conversion_vec_info_type,
241 : : cycle_phi_info_type,
242 : : lc_phi_info_type,
243 : : phi_info_type,
244 : : recurr_info_type,
245 : : loop_exit_ctrl_vec_info_type,
246 : : permute_info_type
247 : : };
248 : :
249 : : /************************************************************************
250 : : SLP
251 : : ************************************************************************/
252 : : typedef vec<std::pair<unsigned, unsigned> > lane_permutation_t;
253 : : typedef auto_vec<std::pair<unsigned, unsigned>, 16> auto_lane_permutation_t;
254 : : typedef vec<unsigned> load_permutation_t;
255 : : typedef auto_vec<unsigned, 16> auto_load_permutation_t;
256 : :
257 : 3111633 : struct vect_data {
258 : 1955274 : virtual ~vect_data () = default;
259 : : };
260 : :
261 : : /* Analysis data from vectorizable_simd_clone_call for
262 : : call_simd_clone_vec_info_type. */
263 : : struct vect_simd_clone_data : vect_data {
264 : 1823 : virtual ~vect_simd_clone_data () = default;
265 : 1377 : vect_simd_clone_data () = default;
266 : 446 : vect_simd_clone_data (vect_simd_clone_data &&other) = default;
267 : :
268 : : /* Selected SIMD clone and clone for in-branch. */
269 : : cgraph_node *clone;
270 : : cgraph_node *clone_inbranch;
271 : :
272 : : /* Selected SIMD clone's function info. First vector element
273 : : is NULL_TREE, followed by a pair of trees (base + step)
274 : : for linear arguments (pair of NULLs for other arguments). */
275 : : auto_vec<tree> simd_clone_info;
276 : : };
277 : :
278 : : /* Analysis data from vectorizable_load and vectorizable_store for
279 : : load_vec_info_type and store_vec_info_type. */
280 : : struct vect_load_store_data : vect_data {
281 : 1154536 : vect_load_store_data (vect_load_store_data &&other) = default;
282 : 1955274 : vect_load_store_data () = default;
283 : 3105966 : virtual ~vect_load_store_data () = default;
284 : :
285 : : vect_memory_access_type memory_access_type;
286 : : dr_alignment_support alignment_support_scheme;
287 : : int misalignment;
288 : : internal_fn lanes_ifn; // VMAT_LOAD_STORE_LANES
289 : : poly_int64 poffset;
290 : : union {
291 : : internal_fn ifn; // VMAT_GATHER_SCATTER_IFN
  292              : :     tree decl;	// VMAT_GATHER_SCATTER_LEGACY
293 : : } gs;
294 : : tree strided_offset_vectype; // VMAT_GATHER_SCATTER_IFN, originally strided
295 : : /* Load/store type with larger element mode used for punning the vectype. */
296 : : tree ls_type; // VMAT_GATHER_SCATTER_IFN
297 : : /* This is set to a supported offset vector type if we don't support the
298 : : originally requested offset type, otherwise NULL.
  299              : :      If non-NULL there will be an additional offset conversion before
300 : : the gather/scatter. */
301 : : tree supported_offset_vectype; // VMAT_GATHER_SCATTER_IFN
302 : : /* Similar for scale. Only nonzero if we don't support the requested
303 : : scale. Then we need to multiply the offset vector before the
304 : : gather/scatter. */
305 : : int supported_scale; // VMAT_GATHER_SCATTER_IFN
306 : : auto_vec<int> elsvals;
307 : : /* True if the load requires a load permutation. */
308 : : bool slp_perm; // SLP_TREE_LOAD_PERMUTATION
309 : : unsigned n_perms; // SLP_TREE_LOAD_PERMUTATION
310 : : /* Whether the load permutation is consecutive and simple. */
311 : : bool subchain_p; // VMAT_STRIDED_SLP and VMAT_GATHER_SCATTER
312 : : };
313 : :
314 : : /* A computation tree of an SLP instance. Each node corresponds to a group of
315 : : stmts to be packed in a SIMD stmt. */
316 : : struct _slp_tree {
317 : : _slp_tree ();
318 : : ~_slp_tree ();
319 : :
320 : : void push_vec_def (gimple *def);
321 : 8293 : void push_vec_def (tree def) { vec_defs.quick_push (def); }
322 : :
  323              : :   /* Nodes that contain the def-stmts of this node's statements' operands.  */
324 : : vec<slp_tree> children;
325 : :
326 : : /* A group of scalar stmts to be vectorized together. */
327 : : vec<stmt_vec_info> stmts;
328 : : /* A group of scalar operands to be vectorized together. */
329 : : vec<tree> ops;
330 : : /* The representative that should be used for analysis and
331 : : code generation. */
332 : : stmt_vec_info representative;
333 : :
334 : : struct {
335 : : /* SLP cycle the node resides in, or -1. */
336 : : int id;
337 : : /* The SLP operand index with the edge on the SLP cycle, or -1. */
338 : : int reduc_idx;
339 : : } cycle_info;
340 : :
341 : : /* Load permutation relative to the stores, NULL if there is no
342 : : permutation. */
343 : : load_permutation_t load_permutation;
344 : : /* Lane permutation of the operands scalar lanes encoded as pairs
345 : : of { operand number, lane number }. The number of elements
346 : : denotes the number of output lanes. */
347 : : lane_permutation_t lane_permutation;
348 : :
349 : : tree vectype;
350 : : /* Vectorized defs. */
351 : : vec<tree> vec_defs;
352 : :
353 : : /* Reference count in the SLP graph. */
354 : : unsigned int refcnt;
355 : : /* The maximum number of vector elements for the subtree rooted
356 : : at this node. */
357 : : poly_uint64 max_nunits;
358 : : /* The DEF type of this node. */
359 : : enum vect_def_type def_type;
360 : : /* The number of scalar lanes produced by this node. */
361 : : unsigned int lanes;
362 : : /* The operation of this node. */
363 : : enum tree_code code;
364 : : /* For gather/scatter memory operations the scale each offset element
365 : : should be multiplied by before being added to the base. */
366 : : int gs_scale;
367 : : /* For gather/scatter memory operations the loop-invariant base value. */
368 : : tree gs_base;
369 : : /* Whether uses of this load or feeders of this store are suitable
370 : : for load/store-lanes. */
371 : : bool ldst_lanes;
372 : : /* For BB vect, flag to indicate this load node should be vectorized
  373              : :      so as to avoid STLF (store-to-load forwarding) failures caused by related stores.  */
374 : : bool avoid_stlf_fail;
375 : :
376 : : int vertex;
377 : :
378 : : /* The kind of operation as determined by analysis and optional
379 : : kind specific data. */
380 : : enum stmt_vec_info_type type;
381 : : vect_data *data;
382 : :
383 : : template <class T>
384 : 1956651 : T& get_data (T& else_) { return data ? *static_cast <T *> (data) : else_; }
385 : :
386 : : /* If not NULL this is a cached failed SLP discovery attempt with
387 : : the lanes that failed during SLP discovery as 'false'. This is
388 : : a copy of the matches array. */
389 : : bool *failed;
390 : :
391 : : /* Allocate from slp_tree_pool. */
392 : : static void *operator new (size_t);
393 : :
394 : : /* Return memory to slp_tree_pool. */
395 : : static void operator delete (void *, size_t);
396 : :
397 : : /* Linked list of nodes to release when we free the slp_tree_pool. */
398 : : slp_tree next_node;
399 : : slp_tree prev_node;
400 : : };
401 : :
402 : : /* The enum describes the type of operations that an SLP instance
403 : : can perform. */
404 : :
405 : : enum slp_instance_kind {
406 : : slp_inst_kind_store,
407 : : slp_inst_kind_reduc_group,
408 : : slp_inst_kind_reduc_chain,
409 : : slp_inst_kind_bb_reduc,
410 : : slp_inst_kind_ctor,
411 : : slp_inst_kind_gcond
412 : : };
413 : :
  414              : : /* An SLP instance is a sequence of stmts in a loop that can be packed into
415 : : SIMD stmts. */
416 : : typedef class _slp_instance {
417 : : public:
418 : : /* The root of SLP tree. */
419 : : slp_tree root;
420 : :
421 : : /* For vector constructors, the constructor stmt that the SLP tree is built
422 : : from, NULL otherwise. */
423 : : vec<stmt_vec_info> root_stmts;
424 : :
425 : : /* For slp_inst_kind_bb_reduc the defs that were not vectorized, NULL
426 : : otherwise. */
427 : : vec<tree> remain_defs;
428 : :
429 : : /* The group of nodes that contain loads of this SLP instance. */
430 : : vec<slp_tree> loads;
431 : :
432 : : /* The SLP node containing the reduction PHIs. */
433 : : slp_tree reduc_phis;
434 : :
435 : : /* Vector cost of this entry to the SLP graph. */
436 : : stmt_vector_for_cost cost_vec;
437 : :
438 : : /* If this instance is the main entry of a subgraph the set of
439 : : entries into the same subgraph, including itself. */
440 : : vec<_slp_instance *> subgraph_entries;
441 : :
442 : : /* The type of operation the SLP instance is performing. */
443 : : slp_instance_kind kind;
444 : :
445 : : dump_user_location_t location () const;
446 : : } *slp_instance;
447 : :
448 : :
449 : : /* Access Functions. */
450 : : #define SLP_INSTANCE_TREE(S) (S)->root
451 : : #define SLP_INSTANCE_LOADS(S) (S)->loads
452 : : #define SLP_INSTANCE_ROOT_STMTS(S) (S)->root_stmts
453 : : #define SLP_INSTANCE_REMAIN_DEFS(S) (S)->remain_defs
454 : : #define SLP_INSTANCE_KIND(S) (S)->kind
455 : :
456 : : #define SLP_TREE_CHILDREN(S) (S)->children
457 : : #define SLP_TREE_SCALAR_STMTS(S) (S)->stmts
458 : : #define SLP_TREE_SCALAR_OPS(S) (S)->ops
459 : : #define SLP_TREE_REF_COUNT(S) (S)->refcnt
460 : : #define SLP_TREE_VEC_DEFS(S) (S)->vec_defs
461 : : #define SLP_TREE_LOAD_PERMUTATION(S) (S)->load_permutation
462 : : #define SLP_TREE_LANE_PERMUTATION(S) (S)->lane_permutation
463 : : #define SLP_TREE_DEF_TYPE(S) (S)->def_type
464 : : #define SLP_TREE_VECTYPE(S) (S)->vectype
465 : : #define SLP_TREE_REPRESENTATIVE(S) (S)->representative
466 : : #define SLP_TREE_LANES(S) (S)->lanes
467 : : #define SLP_TREE_CODE(S) (S)->code
468 : : #define SLP_TREE_TYPE(S) (S)->type
469 : : #define SLP_TREE_GS_SCALE(S) (S)->gs_scale
470 : : #define SLP_TREE_GS_BASE(S) (S)->gs_base
471 : : #define SLP_TREE_REDUC_IDX(S) (S)->cycle_info.reduc_idx
472 : : #define SLP_TREE_PERMUTE_P(S) ((S)->code == VEC_PERM_EXPR)
473 : :
474 : : inline vect_memory_access_type
475 : 1268359 : SLP_TREE_MEMORY_ACCESS_TYPE (slp_tree node)
476 : : {
477 : 495446 : if (SLP_TREE_TYPE (node) == load_vec_info_type
478 : 433480 : || SLP_TREE_TYPE (node) == store_vec_info_type)
479 : 250060 : return static_cast<vect_load_store_data *> (node->data)->memory_access_type;
480 : : return VMAT_UNINITIALIZED;
481 : : }
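                  : :
                  : : /* Illustrative only: kind-specific analysis data hangs off NODE->data and
                  : :    is retrieved by downcasting as above, or via get_data with a default.
                  : :    A sketch, assuming `node' is a load or store node:
                  : :
                  : :      vect_load_store_data fallback;
                  : :      vect_load_store_data &ls = node->get_data (fallback);
                  : :      if (mat_gather_scatter_p (ls.memory_access_type))
                  : :        ... use ls.gs.ifn or ls.gs.decl ...  */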
482 : :
483 : : enum vect_partial_vector_style {
484 : : vect_partial_vectors_none,
485 : : vect_partial_vectors_while_ult,
486 : : vect_partial_vectors_avx512,
487 : : vect_partial_vectors_len
488 : : };
489 : :
490 : : /* Key for map that records association between
491 : : scalar conditions and corresponding loop mask, and
492 : : is populated by vect_record_loop_mask. */
493 : :
494 : : struct scalar_cond_masked_key
495 : : {
496 : 55741 : scalar_cond_masked_key (tree t, unsigned ncopies_)
497 : 55741 : : ncopies (ncopies_)
498 : : {
499 : 55741 : get_cond_ops_from_tree (t);
500 : : }
501 : :
502 : : void get_cond_ops_from_tree (tree);
503 : :
504 : : unsigned ncopies;
505 : : bool inverted_p;
506 : : tree_code code;
507 : : tree op0;
508 : : tree op1;
509 : : };
510 : :
511 : : template<>
512 : : struct default_hash_traits<scalar_cond_masked_key>
513 : : {
514 : : typedef scalar_cond_masked_key compare_type;
515 : : typedef scalar_cond_masked_key value_type;
516 : :
517 : : static inline hashval_t
518 : 64037 : hash (value_type v)
519 : : {
520 : 64037 : inchash::hash h;
521 : 64037 : h.add_int (v.code);
522 : 64037 : inchash::add_expr (v.op0, h, 0);
523 : 64037 : inchash::add_expr (v.op1, h, 0);
524 : 64037 : h.add_int (v.ncopies);
525 : 64037 : h.add_flag (v.inverted_p);
526 : 64037 : return h.end ();
527 : : }
528 : :
529 : : static inline bool
530 : 9708 : equal (value_type existing, value_type candidate)
531 : : {
532 : 9708 : return (existing.ncopies == candidate.ncopies
533 : 9539 : && existing.code == candidate.code
534 : 5933 : && existing.inverted_p == candidate.inverted_p
535 : 4426 : && operand_equal_p (existing.op0, candidate.op0, 0)
536 : 12434 : && operand_equal_p (existing.op1, candidate.op1, 0));
537 : : }
538 : :
539 : : static const bool empty_zero_p = true;
540 : :
541 : : static inline void
542 : 0 : mark_empty (value_type &v)
543 : : {
544 : 0 : v.ncopies = 0;
545 : 0 : v.inverted_p = false;
546 : : }
547 : :
548 : : static inline bool
549 : 6900872 : is_empty (value_type v)
550 : : {
551 : 6845894 : return v.ncopies == 0;
552 : : }
553 : :
554 : : static inline void mark_deleted (value_type &) {}
555 : :
556 : : static inline bool is_deleted (const value_type &)
557 : : {
558 : : return false;
559 : : }
560 : :
561 : 48020 : static inline void remove (value_type &) {}
562 : : };
563 : :
564 : : typedef hash_set<scalar_cond_masked_key> scalar_cond_masked_set_type;
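                  : :
                  : : /* Illustrative only: a sketch of how the set avoids recomputing a loop
                  : :    mask for a scalar condition already seen with the same number of
                  : :    copies.  `loop_vinfo', `cond' and `ncopies' are assumed to exist:
                  : :
                  : :      scalar_cond_masked_key key (cond, ncopies);
                  : :      if (!loop_vinfo->scalar_cond_masked_set.contains (key))
                  : :        loop_vinfo->scalar_cond_masked_set.add (key);  */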
565 : :
566 : : /* Key and map that records association between vector conditions and
567 : : corresponding loop mask, and is populated by prepare_vec_mask. */
568 : :
569 : : typedef pair_hash<tree_operand_hash, tree_operand_hash> tree_cond_mask_hash;
570 : : typedef hash_set<tree_cond_mask_hash> vec_cond_masked_set_type;
571 : :
572 : : /* Describes two objects whose addresses must be unequal for the vectorized
573 : : loop to be valid. */
574 : : typedef std::pair<tree, tree> vec_object_pair;
575 : :
576 : : /* Records that vectorization is only possible if abs (EXPR) >= MIN_VALUE.
577 : : UNSIGNED_P is true if we can assume that abs (EXPR) == EXPR. */
578 : : class vec_lower_bound {
579 : : public:
580 : : vec_lower_bound () {}
581 : 1420 : vec_lower_bound (tree e, bool u, poly_uint64 m)
582 : 1420 : : expr (e), unsigned_p (u), min_value (m) {}
583 : :
584 : : tree expr;
585 : : bool unsigned_p;
586 : : poly_uint64 min_value;
587 : : };
588 : :
  589              : : /* Vectorizer state shared between different analyses of the same CFG
  590              : :    region, for example analyses using different vector sizes.  */
591 : : class vec_info_shared {
592 : : public:
593 : : vec_info_shared();
594 : : ~vec_info_shared();
595 : :
596 : : void save_datarefs();
597 : : void check_datarefs();
598 : :
599 : : /* All data references. Freed by free_data_refs, so not an auto_vec. */
600 : : vec<data_reference_p> datarefs;
601 : : vec<data_reference> datarefs_copy;
602 : :
603 : : /* The loop nest in which the data dependences are computed. */
604 : : auto_vec<loop_p> loop_nest;
605 : :
606 : : /* All data dependences. Freed by free_dependence_relations, so not
607 : : an auto_vec. */
608 : : vec<ddr_p> ddrs;
609 : : };
610 : :
611 : : /* Vectorizer state common between loop and basic-block vectorization. */
612 : : class vec_info {
613 : : public:
614 : : typedef hash_set<int_hash<machine_mode, E_VOIDmode, E_BLKmode> > mode_set;
615 : : enum vec_kind { bb, loop };
616 : :
617 : : vec_info (vec_kind, vec_info_shared *);
618 : : ~vec_info ();
619 : :
620 : : stmt_vec_info add_stmt (gimple *);
621 : : stmt_vec_info add_pattern_stmt (gimple *, stmt_vec_info);
622 : : stmt_vec_info resync_stmt_addr (gimple *);
623 : : stmt_vec_info lookup_stmt (gimple *);
624 : : stmt_vec_info lookup_def (tree);
625 : : stmt_vec_info lookup_single_use (tree);
626 : : class dr_vec_info *lookup_dr (data_reference *);
627 : : void move_dr (stmt_vec_info, stmt_vec_info);
628 : : void remove_stmt (stmt_vec_info);
629 : : void replace_stmt (gimple_stmt_iterator *, stmt_vec_info, gimple *);
630 : : void insert_on_entry (stmt_vec_info, gimple *);
631 : : void insert_seq_on_entry (stmt_vec_info, gimple_seq);
632 : :
633 : : /* The type of vectorization. */
634 : : vec_kind kind;
635 : :
636 : : /* Shared vectorizer state. */
637 : : vec_info_shared *shared;
638 : :
639 : : /* The mapping of GIMPLE UID to stmt_vec_info. */
640 : : vec<stmt_vec_info> stmt_vec_infos;
641 : : /* Whether the above mapping is complete. */
642 : : bool stmt_vec_info_ro;
643 : :
  644              : :   /* Whether we've done a transform for which we think it is OK not to
  645              : :      update virtual SSA form.  */
646 : : bool any_known_not_updated_vssa;
647 : :
648 : : /* The SLP graph. */
649 : : auto_vec<slp_instance> slp_instances;
650 : :
651 : : /* Maps base addresses to an innermost_loop_behavior that gives the maximum
652 : : known alignment for that base. */
653 : : vec_base_alignments base_alignments;
654 : :
655 : : /* All interleaving chains of stores, represented by the first
656 : : stmt in the chain. */
657 : : auto_vec<stmt_vec_info> grouped_stores;
658 : :
659 : : /* The set of vector modes used in the vectorized region. */
660 : : mode_set used_vector_modes;
661 : :
662 : : /* The argument we should pass to related_vector_mode when looking up
663 : : the vector mode for a scalar mode, or VOIDmode if we haven't yet
664 : : made any decisions about which vector modes to use. */
665 : : machine_mode vector_mode;
666 : :
667 : : /* The basic blocks in the vectorization region. For _loop_vec_info,
668 : : the memory is internally managed, while for _bb_vec_info, it points
669 : : to element space of an external auto_vec<>. This inconsistency is
  670              : :      not a good class design pattern.  TODO: improve it with a unified
671 : : auto_vec<> whose lifetime is confined to vec_info object. */
672 : : basic_block *bbs;
673 : :
674 : : /* The count of the basic blocks in the vectorization region. */
675 : : unsigned int nbbs;
676 : :
677 : : /* Used to keep a sequence of def stmts of a pattern stmt that are loop
  678              : :      invariant, if they exist.
  679              : :      The sequence is emitted in the loop preheader should the loop be
  680              : :      vectorized and is reset when undoing patterns.  */
681 : : gimple_seq inv_pattern_def_seq;
682 : :
683 : : private:
684 : : stmt_vec_info new_stmt_vec_info (gimple *stmt);
685 : : void set_vinfo_for_stmt (gimple *, stmt_vec_info, bool = true);
686 : : void free_stmt_vec_infos ();
687 : : void free_stmt_vec_info (stmt_vec_info);
688 : : };
689 : :
690 : : class _loop_vec_info;
691 : : class _bb_vec_info;
692 : :
693 : : template<>
694 : : template<>
695 : : inline bool
696 : 358921326 : is_a_helper <_loop_vec_info *>::test (vec_info *i)
697 : : {
698 : 358278290 : return i->kind == vec_info::loop;
699 : : }
700 : :
701 : : template<>
702 : : template<>
703 : : inline bool
704 : 65785943 : is_a_helper <_bb_vec_info *>::test (vec_info *i)
705 : : {
706 : 65785943 : return i->kind == vec_info::bb;
707 : : }
708 : :
709 : : /* In general, we can divide the vector statements in a vectorized loop
710 : : into related groups ("rgroups") and say that for each rgroup there is
711 : : some nS such that the rgroup operates on nS values from one scalar
712 : : iteration followed by nS values from the next. That is, if VF is the
713 : : vectorization factor of the loop, the rgroup operates on a sequence:
714 : :
715 : : (1,1) (1,2) ... (1,nS) (2,1) ... (2,nS) ... (VF,1) ... (VF,nS)
716 : :
717 : : where (i,j) represents a scalar value with index j in a scalar
718 : : iteration with index i.
719 : :
720 : : [ We use the term "rgroup" to emphasise that this grouping isn't
721 : : necessarily the same as the grouping of statements used elsewhere.
722 : : For example, if we implement a group of scalar loads using gather
723 : : loads, we'll use a separate gather load for each scalar load, and
724 : : thus each gather load will belong to its own rgroup. ]
725 : :
726 : : In general this sequence will occupy nV vectors concatenated
727 : : together. If these vectors have nL lanes each, the total number
728 : : of scalar values N is given by:
729 : :
730 : : N = nS * VF = nV * nL
731 : :
732 : : None of nS, VF, nV and nL are required to be a power of 2. nS and nV
733 : : are compile-time constants but VF and nL can be variable (if the target
734 : : supports variable-length vectors).
735 : :
736 : : In classical vectorization, each iteration of the vector loop would
737 : : handle exactly VF iterations of the original scalar loop. However,
738 : : in vector loops that are able to operate on partial vectors, a
739 : : particular iteration of the vector loop might handle fewer than VF
740 : : iterations of the scalar loop. The vector lanes that correspond to
741 : : iterations of the scalar loop are said to be "active" and the other
742 : : lanes are said to be "inactive".
743 : :
744 : : In such vector loops, many rgroups need to be controlled to ensure
745 : : that they have no effect for the inactive lanes. Conceptually, each
746 : : such rgroup needs a sequence of booleans in the same order as above,
747 : : but with each (i,j) replaced by a boolean that indicates whether
748 : : iteration i is active. This sequence occupies nV vector controls
749 : : that again have nL lanes each. Thus the control sequence as a whole
750 : : consists of VF independent booleans that are each repeated nS times.
751 : :
  752              : :    Take the mask-based approach as an example of partially-populated vectors.
753 : : We make the simplifying assumption that if a sequence of nV masks is
754 : : suitable for one (nS,nL) pair, we can reuse it for (nS/2,nL/2) by
755 : : VIEW_CONVERTing it. This holds for all current targets that support
756 : : fully-masked loops. For example, suppose the scalar loop is:
757 : :
758 : : float *f;
759 : : double *d;
760 : : for (int i = 0; i < n; ++i)
761 : : {
762 : : f[i * 2 + 0] += 1.0f;
763 : : f[i * 2 + 1] += 2.0f;
764 : : d[i] += 3.0;
765 : : }
766 : :
767 : : and suppose that vectors have 256 bits. The vectorized f accesses
768 : : will belong to one rgroup and the vectorized d access to another:
769 : :
770 : : f rgroup: nS = 2, nV = 1, nL = 8
771 : : d rgroup: nS = 1, nV = 1, nL = 4
772 : : VF = 4
773 : :
774 : : [ In this simple example the rgroups do correspond to the normal
775 : : SLP grouping scheme. ]
776 : :
777 : : If only the first three lanes are active, the masks we need are:
778 : :
779 : : f rgroup: 1 1 | 1 1 | 1 1 | 0 0
780 : : d rgroup: 1 | 1 | 1 | 0
781 : :
782 : : Here we can use a mask calculated for f's rgroup for d's, but not
783 : : vice versa.
784 : :
785 : : Thus for each value of nV, it is enough to provide nV masks, with the
786 : : mask being calculated based on the highest nL (or, equivalently, based
787 : : on the highest nS) required by any rgroup with that nV. We therefore
788 : : represent the entire collection of masks as a two-level table, with the
789 : : first level being indexed by nV - 1 (since nV == 0 doesn't exist) and
790 : : the second being indexed by the mask index 0 <= i < nV. */
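                  : :
                  : : /* Checking N = nS * VF = nV * nL on the example above (illustrative
                  : :    arithmetic only):
                  : :
                  : :      f rgroup: nS = 2, VF = 4 -> N = 8; nV = 1, nL = 8 -> N = 8
                  : :      d rgroup: nS = 1, VF = 4 -> N = 4; nV = 1, nL = 4 -> N = 4
                  : :
                  : :    and with only the first three lanes active, the f masks contain
                  : :    2 * 3 = 6 leading ones and the d masks 1 * 3 = 3, matching the
                  : :    masks shown above.  */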
791 : :
792 : : /* The controls (like masks or lengths) needed by rgroups with nV vectors,
793 : : according to the description above. */
794 : : struct rgroup_controls {
795 : : /* The largest nS for all rgroups that use these controls.
796 : : For vect_partial_vectors_avx512 this is the constant nscalars_per_iter
797 : : for all members of the group. */
798 : : unsigned int max_nscalars_per_iter;
799 : :
800 : : /* For the largest nS recorded above, the loop controls divide each scalar
801 : : into FACTOR equal-sized pieces. This is useful if we need to split
802 : : element-based accesses into byte-based accesses.
803 : : For vect_partial_vectors_avx512 this records nV instead. */
804 : : unsigned int factor;
805 : :
806 : : /* This is a vector type with MAX_NSCALARS_PER_ITER * VF / nV elements.
807 : : For mask-based controls, it is the type of the masks in CONTROLS.
808 : : For length-based controls, it can be any vector type that has the
809 : : specified number of elements; the type of the elements doesn't matter. */
810 : : tree type;
811 : :
812 : : /* When there is no uniformly used LOOP_VINFO_RGROUP_COMPARE_TYPE this
813 : : is the rgroup specific type used. */
814 : : tree compare_type;
815 : :
816 : : /* A vector of nV controls, in iteration order. */
817 : : vec<tree> controls;
818 : :
819 : : /* In case of len_load and len_store with a bias there is only one
  820              : :      rgroup.  This holds the adjusted loop length for this rgroup.  */
821 : : tree bias_adjusted_ctrl;
822 : : };
823 : :
824 : 417326 : struct vec_loop_masks
825 : : {
826 : 413568 : bool is_empty () const { return mask_set.is_empty (); }
827 : :
828 : : /* Set to record vectype, nvector pairs. */
829 : : hash_set<pair_hash <nofree_ptr_hash <tree_node>,
830 : : int_hash<unsigned, 0>>> mask_set;
831 : :
832 : : /* rgroup_controls used for the partial vector scheme. */
833 : : auto_vec<rgroup_controls> rgc_vec;
834 : : };
835 : :
836 : : typedef auto_vec<rgroup_controls> vec_loop_lens;
837 : :
838 : : typedef auto_vec<std::pair<data_reference*, tree> > drs_init_vec;
839 : :
840 : : /* Abstraction around info on reductions which is still in stmt_vec_info
841 : : but will be duplicated or moved elsewhere. */
842 : 139572 : class vect_reduc_info_s
843 : : {
844 : : public:
845 : : /* The def type of the main reduction PHI, vect_reduction_def or
846 : : vect_double_reduction_def. */
847 : : enum vect_def_type def_type;
848 : :
849 : : /* The reduction type as detected by
850 : : vect_is_simple_reduction and vectorizable_reduction. */
851 : : enum vect_reduction_type reduc_type;
852 : :
853 : : /* The original scalar reduction code, to be used in the epilogue. */
854 : : code_helper reduc_code;
855 : :
856 : : /* A vector internal function we should use in the epilogue. */
857 : : internal_fn reduc_fn;
858 : :
859 : : /* For loop reduction with multiple vectorized results (ncopies > 1), a
860 : : lane-reducing operation participating in it may not use all of those
  861              : :      results; this field specifies the result index starting from which any
  862              : :      following lane-reducing operation would be assigned.  */
863 : : unsigned int reduc_result_pos;
864 : :
865 : : /* Whether this represents a reduction chain. */
866 : : bool is_reduc_chain;
867 : :
868 : : /* Whether we force a single cycle PHI during reduction vectorization. */
869 : : bool force_single_cycle;
870 : :
871 : : /* The vector type for performing the actual reduction operation. */
872 : : tree reduc_vectype;
873 : :
874 : : /* The vector type we should use for the final reduction in the epilogue
875 : : when we reduce a mask. */
876 : : tree reduc_vectype_for_mask;
877 : :
878 : : /* For INTEGER_INDUC_COND_REDUCTION, the initial value to be used. */
879 : : tree induc_cond_initial_val;
880 : :
881 : : /* If not NULL the value to be added to compute final reduction value. */
882 : : tree reduc_epilogue_adjustment;
883 : :
884 : : /* If non-null, the reduction is being performed by an epilogue loop
885 : : and we have decided to reuse this accumulator from the main loop. */
886 : : struct vect_reusable_accumulator *reused_accumulator;
887 : :
888 : : /* If the vector code is performing N scalar reductions in parallel,
889 : : this variable gives the initial scalar values of those N reductions. */
890 : : auto_vec<tree> reduc_initial_values;
891 : :
892 : : /* If the vector code is performing N scalar reductions in parallel, this
893 : : variable gives the vectorized code's final (scalar) result for each of
894 : : those N reductions. In other words, REDUC_SCALAR_RESULTS[I] replaces
895 : : the original scalar code's loop-closed SSA PHI for reduction number I. */
896 : : auto_vec<tree> reduc_scalar_results;
897 : : };
898 : :
899 : : typedef class vect_reduc_info_s *vect_reduc_info;
900 : :
901 : : #define VECT_REDUC_INFO_DEF_TYPE(I) ((I)->def_type)
902 : : #define VECT_REDUC_INFO_TYPE(I) ((I)->reduc_type)
903 : : #define VECT_REDUC_INFO_CODE(I) ((I)->reduc_code)
904 : : #define VECT_REDUC_INFO_FN(I) ((I)->reduc_fn)
905 : : #define VECT_REDUC_INFO_SCALAR_RESULTS(I) ((I)->reduc_scalar_results)
906 : : #define VECT_REDUC_INFO_INITIAL_VALUES(I) ((I)->reduc_initial_values)
907 : : #define VECT_REDUC_INFO_REUSED_ACCUMULATOR(I) ((I)->reused_accumulator)
908 : : #define VECT_REDUC_INFO_INDUC_COND_INITIAL_VAL(I) ((I)->induc_cond_initial_val)
909 : : #define VECT_REDUC_INFO_EPILOGUE_ADJUSTMENT(I) ((I)->reduc_epilogue_adjustment)
910 : : #define VECT_REDUC_INFO_VECTYPE(I) ((I)->reduc_vectype)
911 : : #define VECT_REDUC_INFO_VECTYPE_FOR_MASK(I) ((I)->reduc_vectype_for_mask)
912 : : #define VECT_REDUC_INFO_FORCE_SINGLE_CYCLE(I) ((I)->force_single_cycle)
913 : : #define VECT_REDUC_INFO_RESULT_POS(I) ((I)->reduc_result_pos)
914 : :
915 : : /* Information about a reduction accumulator from the main loop that could
916 : : conceivably be reused as the input to a reduction in an epilogue loop. */
917 : : struct vect_reusable_accumulator {
918 : : /* The final value of the accumulator, which forms the input to the
919 : : reduction operation. */
920 : : tree reduc_input;
921 : :
  922              : :   /* The vect_reduc_info that describes the reduction that generated
  923              : :      this accumulator.  */
924 : : vect_reduc_info reduc_info;
925 : : };
926 : :
927 : : /*-----------------------------------------------------------------*/
928 : : /* Info on vectorized loops. */
929 : : /*-----------------------------------------------------------------*/
930 : : typedef class _loop_vec_info : public vec_info {
931 : : public:
932 : : _loop_vec_info (class loop *, vec_info_shared *);
933 : : ~_loop_vec_info ();
934 : :
  935              : :   /* The loop to which this info struct refers.  */
936 : : class loop *loop;
937 : :
938 : : /* Number of latch executions. */
939 : : tree num_itersm1;
940 : : /* Number of iterations. */
941 : : tree num_iters;
942 : : /* Number of iterations of the original loop. */
943 : : tree num_iters_unchanged;
944 : : /* Condition under which this loop is analyzed and versioned. */
945 : : tree num_iters_assumptions;
946 : :
947 : : /* The cost of the vector code. */
948 : : class vector_costs *vector_costs;
949 : :
950 : : /* The cost of the scalar code. */
951 : : class vector_costs *scalar_costs;
952 : :
953 : : /* Threshold of number of iterations below which vectorization will not be
954 : : performed. It is calculated from MIN_PROFITABLE_ITERS and
955 : : param_min_vect_loop_bound. */
956 : : unsigned int th;
957 : :
958 : : /* When applying loop versioning, the vector form should only be used
959 : : if the number of scalar iterations is >= this value, on top of all
960 : : the other requirements. Ignored when loop versioning is not being
961 : : used. */
962 : : poly_uint64 versioning_threshold;
963 : :
964 : : /* Unrolling factor. In case of suitable super-word parallelism
965 : : it can be that no unrolling is needed, and thus this is 1. */
966 : : poly_uint64 vectorization_factor;
967 : :
968 : : /* If this loop is an epilogue loop whose main loop can be skipped,
969 : : MAIN_LOOP_EDGE is the edge from the main loop to this loop's
970 : : preheader. SKIP_MAIN_LOOP_EDGE is then the edge that skips the
971 : : main loop and goes straight to this loop's preheader.
972 : :
973 : : Both fields are null otherwise. */
974 : : edge main_loop_edge;
975 : : edge skip_main_loop_edge;
976 : :
977 : : /* If this loop is an epilogue loop that might be skipped after executing
978 : : the main loop, this edge is the one that skips the epilogue. */
979 : : edge skip_this_loop_edge;
980 : :
  981              : :   /* Reduction descriptors of this loop.  Referenced from SLP nodes
982 : : by index. */
983 : : auto_vec<vect_reduc_info> reduc_infos;
984 : :
985 : : /* The vectorized form of a standard reduction replaces the original
986 : : scalar code's final result (a loop-closed SSA PHI) with the result
987 : : of a vector-to-scalar reduction operation. After vectorization,
988 : : this variable maps these vector-to-scalar results to information
989 : : about the reductions that generated them. */
990 : : hash_map<tree, vect_reusable_accumulator> reusable_accumulators;
991 : :
992 : : /* The number of times that the target suggested we unroll the vector loop
993 : : in order to promote more ILP. This value will be used to re-analyze the
994 : : loop for vectorization and if successful the value will be folded into
995 : : vectorization_factor (and therefore exactly divides
996 : : vectorization_factor). */
997 : : unsigned int suggested_unroll_factor;
998 : :
999 : : /* Maximum runtime vectorization factor, or MAX_VECTORIZATION_FACTOR
1000 : : if there is no particular limit. */
1001 : : unsigned HOST_WIDE_INT max_vectorization_factor;
1002 : :
1003 : : /* The masks that a fully-masked loop should use to avoid operating
1004 : : on inactive scalars. */
1005 : : vec_loop_masks masks;
1006 : :
1007 : : /* The lengths that a loop with length should use to avoid operating
1008 : : on inactive scalars. */
1009 : : vec_loop_lens lens;
1010 : :
1011 : : /* Set of scalar conditions that have loop mask applied. */
1012 : : scalar_cond_masked_set_type scalar_cond_masked_set;
1013 : :
1014 : : /* Set of vector conditions that have loop mask applied. */
1015 : : vec_cond_masked_set_type vec_cond_masked_set;
1016 : :
1017 : : /* If we are using a loop mask to align memory addresses, this variable
1018 : : contains the number of vector elements that we should skip in the
1019 : : first iteration of the vector loop (i.e. the number of leading
1020 : : elements that should be false in the first mask). */
1021 : : tree mask_skip_niters;
1022 : :
1023 : : /* If we are using a loop mask to align memory addresses and we're in an
1024 : : early break loop then this variable contains the number of elements that
1025 : : were skipped during the initial iteration of the loop. */
1026 : : tree mask_skip_niters_pfa_offset;
1027 : :
1028 : : /* The type that the loop control IV should be converted to before
1029 : : testing which of the VF scalars are active and inactive.
1030 : : Only meaningful if LOOP_VINFO_USING_PARTIAL_VECTORS_P. */
1031 : : tree rgroup_compare_type;
1032 : :
1033 : : /* For #pragma omp simd if (x) loops the x expression. If constant 0,
1034 : : the loop should not be vectorized, if constant non-zero, simd_if_cond
1035 : : shouldn't be set and loop vectorized normally, if SSA_NAME, the loop
1036 : : should be versioned on that condition, using scalar loop if the condition
1037 : : is false and vectorized loop otherwise. */
1038 : : tree simd_if_cond;
1039 : :
1040 : : /* The type that the vector loop control IV should have when
1041 : : LOOP_VINFO_USING_PARTIAL_VECTORS_P is true. */
1042 : : tree rgroup_iv_type;
1043 : :
1044 : : /* The style used for implementing partial vectors when
1045 : : LOOP_VINFO_USING_PARTIAL_VECTORS_P is true. */
1046 : : vect_partial_vector_style partial_vector_style;
1047 : :
 1048              : :   /* The DR with unknown alignment according to which the loop was peeled.  */
1049 : : class dr_vec_info *unaligned_dr;
1050 : :
1051 : : /* peeling_for_alignment indicates whether peeling for alignment will take
1052 : : place, and what the peeling factor should be:
1053 : : peeling_for_alignment = X means:
1054 : : If X=0: Peeling for alignment will not be applied.
1055 : : If X>0: Peel first X iterations.
1056 : : If X=-1: Generate a runtime test to calculate the number of iterations
1057 : : to be peeled, using the dataref recorded in the field
1058 : : unaligned_dr. */
1059 : : int peeling_for_alignment;
1060 : :
1061 : : /* The mask used to check the alignment of pointers or arrays. */
1062 : : poly_uint64 ptr_mask;
1063 : :
1064 : : /* The maximum speculative read amount in VLA modes for runtime check. */
 1065              : :   /* The maximum speculative read amount in VLA modes for the runtime check.  */
1066 : :
1067 : : /* Indicates whether the loop has any non-linear IV. */
1068 : : bool nonlinear_iv;
1069 : :
1070 : : /* Data Dependence Relations defining address ranges that are candidates
1071 : : for a run-time aliasing check. */
1072 : : auto_vec<ddr_p> may_alias_ddrs;
1073 : :
1074 : : /* Data Dependence Relations defining address ranges together with segment
1075 : : lengths from which the run-time aliasing check is built. */
1076 : : auto_vec<dr_with_seg_len_pair_t> comp_alias_ddrs;
1077 : :
1078 : : /* Check that the addresses of each pair of objects is unequal. */
1079 : : auto_vec<vec_object_pair> check_unequal_addrs;
1080 : :
1081 : : /* List of values that are required to be nonzero. This is used to check
1082 : : whether things like "x[i * n] += 1;" are safe and eventually gets added
1083 : : to the checks for lower bounds below. */
1084 : : auto_vec<tree> check_nonzero;
1085 : :
1086 : : /* List of values that need to be checked for a minimum value. */
1087 : : auto_vec<vec_lower_bound> lower_bounds;
1088 : :
1089 : : /* Statements in the loop that have data references that are candidates for a
1090 : : runtime (loop versioning) misalignment check. */
1091 : : auto_vec<stmt_vec_info> may_misalign_stmts;
1092 : :
1093 : : /* Reduction cycles detected in the loop. Used in loop-aware SLP. */
1094 : : auto_vec<stmt_vec_info> reductions;
1095 : :
1096 : : /* Defs that could not be analyzed such as OMP SIMD calls without
 1097              : :      an LHS.  */
1098 : : auto_vec<stmt_vec_info> alternate_defs;
1099 : :
1100 : : /* Cost vector for a single scalar iteration. */
1101 : : auto_vec<stmt_info_for_cost> scalar_cost_vec;
1102 : :
1103 : : /* Map of IV base/step expressions to inserted name in the preheader. */
1104 : : hash_map<tree_operand_hash, tree> *ivexpr_map;
1105 : :
1106 : : /* Map of OpenMP "omp simd array" scan variables to corresponding
1107 : : rhs of the store of the initializer. */
1108 : : hash_map<tree, tree> *scan_map;
1109 : :
 1110              : :   /* The factor used to over-weight those statements in an inner loop
1111 : : relative to the loop being vectorized. */
1112 : : unsigned int inner_loop_cost_factor;
1113 : :
1114 : : /* Is the loop vectorizable? */
1115 : : bool vectorizable;
1116 : :
1117 : : /* Records whether we still have the option of vectorizing this loop
1118 : : using partially-populated vectors; in other words, whether it is
1119 : : still possible for one iteration of the vector loop to handle
1120 : : fewer than VF scalars. */
1121 : : bool can_use_partial_vectors_p;
1122 : :
1123 : : /* Records whether we must use niter masking for correctness reasons. */
1124 : : bool must_use_partial_vectors_p;
1125 : :
1126 : : /* True if we've decided to use partially-populated vectors, so that
1127 : : the vector loop can handle fewer than VF scalars. */
1128 : : bool using_partial_vectors_p;
1129 : :
1130 : : /* True if we've decided to use a decrementing loop control IV that counts
1131 : : scalars. This can be done for any loop that:
1132 : :
1133 : : (a) uses length "controls"; and
1134 : : (b) can iterate more than once. */
1135 : : bool using_decrementing_iv_p;
1136 : :
1137 : : /* True if we've decided to use output of select_vl to adjust IV of
1138 : : both loop control and data reference pointer. This is only true
1139 : : for single-rgroup control. */
1140 : : bool using_select_vl_p;
1141 : :
 1142              : :   /* True if we've decided to use peeling and versioning together, which allows
1143 : : unaligned unsupported data refs to be uniformly aligned after a certain
1144 : : amount of peeling (mutual alignment). Otherwise, we use versioning alone
1145 : : so these data refs must be already aligned to a power-of-two boundary
1146 : : without peeling. */
1147 : : bool allow_mutual_alignment;
1148 : :
1149 : : /* The bias for len_load and len_store. For now, only 0 and -1 are
1150 : : supported. -1 must be used when a backend does not support
1151 : : len_load/len_store with a length of zero. */
1152 : : signed char partial_load_store_bias;
1153 : :
1154 : : /* When we have grouped data accesses with gaps, we may introduce invalid
1155 : : memory accesses. We peel the last iteration of the loop to prevent
1156 : : this. */
1157 : : bool peeling_for_gaps;
1158 : :
1159 : : /* When the number of iterations is not a multiple of the vector size
1160 : : we need to peel off iterations at the end to form an epilogue loop. */
1161 : : bool peeling_for_niter;
1162 : :
1163 : : /* When the loop has early breaks that we can vectorize we need to peel
1164 : : the loop for the break finding loop. */
1165 : : bool early_breaks;
1166 : :
1167 : : /* List of loop additional IV conditionals found in the loop. */
1168 : : auto_vec<gcond *> conds;
1169 : :
1170 : : /* Main loop IV cond. */
1171 : : gcond* loop_iv_cond;
1172 : :
1173 : : /* True if we have an unroll factor requested by the user through pragma GCC
1174 : : unroll. */
1175 : : bool user_unroll;
1176 : :
1177 : : /* True if there are no loop carried data dependencies in the loop.
1178 : : If loop->safelen <= 1, then this is always true, either the loop
1179 : : didn't have any loop carried data dependencies, or the loop is being
1180 : : vectorized guarded with some runtime alias checks, or couldn't
1181 : : be vectorized at all, but then this field shouldn't be used.
1182 : : For loop->safelen >= 2, the user has asserted that there are no
1183 : : backward dependencies, but there still could be loop carried forward
1184 : : dependencies in such loops. This flag will be false if normal
1185 : : vectorizer data dependency analysis would fail or require versioning
1186 : : for alias, but because of loop->safelen >= 2 it has been vectorized
1187 : : even without versioning for alias. E.g. in:
1188 : : #pragma omp simd
1189 : : for (int i = 0; i < m; i++)
1190 : : a[i] = a[i + k] * c;
1191 : : (or #pragma simd or #pragma ivdep) we can vectorize this and it will
1192 : : DTRT even for k > 0 && k < m, but without safelen we would not
1193 : : vectorize this, so this field would be false. */
1194 : : bool no_data_dependencies;
1195 : :
1196 : : /* Mark loops having masked stores. */
1197 : : bool has_mask_store;
1198 : :
1199 : : /* Queued scaling factor for the scalar loop. */
1200 : : profile_probability scalar_loop_scaling;
1201 : :
1202 : : /* If if-conversion versioned this loop before conversion, this is the
1203 : : loop version without if-conversion. */
1204 : : class loop *scalar_loop;
1205 : :
1206 : : /* For loops being epilogues of already vectorized loops
1207 : : this points to the main vectorized loop. Otherwise NULL. */
1208 : : _loop_vec_info *main_loop_info;
1209 : :
1210 : : /* For loops being epilogues of already vectorized loops
 1211              : :      this points to the preceding vectorized (possibly epilogue) loop.
1212 : : Otherwise NULL. */
1213 : : _loop_vec_info *orig_loop_info;
1214 : :
1215 : : /* Used to store loop_vec_infos of the epilogue of this loop during
1216 : : analysis. */
1217 : : _loop_vec_info *epilogue_vinfo;
1218 : :
1219 : : /* If this is an epilogue loop the DR advancement applied. */
1220 : : tree drs_advanced_by;
1221 : :
1222 : : /* The controlling loop IV for the current loop when vectorizing. This IV
1223 : : controls the natural exits of the loop. */
1224 : : edge vec_loop_iv_exit;
1225 : :
1226 : : /* The controlling loop IV for the epilogue loop when vectorizing. This IV
1227 : : controls the natural exits of the loop. */
1228 : : edge vec_epilogue_loop_iv_exit;
1229 : :
1230 : : /* The controlling loop IV for the scalar loop being vectorized. This IV
1231 : : controls the natural exits of the loop. */
1232 : : edge scalar_loop_iv_exit;
1233 : :
1234 : : /* Used to store the list of stores needing to be moved if doing early
1235 : : break vectorization as they would violate the scalar loop semantics if
 1236              : :      vectorized in their current location.  These are stored in the order in
 1237              : :      which they need to be moved.  */
1238 : : auto_vec<gimple *> early_break_stores;
1239 : :
 1240              : :   /* The final basic block to move statements to.  In the case of
1241 : : multiple exits this could be pretty far away. */
1242 : : basic_block early_break_dest_bb;
1243 : :
1244 : : /* Statements whose VUSES need updating if early break vectorization is to
1245 : : happen. */
1246 : : auto_vec<gimple*> early_break_vuses;
1247 : :
1248 : : /* The IV adjustment value for inductions that needs to be materialized
 1249              : :      inside the relevant exit blocks in order to adjust for early break.  */
1250 : : tree early_break_niters_var;
1251 : :
 1252              : :   /* Records statements that need to be live for early break vectorization
1253 : : but may not have an LC PHI node materialized yet in the exits. */
1254 : : auto_vec<stmt_vec_info> early_break_live_ivs;
1255 : : } *loop_vec_info;
1256 : :
1257 : : /* Access Functions. */
1258 : : #define LOOP_VINFO_LOOP(L) (L)->loop
1259 : : #define LOOP_VINFO_IV_EXIT(L) (L)->vec_loop_iv_exit
1260 : : #define LOOP_VINFO_EPILOGUE_IV_EXIT(L) (L)->vec_epilogue_loop_iv_exit
1261 : : #define LOOP_VINFO_SCALAR_IV_EXIT(L) (L)->scalar_loop_iv_exit
1262 : : #define LOOP_VINFO_BBS(L) (L)->bbs
1263 : : #define LOOP_VINFO_NBBS(L) (L)->nbbs
1264 : : #define LOOP_VINFO_NITERSM1(L) (L)->num_itersm1
1265 : : #define LOOP_VINFO_NITERS(L) (L)->num_iters
1266 : : /* Since LOOP_VINFO_NITERS and LOOP_VINFO_NITERSM1 can change after
1267 : : prologue peeling retain total unchanged scalar loop iterations for
1268 : : cost model. */
1269 : : #define LOOP_VINFO_NITERS_UNCHANGED(L) (L)->num_iters_unchanged
1270 : : #define LOOP_VINFO_NITERS_ASSUMPTIONS(L) (L)->num_iters_assumptions
1271 : : #define LOOP_VINFO_COST_MODEL_THRESHOLD(L) (L)->th
1272 : : #define LOOP_VINFO_VERSIONING_THRESHOLD(L) (L)->versioning_threshold
1273 : : #define LOOP_VINFO_VECTORIZABLE_P(L) (L)->vectorizable
1274 : : #define LOOP_VINFO_CAN_USE_PARTIAL_VECTORS_P(L) (L)->can_use_partial_vectors_p
1275 : : #define LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P(L) (L)->must_use_partial_vectors_p
1276 : : #define LOOP_VINFO_USING_PARTIAL_VECTORS_P(L) (L)->using_partial_vectors_p
1277 : : #define LOOP_VINFO_USING_DECREMENTING_IV_P(L) (L)->using_decrementing_iv_p
1278 : : #define LOOP_VINFO_USING_SELECT_VL_P(L) (L)->using_select_vl_p
1279 : : #define LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT(L) (L)->allow_mutual_alignment
1280 : : #define LOOP_VINFO_PARTIAL_LOAD_STORE_BIAS(L) (L)->partial_load_store_bias
1281 : : #define LOOP_VINFO_VECT_FACTOR(L) (L)->vectorization_factor
1282 : : #define LOOP_VINFO_MAX_VECT_FACTOR(L) (L)->max_vectorization_factor
1283 : : #define LOOP_VINFO_MASKS(L) (L)->masks
1284 : : #define LOOP_VINFO_LENS(L) (L)->lens
1285 : : #define LOOP_VINFO_MASK_SKIP_NITERS(L) (L)->mask_skip_niters
1286 : : #define LOOP_VINFO_MASK_NITERS_PFA_OFFSET(L) (L)->mask_skip_niters_pfa_offset
1287 : : #define LOOP_VINFO_RGROUP_COMPARE_TYPE(L) (L)->rgroup_compare_type
1288 : : #define LOOP_VINFO_RGROUP_IV_TYPE(L) (L)->rgroup_iv_type
1289 : : #define LOOP_VINFO_PARTIAL_VECTORS_STYLE(L) (L)->partial_vector_style
1290 : : #define LOOP_VINFO_PTR_MASK(L) (L)->ptr_mask
1291 : : #define LOOP_VINFO_MAX_SPEC_READ_AMOUNT(L) (L)->max_spec_read_amount
1292 : : #define LOOP_VINFO_LOOP_NEST(L) (L)->shared->loop_nest
1293 : : #define LOOP_VINFO_DATAREFS(L) (L)->shared->datarefs
1294 : : #define LOOP_VINFO_DDRS(L) (L)->shared->ddrs
1295 : : #define LOOP_VINFO_INT_NITERS(L) (TREE_INT_CST_LOW ((L)->num_iters))
1296 : : #define LOOP_VINFO_PEELING_FOR_ALIGNMENT(L) (L)->peeling_for_alignment
1297 : : #define LOOP_VINFO_NON_LINEAR_IV(L) (L)->nonlinear_iv
1298 : : #define LOOP_VINFO_UNALIGNED_DR(L) (L)->unaligned_dr
1299 : : #define LOOP_VINFO_MAY_MISALIGN_STMTS(L) (L)->may_misalign_stmts
1300 : : #define LOOP_VINFO_MAY_ALIAS_DDRS(L) (L)->may_alias_ddrs
1301 : : #define LOOP_VINFO_COMP_ALIAS_DDRS(L) (L)->comp_alias_ddrs
1302 : : #define LOOP_VINFO_CHECK_UNEQUAL_ADDRS(L) (L)->check_unequal_addrs
1303 : : #define LOOP_VINFO_CHECK_NONZERO(L) (L)->check_nonzero
1304 : : #define LOOP_VINFO_LOWER_BOUNDS(L) (L)->lower_bounds
1305 : : #define LOOP_VINFO_USER_UNROLL(L) (L)->user_unroll
1306 : : #define LOOP_VINFO_GROUPED_STORES(L) (L)->grouped_stores
1307 : : #define LOOP_VINFO_SLP_INSTANCES(L) (L)->slp_instances
1308 : : #define LOOP_VINFO_REDUCTIONS(L) (L)->reductions
1309 : : #define LOOP_VINFO_PEELING_FOR_GAPS(L) (L)->peeling_for_gaps
1310 : : #define LOOP_VINFO_PEELING_FOR_NITER(L) (L)->peeling_for_niter
1311 : : #define LOOP_VINFO_EARLY_BREAKS(L) (L)->early_breaks
1312 : : #define LOOP_VINFO_EARLY_BRK_STORES(L) (L)->early_break_stores
1313 : : #define LOOP_VINFO_EARLY_BREAKS_VECT_PEELED(L) \
1314 : : (single_pred ((L)->loop->latch) != (L)->vec_loop_iv_exit->src)
1315 : : #define LOOP_VINFO_EARLY_BREAKS_LIVE_IVS(L) \
1316 : : (L)->early_break_live_ivs
1317 : : #define LOOP_VINFO_EARLY_BRK_DEST_BB(L) (L)->early_break_dest_bb
1318 : : #define LOOP_VINFO_EARLY_BRK_VUSES(L) (L)->early_break_vuses
1319 : : #define LOOP_VINFO_EARLY_BRK_NITERS_VAR(L) (L)->early_break_niters_var
1320 : : #define LOOP_VINFO_LOOP_CONDS(L) (L)->conds
1321 : : #define LOOP_VINFO_LOOP_IV_COND(L) (L)->loop_iv_cond
1322 : : #define LOOP_VINFO_NO_DATA_DEPENDENCIES(L) (L)->no_data_dependencies
1323 : : #define LOOP_VINFO_SCALAR_LOOP(L) (L)->scalar_loop
1324 : : #define LOOP_VINFO_SCALAR_LOOP_SCALING(L) (L)->scalar_loop_scaling
1325 : : #define LOOP_VINFO_HAS_MASK_STORE(L) (L)->has_mask_store
1326 : : #define LOOP_VINFO_SCALAR_ITERATION_COST(L) (L)->scalar_cost_vec
1327 : : #define LOOP_VINFO_MAIN_LOOP_INFO(L) (L)->main_loop_info
1328 : : #define LOOP_VINFO_ORIG_LOOP_INFO(L) (L)->orig_loop_info
1329 : : #define LOOP_VINFO_SIMD_IF_COND(L) (L)->simd_if_cond
1330 : : #define LOOP_VINFO_INNER_LOOP_COST_FACTOR(L) (L)->inner_loop_cost_factor
1331 : : #define LOOP_VINFO_INV_PATTERN_DEF_SEQ(L) (L)->inv_pattern_def_seq
1332 : : #define LOOP_VINFO_DRS_ADVANCED_BY(L) (L)->drs_advanced_by
1333 : : #define LOOP_VINFO_ALTERNATE_DEFS(L) (L)->alternate_defs
1334 : :
1335 : : #define LOOP_VINFO_FULLY_MASKED_P(L) \
1336 : : (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
1337 : : && !LOOP_VINFO_MASKS (L).is_empty ())
1338 : :
1339 : : #define LOOP_VINFO_FULLY_WITH_LENGTH_P(L) \
1340 : : (LOOP_VINFO_USING_PARTIAL_VECTORS_P (L) \
1341 : : && !LOOP_VINFO_LENS (L).is_empty ())
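 : : /* A rough sketch of how the transform phase distinguishes the two
 : :    partial-vector styles above (illustrative only):
 : :
 : :      if (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo))
 : :        ... use the rgroup masks in LOOP_VINFO_MASKS ...
 : :      else if (LOOP_VINFO_FULLY_WITH_LENGTH_P (loop_vinfo))
 : :        ... use the rgroup lengths in LOOP_VINFO_LENS ...  */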
1342 : :
1343 : : #define LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT(L) \
1344 : : ((L)->may_misalign_stmts.length () > 0)
1345 : : #define LOOP_REQUIRES_VERSIONING_FOR_SPEC_READ(L) \
1346 : : (maybe_gt ((L)->max_spec_read_amount, 0U))
1347 : : #define LOOP_REQUIRES_VERSIONING_FOR_ALIAS(L) \
1348 : : ((L)->comp_alias_ddrs.length () > 0 \
1349 : : || (L)->check_unequal_addrs.length () > 0 \
1350 : : || (L)->lower_bounds.length () > 0)
1351 : : #define LOOP_REQUIRES_VERSIONING_FOR_NITERS(L) \
1352 : : (LOOP_VINFO_NITERS_ASSUMPTIONS (L))
1353 : : #define LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND(L) \
1354 : : (LOOP_VINFO_SIMD_IF_COND (L))
1355 : : #define LOOP_REQUIRES_VERSIONING(L) \
1356 : : (LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (L) \
1357 : : || LOOP_REQUIRES_VERSIONING_FOR_SPEC_READ (L) \
1358 : : || LOOP_REQUIRES_VERSIONING_FOR_ALIAS (L) \
1359 : : || LOOP_REQUIRES_VERSIONING_FOR_NITERS (L) \
1360 : : || LOOP_REQUIRES_VERSIONING_FOR_SIMD_IF_COND (L))
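 : :
 : : /* A sketch of how the predicate above gates versioning (illustrative;
 : :    vect_loop_versioning is declared later in this file, and its gimple *
 : :    argument is the IFN_LOOP_VECTORIZED call, if any):
 : :
 : :      if (LOOP_REQUIRES_VERSIONING (loop_vinfo))
 : :        vect_loop_versioning (loop_vinfo, loop_vectorized_call);  */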
1361 : :
1362 : : #define LOOP_VINFO_USE_VERSIONING_WITHOUT_PEELING(L) \
1363 : : ((L)->may_misalign_stmts.length () > 0 \
1364 : : && !LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (L))
1365 : :
1366 : : #define LOOP_VINFO_NITERS_KNOWN_P(L) \
1367 : : (tree_fits_shwi_p ((L)->num_iters) && tree_to_shwi ((L)->num_iters) > 0)
1368 : :
1369 : : #define LOOP_VINFO_EPILOGUE_P(L) \
1370 : : (LOOP_VINFO_ORIG_LOOP_INFO (L) != NULL)
1371 : :
1372 : : #define LOOP_VINFO_ORIG_MAX_VECT_FACTOR(L) \
1373 : : (LOOP_VINFO_MAX_VECT_FACTOR (LOOP_VINFO_ORIG_LOOP_INFO (L)))
1374 : :
1375 : : /* Wrapper for loop_vec_info, for tracking success/failure, where a non-NULL
1376 : :    value signifies success and a NULL value signifies failure, and which
1377 : :    allows an opt_problem * describing the failure to be propagated back up
1378 : :    the call stack. */
1379 : : typedef opt_pointer_wrapper <loop_vec_info> opt_loop_vec_info;
1380 : :
1381 : : inline loop_vec_info
1382 : 548758 : loop_vec_info_for_loop (class loop *loop)
1383 : : {
1384 : 548758 : return (loop_vec_info) loop->aux;
1385 : : }
1386 : :
1387 : : struct slp_root
1388 : : {
1389 : 1227461 : slp_root (slp_instance_kind kind_, vec<stmt_vec_info> stmts_,
1390 : 12021 : vec<stmt_vec_info> roots_, vec<tree> remain_ = vNULL)
1391 : 1227461 : : kind(kind_), stmts(stmts_), roots(roots_), remain(remain_) {}
1392 : : slp_instance_kind kind;
1393 : : vec<stmt_vec_info> stmts;
1394 : : vec<stmt_vec_info> roots;
1395 : : vec<tree> remain;
1396 : : };
1397 : :
1398 : : typedef class _bb_vec_info : public vec_info
1399 : : {
1400 : : public:
1401 : : _bb_vec_info (vec<basic_block> bbs, vec_info_shared *);
1402 : : ~_bb_vec_info ();
1403 : :
1404 : : vec<slp_root> roots;
1405 : : } *bb_vec_info;
1406 : :
1407 : : #define BB_VINFO_BBS(B) (B)->bbs
1408 : : #define BB_VINFO_NBBS(B) (B)->nbbs
1409 : : #define BB_VINFO_GROUPED_STORES(B) (B)->grouped_stores
1410 : : #define BB_VINFO_SLP_INSTANCES(B) (B)->slp_instances
1411 : : #define BB_VINFO_DATAREFS(B) (B)->shared->datarefs
1412 : : #define BB_VINFO_DDRS(B) (B)->shared->ddrs
1413 : :
1414 : : /* Indicates whether/how a variable is used in the scope of the loop or
1415 : :    basic block. */
1416 : : enum vect_relevant {
1417 : : vect_unused_in_scope = 0,
1418 : :
1419 : : /* The def is only used outside the loop. */
1420 : : vect_used_only_live,
1421 : : /* The def is in the inner loop, and the use is in the outer loop, and the
1422 : : use is a reduction stmt. */
1423 : : vect_used_in_outer_by_reduction,
1424 : : /* The def is in the inner loop, and the use is in the outer loop (and is
1425 : : not part of reduction). */
1426 : : vect_used_in_outer,
1427 : :
1428 : : /* Defs that feed computations that end up (only) in a reduction. These
1429 : :    defs may be used by non-reduction stmts, but eventually any
1430 : :    computations/values that are affected by these defs are used to compute
1431 : :    a reduction (i.e. they don't get stored to memory, for example). We use
1432 : :    this to identify computations whose evaluation order we are free to
1433 : :    change. */
1434 : : vect_used_by_reduction,
1435 : :
1436 : : vect_used_in_scope
1437 : : };
1438 : :
1439 : : /* The type of vectorization. pure_slp means the stmt is covered by the
1440 : : SLP graph, not_vect means it is not. This is mostly used by BB
1441 : : vectorization. */
1442 : : enum slp_vect_type {
1443 : : not_vect = 0,
1444 : : pure_slp,
1445 : : };
1446 : :
1447 : : /* Says whether a statement is a load, a store of a vectorized statement
1448 : : result, or a store of an invariant value. */
1449 : : enum vec_load_store_type {
1450 : : VLS_LOAD,
1451 : : VLS_STORE,
1452 : : VLS_STORE_INVARIANT
1453 : : };
1454 : :
1455 : : class dr_vec_info {
1456 : : public:
1457 : : /* The data reference itself. */
1458 : : data_reference *dr;
1459 : : /* The statement that contains the data reference. */
1460 : : stmt_vec_info stmt;
1461 : : /* The analysis group this DR belongs to when doing BB vectorization.
1462 : : DRs of the same group belong to the same conditional execution context. */
1463 : : unsigned group;
1464 : : /* The misalignment in bytes of the reference, or -1 if not known. */
1465 : : int misalignment;
1466 : : /* The byte alignment that we'd ideally like the reference to have,
1467 : : and the value that misalignment is measured against. */
1468 : : poly_uint64 target_alignment;
1469 : : /* If true the alignment of base_decl needs to be increased. */
1470 : : bool base_misaligned;
1471 : :
1472 : : /* Set by early break vectorization when this DR needs peeling for alignment
1473 : : for correctness. */
1474 : : bool safe_speculative_read_required;
1475 : :
1476 : : /* Set by early break vectorization when this DR's scalar accesses are known
1477 : : to be inbounds of a known bounds loop. */
1478 : : bool scalar_access_known_in_bounds;
1479 : :
1480 : : tree base_decl;
1481 : :
1482 : : /* Stores current vectorized loop's offset. To be added to the DR's
1483 : : offset to calculate current offset of data reference. */
1484 : : tree offset;
1485 : : };
1486 : :
1487 : : typedef struct data_reference *dr_p;
1488 : :
1489 : : class _stmt_vec_info {
1490 : : public:
1491 : :
1492 : : /* Indicates whether this stmt is part of a computation whose result is
1493 : : used outside the loop. */
1494 : : bool live;
1495 : :
1496 : : /* Stmt is part of some pattern (computation idiom). */
1497 : : bool in_pattern_p;
1498 : :
1499 : : /* True if the statement was created during pattern recognition as
1500 : : part of the replacement for RELATED_STMT. This implies that the
1501 : : statement isn't part of any basic block, although for convenience
1502 : : its gimple_bb is the same as for RELATED_STMT. */
1503 : : bool pattern_stmt_p;
1504 : :
1505 : : /* Is this statement vectorizable or should it be skipped in (partial)
1506 : :    vectorization? */
1507 : : bool vectorizable;
1508 : :
1509 : : /* The stmt to which this info struct refers. */
1510 : : gimple *stmt;
1511 : :
1512 : : /* The vector type to be used for the LHS of this statement. */
1513 : : tree vectype;
1514 : :
1515 : : /* The following is relevant only for stmts that contain a non-scalar
1516 : : data-ref (array/pointer/struct access). A GIMPLE stmt is expected to have
1517 : : at most one such data-ref. */
1518 : :
1519 : : dr_vec_info dr_aux;
1520 : :
1521 : : /* Information about the data-ref relative to this loop
1522 : : nest (the loop that is being considered for vectorization). */
1523 : : innermost_loop_behavior dr_wrt_vec_loop;
1524 : :
1525 : : /* For loop PHI nodes, the base and evolution part of the induction. This
1526 : :    makes sure the information is still available in
1527 : :    vect_update_ivs_after_vectorizer, where we may not be able to re-analyze
1528 : :    the PHI node's evolution because peeling for the prologue loop can make
1529 : :    it unanalyzable. The evolution part is still correct after peeling, but
1530 : :    the base may have changed from the version here. */
1531 : : tree loop_phi_evolution_base_unchanged;
1532 : : tree loop_phi_evolution_part;
1533 : : enum vect_induction_op_type loop_phi_evolution_type;
1534 : :
1535 : : /* Used for various bookkeeping purposes, generally holding a pointer to
1536 : : some other stmt S that is in some way "related" to this stmt.
1537 : : Current use of this field is:
1538 : : If this stmt is part of a pattern (i.e. the field 'in_pattern_p' is
1539 : : true): S is the "pattern stmt" that represents (and replaces) the
1540 : : sequence of stmts that constitutes the pattern. Similarly, the
1541 : : related_stmt of the "pattern stmt" points back to this stmt (which is
1542 : : the last stmt in the original sequence of stmts that constitutes the
1543 : : pattern). */
1544 : : stmt_vec_info related_stmt;
1545 : :
1546 : : /* Used to keep a sequence of def stmts of a pattern stmt if such exists.
1547 : : The sequence is attached to the original statement rather than the
1548 : : pattern statement. */
1549 : : gimple_seq pattern_def_seq;
1550 : :
1551 : : /* Classify the def of this stmt. */
1552 : : enum vect_def_type def_type;
1553 : :
1554 : : /* Whether the stmt is covered by the SLP graph; see slp_vect_type. */
1555 : : enum slp_vect_type slp_type;
1556 : :
1557 : : /* Interleaving chains info. */
1558 : : /* First element in the group. */
1559 : : stmt_vec_info first_element;
1560 : : /* Pointer to the next element in the group. */
1561 : : stmt_vec_info next_element;
1562 : : /* The size of the group. */
1563 : : unsigned int size;
1564 : : /* For loads only, the gap from the previous load. For consecutive loads, GAP
1565 : : is 1. */
1566 : : unsigned int gap;
1567 : :
1568 : : /* The minimum negative dependence distance this stmt participates in
1569 : : or zero if none. */
1570 : : unsigned int min_neg_dist;
1571 : :
1572 : : /* Not all stmts in the loop need to be vectorized, e.g. the increment of
1573 : :    the loop induction variable and the computation of array indices.
1574 : :    RELEVANT indicates whether the stmt needs to be vectorized. */
1575 : : enum vect_relevant relevant;
1576 : :
1577 : : /* True for a load if it is a gather and for a store if it is a scatter. */
1578 : : bool gather_scatter_p;
1579 : :
1580 : : /* True if this is an access with loop-invariant stride. */
1581 : : bool strided_p;
1582 : :
1583 : : /* For both loads and stores. */
1584 : : unsigned simd_lane_access_p : 3;
1585 : :
1586 : : /* On a reduction PHI the reduction type as detected by
1587 : : vect_is_simple_reduction. */
1588 : : enum vect_reduction_type reduc_type;
1589 : :
1590 : : /* On a reduction PHI, the original reduction code as detected by
1591 : : vect_is_simple_reduction. */
1592 : : code_helper reduc_code;
1593 : :
1594 : : /* On a stmt participating in a reduction the index of the operand
1595 : : on the reduction SSA cycle. */
1596 : : int reduc_idx;
1597 : :
1598 : : /* On a reduction PHI the def returned by vect_is_simple_reduction.
1599 : : On the def returned by vect_is_simple_reduction the corresponding PHI. */
1600 : : stmt_vec_info reduc_def;
1601 : :
1602 : : /* If nonzero, the lhs of the statement could be truncated to this
1603 : : many bits without affecting any users of the result. */
1604 : : unsigned int min_output_precision;
1605 : :
1606 : : /* If nonzero, all non-boolean input operands have the same precision,
1607 : : and they could each be truncated to this many bits without changing
1608 : : the result. */
1609 : : unsigned int min_input_precision;
1610 : :
1611 : : /* If OPERATION_PRECISION is nonzero, the statement could be performed on
1612 : :    an integer with the sign and number of bits given by OPERATION_SIGN
1613 : :    and OPERATION_PRECISION without changing the result. */
1614 : : unsigned int operation_precision;
1615 : : signop operation_sign;
1616 : :
1617 : : /* If the statement produces a boolean result, this value describes
1618 : : how we should choose the associated vector type. The possible
1619 : : values are:
1620 : :
1621 : : - an integer precision N if we should use the vector mask type
1622 : : associated with N-bit integers. This is only used if all relevant
1623 : : input booleans also want the vector mask type for N-bit integers,
1624 : : or if we can convert them into that form by pattern-matching.
1625 : :
1626 : : - ~0U if we considered choosing a vector mask type but decided
1627 : : to treat the boolean as a normal integer type instead.
1628 : :
1629 : : - 0 otherwise. This means either that the operation isn't one that
1630 : : could have a vector mask type (and so should have a normal vector
1631 : : type instead) or that we simply haven't made a choice either way. */
1632 : : unsigned int mask_precision;
1633 : :
1634 : : /* True if this is only suitable for SLP vectorization. */
1635 : : bool slp_vect_only_p;
1636 : :
1637 : : /* True if this is a pattern that can only be handled by SLP
1638 : : vectorization. */
1639 : : bool slp_vect_pattern_only_p;
1640 : : };
1641 : :
1642 : : /* Information about a gather/scatter call. */
1643 : : struct gather_scatter_info {
1644 : : /* The internal function to use for the gather/scatter operation,
1645 : : or IFN_LAST if a built-in function should be used instead. */
1646 : : internal_fn ifn;
1647 : :
1648 : : /* The FUNCTION_DECL for the built-in gather/scatter function,
1649 : : or null if an internal function should be used instead. */
1650 : : tree decl;
1651 : :
1652 : : /* The loop-invariant base value. */
1653 : : tree base;
1654 : :
1655 : : /* The TBAA alias pointer, the value of which determines the alignment
1656 : : of the scalar accesses. */
1657 : : tree alias_ptr;
1658 : :
1659 : : /* The original scalar offset, which is a non-loop-invariant SSA_NAME. */
1660 : : tree offset;
1661 : :
1662 : : /* Each offset element should be multiplied by this amount before
1663 : : being added to the base. */
1664 : : int scale;
1665 : :
1666 : : /* The type of the vectorized offset. */
1667 : : tree offset_vectype;
1668 : :
1669 : : /* The type of the scalar elements after loading or before storing. */
1670 : : tree element_type;
1671 : :
1672 : : /* The type of the scalar elements being loaded or stored. */
1673 : : tree memory_type;
1674 : : };
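 : :
 : : /* For instance, for a scalar gather load like
 : :
 : :      x = a[idx[i]];
 : :
 : :    BASE would typically be the invariant address of "a", OFFSET the SSA
 : :    name holding idx[i], SCALE sizeof (a[0]), and ELEMENT_TYPE and
 : :    MEMORY_TYPE both the type of a[0] (an illustrative mapping, not an
 : :    exhaustive one).  */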
1675 : :
1676 : : /* Access Functions. */
1677 : : #define STMT_VINFO_STMT(S) (S)->stmt
1678 : : #define STMT_VINFO_RELEVANT(S) (S)->relevant
1679 : : #define STMT_VINFO_LIVE_P(S) (S)->live
1680 : : #define STMT_VINFO_VECTYPE(S) (S)->vectype
1681 : : #define STMT_VINFO_VECTORIZABLE(S) (S)->vectorizable
1682 : : #define STMT_VINFO_DATA_REF(S) ((S)->dr_aux.dr + 0)
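 : : /* (The "+ 0" above presumably forces the expression to be an rvalue, so
 : :    the data-ref field cannot be assigned through this accessor.)  */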
1683 : : #define STMT_VINFO_GATHER_SCATTER_P(S) (S)->gather_scatter_p
1684 : : #define STMT_VINFO_STRIDED_P(S) (S)->strided_p
1685 : : #define STMT_VINFO_SIMD_LANE_ACCESS_P(S) (S)->simd_lane_access_p
1686 : : #define STMT_VINFO_REDUC_IDX(S) (S)->reduc_idx
1687 : :
1688 : : #define STMT_VINFO_DR_WRT_VEC_LOOP(S) (S)->dr_wrt_vec_loop
1689 : : #define STMT_VINFO_DR_BASE_ADDRESS(S) (S)->dr_wrt_vec_loop.base_address
1690 : : #define STMT_VINFO_DR_INIT(S) (S)->dr_wrt_vec_loop.init
1691 : : #define STMT_VINFO_DR_OFFSET(S) (S)->dr_wrt_vec_loop.offset
1692 : : #define STMT_VINFO_DR_STEP(S) (S)->dr_wrt_vec_loop.step
1693 : : #define STMT_VINFO_DR_BASE_ALIGNMENT(S) (S)->dr_wrt_vec_loop.base_alignment
1694 : : #define STMT_VINFO_DR_BASE_MISALIGNMENT(S) \
1695 : : (S)->dr_wrt_vec_loop.base_misalignment
1696 : : #define STMT_VINFO_DR_OFFSET_ALIGNMENT(S) \
1697 : : (S)->dr_wrt_vec_loop.offset_alignment
1698 : : #define STMT_VINFO_DR_STEP_ALIGNMENT(S) \
1699 : : (S)->dr_wrt_vec_loop.step_alignment
1700 : :
1701 : : #define STMT_VINFO_DR_INFO(S) \
1702 : : (gcc_checking_assert ((S)->dr_aux.stmt == (S)), &(S)->dr_aux)
1703 : :
1704 : : #define STMT_VINFO_IN_PATTERN_P(S) (S)->in_pattern_p
1705 : : #define STMT_VINFO_RELATED_STMT(S) (S)->related_stmt
1706 : : #define STMT_VINFO_PATTERN_DEF_SEQ(S) (S)->pattern_def_seq
1707 : : #define STMT_VINFO_DEF_TYPE(S) (S)->def_type
1708 : : #define STMT_VINFO_GROUPED_ACCESS(S) \
1709 : : ((S)->dr_aux.dr && DR_GROUP_FIRST_ELEMENT(S))
1710 : : #define STMT_VINFO_LOOP_PHI_EVOLUTION_BASE_UNCHANGED(S) (S)->loop_phi_evolution_base_unchanged
1711 : : #define STMT_VINFO_LOOP_PHI_EVOLUTION_PART(S) (S)->loop_phi_evolution_part
1712 : : #define STMT_VINFO_LOOP_PHI_EVOLUTION_TYPE(S) (S)->loop_phi_evolution_type
1713 : : #define STMT_VINFO_MIN_NEG_DIST(S) (S)->min_neg_dist
1714 : : #define STMT_VINFO_REDUC_TYPE(S) (S)->reduc_type
1715 : : #define STMT_VINFO_REDUC_CODE(S) (S)->reduc_code
1716 : : #define STMT_VINFO_REDUC_DEF(S) (S)->reduc_def
1717 : : #define STMT_VINFO_SLP_VECT_ONLY(S) (S)->slp_vect_only_p
1718 : : #define STMT_VINFO_SLP_VECT_ONLY_PATTERN(S) (S)->slp_vect_pattern_only_p
1719 : : #define STMT_VINFO_REDUC_VECTYPE_IN(S) (S)->reduc_vectype_in
1720 : :
1721 : : #define DR_GROUP_FIRST_ELEMENT(S) \
1722 : : (gcc_checking_assert ((S)->dr_aux.dr), (S)->first_element)
1723 : : #define DR_GROUP_NEXT_ELEMENT(S) \
1724 : : (gcc_checking_assert ((S)->dr_aux.dr), (S)->next_element)
1725 : : #define DR_GROUP_SIZE(S) \
1726 : : (gcc_checking_assert ((S)->dr_aux.dr), (S)->size)
1727 : : #define DR_GROUP_GAP(S) \
1728 : : (gcc_checking_assert ((S)->dr_aux.dr), (S)->gap)
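 : :
 : : /* E.g. walking all statements of an interleaving group, starting from
 : :    its first element, follows the usual pattern:
 : :
 : :      for (stmt_vec_info s = DR_GROUP_FIRST_ELEMENT (stmt_info);
 : :           s; s = DR_GROUP_NEXT_ELEMENT (s))
 : :        ...  */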
1729 : :
1730 : : #define STMT_VINFO_RELEVANT_P(S) ((S)->relevant != vect_unused_in_scope)
1731 : :
1732 : : #define PURE_SLP_STMT(S) ((S)->slp_type == pure_slp)
1733 : : #define STMT_SLP_TYPE(S) (S)->slp_type
1734 : :
1735 : :
1736 : : /* Contains the scalar or vector costs for a vec_info. */
1737 : : class vector_costs
1738 : : {
1739 : : public:
1740 : : vector_costs (vec_info *, bool);
1741 : 0 : virtual ~vector_costs () {}
1742 : :
1743 : : /* Update the costs in response to adding COUNT copies of a statement.
1744 : :
1745 : : - WHERE specifies whether the cost occurs in the loop prologue,
1746 : : the loop body, or the loop epilogue.
1747 : : - KIND is the kind of statement, which is always meaningful.
1748 : : - STMT_INFO or NODE, if nonnull, describe the statement that will be
1749 : : vectorized.
1750 : : - VECTYPE, if nonnull, is the vector type that the vectorized
1751 : : statement will operate on. Note that this should be used in
1752 : : preference to STMT_VINFO_VECTYPE (STMT_INFO) since the latter
1753 : : is not correct for SLP.
1754 : : - for unaligned_load and unaligned_store statements, MISALIGN is
1755 : : the byte misalignment of the load or store relative to the target's
1756 : : preferred alignment for VECTYPE, or DR_MISALIGNMENT_UNKNOWN
1757 : : if the misalignment is not known.
1758 : :
1759 : : Return the calculated cost as well as recording it. The return
1760 : : value is used for dumping purposes. */
1761 : : virtual unsigned int add_stmt_cost (int count, vect_cost_for_stmt kind,
1762 : : stmt_vec_info stmt_info,
1763 : : slp_tree node,
1764 : : tree vectype, int misalign,
1765 : : vect_cost_model_location where);
1766 : :
1767 : : /* Finish calculating the cost of the code. The results can be
1768 : : read back using the functions below.
1769 : :
1770 : : If the costs describe vector code, SCALAR_COSTS gives the costs
1771 : : of the corresponding scalar code, otherwise it is null. */
1772 : : virtual void finish_cost (const vector_costs *scalar_costs);
1773 : :
1774 : : /* The costs in THIS and OTHER both describe ways of vectorizing
1775 : : a main loop. Return true if the costs described by THIS are
1776 : : cheaper than the costs described by OTHER. Return false if any
1777 : : of the following are true:
1778 : :
1779 : : - THIS and OTHER are of equal cost
1780 : : - OTHER is better than THIS
1781 : : - we can't be sure about the relative costs of THIS and OTHER. */
1782 : : virtual bool better_main_loop_than_p (const vector_costs *other) const;
1783 : :
1784 : : /* Likewise, but the costs in THIS and OTHER both describe ways of
1785 : : vectorizing an epilogue loop of MAIN_LOOP. */
1786 : : virtual bool better_epilogue_loop_than_p (const vector_costs *other,
1787 : : loop_vec_info main_loop) const;
1788 : :
1789 : : unsigned int prologue_cost () const;
1790 : : unsigned int body_cost () const;
1791 : : unsigned int epilogue_cost () const;
1792 : : unsigned int outside_cost () const;
1793 : : unsigned int total_cost () const;
1794 : : unsigned int suggested_unroll_factor () const;
1795 : : machine_mode suggested_epilogue_mode (int &masked) const;
1796 : 6645377 : bool costing_for_scalar () const { return m_costing_for_scalar; }
1797 : :
1798 : : protected:
1799 : : unsigned int record_stmt_cost (stmt_vec_info, vect_cost_model_location,
1800 : : unsigned int);
1801 : : unsigned int adjust_cost_for_freq (stmt_vec_info, vect_cost_model_location,
1802 : : unsigned int);
1803 : : int compare_inside_loop_cost (const vector_costs *) const;
1804 : : int compare_outside_loop_cost (const vector_costs *) const;
1805 : :
1806 : : /* The region of code that we're considering vectorizing. */
1807 : : vec_info *m_vinfo;
1808 : :
1809 : : /* True if we're costing the scalar code, false if we're costing
1810 : : the vector code. */
1811 : : bool m_costing_for_scalar;
1812 : :
1813 : : /* The costs of the three regions, indexed by vect_cost_model_location. */
1814 : : unsigned int m_costs[3];
1815 : :
1816 : : /* The suggested unrolling factor determined at finish_cost. */
1817 : : unsigned int m_suggested_unroll_factor;
1818 : :
1819 : : /* The suggested mode for a vectorized epilogue, or VOIDmode, determined
1820 : :    at finish_cost. m_masked_epilogue specifies whether the epilogue
1821 : :    should use masked vectorization, overriding the default of
1822 : :    --param vect-partial-vector-usage; if it is -1, the --param setting
1823 : :    takes precedence instead. A --param vect-partial-vector-usage value
1824 : :    explicitly specified by the user always takes precedence. */
1825 : : machine_mode m_suggested_epilogue_mode;
1826 : : int m_masked_epilogue;
1827 : :
1828 : : /* True if finish_cost has been called. */
1829 : : bool m_finished;
1830 : : };
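 : :
 : : /* The intended lifecycle of a vector_costs object is, roughly (KIND and
 : :    WHERE stand for suitable enum values; init_cost and the add_stmt_cost
 : :    wrappers are defined later in this file):
 : :
 : :      vector_costs *costs = init_cost (vinfo, false);
 : :      add_stmt_cost (costs, 1, kind, stmt_info, node, vectype, 0, where);
 : :      ...
 : :      costs->finish_cost (scalar_costs);  // nullptr when costing scalar code
 : :      unsigned body = costs->body_cost ();  */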
1831 : :
1832 : : /* Create costs for VINFO. COSTING_FOR_SCALAR is true if the costs
1833 : : are for scalar code, false if they are for vector code. */
1834 : :
1835 : : inline
1836 : 1959786 : vector_costs::vector_costs (vec_info *vinfo, bool costing_for_scalar)
1837 : 1959786 : : m_vinfo (vinfo),
1838 : 1959786 : m_costing_for_scalar (costing_for_scalar),
1839 : 1959786 : m_costs (),
1840 : 1959786 : m_suggested_unroll_factor(1),
1841 : 1959786 : m_suggested_epilogue_mode(VOIDmode),
1842 : 1959786 : m_masked_epilogue (-1),
1843 : 1959786 : m_finished (false)
1844 : : {
1845 : : }
1846 : :
1847 : : /* Return the cost of the prologue code (in abstract units). */
1848 : :
1849 : : inline unsigned int
1850 : 830739 : vector_costs::prologue_cost () const
1851 : : {
1852 : 830739 : gcc_checking_assert (m_finished);
1853 : 830739 : return m_costs[vect_prologue];
1854 : : }
1855 : :
1856 : : /* Return the cost of the body code (in abstract units). */
1857 : :
1858 : : inline unsigned int
1859 : 1494370 : vector_costs::body_cost () const
1860 : : {
1861 : 1494370 : gcc_checking_assert (m_finished);
1862 : 1494370 : return m_costs[vect_body];
1863 : : }
1864 : :
1865 : : /* Return the cost of the epilogue code (in abstract units). */
1866 : :
1867 : : inline unsigned int
1868 : 830739 : vector_costs::epilogue_cost () const
1869 : : {
1870 : 830739 : gcc_checking_assert (m_finished);
1871 : 830739 : return m_costs[vect_epilogue];
1872 : : }
1873 : :
1874 : : /* Return the cost of the prologue and epilogue code (in abstract units). */
1875 : :
1876 : : inline unsigned int
1877 : 83554 : vector_costs::outside_cost () const
1878 : : {
1879 : 83554 : return prologue_cost () + epilogue_cost ();
1880 : : }
1881 : :
1882 : : /* Return the cost of the prologue, body and epilogue code
1883 : : (in abstract units). */
1884 : :
1885 : : inline unsigned int
1886 : 83554 : vector_costs::total_cost () const
1887 : : {
1888 : 83554 : return body_cost () + outside_cost ();
1889 : : }
1890 : :
1891 : : /* Return the suggested unroll factor. */
1892 : :
1893 : : inline unsigned int
1894 : 83367 : vector_costs::suggested_unroll_factor () const
1895 : : {
1896 : 83367 : gcc_checking_assert (m_finished);
1897 : 83367 : return m_suggested_unroll_factor;
1898 : : }
1899 : :
1900 : : /* Return the suggested epilogue mode. */
1901 : :
1902 : : inline machine_mode
1903 : 12709 : vector_costs::suggested_epilogue_mode (int &masked_p) const
1904 : : {
1905 : 12709 : gcc_checking_assert (m_finished);
1906 : 12709 : masked_p = m_masked_epilogue;
1907 : 12709 : return m_suggested_epilogue_mode;
1908 : : }
1909 : :
1910 : : #define VECT_MAX_COST 1000
1911 : :
1912 : : /* The maximum number of intermediate steps required in multi-step type
1913 : : conversion. */
1914 : : #define MAX_INTERM_CVT_STEPS 3
1915 : :
1916 : : #define MAX_VECTORIZATION_FACTOR INT_MAX
1917 : :
1918 : : /* Nonzero if TYPE represents a (scalar) boolean type or a type that the
1919 : :    middle-end treats as compatible with it (unsigned integral types with
1920 : :    precision 1). Used to determine which types should be vectorized as
1921 : :    VECTOR_BOOLEAN_TYPE_P. */
1922 : :
1923 : : #define VECT_SCALAR_BOOLEAN_TYPE_P(TYPE) \
1924 : : (TREE_CODE (TYPE) == BOOLEAN_TYPE \
1925 : : || ((TREE_CODE (TYPE) == INTEGER_TYPE \
1926 : : || TREE_CODE (TYPE) == ENUMERAL_TYPE) \
1927 : : && TYPE_PRECISION (TYPE) == 1 \
1928 : : && TYPE_UNSIGNED (TYPE)))
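 : :
 : : /* E.g. C's _Bool satisfies this predicate, as does the type of a 1-bit
 : :    unsigned bit-field; a plain 'int' (even one holding only 0 or 1)
 : :    does not.  */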
1929 : :
1930 : : inline bool
1931 : 9168220 : nested_in_vect_loop_p (class loop *loop, stmt_vec_info stmt_info)
1932 : : {
1933 : 9168220 : return (loop->inner
1934 : 7289399 : && (loop->inner == (gimple_bb (stmt_info->stmt))->loop_father));
1935 : : }
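 : :
 : : /* I.e. in an outer-loop vectorization scenario such as
 : :
 : :      for (i = ...)        <-- LOOP, the loop being vectorized
 : :        for (j = ...)      <-- LOOP->inner
 : :          ... STMT_INFO ...
 : :
 : :    the predicate is true for statements whose enclosing loop is
 : :    LOOP->inner.  */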
1936 : :
1937 : : /* PHI is either a scalar reduction phi or a scalar induction phi.
1938 : : Return the initial value of the variable on entry to the containing
1939 : : loop. */
1940 : :
1941 : : inline tree
1942 : 34335 : vect_phi_initial_value (gphi *phi)
1943 : : {
1944 : 34335 : basic_block bb = gimple_bb (phi);
1945 : 34335 : edge pe = loop_preheader_edge (bb->loop_father);
1946 : 34335 : gcc_assert (pe->dest == bb);
1947 : 34335 : return PHI_ARG_DEF_FROM_EDGE (phi, pe);
1948 : : }
1949 : :
1950 : : /* Return true if STMT_INFO should produce a vector mask type rather than
1951 : : a normal nonmask type. */
1952 : :
1953 : : inline bool
1954 : 6841953 : vect_use_mask_type_p (stmt_vec_info stmt_info)
1955 : : {
1956 : 6841953 : return stmt_info->mask_precision && stmt_info->mask_precision != ~0U;
1957 : : }
1958 : :
1959 : : /* Return TRUE if a statement represented by STMT_INFO is a part of a
1960 : : pattern. */
1961 : :
1962 : : inline bool
1963 : 120199757 : is_pattern_stmt_p (stmt_vec_info stmt_info)
1964 : : {
1965 : 77841794 : return stmt_info->pattern_stmt_p;
1966 : : }
1967 : :
1968 : : /* If STMT_INFO is a pattern statement, return the statement that it
1969 : : replaces, otherwise return STMT_INFO itself. */
1970 : :
1971 : : inline stmt_vec_info
1972 : 46345425 : vect_orig_stmt (stmt_vec_info stmt_info)
1973 : : {
1974 : 34202635 : if (is_pattern_stmt_p (stmt_info))
1975 : 3037292 : return STMT_VINFO_RELATED_STMT (stmt_info);
1976 : : return stmt_info;
1977 : : }
1978 : :
1979 : : /* Return the later statement between STMT1_INFO and STMT2_INFO. */
1980 : :
1981 : : inline stmt_vec_info
1982 : 5946807 : get_later_stmt (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info)
1983 : : {
1984 : 5946807 : gimple *stmt1 = vect_orig_stmt (stmt1_info)->stmt;
1985 : 5946807 : gimple *stmt2 = vect_orig_stmt (stmt2_info)->stmt;
1986 : 5946807 : if (gimple_bb (stmt1) == gimple_bb (stmt2))
1987 : : {
1988 : 5920009 : if (gimple_uid (stmt1) > gimple_uid (stmt2))
1989 : : return stmt1_info;
1990 : : else
1991 : : return stmt2_info;
1992 : : }
1993 : : /* ??? We should really be calling this function only with stmts
1994 : :    in the same BB, but we can recover if there's a domination
1995 : :    relationship between them. */
1996 : 26798 : else if (dominated_by_p (CDI_DOMINATORS,
1997 : 26798 : gimple_bb (stmt1), gimple_bb (stmt2)))
1998 : : return stmt1_info;
1999 : 8398 : else if (dominated_by_p (CDI_DOMINATORS,
2000 : 8398 : gimple_bb (stmt2), gimple_bb (stmt1)))
2001 : : return stmt2_info;
2002 : 0 : gcc_unreachable ();
2003 : : }
2004 : :
2005 : : /* If STMT_INFO has been replaced by a pattern statement, return the
2006 : : replacement statement, otherwise return STMT_INFO itself. */
2007 : :
2008 : : inline stmt_vec_info
2009 : 47098796 : vect_stmt_to_vectorize (stmt_vec_info stmt_info)
2010 : : {
2011 : 47098796 : if (STMT_VINFO_IN_PATTERN_P (stmt_info))
2012 : 1395705 : return STMT_VINFO_RELATED_STMT (stmt_info);
2013 : : return stmt_info;
2014 : : }
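 : :
 : : /* For a statement S replaced by pattern statement P, the two mappings
 : :    compose as
 : :
 : :      vect_stmt_to_vectorize (S) == P
 : :      vect_orig_stmt (P) == S
 : :
 : :    and both are the identity for statements not involved in pattern
 : :    recognition.  */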
2015 : :
2016 : : /* Return true if BB is a loop header. */
2017 : :
2018 : : inline bool
2019 : 1124589 : is_loop_header_bb_p (basic_block bb)
2020 : : {
2021 : 1124589 : if (bb == (bb->loop_father)->header)
2022 : 1114584 : return true;
2023 : :
2024 : : return false;
2025 : : }
2026 : :
2027 : : /* Return 2 raised to the power X, e.g. vect_pow2 (3) == 8. */
2028 : :
2029 : : inline int
2030 : : vect_pow2 (int x)
2031 : : {
2032 : : int i, res = 1;
2033 : :
2034 : : for (i = 0; i < x; i++)
2035 : : res *= 2;
2036 : :
2037 : : return res;
2038 : : }
2039 : :
2040 : : /* Alias targetm.vectorize.builtin_vectorization_cost. */
2041 : :
2042 : : inline int
2043 : 9205613 : builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
2044 : : tree vectype, int misalign)
2045 : : {
2046 : 9205613 : return targetm.vectorize.builtin_vectorization_cost (type_of_cost,
2047 : : vectype, misalign);
2048 : : }
2049 : :
2050 : : /* Get the cost of TYPE_OF_COST by calling the target cost builtin. */
2051 : :
2052 : : inline
2053 : 38 : int vect_get_stmt_cost (enum vect_cost_for_stmt type_of_cost)
2054 : : {
2055 : 50997 : return builtin_vectorization_cost (type_of_cost, NULL, 0);
2056 : : }
2057 : :
2058 : : /* Alias targetm.vectorize.init_cost. */
2059 : :
2060 : : inline vector_costs *
2061 : 1959786 : init_cost (vec_info *vinfo, bool costing_for_scalar)
2062 : : {
2063 : 1959786 : return targetm.vectorize.create_costs (vinfo, costing_for_scalar);
2064 : : }
2065 : :
2066 : : extern void dump_stmt_cost (FILE *, int, enum vect_cost_for_stmt,
2067 : : stmt_vec_info, slp_tree, tree, int, unsigned,
2068 : : enum vect_cost_model_location);
2069 : :
2070 : : /* Dump and add costs. */
2071 : :
2072 : : inline unsigned
2073 : 6645377 : add_stmt_cost (vector_costs *costs, int count,
2074 : : enum vect_cost_for_stmt kind,
2075 : : stmt_vec_info stmt_info, slp_tree node,
2076 : : tree vectype, int misalign,
2077 : : enum vect_cost_model_location where)
2078 : : {
2079 : : /* Even though a vector type might be set on stmt do not pass that on when
2080 : : costing the scalar IL. A SLP node shouldn't have been recorded. */
2081 : 6645377 : if (costs->costing_for_scalar ())
2082 : : {
2083 : 3538370 : vectype = NULL_TREE;
2084 : 3538370 : gcc_checking_assert (node == NULL);
2085 : : }
2086 : 6645377 : unsigned cost = costs->add_stmt_cost (count, kind, stmt_info, node, vectype,
2087 : : misalign, where);
2088 : 6645377 : if (dump_file && (dump_flags & TDF_DETAILS))
2089 : 205665 : dump_stmt_cost (dump_file, count, kind, stmt_info, node, vectype, misalign,
2090 : : cost, where);
2091 : 6645377 : return cost;
2092 : : }
2093 : :
2094 : : inline unsigned
2095 : 60260 : add_stmt_cost (vector_costs *costs, int count, enum vect_cost_for_stmt kind,
2096 : : enum vect_cost_model_location where)
2097 : : {
2098 : 60260 : gcc_assert (kind == cond_branch_taken || kind == cond_branch_not_taken
2099 : : || kind == scalar_stmt);
2100 : 60260 : return add_stmt_cost (costs, count, kind, NULL, NULL, NULL_TREE, 0, where);
2101 : : }
2102 : :
2103 : : inline unsigned
2104 : 3859255 : add_stmt_cost (vector_costs *costs, stmt_info_for_cost *i)
2105 : : {
2106 : 3859255 : return add_stmt_cost (costs, i->count, i->kind, i->stmt_info, i->node,
2107 : 3859255 : i->vectype, i->misalign, i->where);
2108 : : }
2109 : :
2110 : : inline void
2111 : 496845 : add_stmt_costs (vector_costs *costs, stmt_vector_for_cost *cost_vec)
2112 : : {
2113 : 496845 : stmt_info_for_cost *cost;
2114 : 496845 : unsigned i;
2115 : 2996844 : FOR_EACH_VEC_ELT (*cost_vec, i, cost)
2116 : 2499999 : add_stmt_cost (costs, cost->count, cost->kind, cost->stmt_info,
2117 : : cost->node, cost->vectype, cost->misalign, cost->where);
2118 : 496845 : }
2119 : :
2120 : : /*-----------------------------------------------------------------*/
2121 : : /* Info on data references alignment. */
2122 : : /*-----------------------------------------------------------------*/
2123 : : #define DR_MISALIGNMENT_UNKNOWN (-1)
2124 : : #define DR_MISALIGNMENT_UNINITIALIZED (-2)
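 : :
 : : /* A typical consumer checks the sentinels before trusting the value,
 : :    along the lines of:
 : :
 : :      int misalign = dr_misalignment (dr_info, vectype);
 : :      if (misalign == DR_MISALIGNMENT_UNKNOWN)
 : :        ... alignment not known at compile time ...
 : :      else if (misalign == 0)
 : :        ... aligned to DR_TARGET_ALIGNMENT ...
 : :      else
 : :        ... known misalignment in bytes ...  */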
2125 : :
2126 : : inline void
2127 : 2366695 : set_dr_misalignment (dr_vec_info *dr_info, int val)
2128 : : {
2129 : 2366695 : dr_info->misalignment = val;
2130 : : }
2131 : :
2132 : : extern int dr_misalignment (dr_vec_info *dr_info, tree vectype,
2133 : : poly_int64 offset = 0);
2134 : :
2135 : : #define SET_DR_MISALIGNMENT(DR, VAL) set_dr_misalignment (DR, VAL)
2136 : :
2137 : : /* Only defined once DR_MISALIGNMENT is defined. */
2138 : : inline const poly_uint64
2139 : 6266284 : dr_target_alignment (dr_vec_info *dr_info)
2140 : : {
2141 : 6266284 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
2142 : 4616227 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
2143 : 6266284 : return dr_info->target_alignment;
2144 : : }
2145 : : #define DR_TARGET_ALIGNMENT(DR) dr_target_alignment (DR)
2146 : : #define DR_SCALAR_KNOWN_BOUNDS(DR) (DR)->scalar_access_known_in_bounds
2147 : :
2148 : : /* Return if the stmt_vec_info requires peeling for alignment. */
2149 : : inline bool
2150 : 3840040 : dr_safe_speculative_read_required (stmt_vec_info stmt_info)
2151 : : {
2152 : 3840040 : dr_vec_info *dr_info;
2153 : 3840040 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2154 : 1631015 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (stmt_info));
2155 : : else
2156 : 2209025 : dr_info = STMT_VINFO_DR_INFO (stmt_info);
2157 : :
2158 : 3840040 : return dr_info->safe_speculative_read_required;
2159 : : }
2160 : :
2161 : : /* Set safe_speculative_read_required for STMT_INFO; for a grouped access
2162 : :    set it on the first element, otherwise set it on the DR directly. */
2163 : : inline void
2164 : 210743 : dr_set_safe_speculative_read_required (stmt_vec_info stmt_info,
2165 : : bool requires_alignment)
2166 : : {
2167 : 210743 : dr_vec_info *dr_info;
2168 : 210743 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2169 : 68814 : dr_info = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (stmt_info));
2170 : : else
2171 : 141929 : dr_info = STMT_VINFO_DR_INFO (stmt_info);
2172 : :
2173 : 210743 : dr_info->safe_speculative_read_required = requires_alignment;
2174 : 210743 : }
2175 : :
2176 : : inline void
2177 : 1452490 : set_dr_target_alignment (dr_vec_info *dr_info, poly_uint64 val)
2178 : : {
2179 : 1452490 : dr_info->target_alignment = val;
2180 : : }
2181 : : #define SET_DR_TARGET_ALIGNMENT(DR, VAL) set_dr_target_alignment (DR, VAL)
2182 : :
2183 : : /* Return true if data access DR_INFO is aligned to the target's
2184 : : preferred alignment for VECTYPE (which may be less than a full vector). */
2185 : :
2186 : : inline bool
2187 : 309427 : aligned_access_p (dr_vec_info *dr_info, tree vectype)
2188 : : {
2189 : 309427 : return (dr_misalignment (dr_info, vectype) == 0);
2190 : : }
2191 : :
2192 : : /* Return TRUE if the (mis-)alignment of the data access is known with
2193 : :    respect to the target's preferred alignment for VECTYPE, and FALSE
2194 : : otherwise. */
2195 : :
2196 : : inline bool
2197 : 1781781 : known_alignment_for_access_p (dr_vec_info *dr_info, tree vectype)
2198 : : {
2199 : 1601030 : return (dr_misalignment (dr_info, vectype) != DR_MISALIGNMENT_UNKNOWN);
2200 : : }
2201 : :
2202 : : /* Return the minimum alignment in bytes that the vectorized version
2203 : : of DR_INFO is guaranteed to have. */
2204 : :
2205 : : inline unsigned int
2206 : 234044 : vect_known_alignment_in_bytes (dr_vec_info *dr_info, tree vectype,
2207 : : poly_int64 offset = 0)
2208 : : {
2209 : 234044 : int misalignment = dr_misalignment (dr_info, vectype, offset);
2210 : 234044 : if (misalignment == DR_MISALIGNMENT_UNKNOWN)
2211 : 115589 : return TYPE_ALIGN_UNIT (TREE_TYPE (DR_REF (dr_info->dr)));
2212 : 118455 : else if (misalignment == 0)
2213 : 89976 : return known_alignment (DR_TARGET_ALIGNMENT (dr_info));
2214 : 28479 : return misalignment & -misalignment;
2215 : : }
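 : :
 : : /* In the final case above, MISALIGNMENT & -MISALIGNMENT isolates the
 : :    lowest set bit: e.g. a known misalignment of 12 bytes still
 : :    guarantees 4-byte alignment, since 12 & -12 == 4.  */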
2216 : :
2217 : : /* Return the behavior of DR_INFO with respect to the vectorization context
2218 : : (which for outer loop vectorization might not be the behavior recorded
2219 : : in DR_INFO itself). */
2220 : :
2221 : : inline innermost_loop_behavior *
2222 : 5322587 : vect_dr_behavior (vec_info *vinfo, dr_vec_info *dr_info)
2223 : : {
2224 : 5322587 : stmt_vec_info stmt_info = dr_info->stmt;
2225 : 5322587 : loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo);
2226 : 1885299 : if (loop_vinfo == NULL
2227 : 1885299 : || !nested_in_vect_loop_p (LOOP_VINFO_LOOP (loop_vinfo), stmt_info))
2228 : 5318908 : return &DR_INNERMOST (dr_info->dr);
2229 : : else
2230 : 3679 : return &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info);
2231 : : }
2232 : :
2233 : : /* Return the offset calculated by adding the offset of this DR_INFO to the
2234 : : corresponding data_reference's offset. If CHECK_OUTER then use
2235 : :    vect_dr_behavior to select the appropriate innermost_loop_behavior. */
2236 : :
2237 : : inline tree
2238 : 730166 : get_dr_vinfo_offset (vec_info *vinfo,
2239 : : dr_vec_info *dr_info, bool check_outer = false)
2240 : : {
2241 : 730166 : innermost_loop_behavior *base;
2242 : 730166 : if (check_outer)
2243 : 689879 : base = vect_dr_behavior (vinfo, dr_info);
2244 : : else
2245 : 40287 : base = &dr_info->dr->innermost;
2246 : :
2247 : 730166 : tree offset = base->offset;
2248 : :
2249 : 730166 : if (!dr_info->offset)
2250 : : return offset;
2251 : :
2252 : 19003 : offset = fold_convert (sizetype, offset);
2253 : 19003 : return fold_build2 (PLUS_EXPR, TREE_TYPE (dr_info->offset), offset,
2254 : : dr_info->offset);
2255 : : }
2256 : :
2257 : :
2258 : : /* Return the vect cost model for LOOP. */
2259 : : inline enum vect_cost_model
2260 : 1827968 : loop_cost_model (loop_p loop)
2261 : : {
2262 : 1827968 : if (loop != NULL
2263 : 1164298 : && loop->force_vectorize
2264 : 69940 : && flag_simd_cost_model != VECT_COST_MODEL_DEFAULT)
2265 : : return flag_simd_cost_model;
2266 : 1758028 : return flag_vect_cost_model;
2267 : : }
2268 : :
2269 : : /* Return true if the vect cost model is unlimited. */
2270 : : inline bool
2271 : 1260812 : unlimited_cost_model (loop_p loop)
2272 : : {
2273 : 1260812 : return loop_cost_model (loop) == VECT_COST_MODEL_UNLIMITED;
2274 : : }
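 : :
 : : /* For instance, for a loop with LOOP->force_vectorize set (e.g. via
 : :    #pragma omp simd), -fsimd-cost-model=unlimited would make
 : :    unlimited_cost_model return true even when the general
 : :    -fvect-cost-model setting says otherwise.  */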
2275 : :
2276 : : /* Return true if the loop described by LOOP_VINFO is fully-masked and
2277 : : if the first iteration should use a partial mask in order to achieve
2278 : : alignment. */
2279 : :
2280 : : inline bool
2281 : 216830 : vect_use_loop_mask_for_alignment_p (loop_vec_info loop_vinfo)
2282 : : {
2283 : : /* With early break vectorization we don't know whether the accesses will stay
2284 : : inside the loop or not. TODO: The early break adjustment code can be
2285 : :    implemented the same way as vectorizable_linear_induction. However, we
2286 : :    can't test this today, so reject it. */
2287 : 81 : return (LOOP_VINFO_FULLY_MASKED_P (loop_vinfo)
2288 : 81 : && LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo)
2289 : 216834 : && !(LOOP_VINFO_NON_LINEAR_IV (loop_vinfo)
2290 : 0 : && LOOP_VINFO_EARLY_BREAKS (loop_vinfo)));
2291 : : }
2292 : :
2293 : : /* Return the number of vectors of type VECTYPE that are needed to get
2294 : : NUNITS elements. NUNITS should be based on the vectorization factor,
2295 : : so it is always a known multiple of the number of elements in VECTYPE. */
2296 : :
2297 : : inline unsigned int
2298 : 6588955 : vect_get_num_vectors (poly_uint64 nunits, tree vectype)
2299 : : {
2300 : 6588955 : return exact_div (nunits, TYPE_VECTOR_SUBPARTS (vectype)).to_constant ();
2301 : : }
2302 : :
2303 : : /* Return the number of vectors in the context of vectorization region VINFO,
2304 : : needed for a group of statements and a vector type as specified by NODE. */
2305 : :
2306 : : inline unsigned int
2307 : 6588157 : vect_get_num_copies (vec_info *vinfo, slp_tree node)
2308 : : {
2309 : 6588157 : poly_uint64 vf;
2310 : :
2311 : 6588157 : if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
2312 : 2731524 : vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2313 : : else
2314 : : vf = 1;
2315 : :
2316 : 6588157 : vf *= SLP_TREE_LANES (node);
2317 : 6588157 : tree vectype = SLP_TREE_VECTYPE (node);
2318 : :
2319 : 6588157 : return vect_get_num_vectors (vf, vectype);
2320 : : }
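 : :
 : : /* E.g. with a loop vectorization factor of 4, an SLP node with two
 : :    lanes and vector type V4SI this yields (4 * 2) / 4 = 2 copies.  */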
2321 : :
2322 : : /* Update maximum unit count *MAX_NUNITS so that it accounts for
2323 : : NUNITS. *MAX_NUNITS can be 1 if we haven't yet recorded anything. */
2324 : :
2325 : : inline void
2326 : 8412799 : vect_update_max_nunits (poly_uint64 *max_nunits, poly_uint64 nunits)
2327 : : {
2328 : : /* All unit counts have the form vec_info::vector_size * X for some
2329 : : rational X, so two unit sizes must have a common multiple.
2330 : : Everything is a multiple of the initial value of 1. */
2331 : 3646455 : *max_nunits = force_common_multiple (*max_nunits, nunits);
2332 : : }
2333 : :
2334 : : /* Update maximum unit count *MAX_NUNITS so that it accounts for
2335 : : the number of units in vector type VECTYPE. *MAX_NUNITS can be 1
2336 : : if we haven't yet recorded any vector types. */
2337 : :
2338 : : inline void
2339 : 4766344 : vect_update_max_nunits (poly_uint64 *max_nunits, tree vectype)
2340 : : {
2341 : 4766344 : vect_update_max_nunits (max_nunits, TYPE_VECTOR_SUBPARTS (vectype));
2342 : 4766344 : }
2343 : :
2344 : : /* Return the vectorization factor that should be used for costing
2345 : : purposes while vectorizing the loop described by LOOP_VINFO.
2346 : : Pick a reasonable estimate if the vectorization factor isn't
2347 : : known at compile time. */
2348 : :
2349 : : inline unsigned int
2350 : 956118 : vect_vf_for_cost (loop_vec_info loop_vinfo)
2351 : : {
2352 : 956118 : return estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
2353 : : }
2354 : :
2355 : : /* Estimate the number of elements in VEC_TYPE for costing purposes.
2356 : : Pick a reasonable estimate if the exact number isn't known at
2357 : : compile time. */
2358 : :
2359 : : inline unsigned int
2360 : 29792 : vect_nunits_for_cost (tree vec_type)
2361 : : {
2362 : 29792 : return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vec_type));
2363 : : }
2364 : :
2365 : : /* Return the maximum possible vectorization factor for LOOP_VINFO. */
2366 : :
2367 : : inline unsigned HOST_WIDE_INT
2368 : 80843 : vect_max_vf (loop_vec_info loop_vinfo)
2369 : : {
2370 : 80843 : unsigned HOST_WIDE_INT vf;
2371 : 80843 : if (LOOP_VINFO_VECT_FACTOR (loop_vinfo).is_constant (&vf))
2372 : 80843 : return vf;
2373 : : return MAX_VECTORIZATION_FACTOR;
2374 : : }
2375 : :
2376 : : /* Return the size of the value accessed by unvectorized data reference
2377 : : DR_INFO. This is only valid once STMT_VINFO_VECTYPE has been calculated
2378 : : for the associated gimple statement, since that guarantees that DR_INFO
2379 : : accesses either a scalar or a scalar equivalent. ("Scalar equivalent"
2380 : : here includes things like V1SI, which can be vectorized in the same way
2381 : : as a plain SI.) */
2382 : :
2383 : : inline unsigned int
2384 : 1732857 : vect_get_scalar_dr_size (dr_vec_info *dr_info)
2385 : : {
2386 : 1732857 : return tree_to_uhwi (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dr_info->dr))));
2387 : : }
2388 : :
2389 : : /* Return true if LOOP_VINFO requires a runtime check for whether the
2390 : : vector loop is profitable. */
2391 : :
2392 : : inline bool
2393 : 66427 : vect_apply_runtime_profitability_check_p (loop_vec_info loop_vinfo)
2394 : : {
2395 : 66427 : unsigned int th = LOOP_VINFO_COST_MODEL_THRESHOLD (loop_vinfo);
2396 : 36334 : return (!LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo)
2397 : 66427 : && th >= vect_vf_for_cost (loop_vinfo));
2398 : : }
2399 : :
2400 : : /* Return true if CODE is a lane-reducing opcode. */
2401 : :
2402 : : inline bool
2403 : 288820 : lane_reducing_op_p (code_helper code)
2404 : : {
2405 : 288820 : return code == DOT_PROD_EXPR || code == WIDEN_SUM_EXPR || code == SAD_EXPR;
2406 : : }
2407 : :
2408 : : /* Return true if STMT is a lane-reducing statement. */
2409 : :
2410 : : inline bool
2411 : 361113 : lane_reducing_stmt_p (gimple *stmt)
2412 : : {
2413 : 361113 : if (auto *assign = dyn_cast <gassign *> (stmt))
2414 : 271180 : return lane_reducing_op_p (gimple_assign_rhs_code (assign));
2415 : : return false;
2416 : : }
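 : :
 : : /* E.g. an assignment like
 : :
 : :      sum_1 = DOT_PROD_EXPR <x_2, y_3, sum_0>;
 : :
 : :    is lane-reducing: it combines several input lanes into fewer lanes
 : :    of the accumulated result.  */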
2417 : :
2418 : : /* Source location + hotness information. */
2419 : : extern dump_user_location_t vect_location;
2420 : :
2421 : : /* A macro for calling:
2422 : : dump_begin_scope (MSG, vect_location);
2423 : : via an RAII object, thus printing "=== MSG ===\n" to the dumpfile etc,
2424 : : and then calling
2425 : : dump_end_scope ();
2426 : : once the object goes out of scope, thus capturing the nesting of
2427 : : the scopes.
2428 : :
2429 : : These scopes affect dump messages within them: dump messages at the
2430 : : top level implicitly default to MSG_PRIORITY_USER_FACING, whereas those
2431 : : in a nested scope implicitly default to MSG_PRIORITY_INTERNALS. */
2432 : :
2433 : : #define DUMP_VECT_SCOPE(MSG) \
2434 : : AUTO_DUMP_SCOPE (MSG, vect_location)
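 : :
 : : /* Typical use at the top of an analysis routine, e.g.:
 : :
 : :      DUMP_VECT_SCOPE ("vect_analyze_data_refs");  */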
2435 : :
2436 : : /* A sentinel class for ensuring that the "vect_location" global gets
2437 : : reset at the end of a scope.
2438 : :
2439 : : The "vect_location" global is used during dumping and contains a
2440 : : location_t, which could contain references to a tree block via the
2441 : : ad-hoc data. This data is used for tracking inlining information,
2442 : : but it's not a GC root; it's simply assumed that such locations never
2443 : : get accessed if the blocks are optimized away.
2444 : :
2445 : : Hence we need to ensure that such locations are purged at the end
2446 : : of any operations using them (e.g. via this class). */
2447 : :
2448 : : class auto_purge_vect_location
2449 : : {
2450 : : public:
2451 : : ~auto_purge_vect_location ();
2452 : : };
2453 : :
2454 : : /*-----------------------------------------------------------------*/
2455 : : /* Function prototypes. */
2456 : : /*-----------------------------------------------------------------*/
2457 : :
2458 : : /* Simple loop peeling and versioning utilities for vectorizer's purposes -
2459 : : in tree-vect-loop-manip.cc. */
2460 : : extern void vect_set_loop_condition (class loop *, edge, loop_vec_info,
2461 : : tree, tree, tree, bool);
2462 : : extern bool slpeel_can_duplicate_loop_p (const class loop *, const_edge,
2463 : : const_edge);
2464 : : class loop *slpeel_tree_duplicate_loop_to_edge_cfg (class loop *, edge,
2465 : : class loop *, edge,
2466 : : edge, edge *, bool = true,
2467 : : vec<basic_block> * = NULL);
2468 : : class loop *vect_loop_versioning (loop_vec_info, gimple *);
2469 : : extern class loop *vect_do_peeling (loop_vec_info, tree, tree,
2470 : : tree *, tree *, tree *, int, bool, bool,
2471 : : tree *);
2472 : : extern tree vect_get_main_loop_result (loop_vec_info, tree, tree);
2473 : : extern void vect_prepare_for_masked_peels (loop_vec_info);
2474 : : extern dump_user_location_t find_loop_location (class loop *);
2475 : : extern bool vect_can_advance_ivs_p (loop_vec_info);
2476 : : extern void vect_update_inits_of_drs (loop_vec_info, tree, tree_code);
2477 : : extern edge vec_init_loop_exit_info (class loop *);
2478 : : extern void vect_iv_increment_position (edge, gimple_stmt_iterator *, bool *);
2479 : :
2480 : : /* In tree-vect-stmts.cc. */
2481 : : extern tree get_related_vectype_for_scalar_type (machine_mode, tree,
2482 : : poly_uint64 = 0);
2483 : : extern tree get_vectype_for_scalar_type (vec_info *, tree, unsigned int = 0);
2484 : : extern tree get_vectype_for_scalar_type (vec_info *, tree, slp_tree);
2485 : : extern tree get_mask_type_for_scalar_type (vec_info *, tree, unsigned int = 0);
2486 : : extern tree get_mask_type_for_scalar_type (vec_info *, tree, slp_tree);
2487 : : extern tree get_same_sized_vectype (tree, tree);
2488 : : extern bool vect_chooses_same_modes_p (vec_info *, machine_mode);
2489 : : extern bool vect_chooses_same_modes_p (machine_mode, machine_mode);
2490 : : extern bool vect_get_loop_mask_type (loop_vec_info);
2491 : : extern bool vect_is_simple_use (tree, vec_info *, enum vect_def_type *,
2492 : : stmt_vec_info * = NULL, gimple ** = NULL);
2493 : : extern bool vect_is_simple_use (vec_info *, slp_tree,
2494 : : unsigned, tree *, slp_tree *,
2495 : : enum vect_def_type *,
2496 : : tree *, stmt_vec_info * = NULL);
2497 : : extern bool vect_maybe_update_slp_op_vectype (slp_tree, tree);
2498 : : extern tree perm_mask_for_reverse (tree);
2499 : : extern bool supportable_widening_operation (code_helper, tree, tree, bool,
2500 : : code_helper*, code_helper*,
2501 : : int*, vec<tree> *);
2502 : : extern bool supportable_narrowing_operation (code_helper, tree, tree,
2503 : : code_helper *, int *,
2504 : : vec<tree> *);
2505 : : extern bool supportable_indirect_convert_operation (code_helper,
2506 : : tree, tree,
2507 : : vec<std::pair<tree, tree_code> > &,
2508 : : tree = NULL_TREE,
2509 : : slp_tree = NULL);
2510 : : extern int compare_step_with_zero (vec_info *, stmt_vec_info);
2511 : :
2512 : : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2513 : : enum vect_cost_for_stmt, stmt_vec_info,
2514 : : tree, int, enum vect_cost_model_location);
2515 : : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2516 : : enum vect_cost_for_stmt, slp_tree,
2517 : : tree, int, enum vect_cost_model_location);
2518 : : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2519 : : enum vect_cost_for_stmt,
2520 : : enum vect_cost_model_location);
2521 : : extern unsigned record_stmt_cost (stmt_vector_for_cost *, int,
2522 : : enum vect_cost_for_stmt, stmt_vec_info,
2523 : : slp_tree, tree, int,
2524 : : enum vect_cost_model_location);
2525 : :
2526 : : /* Overload of record_stmt_cost with VECTYPE derived from STMT_INFO. */
2527 : :
2528 : : inline unsigned
2529 : 2626927 : record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
2530 : : enum vect_cost_for_stmt kind, stmt_vec_info stmt_info,
2531 : : int misalign, enum vect_cost_model_location where)
2532 : : {
2533 : 2626152 : return record_stmt_cost (body_cost_vec, count, kind, stmt_info,
2534 : 1444701 : STMT_VINFO_VECTYPE (stmt_info), misalign, where);
2535 : : }
2536 : :
2537 : : /* Overload of record_stmt_cost with VECTYPE derived from SLP node. */
2538 : :
2539 : : inline unsigned
2540 : 1352584 : record_stmt_cost (stmt_vector_for_cost *body_cost_vec, int count,
2541 : : enum vect_cost_for_stmt kind, slp_tree node,
2542 : : int misalign, enum vect_cost_model_location where)
2543 : : {
2544 : 1198597 : return record_stmt_cost (body_cost_vec, count, kind, node,
2545 : 92719 : SLP_TREE_VECTYPE (node), misalign, where);
2546 : : }
2547 : :
2548 : : extern void vect_finish_replace_stmt (vec_info *, stmt_vec_info, gimple *);
2549 : : extern void vect_finish_stmt_generation (vec_info *, stmt_vec_info, gimple *,
2550 : : gimple_stmt_iterator *);
2551 : : extern opt_result vect_mark_stmts_to_be_vectorized (loop_vec_info, bool *);
2552 : : extern tree vect_get_store_rhs (stmt_vec_info);
2553 : : void vect_get_vec_defs (vec_info *, slp_tree,
2554 : : tree, vec<tree> *,
2555 : : tree = NULL, vec<tree> * = NULL,
2556 : : tree = NULL, vec<tree> * = NULL,
2557 : : tree = NULL, vec<tree> * = NULL);
2558 : : extern tree vect_init_vector (vec_info *, stmt_vec_info, tree, tree,
2559 : : gimple_stmt_iterator *);
2560 : : extern tree vect_get_slp_vect_def (slp_tree, unsigned);
2561 : : extern bool vect_transform_stmt (vec_info *, stmt_vec_info,
2562 : : gimple_stmt_iterator *,
2563 : : slp_tree, slp_instance);
2564 : : extern void vect_remove_stores (vec_info *, stmt_vec_info);
2565 : : extern bool vect_nop_conversion_p (stmt_vec_info);
2566 : : extern opt_result vect_analyze_stmt (vec_info *, slp_tree,
2567 : : slp_instance, stmt_vector_for_cost *);
2568 : : extern void vect_get_load_cost (vec_info *, stmt_vec_info, slp_tree, int,
2569 : : dr_alignment_support, int, bool,
2570 : : unsigned int *, unsigned int *,
2571 : : stmt_vector_for_cost *,
2572 : : stmt_vector_for_cost *, bool);
2573 : : extern void vect_get_store_cost (vec_info *, stmt_vec_info, slp_tree, int,
2574 : : dr_alignment_support, int,
2575 : : unsigned int *, stmt_vector_for_cost *);
2576 : : extern bool vect_supportable_shift (vec_info *, enum tree_code, tree);
2577 : : extern tree vect_gen_perm_mask_any (tree, const vec_perm_indices &);
2578 : : extern tree vect_gen_perm_mask_checked (tree, const vec_perm_indices &);
2579 : : extern void optimize_mask_stores (class loop*);
2580 : : extern tree vect_gen_while (gimple_seq *, tree, tree, tree,
2581 : : const char * = nullptr);
2582 : : extern tree vect_gen_while_not (gimple_seq *, tree, tree, tree);
2583 : : extern opt_result vect_get_vector_types_for_stmt (vec_info *,
2584 : : stmt_vec_info, tree *,
2585 : : tree *, unsigned int = 0);
2586 : : extern opt_tree vect_get_mask_type_for_stmt (stmt_vec_info, unsigned int = 0);
2587 : :
2588 : : /* In tree-if-conv.cc. */
2589 : : extern bool ref_within_array_bound (gimple *, tree);
2590 : :
2591 : : /* In tree-vect-data-refs.cc. */
2592 : : extern bool vect_can_force_dr_alignment_p (const_tree, poly_uint64);
2593 : : extern enum dr_alignment_support vect_supportable_dr_alignment
2594 : : (vec_info *, dr_vec_info *, tree, int,
2595 : : bool = false);
2596 : : extern tree vect_get_smallest_scalar_type (stmt_vec_info, tree);
2597 : : extern opt_result vect_analyze_data_ref_dependences (loop_vec_info, unsigned int *);
2598 : : extern bool vect_slp_analyze_instance_dependence (vec_info *, slp_instance);
2599 : : extern opt_result vect_enhance_data_refs_alignment (loop_vec_info);
2600 : : extern opt_result vect_analyze_data_refs_alignment (loop_vec_info);
2601 : : extern bool vect_slp_analyze_instance_alignment (vec_info *, slp_instance);
2602 : : extern opt_result vect_analyze_data_ref_accesses (vec_info *, vec<int> *);
2603 : : extern opt_result vect_prune_runtime_alias_test_list (loop_vec_info);
2604 : : extern bool vect_gather_scatter_fn_p (vec_info *, bool, bool, tree, tree,
2605 : : tree, int, int *, internal_fn *, tree *,
2606 : : tree *, vec<int> * = nullptr);
2607 : : extern bool vect_check_gather_scatter (stmt_vec_info, tree,
2608 : : loop_vec_info, gather_scatter_info *,
2609 : : vec<int> * = nullptr);
2610 : : extern void vect_describe_gather_scatter_call (stmt_vec_info,
2611 : : gather_scatter_info *);
2612 : : extern opt_result vect_find_stmt_data_reference (loop_p, gimple *,
2613 : : vec<data_reference_p> *,
2614 : : vec<int> *, int);
2615 : : extern opt_result vect_analyze_data_refs (vec_info *, bool *);
2616 : : extern void vect_record_base_alignments (vec_info *);
2617 : : extern tree vect_create_data_ref_ptr (vec_info *,
2618 : : stmt_vec_info, tree, class loop *, tree,
2619 : : tree *, gimple_stmt_iterator *,
2620 : : gimple **, bool,
2621 : : tree = NULL_TREE);
2622 : : extern tree bump_vector_ptr (vec_info *, tree, gimple *, gimple_stmt_iterator *,
2623 : : stmt_vec_info, tree);
2624 : : extern void vect_copy_ref_info (tree, tree);
2625 : : extern tree vect_create_destination_var (tree, tree);
2626 : : extern bool vect_grouped_store_supported (tree, unsigned HOST_WIDE_INT);
2627 : : extern internal_fn vect_store_lanes_supported (tree, unsigned HOST_WIDE_INT, bool);
2628 : : extern bool vect_grouped_load_supported (tree, bool, unsigned HOST_WIDE_INT);
2629 : : extern internal_fn vect_load_lanes_supported (tree, unsigned HOST_WIDE_INT,
2630 : : bool, vec<int> * = nullptr);
2631 : : extern tree vect_setup_realignment (vec_info *,
2632 : : stmt_vec_info, tree, gimple_stmt_iterator *,
2633 : : tree *, enum dr_alignment_support, tree,
2634 : : class loop **);
2635 : : extern tree vect_get_new_vect_var (tree, enum vect_var_kind, const char *);
2636 : : extern tree vect_get_new_ssa_name (tree, enum vect_var_kind,
2637 : : const char * = NULL);
2638 : : extern tree vect_create_addr_base_for_vector_ref (vec_info *,
2639 : : stmt_vec_info, gimple_seq *,
2640 : : tree);
2641 : :
2642 : : /* In tree-vect-loop.cc. */
2643 : : extern tree neutral_op_for_reduction (tree, code_helper, tree, bool = true);
2644 : : extern widest_int vect_iv_limit_for_partial_vectors (loop_vec_info loop_vinfo);
2645 : : extern bool vect_rgroup_iv_might_wrap_p (loop_vec_info, rgroup_controls *);
2646 : : /* Used in gimple-loop-interchange.cc and tree-parloops.cc. */
2647 : : extern bool check_reduction_path (dump_user_location_t, loop_p, gphi *, tree,
2648 : : enum tree_code);
2649 : : extern bool needs_fold_left_reduction_p (tree, code_helper);
2650 : : /* Driver for the loop analysis stage. */
2651 : : extern opt_loop_vec_info vect_analyze_loop (class loop *, gimple *,
2652 : : vec_info_shared *);
2653 : : extern tree vect_build_loop_niters (loop_vec_info, bool * = NULL);
2654 : : extern void vect_gen_vector_loop_niters (loop_vec_info, tree, tree *,
2655 : : tree *, bool);
2656 : : extern tree vect_halve_mask_nunits (tree, machine_mode);
2657 : : extern tree vect_double_mask_nunits (tree, machine_mode);
2658 : : extern void vect_record_loop_mask (loop_vec_info, vec_loop_masks *,
2659 : : unsigned int, tree, tree);
2660 : : extern tree vect_get_loop_mask (loop_vec_info, gimple_stmt_iterator *,
2661 : : vec_loop_masks *,
2662 : : unsigned int, tree, unsigned int);
2663 : : extern void vect_record_loop_len (loop_vec_info, vec_loop_lens *, unsigned int,
2664 : : tree, unsigned int);
2665 : : extern tree vect_get_loop_len (loop_vec_info, gimple_stmt_iterator *,
2666 : : vec_loop_lens *, unsigned int, tree,
2667 : : unsigned int, unsigned int);
2668 : : extern tree vect_gen_loop_len_mask (loop_vec_info, gimple_stmt_iterator *,
2669 : : gimple_stmt_iterator *, vec_loop_lens *,
2670 : : unsigned int, tree, tree, unsigned int,
2671 : : unsigned int);
2672 : : extern gimple_seq vect_gen_len (tree, tree, tree, tree);
2673 : : extern vect_reduc_info info_for_reduction (loop_vec_info, slp_tree);
2674 : : extern bool reduction_fn_for_scalar_code (code_helper, internal_fn *);
2675 : :
2676 : : /* Driver for the loop transformation stage. */
2677 : : extern class loop *vect_transform_loop (loop_vec_info, gimple *);
2678 : 937148 : struct vect_loop_form_info
2679 : : {
2680 : : tree number_of_iterations;
2681 : : tree number_of_iterationsm1;
2682 : : tree assumptions;
2683 : : auto_vec<gcond *> conds;
2684 : : gcond *inner_loop_cond;
2685 : : edge loop_exit;
2686 : : };
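/* Illustrative note (not normative): for a simple counted loop
   "for (i = 0; i < n; ++i)", loop-form analysis is expected to record
   number_of_iterations = n, number_of_iterationsm1 = n - 1, the single
   exit gcond in CONDS and the exit edge in LOOP_EXIT.  */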
2687 : : extern opt_result vect_analyze_loop_form (class loop *, gimple *,
2688 : : vect_loop_form_info *);
2689 : : extern loop_vec_info vect_create_loop_vinfo (class loop *, vec_info_shared *,
2690 : : const vect_loop_form_info *,
2691 : : loop_vec_info = nullptr);
2692 : : extern bool vectorizable_live_operation (vec_info *, stmt_vec_info,
2693 : : slp_tree, slp_instance, int,
2694 : : bool, stmt_vector_for_cost *);
2695 : : extern bool vectorizable_lane_reducing (loop_vec_info, stmt_vec_info,
2696 : : slp_tree, stmt_vector_for_cost *);
2697 : : extern bool vectorizable_reduction (loop_vec_info, stmt_vec_info,
2698 : : slp_tree, slp_instance,
2699 : : stmt_vector_for_cost *);
2700 : : extern bool vectorizable_induction (loop_vec_info, stmt_vec_info,
2701 : : slp_tree, stmt_vector_for_cost *);
2702 : : extern bool vect_transform_reduction (loop_vec_info, stmt_vec_info,
2703 : : gimple_stmt_iterator *,
2704 : : slp_tree);
2705 : : extern bool vect_transform_cycle_phi (loop_vec_info, stmt_vec_info,
2706 : : slp_tree, slp_instance);
2707 : : extern bool vectorizable_lc_phi (loop_vec_info, stmt_vec_info, slp_tree);
2708 : : extern bool vect_transform_lc_phi (loop_vec_info, stmt_vec_info, slp_tree);
2709 : : extern bool vectorizable_phi (bb_vec_info, stmt_vec_info, slp_tree,
2710 : : stmt_vector_for_cost *);
2711 : : extern bool vectorizable_recurr (loop_vec_info, stmt_vec_info,
2712 : : slp_tree, stmt_vector_for_cost *);
2713 : : extern bool vectorizable_early_exit (loop_vec_info, stmt_vec_info,
2714 : : gimple_stmt_iterator *,
2715 : : slp_tree, stmt_vector_for_cost *);
2716 : : extern bool vect_emulated_vector_p (tree);
2717 : : extern bool vect_can_vectorize_without_simd_p (tree_code);
2718 : : extern bool vect_can_vectorize_without_simd_p (code_helper);
2719 : : extern int vect_get_known_peeling_cost (loop_vec_info, int, int *,
2720 : : stmt_vector_for_cost *,
2721 : : stmt_vector_for_cost *,
2722 : : stmt_vector_for_cost *);
2723 : : extern tree cse_and_gimplify_to_preheader (loop_vec_info, tree);
2724 : :
2725 : : /* Nonlinear induction. */
2726 : : extern tree vect_peel_nonlinear_iv_init (gimple_seq*, tree, tree,
2727 : : tree, enum vect_induction_op_type,
2728 : : bool);
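/* For example (a hedged reading of this interface): an IV updated as
   "x <<= 1" each iteration is a vect_step_op_shl induction, and the
   function above emits code into the given gimple_seq that computes the
   IV's initial value after the peeled iterations.  */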
2729 : :
2730 : : /* In tree-vect-slp.cc. */
2731 : : extern void vect_slp_init (void);
2732 : : extern void vect_slp_fini (void);
2733 : : extern void vect_free_slp_instance (slp_instance);
2734 : : extern bool vect_transform_slp_perm_load (vec_info *, slp_tree, const vec<tree> &,
2735 : : gimple_stmt_iterator *, poly_uint64,
2736 : : bool, unsigned *,
2737 : : unsigned * = nullptr, bool = false);
2738 : : extern bool vectorizable_slp_permutation (vec_info *, gimple_stmt_iterator *,
2739 : : slp_tree, stmt_vector_for_cost *);
2740 : : extern bool vect_slp_analyze_operations (vec_info *);
2741 : : extern void vect_schedule_slp (vec_info *, const vec<slp_instance> &);
2742 : : extern opt_result vect_analyze_slp (vec_info *, unsigned, bool);
2743 : : extern bool vect_make_slp_decision (loop_vec_info);
2744 : : extern bool vect_detect_hybrid_slp (loop_vec_info);
2745 : : extern void vect_optimize_slp (vec_info *);
2746 : : extern void vect_gather_slp_loads (vec_info *);
2747 : : extern tree vect_get_slp_scalar_def (slp_tree, unsigned);
2748 : : extern void vect_get_slp_defs (slp_tree, vec<tree> *);
2749 : : extern void vect_get_slp_defs (vec_info *, slp_tree, vec<vec<tree> > *,
2750 : : unsigned n = -1U);
2751 : : extern bool vect_slp_if_converted_bb (basic_block bb, loop_p orig_loop);
2752 : : extern bool vect_slp_function (function *);
2753 : : extern stmt_vec_info vect_find_last_scalar_stmt_in_slp (slp_tree);
2754 : : extern stmt_vec_info vect_find_first_scalar_stmt_in_slp (slp_tree);
2755 : : extern bool is_simple_and_all_uses_invariant (stmt_vec_info, loop_vec_info);
2756 : : extern bool can_duplicate_and_interleave_p (vec_info *, unsigned int, tree,
2757 : : unsigned int * = NULL,
2758 : : tree * = NULL, tree * = NULL);
2759 : : extern void duplicate_and_interleave (vec_info *, gimple_seq *, tree,
2760 : : const vec<tree> &, unsigned int, vec<tree> &);
2761 : : extern int vect_get_place_in_interleaving_chain (stmt_vec_info, stmt_vec_info);
2762 : : extern slp_tree vect_create_new_slp_node (unsigned, tree_code);
2763 : : extern void vect_free_slp_tree (slp_tree);
2764 : : extern bool compatible_calls_p (gcall *, gcall *, bool);
2765 : : extern int vect_slp_child_index_for_operand (const gimple *, int op, bool);
2766 : :
2767 : : extern tree prepare_vec_mask (loop_vec_info, tree, tree, tree,
2768 : : gimple_stmt_iterator *);
2769 : : extern tree vect_get_mask_load_else (int, tree);
2770 : : extern bool vect_load_perm_consecutive_p (slp_tree, unsigned = UINT_MAX);
2771 : :
2772 : : /* In tree-vect-patterns.cc. */
2773 : : extern void
2774 : : vect_mark_pattern_stmts (vec_info *, stmt_vec_info, gimple *, tree);
2775 : : extern bool vect_get_range_info (tree, wide_int*, wide_int*);
2776 : :
2777 : : /* Pattern recognition functions.
2778 : : Additional pattern recognition functions can (and will) be added
2779 : : in the future. */
2780 : : void vect_pattern_recog (vec_info *);
2781 : :
2782 : : /* In tree-vectorizer.cc. */
2783 : : unsigned vectorize_loops (void);
2784 : : void vect_free_loop_info_assumptions (class loop *);
2785 : : gimple *vect_loop_vectorized_call (class loop *, gcond **cond = NULL);
2786 : : bool vect_stmt_dominates_stmt_p (gimple *, gimple *);
2787 : :
2788 : : /* SLP Pattern matcher types, tree-vect-slp-patterns.cc. */
2789 : :
2790 : : /* Forward declaration of the possible two-operand operations that can be
2791 : : matched by the complex number pattern matchers. */
2792 : : enum _complex_operation : unsigned;
2793 : :
2794 : : /* All possible load permute values that could result from the partial data-flow
2795 : : analysis. */
2796 : : typedef enum _complex_perm_kinds {
2797 : : PERM_UNKNOWN,
2798 : : PERM_EVENODD,
2799 : : PERM_ODDEVEN,
2800 : : PERM_ODDODD,
2801 : : PERM_EVENEVEN,
2802 : : /* Can be combined with any other PERM values. */
2803 : : PERM_TOP
2804 : : } complex_perm_kinds_t;
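/* Hedged illustration (see tree-vect-slp-patterns.cc for the authoritative
   classification): with complex values stored as interleaved {re, im}
   pairs, the analysis distinguishes whether a node's loads select even
   (real) lanes, odd (imaginary) lanes, or both of one kind, e.g.
   PERM_EVENEVEN for {re, re} and PERM_ODDODD for {im, im}.  */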
2805 : :
2806 : : /* Cache from nodes to the load permutation they represent. */
2807 : : typedef hash_map <slp_tree, complex_perm_kinds_t>
2808 : : slp_tree_to_load_perm_map_t;
2809 : :
2810 : : /* Cache from pairs of nodes to whether they are compatible. */
2811 : : typedef pair_hash <nofree_ptr_hash <_slp_tree>,
2812 : : nofree_ptr_hash <_slp_tree>> slp_node_hash;
2813 : : typedef hash_map <slp_node_hash, bool> slp_compat_nodes_map_t;
2814 : :
2815 : :
2816 : : /* Vector pattern matcher base class. All SLP pattern matchers must inherit
2817 : : from this type. */
2818 : :
2819 : : class vect_pattern
2820 : : {
2821 : : protected:
2822 : : /* The number of arguments that the IFN requires. */
2823 : : unsigned m_num_args;
2824 : :
2825 : : /* The internal function that will be used when a pattern is created. */
2826 : : internal_fn m_ifn;
2827 : :
2828 : : /* The current node being inspected. */
2829 : : slp_tree *m_node;
2830 : :
2831 : : /* The list of operands to be the children for the node produced when the
2832 : : internal function is created. */
2833 : : vec<slp_tree> m_ops;
2834 : :
2835 : : /* Constructor where NODE is the root of the tree to inspect; M_OPS optionally seeds the operand list. */
2836 : 1072 : vect_pattern (slp_tree *node, vec<slp_tree> *m_ops, internal_fn ifn)
2837 : 1072 : {
2838 : 1072 : this->m_ifn = ifn;
2839 : 1072 : this->m_node = node;
2840 : 1072 : this->m_ops.create (0);
2841 : 1072 : if (m_ops)
2842 : 20 : this->m_ops.safe_splice (*m_ops);
2843 : : }
2844 : :
2845 : : public:
2846 : :
2847 : : /* Create a new instance of the pattern matcher class of the given type. */
2848 : : static vect_pattern* recognize (slp_tree_to_load_perm_map_t *,
2849 : : slp_compat_nodes_map_t *, slp_tree *);
2850 : :
2851 : : /* Build the pattern from the data collected so far. */
2852 : : virtual void build (vec_info *) = 0;
2853 : :
2854 : : /* Default destructor. */
2855 : : virtual ~vect_pattern ()
2856 : : {
2857 : : this->m_ops.release ();
2858 : : }
2859 : : };
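/* A minimal sketch of a derived matcher, for illustration only; the name
   example_fma_pattern and its use of IFN_FMA are hypothetical, while the
   real matchers live in tree-vect-slp-patterns.cc.  */

class example_fma_pattern : public vect_pattern
{
  /* Forward NODE, the collected operands and the target IFN to the
     protected base constructor.  */
  example_fma_pattern (slp_tree *node, vec<slp_tree> *ops)
    : vect_pattern (node, ops, IFN_FMA) {}

public:
  /* Rewrite *m_node into a node that invokes m_ifn on m_ops.  */
  void build (vec_info *) final override
  {
    /* Left as a stub in this sketch.  */
  }

  /* Inspect *NODE; on a match, collect operands and return a new
     instance, otherwise return NULL.  */
  static vect_pattern *
  recognize (slp_tree_to_load_perm_map_t *, slp_compat_nodes_map_t *,
	     slp_tree *)
  {
    return NULL;
  }
};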
2860 : :
2861 : : /* Function pointer to create a new pattern matcher from a generic type. */
2862 : : typedef vect_pattern* (*vect_pattern_decl_t) (slp_tree_to_load_perm_map_t *,
2863 : : slp_compat_nodes_map_t *,
2864 : : slp_tree *);
2865 : :
2866 : : /* List of supported pattern matchers. */
2867 : : extern vect_pattern_decl_t slp_patterns[];
2868 : :
2869 : : /* Number of supported pattern matchers. */
2870 : : extern size_t num__slp_patterns;
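/* A hedged sketch of how the driver (in tree-vect-slp.cc) is expected to
   walk these tables over an SLP node:

     for (size_t i = 0; i < num__slp_patterns; i++)
       if (vect_pattern *patt = slp_patterns[i] (perm_cache, compat_cache,
						 &node))
	 {
	   patt->build (vinfo);
	   delete patt;
	 }
*/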
2871 : :
2872 : : /* ----------------------------------------------------------------------
2873 : : Target support routines
2874 : : -----------------------------------------------------------------------
2875 : : The following routines are provided to simplify costing decisions in
2876 : : target code. Please add more as needed. */
2877 : :
2878 : : /* Return true if an operaton of kind KIND for STMT_INFO represents
2879 : : the extraction of an element from a vector in preparation for
2880 : : storing the element to memory. */
2881 : : inline bool
2882 : : vect_is_store_elt_extraction (vect_cost_for_stmt kind, stmt_vec_info stmt_info)
2883 : : {
2884 : : return (kind == vec_to_scalar
2885 : : && STMT_VINFO_DATA_REF (stmt_info)
2886 : : && DR_IS_WRITE (STMT_VINFO_DATA_REF (stmt_info)));
2887 : : }
2888 : :
2889 : : /* Return true if STMT_INFO represents part of a reduction. */
2890 : : inline bool
2891 : 45099424 : vect_is_reduction (stmt_vec_info stmt_info)
2892 : : {
2893 : 45099424 : return STMT_VINFO_REDUC_IDX (stmt_info) != -1;
2894 : : }
2895 : :
2896 : : /* Return true if SLP_NODE represents part of a reduction. */
2897 : : inline bool
2898 : 234390 : vect_is_reduction (slp_tree slp_node)
2899 : : {
2900 : 234390 : return SLP_TREE_REDUC_IDX (slp_node) != -1;
2901 : : }
2902 : :
2903 : : /* If NODE describes a reduction in VINFO, return the vect_reduction_type
2904 : : of the reduction it describes, otherwise return -1. */
2905 : : inline int
2906 : : vect_reduc_type (vec_info *vinfo, slp_tree node)
2907 : : {
2908 : : if (loop_vec_info loop_vinfo = dyn_cast<loop_vec_info> (vinfo))
2909 : : {
2910 : : vect_reduc_info reduc_info = info_for_reduction (loop_vinfo, node);
2911 : : if (reduc_info)
2912 : : return int (VECT_REDUC_INFO_TYPE (reduc_info));
2913 : : }
2914 : : return -1;
2915 : : }
2916 : :
2917 : : /* If STMT_INFO's statement is a COND_EXPR with an embedded comparison, return
2918 : : the scalar type of the values being compared. Return null otherwise. */
2919 : : inline tree
2920 : : vect_embedded_comparison_type (stmt_vec_info stmt_info)
2921 : : {
2922 : : if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
2923 : : if (gimple_assign_rhs_code (assign) == COND_EXPR)
2924 : : {
2925 : : tree cond = gimple_assign_rhs1 (assign);
2926 : : if (COMPARISON_CLASS_P (cond))
2927 : : return TREE_TYPE (TREE_OPERAND (cond, 0));
2928 : : }
2929 : : return NULL_TREE;
2930 : : }
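/* E.g. for "x = a < b ? c : d" this returns TREE_TYPE (a), since the
   embedded comparison "a < b" is the first operand of the COND_EXPR.  */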
2931 : :
2932 : : /* If STMT_INFO is a comparison or contains an embedded comparison, return the
2933 : : scalar type of the values being compared. Return null otherwise. */
2934 : : inline tree
2935 : : vect_comparison_type (stmt_vec_info stmt_info)
2936 : : {
2937 : : if (auto *assign = dyn_cast<gassign *> (stmt_info->stmt))
2938 : : if (TREE_CODE_CLASS (gimple_assign_rhs_code (assign)) == tcc_comparison)
2939 : : return TREE_TYPE (gimple_assign_rhs1 (assign));
2940 : : return vect_embedded_comparison_type (stmt_info);
2941 : : }
2942 : :
2943 : : /* Return true if STMT_INFO extends the result of a load. */
2944 : : inline bool
2945 : : vect_is_extending_load (class vec_info *vinfo, stmt_vec_info stmt_info)
2946 : : {
2947 : : /* Although this is quite large for an inline function, this part
2948 : : at least should be inline. */
2949 : : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
2950 : : if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
2951 : : return false;
2952 : :
2953 : : tree rhs = gimple_assign_rhs1 (stmt_info->stmt);
2954 : : tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
2955 : : tree rhs_type = TREE_TYPE (rhs);
2956 : : if (!INTEGRAL_TYPE_P (lhs_type)
2957 : : || !INTEGRAL_TYPE_P (rhs_type)
2958 : : || TYPE_PRECISION (lhs_type) <= TYPE_PRECISION (rhs_type))
2959 : : return false;
2960 : :
2961 : : stmt_vec_info def_stmt_info = vinfo->lookup_def (rhs);
2962 : : return (def_stmt_info
2963 : : && STMT_VINFO_DATA_REF (def_stmt_info)
2964 : : && DR_IS_READ (STMT_VINFO_DATA_REF (def_stmt_info)));
2965 : : }
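/* Illustrative example (hypothetical SSA names): for "short *p; int x = p[i];"
   gimplified as "_1 = *p_2; x_3 = (int) _1;", the conversion statement
   satisfies the checks above and is therefore an extending load.  */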
2966 : :
2967 : : /* Return true if STMT_INFO is an integer truncation. */
2968 : : inline bool
2969 : : vect_is_integer_truncation (stmt_vec_info stmt_info)
2970 : : {
2971 : : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
2972 : : if (!assign || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (assign)))
2973 : : return false;
2974 : :
2975 : : tree lhs_type = TREE_TYPE (gimple_assign_lhs (assign));
2976 : : tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
2977 : : return (INTEGRAL_TYPE_P (lhs_type)
2978 : : && INTEGRAL_TYPE_P (rhs_type)
2979 : : && TYPE_PRECISION (lhs_type) < TYPE_PRECISION (rhs_type));
2980 : : }
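/* Hedged usage sketch (hypothetical backend code, not from any real
   target): a target's vectorizer cost hook can consult these predicates
   when adjusting per-statement costs, e.g.

     if (vect_is_store_elt_extraction (kind, stmt_info))
       stmt_cost += extra_extract_cost;
     else if (stmt_info && vect_is_integer_truncation (stmt_info))
       stmt_cost = 0;

   where extra_extract_cost is a made-up tuning knob.  */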
2981 : :
2982 : : /* Build a GIMPLE_ASSIGN or a GIMPLE_CALL, depending on whether CH contains
2983 : : a tree_code or an internal_fn, respectively. */
2984 : : gimple * vect_gimple_build (tree, code_helper, tree, tree = NULL_TREE);
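/* Hedged usage sketch: with a code_helper CH holding either a tree_code
   such as PLUS_EXPR or an internal_fn such as IFN_SQRT, a call along the
   lines of

     gimple *g = vect_gimple_build (lhs, ch, op0, op1);

   builds "lhs = op0 + op1" or "lhs = .SQRT (op0)" respectively; see the
   definition for the exact contract of the first argument.  */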
2985 : : #endif /* GCC_TREE_VECTORIZER_H */
|