Line data Source code
1 : /* Data References Analysis and Manipulation Utilities for Vectorization.
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 : and Ira Rosen <irar@il.ibm.com>
5 :
6 : This file is part of GCC.
7 :
8 : GCC is free software; you can redistribute it and/or modify it under
9 : the terms of the GNU General Public License as published by the Free
10 : Software Foundation; either version 3, or (at your option) any later
11 : version.
12 :
13 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 : for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with GCC; see the file COPYING3. If not see
20 : <http://www.gnu.org/licenses/>. */
21 :
22 : #define INCLUDE_ALGORITHM
23 : #include "config.h"
24 : #include "system.h"
25 : #include "coretypes.h"
26 : #include "backend.h"
27 : #include "target.h"
28 : #include "rtl.h"
29 : #include "tree.h"
30 : #include "gimple.h"
31 : #include "predict.h"
32 : #include "memmodel.h"
33 : #include "tm_p.h"
34 : #include "ssa.h"
35 : #include "optabs-tree.h"
36 : #include "cgraph.h"
37 : #include "dumpfile.h"
38 : #include "pretty-print.h"
39 : #include "alias.h"
40 : #include "fold-const.h"
41 : #include "stor-layout.h"
42 : #include "tree-eh.h"
43 : #include "gimplify.h"
44 : #include "gimple-iterator.h"
45 : #include "gimplify-me.h"
46 : #include "tree-ssa-loop-ivopts.h"
47 : #include "tree-ssa-loop-manip.h"
48 : #include "tree-ssa-loop.h"
49 : #include "cfgloop.h"
50 : #include "tree-scalar-evolution.h"
51 : #include "tree-vectorizer.h"
52 : #include "expr.h"
53 : #include "builtins.h"
54 : #include "tree-cfg.h"
55 : #include "tree-hash-traits.h"
56 : #include "vec-perm-indices.h"
57 : #include "internal-fn.h"
58 : #include "gimple-fold.h"
59 : #include "optabs-query.h"
60 :
61 : /* Return true if load- or store-lanes optab OPTAB is implemented for
62 : COUNT vectors of type VECTYPE. NAME is the name of OPTAB.
63 :
64 : If it is implemented and ELSVALS is nonzero store the possible else
65 : values in the vector it points to. */
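
/* For example (an illustrative sketch, not taken from this file): an
   interleaved access group of size two such as

     for (i = 0; i < n; i++)
       sum += a[2*i] + a[2*i+1];

   can use a load-lanes style instruction when this predicate holds for
   COUNT == 2 and the vector type chosen for A's elements.  */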
66 :
67 : static bool
68 367456 : vect_lanes_optab_supported_p (const char *name, convert_optab optab,
69 : tree vectype, unsigned HOST_WIDE_INT count,
70 : vec<int> *elsvals = nullptr)
71 : {
72 367456 : machine_mode mode, array_mode;
73 367456 : bool limit_p;
74 :
75 367456 : mode = TYPE_MODE (vectype);
76 367456 : if (!targetm.array_mode (mode, count).exists (&array_mode))
77 : {
78 734912 : poly_uint64 bits = count * GET_MODE_BITSIZE (mode);
79 367456 : limit_p = !targetm.array_mode_supported_p (mode, count);
80 367456 : if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
81 : {
82 316292 : if (dump_enabled_p ())
83 12852 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
84 : "no array mode for %s[%wu]\n",
85 12852 : GET_MODE_NAME (mode), count);
86 316292 : return false;
87 : }
88 : }
89 :
90 51164 : enum insn_code icode;
91 51164 : if ((icode = convert_optab_handler (optab, array_mode, mode))
92 : == CODE_FOR_nothing)
93 : {
94 51164 : if (dump_enabled_p ())
95 4104 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
96 : "cannot use %s<%s><%s>\n", name,
97 4104 : GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
98 51164 : return false;
99 : }
100 :
101 0 : if (dump_enabled_p ())
102 0 : dump_printf_loc (MSG_NOTE, vect_location,
103 0 : "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
104 0 : GET_MODE_NAME (mode));
105 :
106 0 : if (elsvals)
107 0 : get_supported_else_vals (icode,
108 0 : internal_fn_else_index (IFN_MASK_LEN_LOAD_LANES),
109 : *elsvals);
110 :
111 : return true;
112 : }
113 :
114 : /* Helper function to identify a simd clone call. If this is a call to a
115 : function with simd clones then return the corresponding cgraph_node,
116 : otherwise return NULL. */
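
/* For example (an illustrative sketch): for a call site such as

     #pragma omp declare simd
     extern int f (int);
     ...
     b[i] = f (a[i]);

   this returns the cgraph_node of F, whose simd_clones list is non-NULL
   because of the declare simd directive.  */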
117 :
118 : static cgraph_node*
119 617078 : simd_clone_call_p (gimple *stmt)
120 : {
121 694819 : gcall *call = dyn_cast <gcall *> (stmt);
122 79381 : if (!call)
123 : return NULL;
124 :
125 79381 : tree fndecl = NULL_TREE;
126 79381 : if (gimple_call_internal_p (call, IFN_MASK_CALL))
127 226 : fndecl = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
128 : else
129 79155 : fndecl = gimple_call_fndecl (stmt);
130 :
131 79381 : if (fndecl == NULL_TREE)
132 : return NULL;
133 :
134 36772 : cgraph_node *node = cgraph_node::get (fndecl);
135 36772 : if (node && node->simd_clones != NULL)
136 : return node;
137 :
138 : return NULL;
139 : }
140 :
141 :
142 :
143 : /* Return the smallest scalar part of STMT_INFO.
144 : This is used to determine the vectype of the stmt. We generally set the
145 : vectype according to the type of the result (lhs). For stmts whose
146 : result-type is different than the type of the arguments (e.g., demotion,
147 : promotion), vectype will be reset appropriately (later). Note that we have
148 : to visit the smallest datatype in this function, because that determines the
149 : VF. If the smallest datatype in the loop is present only as the rhs of a
150 : promotion operation - we'd miss it.
151 : Such a case, where a variable of this datatype does not appear in the lhs
152 : anywhere in the loop, can only occur if it's an invariant: e.g.:
153 : 'int_x = (int) short_inv', which we'd expect to have been optimized away by
154 : invariant motion. However, we cannot rely on invariant motion to always
155 : take invariants out of the loop, and so in the case of promotion we also
156 : have to check the rhs.
157 : SCALAR_TYPE is the initial candidate; the result is either it or a
158 : smaller scalar type that participates in STMT_INFO. */
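
/* For example (an illustrative sketch): in

     short *a; int *b;
     for (i = 0; i < n; i++)
       b[i] = (int) a[i];

   the smallest scalar type is 'short', and it is that type, not the
   'int' of the lhs, that determines the VF.  */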
159 :
160 : tree
161 4995888 : vect_get_smallest_scalar_type (stmt_vec_info stmt_info, tree scalar_type)
162 : {
163 4995888 : HOST_WIDE_INT lhs, rhs;
164 :
165 : /* During the analysis phase, this function is called on arbitrary
166 : statements that might not have scalar results. */
167 4995888 : if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type)))
168 : return scalar_type;
169 :
170 4995888 : lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
171 :
172 4995888 : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
173 4995888 : if (assign)
174 : {
175 4378810 : scalar_type = TREE_TYPE (gimple_assign_lhs (assign));
176 4378810 : if (gimple_assign_cast_p (assign)
177 3980949 : || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
178 3980315 : || gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
179 3980315 : || gimple_assign_rhs_code (assign) == SAD_EXPR
180 3980206 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
181 3976475 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_PLUS_EXPR
182 3976475 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_MINUS_EXPR
183 3976475 : || gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
184 8355285 : || gimple_assign_rhs_code (assign) == FLOAT_EXPR)
185 : {
186 416798 : tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
187 :
188 416798 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
189 416798 : if (rhs < lhs)
190 4995888 : scalar_type = rhs_type;
191 : }
192 : }
193 617078 : else if (cgraph_node *node = simd_clone_call_p (stmt_info->stmt))
194 : {
195 1640 : auto clone = node->simd_clones->simdclone;
196 4988 : for (unsigned int i = 0; i < clone->nargs; ++i)
197 : {
198 3348 : if (clone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
199 : {
200 1939 : tree arg_scalar_type = TREE_TYPE (clone->args[i].vector_type);
201 1939 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (arg_scalar_type));
202 1939 : if (rhs < lhs)
203 : {
204 3348 : scalar_type = arg_scalar_type;
205 3348 : lhs = rhs;
206 : }
207 : }
208 : }
209 : }
210 615438 : else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
211 : {
212 77741 : unsigned int i = 0;
213 77741 : if (gimple_call_internal_p (call))
214 : {
215 40243 : internal_fn ifn = gimple_call_internal_fn (call);
216 40243 : if (internal_load_fn_p (ifn))
217 : /* For loads the LHS type does the trick. */
218 : i = ~0U;
219 35414 : else if (internal_store_fn_p (ifn))
220 : {
221 : /* For stores use the type of the stored value. */
222 2717 : i = internal_fn_stored_value_index (ifn);
223 2717 : scalar_type = TREE_TYPE (gimple_call_arg (call, i));
224 2717 : i = ~0U;
225 : }
226 32697 : else if (internal_fn_mask_index (ifn) == 0)
227 11075 : i = 1;
228 : }
229 77741 : if (i < gimple_call_num_args (call))
230 : {
231 65615 : tree rhs_type = TREE_TYPE (gimple_call_arg (call, i));
232 65615 : if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (rhs_type)))
233 : {
234 65615 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
235 65615 : if (rhs < lhs)
236 4995888 : scalar_type = rhs_type;
237 : }
238 : }
239 : }
240 :
241 : return scalar_type;
242 : }
243 :
244 :
245 : /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
246 : tested at run-time. Return TRUE if DDR was successfully inserted.
247 : Return FALSE if versioning is not supported. */
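
/* As an illustrative sketch of what the recorded check amounts to (the
   actual condition is generated elsewhere), versioning for two pointers
   A and B accessed over N elements yields code along the lines of

     if (a + n <= b || b + n <= a)
       ... vectorized loop ...
     else
       ... scalar fallback loop ...  */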
248 :
249 : static opt_result
250 164398 : vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
251 : {
252 164398 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
253 :
254 164398 : if ((unsigned) param_vect_max_version_for_alias_checks == 0)
255 54 : return opt_result::failure_at (vect_location,
256 : "will not create alias checks, as"
257 : " --param vect-max-version-for-alias-checks"
258 : " == 0\n");
259 :
260 164344 : opt_result res
261 164344 : = runtime_alias_check_p (ddr, loop,
262 164344 : optimize_loop_nest_for_speed_p (loop));
263 164344 : if (!res)
264 143 : return res;
265 :
266 164201 : LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
267 164201 : return opt_result::success ();
268 : }
269 :
270 : /* Record that loop LOOP_VINFO needs to check that VALUE is nonzero. */
271 :
272 : static void
273 1438 : vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
274 : {
275 1438 : const vec<tree> &checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
276 2205 : for (unsigned int i = 0; i < checks.length(); ++i)
277 767 : if (checks[i] == value)
278 : return;
279 :
280 1438 : if (dump_enabled_p ())
281 432 : dump_printf_loc (MSG_NOTE, vect_location,
282 : "need run-time check that %T is nonzero\n",
283 : value);
284 1438 : LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
285 : }
286 :
287 : /* Return true if we know that the order of vectorized DR_INFO_A and
288 : vectorized DR_INFO_B will be the same as the order of DR_INFO_A and
289 : DR_INFO_B. At least one of the accesses is a write. */
290 :
291 : static bool
292 140118 : vect_preserves_scalar_order_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b)
293 : {
294 140118 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
295 140118 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
296 :
297 : /* Single statements are always kept in their original order. */
298 140118 : if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
299 231743 : && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
300 : return true;
301 :
302 : /* If there is a loop invariant read involved we might vectorize it in
302 : the prologue, breaking scalar order with respect to the in-loop store. */
304 24949 : if ((DR_IS_READ (dr_info_a->dr) && integer_zerop (DR_STEP (dr_info_a->dr)))
305 78102 : || (DR_IS_READ (dr_info_b->dr) && integer_zerop (DR_STEP (dr_info_b->dr))))
306 1718 : return false;
307 :
308 : /* STMT_A and STMT_B belong to overlapping groups. All loads are
309 : emitted at the position of the first scalar load.
310 : Stores in a group are emitted at the position of the last scalar store.
311 : Compute that position and check whether the resulting order matches
312 : the current one. */
313 52662 : stmt_vec_info il_a = DR_GROUP_FIRST_ELEMENT (stmtinfo_a);
314 52662 : if (il_a)
315 : {
316 48002 : if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_a)))
317 213280 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
318 188047 : s = DR_GROUP_NEXT_ELEMENT (s))
319 188047 : il_a = get_later_stmt (il_a, s);
320 : else /* DR_IS_READ */
321 92197 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
322 69428 : s = DR_GROUP_NEXT_ELEMENT (s))
323 69428 : if (get_later_stmt (il_a, s) == il_a)
324 1634 : il_a = s;
325 : }
326 : else
327 : il_a = stmtinfo_a;
328 52662 : stmt_vec_info il_b = DR_GROUP_FIRST_ELEMENT (stmtinfo_b);
329 52662 : if (il_b)
330 : {
331 46698 : if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_b)))
332 272604 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
333 235029 : s = DR_GROUP_NEXT_ELEMENT (s))
334 235029 : il_b = get_later_stmt (il_b, s);
335 : else /* DR_IS_READ */
336 42431 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
337 33308 : s = DR_GROUP_NEXT_ELEMENT (s))
338 33308 : if (get_later_stmt (il_b, s) == il_b)
339 157 : il_b = s;
340 : }
341 : else
342 : il_b = stmtinfo_b;
343 52662 : bool a_after_b = (get_later_stmt (stmtinfo_a, stmtinfo_b) == stmtinfo_a);
344 52662 : return (get_later_stmt (il_a, il_b) == il_a) == a_after_b;
345 : }
346 :
347 : /* A subroutine of vect_analyze_data_ref_dependence. Handle ddr DDR, for
348 : which DDR_COULD_BE_INDEPENDENT_P holds and which has a known set of
349 : dependence distances. These distances are conservatively correct but don't
350 : reflect a guaranteed dependence.
351 :
352 : Return true if this function does all the work necessary to avoid
353 : an alias or false if the caller should use the dependence distances
354 : to limit the vectorization factor in the usual way. LOOP_DEPTH is
355 : the depth of the loop described by LOOP_VINFO and the other arguments
356 : are as for vect_analyze_data_ref_dependence. */
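
/* For example (an illustrative sketch): in

     #pragma omp simd safelen(8)
     for (i = 0; i < n; i++)
       a[i + 4] = a[i] + 1;

   the dependence distance of 4 is covered by the asserted safelen, so
   the accesses may be treated as independent with *MAX_VF capped at 8.  */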
357 :
358 : static bool
359 8296 : vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
360 : loop_vec_info loop_vinfo,
361 : int loop_depth, unsigned int *max_vf)
362 : {
363 8296 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
364 33202 : for (lambda_vector &dist_v : DDR_DIST_VECTS (ddr))
365 : {
366 16327 : int dist = dist_v[loop_depth];
367 16327 : if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr)))
368 : {
369 : /* If the user asserted safelen >= DIST consecutive iterations
370 : can be executed concurrently, assume independence.
371 :
372 : ??? An alternative would be to add the alias check even
373 : in this case, and vectorize the fallback loop with the
374 : maximum VF set to safelen. However, if the user has
375 : explicitly given a length, it's less likely that that
376 : would be a win. */
377 8045 : if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
378 : {
379 32 : if ((unsigned int) loop->safelen < *max_vf)
380 2 : *max_vf = loop->safelen;
381 32 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
382 32 : continue;
383 : }
384 :
385 : /* For dependence distances of 2 or more, we have the option
386 : of limiting VF or checking for an alias at runtime.
387 : Prefer to check at runtime if we can, to avoid limiting
388 : the VF unnecessarily when the bases are in fact independent.
389 :
390 : Note that the alias checks will be removed if the VF ends up
391 : being small enough. */
392 8013 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
393 8013 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
394 8013 : return (!STMT_VINFO_GATHER_SCATTER_P (dr_info_a->stmt)
395 8013 : && !STMT_VINFO_GATHER_SCATTER_P (dr_info_b->stmt)
396 16034 : && vect_mark_for_runtime_alias_test (ddr, loop_vinfo));
397 : }
398 : }
399 : return true;
400 : }
401 :
402 :
403 : /* Function vect_analyze_data_ref_dependence.
404 :
405 : FIXME: I needed to change the sense of the returned flag.
406 :
407 : Return FALSE if there (might) exist a dependence between a memory-reference
408 : DRA and a memory-reference DRB. When versioning for alias may check a
409 : dependence at run-time, return TRUE. Adjust *MAX_VF according to
410 : the data dependence. */
411 :
412 : static opt_result
413 1474113 : vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
414 : loop_vec_info loop_vinfo,
415 : unsigned int *max_vf)
416 : {
417 1474113 : unsigned int i;
418 1474113 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
419 1474113 : struct data_reference *dra = DDR_A (ddr);
420 1474113 : struct data_reference *drb = DDR_B (ddr);
421 1474113 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
422 1474113 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (drb);
423 1474113 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
424 1474113 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
425 1474113 : lambda_vector dist_v;
426 1474113 : unsigned int loop_depth;
427 :
428 : /* If user asserted safelen consecutive iterations can be
429 : executed concurrently, assume independence. */
430 1649172 : auto apply_safelen = [&]()
431 : {
432 175059 : if (loop->safelen >= 2)
433 : {
434 7462 : if ((unsigned int) loop->safelen < *max_vf)
435 1896 : *max_vf = loop->safelen;
436 7462 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
437 7462 : return true;
438 : }
439 : return false;
440 1474113 : };
441 :
442 : /* In loop analysis all data references should be vectorizable. */
443 1474113 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
444 1474113 : || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
445 0 : gcc_unreachable ();
446 :
447 : /* Independent data accesses. */
448 1474113 : if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
449 1213742 : return opt_result::success ();
450 :
451 260371 : if (dra == drb
452 260371 : || (DR_IS_READ (dra) && DR_IS_READ (drb)))
453 0 : return opt_result::success ();
454 :
455 : /* We do not have to consider dependences between accesses that belong
456 : to the same group, unless the stride could be smaller than the
457 : group size. */
458 260371 : if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
459 113971 : && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
460 113971 : == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
461 278944 : && !STMT_VINFO_STRIDED_P (stmtinfo_a))
462 2279 : return opt_result::success ();
463 :
464 : /* Even if we have an anti-dependence then, as the vectorized loop covers at
465 : least two scalar iterations, there is always also a true dependence.
466 : As the vectorizer does not re-order loads and stores we can ignore
467 : the anti-dependence if TBAA can disambiguate both DRs similar to the
468 : case with known negative distance anti-dependences (positive
469 : distance anti-dependences would violate TBAA constraints). */
470 128414 : if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
471 129678 : || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
472 404067 : && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
473 : get_alias_set (DR_REF (drb))))
474 5893 : return opt_result::success ();
475 :
476 252199 : if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
477 242264 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
478 : {
479 12610 : if (apply_safelen ())
480 1398 : return opt_result::success ();
481 :
482 11212 : return opt_result::failure_at
483 11212 : (stmtinfo_a->stmt,
484 : "possible alias involving gather/scatter between %T and %T\n",
485 : DR_REF (dra), DR_REF (drb));
486 : }
487 :
488 : /* Unknown data dependence. */
489 239589 : if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
490 : {
491 161888 : if (apply_safelen ())
492 6064 : return opt_result::success ();
493 :
494 155824 : if (dump_enabled_p ())
495 7665 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
496 : "versioning for alias required: "
497 : "can't determine dependence between %T and %T\n",
498 : DR_REF (dra), DR_REF (drb));
499 :
500 : /* Add to list of ddrs that need to be tested at run-time. */
501 155824 : return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
502 : }
503 :
504 : /* Known data dependence. */
505 77701 : if (DDR_NUM_DIST_VECTS (ddr) == 0)
506 : {
507 561 : if (apply_safelen ())
508 0 : return opt_result::success ();
509 :
510 561 : if (dump_enabled_p ())
511 156 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
512 : "versioning for alias required: "
513 : "bad dist vector for %T and %T\n",
514 : DR_REF (dra), DR_REF (drb));
515 : /* Add to list of ddrs that need to be tested at run-time. */
516 561 : return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
517 : }
518 :
519 77140 : loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
520 :
521 77140 : if (DDR_COULD_BE_INDEPENDENT_P (ddr)
522 77140 : && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
523 : loop_depth, max_vf))
524 8288 : return opt_result::success ();
525 :
526 131323 : FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
527 : {
528 68874 : int dist = dist_v[loop_depth];
529 :
530 68874 : if (dump_enabled_p ())
531 4354 : dump_printf_loc (MSG_NOTE, vect_location,
532 : "dependence distance = %d.\n", dist);
533 :
534 68874 : if (dist == 0)
535 : {
536 57459 : if (dump_enabled_p ())
537 3560 : dump_printf_loc (MSG_NOTE, vect_location,
538 : "dependence distance == 0 between %T and %T\n",
539 : DR_REF (dra), DR_REF (drb));
540 :
541 : /* When we perform grouped accesses and perform implicit CSE
542 : by detecting equal accesses and doing disambiguation with
543 : runtime alias tests like for
544 : .. = a[i];
545 : .. = a[i+1];
546 : a[i] = ..;
547 : a[i+1] = ..;
548 : *p = ..;
549 : .. = a[i];
550 : .. = a[i+1];
551 : where we will end up loading { a[i], a[i+1] } once, make
552 : sure that inserting group loads before the first load and
553 : stores after the last store will do the right thing.
554 : Similar for groups like
555 : a[i] = ...;
556 : ... = a[i];
557 : a[i+1] = ...;
558 : where loads from the group interleave with the store. */
559 57459 : if (!vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
560 0 : return opt_result::failure_at (stmtinfo_a->stmt,
561 : "READ_WRITE dependence"
562 : " in interleaving.\n");
563 :
564 57459 : if (loop->safelen < 2)
565 : {
566 53615 : tree indicator = dr_zero_step_indicator (dra);
567 53615 : if (!indicator || integer_zerop (indicator))
568 0 : return opt_result::failure_at (stmtinfo_a->stmt,
569 : "access also has a zero step\n");
570 53615 : else if (TREE_CODE (indicator) != INTEGER_CST)
571 1438 : vect_check_nonzero_value (loop_vinfo, indicator);
572 : }
573 57459 : continue;
574 57459 : }
575 :
576 11415 : if (dist > 0 && DDR_REVERSED_P (ddr))
577 : {
578 : /* If DDR_REVERSED_P the order of the data-refs in DDR was
579 : reversed (to make distance vector positive), and the actual
580 : distance is negative. */
581 3918 : if (dump_enabled_p ())
582 105 : dump_printf_loc (MSG_NOTE, vect_location,
583 : "dependence distance negative.\n");
584 : /* When doing outer loop vectorization, we need to check if there is
585 : a backward dependence at the inner loop level if the dependence
586 : at the outer loop is reversed. See PR81740. */
587 3918 : if (nested_in_vect_loop_p (loop, stmtinfo_a)
588 3906 : || nested_in_vect_loop_p (loop, stmtinfo_b))
589 : {
590 12 : unsigned inner_depth = index_in_loop_nest (loop->inner->num,
591 12 : DDR_LOOP_NEST (ddr));
592 12 : if (dist_v[inner_depth] < 0)
593 9 : return opt_result::failure_at (stmtinfo_a->stmt,
594 : "not vectorized, dependence "
595 : "between data-refs %T and %T\n",
596 : DR_REF (dra), DR_REF (drb));
597 : }
598 : /* Record a negative dependence distance to later limit the
599 : amount of stmt copying / unrolling we can perform.
600 : Only need to handle read-after-write dependence. */
601 3909 : if (DR_IS_READ (drb)
602 156 : && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
603 36 : || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
604 156 : STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
605 3909 : continue;
606 3909 : }
607 :
608 7497 : unsigned int abs_dist = abs (dist);
609 7497 : if (abs_dist >= 2 && abs_dist < *max_vf)
610 : {
611 : /* The dependence distance requires reduction of the maximal
612 : vectorization factor. */
613 558 : *max_vf = abs_dist;
614 558 : if (dump_enabled_p ())
615 30 : dump_printf_loc (MSG_NOTE, vect_location,
616 : "adjusting maximal vectorization factor to %i\n",
617 : *max_vf);
618 : }
619 :
620 7497 : if (abs_dist >= *max_vf)
621 : {
622 : /* Dependence distance does not create dependence, as far as
623 : vectorization is concerned, in this case. */
624 1103 : if (dump_enabled_p ())
625 437 : dump_printf_loc (MSG_NOTE, vect_location,
626 : "dependence distance >= VF.\n");
627 1103 : continue;
628 : }
629 :
630 6394 : return opt_result::failure_at (stmtinfo_a->stmt,
631 : "not vectorized, possible dependence "
632 : "between data-refs %T and %T\n",
633 : DR_REF (dra), DR_REF (drb));
634 : }
635 :
636 62449 : return opt_result::success ();
637 : }
638 :
639 : /* Function vect_analyze_early_break_dependences.
640 :
641 : Examine all the data references in the loop and make sure that if we have
642 : multiple exits that we are able to safely move stores such that they become
643 : safe for vectorization. The function also calculates the place where to move
644 : the instructions to and computes what the new vUSE chain should be.
645 :
646 : This works in tandem with the CFG that will be produced by
647 : slpeel_tree_duplicate_loop_to_edge_cfg later on.
648 :
649 : This function tries to validate whether an early break vectorization
650 : is possible for the current instruction sequence. Returns true if
651 : possible, otherwise false.
652 :
653 : Requirements:
654 : - Any memory access must be to a fixed size buffer.
655 : - There must not be any loads and stores to the same object.
656 : - Multiple loads are allowed as long as they don't alias.
657 :
658 : NOTE:
659 : This implementation is very conservative. Any overlapping loads/stores
660 : that take place before the early break statement are rejected, aside from
661 : WAR dependencies.
662 :
663 : i.e.:
664 :
665 : a[i] = 8
666 : c = a[i]
667 : if (b[i])
668 : ...
669 :
670 : is not allowed, but
671 :
672 : c = a[i]
673 : a[i] = 8
674 : if (b[i])
675 : ...
676 :
677 : is allowed, which is the common case. */
678 :
679 : static opt_result
680 140393 : vect_analyze_early_break_dependences (loop_vec_info loop_vinfo)
681 : {
682 140393 : DUMP_VECT_SCOPE ("vect_analyze_early_break_dependences");
683 :
684 : /* List of all load data references found during traversal. */
685 140393 : auto_vec<data_reference *> bases;
686 140393 : basic_block dest_bb = NULL;
687 :
688 140393 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
689 140393 : class loop *loop_nest = loop_outer (loop);
690 :
691 140393 : if (dump_enabled_p ())
692 1552 : dump_printf_loc (MSG_NOTE, vect_location,
693 : "loop contains multiple exits, analyzing"
694 : " statement dependencies.\n");
695 :
696 140393 : if (LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo))
697 25030 : if (dump_enabled_p ())
698 280 : dump_printf_loc (MSG_NOTE, vect_location,
699 : "alternate exit has been chosen as main exit.\n");
700 :
701 : /* Since we don't support general control flow, the location we'll move the
702 : side-effects to is always the latch-connected exit. When we support
703 : general control flow we can do better but for now this is fine. Move
704 : side-effects to the in-loop destination of the last early exit. For the
705 : PEELED case we move the side-effects to the latch block as this is
706 : guaranteed to be the last block to be executed when a vector iteration
707 : has finished. */
708 140393 : if (LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo))
709 25030 : dest_bb = loop->latch;
710 : else
711 115363 : dest_bb = single_pred (loop->latch);
712 :
713 : /* We start looking from dest_bb, for the non-PEELED case we don't want to
714 : move any stores already present, but we do want to read and validate the
715 : loads. */
716 140393 : basic_block bb = dest_bb;
717 :
718 : /* We move stores across all loads to the beginning of dest_bb, so
719 : the first block processed below doesn't need dependence checking. */
720 140393 : bool check_deps = false;
721 :
722 507851 : do
723 : {
724 324122 : gimple_stmt_iterator gsi = gsi_last_bb (bb);
725 :
726 : /* Now analyze all the remaining statements and try to determine which
727 : instructions are allowed/needed to be moved. */
728 2404320 : while (!gsi_end_p (gsi))
729 : {
730 2085783 : gimple *stmt = gsi_stmt (gsi);
731 2085783 : gsi_prev (&gsi);
732 2085783 : if (is_gimple_debug (stmt))
733 1839460 : continue;
734 :
735 1106303 : stmt_vec_info orig_stmt_vinfo = loop_vinfo->lookup_stmt (stmt);
736 1106303 : stmt_vec_info stmt_vinfo
737 1106303 : = vect_stmt_to_vectorize (orig_stmt_vinfo);
738 1106303 : auto dr_ref = STMT_VINFO_DATA_REF (stmt_vinfo);
739 1106303 : if (!dr_ref)
740 : {
741 : /* Trapping statements after the last early exit are fine. */
742 853845 : if (check_deps)
743 : {
744 517357 : bool could_trap_p = false;
745 517357 : gimple *cur_stmt = STMT_VINFO_STMT (stmt_vinfo);
746 517357 : could_trap_p = gimple_could_trap_p (cur_stmt);
747 517357 : if (STMT_VINFO_IN_PATTERN_P (orig_stmt_vinfo))
748 : {
749 191015 : gimple_stmt_iterator gsi2;
750 191015 : auto stmt_seq
751 191015 : = STMT_VINFO_PATTERN_DEF_SEQ (orig_stmt_vinfo);
752 191015 : for (gsi2 = gsi_start (stmt_seq);
753 385250 : !could_trap_p && !gsi_end_p (gsi2); gsi_next (&gsi2))
754 : {
755 194235 : cur_stmt = gsi_stmt (gsi2);
756 194235 : could_trap_p = gimple_could_trap_p (cur_stmt);
757 : }
758 : }
759 :
760 517357 : if (could_trap_p)
761 : {
762 5037 : if (dump_enabled_p ())
763 150 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
764 : "cannot vectorize as operation may trap.\n");
765 5037 : return opt_result::failure_at (cur_stmt,
766 : "can't safely apply code motion to dependencies"
767 : " to vectorize the early exit. %G may trap.\n",
768 : cur_stmt);
769 : }
770 : }
771 :
772 848808 : continue;
773 848808 : }
774 :
775 : /* We know everything below dest_bb is safe since we know we
776 : had a full vector iteration when reaching it, either because
777 : the loop entry / IV exit test is last or because this
778 : is the loop latch itself. */
779 252458 : if (!check_deps)
780 11172 : continue;
781 :
782 : /* Check if vector accesses to the object will be within bounds.
783 : must be a constant or assume loop will be versioned or niters
784 : bounded by VF so accesses are within range. We only need to check
785 : the reads since writes are moved to a safe place where if we get
786 : there we know they are safe to perform. */
787 241286 : if (DR_IS_READ (dr_ref))
788 : {
789 225460 : dr_set_safe_speculative_read_required (stmt_vinfo, true);
790 225460 : bool inbounds = ref_within_array_bound (stmt, DR_REF (dr_ref));
791 225460 : DR_SCALAR_KNOWN_BOUNDS (STMT_VINFO_DR_INFO (stmt_vinfo)) = inbounds;
792 :
793 225460 : if (dump_enabled_p ())
794 2399 : dump_printf_loc (MSG_NOTE, vect_location,
795 : "marking DR (read) as possibly needing peeling "
796 : "for alignment at %G", stmt);
797 : }
798 :
799 241286 : if (DR_IS_READ (dr_ref))
800 225460 : bases.safe_push (dr_ref);
801 15826 : else if (DR_IS_WRITE (dr_ref))
802 : {
803 : /* We are moving writes down in the CFG. To be sure that this
804 : is valid after vectorization we have to check all the loads
805 : we are sinking the stores past to see if any of them may
806 : alias or are the same object.
807 :
808 : Same objects will not be an issue because unless the store
809 : is marked volatile the value can be forwarded. If the
810 : store is marked volatile we don't vectorize the loop
811 : anyway.
812 :
813 : That leaves the check for aliasing. We don't really need
814 : to care about the stores aliasing with each other since the
815 : stores are moved in order so the effects are still observed
816 : correctly. This leaves the check for WAR dependencies
817 : which we would be introducing here if the DR can alias.
818 : The check is quadratic in loads/stores but I have not found
819 : a better API to do this. I believe all loads and stores
820 : must be checked. We also must check them when we
821 : encountered the store, since we don't care about loads past
822 : the store. */
823 :
824 49060 : for (auto dr_read : bases)
825 15470 : if (dr_may_alias_p (dr_ref, dr_read, loop_nest))
826 : {
827 548 : if (dump_enabled_p ())
828 4 : dump_printf_loc (MSG_MISSED_OPTIMIZATION,
829 : vect_location,
830 : "early breaks not supported: "
831 : "overlapping loads and stores "
832 : "found before the break "
833 : "statement.\n");
834 :
835 548 : return opt_result::failure_at (stmt,
836 : "can't safely apply code motion to dependencies"
837 : " to vectorize the early exit. %G may alias with"
838 : " %G\n", stmt, dr_read->stmt);
839 : }
840 : }
841 :
842 481476 : if (gimple_vdef (stmt))
843 : {
844 15278 : if (dump_enabled_p ())
845 280 : dump_printf_loc (MSG_NOTE, vect_location,
846 : "==> recording stmt %G", stmt);
847 :
848 15278 : LOOP_VINFO_EARLY_BRK_STORES (loop_vinfo).safe_push (stmt);
849 : }
850 691658 : else if (gimple_vuse (stmt))
851 : {
852 225460 : LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).safe_insert (0, stmt);
853 225460 : if (dump_enabled_p ())
854 2399 : dump_printf_loc (MSG_NOTE, vect_location,
855 : "marked statement for vUSE update: %G", stmt);
856 : }
857 : }
858 :
859 318537 : if (!single_pred_p (bb))
860 : {
861 134808 : gcc_assert (bb == loop->header);
862 134808 : break;
863 : }
864 :
865 : /* If we possibly sink through a virtual PHI make sure to elide that. */
866 183729 : if (gphi *vphi = get_virtual_phi (bb))
867 107 : LOOP_VINFO_EARLY_BRK_STORES (loop_vinfo).safe_push (vphi);
868 :
869 : /* All earlier blocks need dependence checking. */
870 183729 : check_deps = true;
871 183729 : bb = single_pred (bb);
872 183729 : }
873 : while (1);
874 :
875 : /* We don't allow outer -> inner loop transitions which should have been
876 : trapped already during loop form analysis. */
877 134808 : gcc_assert (dest_bb->loop_father == loop);
878 :
879 : /* Check that the destination block we picked has only one predecessor. To
880 : relax this we would have to take special care when moving the statements.
881 : We don't currently support such control flow, and the check also
882 : simplifies how we handle labels that may be present anywhere in the IL:
883 : it ensures that the labels aren't significant for the CFG. */
884 134808 : if (!single_pred (dest_bb))
885 0 : return opt_result::failure_at (vect_location,
886 : "chosen loop exit block (BB %d) does not have a "
887 : "single predecessor which is currently not "
888 : "supported for early break vectorization.\n",
889 : dest_bb->index);
890 :
891 134808 : LOOP_VINFO_EARLY_BRK_DEST_BB (loop_vinfo) = dest_bb;
892 :
893 134808 : if (!LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).is_empty ())
894 : {
895 : /* All uses shall be updated to that of the first load. Entries are
896 : stored in reverse order. */
897 124287 : tree vuse = gimple_vuse (LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).last ());
898 348503 : for (auto g : LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo))
899 : {
900 224216 : if (dump_enabled_p ())
901 2336 : dump_printf_loc (MSG_NOTE, vect_location,
902 : "will update use: %T, mem_ref: %G", vuse, g);
903 : }
904 : }
905 :
906 134808 : if (dump_enabled_p ())
907 1398 : dump_printf_loc (MSG_NOTE, vect_location,
908 : "recorded statements to be moved to BB %d\n",
909 1398 : LOOP_VINFO_EARLY_BRK_DEST_BB (loop_vinfo)->index);
910 :
911 134808 : return opt_result::success ();
912 140393 : }
913 :
914 : /* Function vect_analyze_data_ref_dependences.
915 :
916 : Examine all the data references in the loop, and make sure there do not
917 : exist any data dependences between them. Set *MAX_VF according to
918 : the maximum vectorization factor the data dependences allow. */
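
/* For example (an illustrative sketch): in

     for (i = 0; i < n; i++)
       a[i + 2] = a[i] + 1;

   the read-after-write distance of 2 caps *MAX_VF at 2, since wider
   vectors would read elements before they have been stored.  */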
919 :
920 : opt_result
921 384764 : vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
922 : unsigned int *max_vf)
923 : {
924 384764 : unsigned int i;
925 384764 : struct data_dependence_relation *ddr;
926 :
927 384764 : DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
928 :
929 384764 : if (!LOOP_VINFO_DDRS (loop_vinfo).exists ())
930 : {
931 159810 : LOOP_VINFO_DDRS (loop_vinfo)
932 159810 : .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
933 159810 : * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
934 : /* We do not need read-read dependences. */
935 319620 : bool res = compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
936 : &LOOP_VINFO_DDRS (loop_vinfo),
937 159810 : LOOP_VINFO_LOOP_NEST (loop_vinfo),
938 : false);
939 159810 : gcc_assert (res);
940 : }
941 :
942 384764 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
943 :
944 : /* For epilogues we either have no aliases or alias versioning
945 : was applied to the original loop. Therefore we may just get max_vf
946 : using the VF of the original loop. */
947 384764 : if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
948 12621 : *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
949 : else
950 1828452 : FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
951 : {
952 1474113 : opt_result res
953 1474113 : = vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf);
954 1474113 : if (!res)
955 17804 : return res;
956 : }
957 :
958 : /* If we have early break statements in the loop, check to see if they
959 : are of a form we can vectorize. */
960 366960 : if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
961 140393 : return vect_analyze_early_break_dependences (loop_vinfo);
962 :
963 226567 : return opt_result::success ();
964 : }
965 :
966 :
967 : /* Function vect_slp_analyze_data_ref_dependence.
968 :
969 : Return TRUE if there (might) exist a dependence between a memory-reference
970 : DRA and a memory-reference DRB for VINFO. When versioning for alias
971 : may check a dependence at run-time, return FALSE. Adjust *MAX_VF
972 : according to the data dependence. */
973 :
974 : static bool
975 6918924 : vect_slp_analyze_data_ref_dependence (vec_info *vinfo,
976 : struct data_dependence_relation *ddr)
977 : {
978 6918924 : struct data_reference *dra = DDR_A (ddr);
979 6918924 : struct data_reference *drb = DDR_B (ddr);
980 6918924 : dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
981 6918924 : dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
982 :
983 : /* We need to check dependences of statements marked as unvectorizable
984 : as well; they can still prohibit vectorization. */
985 :
986 : /* Independent data accesses. */
987 6918924 : if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
988 : return false;
989 :
990 1098995 : if (dra == drb)
991 : return false;
992 :
993 : /* Read-read is OK. */
994 8515 : if (DR_IS_READ (dra) && DR_IS_READ (drb))
995 : return false;
996 :
997 : /* If dra and drb are part of the same interleaving chain consider
998 : them independent. */
999 8515 : if (STMT_VINFO_GROUPED_ACCESS (dr_info_a->stmt)
1000 8515 : && (DR_GROUP_FIRST_ELEMENT (dr_info_a->stmt)
1001 8515 : == DR_GROUP_FIRST_ELEMENT (dr_info_b->stmt)))
1002 : return false;
1003 :
1004 : /* Unknown data dependence. */
1005 8515 : if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
1006 : {
1007 8515 : if (dump_enabled_p ())
1008 4 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1009 : "can't determine dependence between %T and %T\n",
1010 : DR_REF (dra), DR_REF (drb));
1011 : }
1012 0 : else if (dump_enabled_p ())
1013 0 : dump_printf_loc (MSG_NOTE, vect_location,
1014 : "determined dependence between %T and %T\n",
1015 : DR_REF (dra), DR_REF (drb));
1016 :
1017 : return true;
1018 : }
1019 :
1020 :
1021 : /* Analyze dependences involved in the transform of a store SLP NODE. */
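
/* For example (an illustrative sketch): in the SLP store group

     a[0] = x;
     t = *p;
     a[1] = y;

   both stores are emitted at the position of 'a[1] = y', so the
   intervening load '*p' must be checked for dependences against the
   sunk store 'a[0] = x'.  */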
1022 :
1023 : static bool
1024 657300 : vect_slp_analyze_store_dependences (vec_info *vinfo, slp_tree node)
1025 : {
1026 : /* This walks over all stmts involved in the SLP store done
1027 : in NODE verifying we can sink them up to the last stmt in the
1028 : group. */
1029 657300 : stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
1030 657300 : gcc_assert (DR_IS_WRITE (STMT_VINFO_DATA_REF (last_access_info)));
1031 :
1032 2388785 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (node).length (); ++k)
1033 : {
1034 1739968 : stmt_vec_info access_info
1035 1739968 : = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (node)[k]);
1036 1739968 : if (access_info == last_access_info)
1037 649488 : continue;
1038 1090480 : data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
1039 1090480 : ao_ref ref;
1040 1090480 : bool ref_initialized_p = false;
1041 1090480 : for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
1042 10633973 : gsi_stmt (gsi) != last_access_info->stmt; gsi_next (&gsi))
1043 : {
1044 9551976 : gimple *stmt = gsi_stmt (gsi);
1045 16957618 : if (! gimple_vuse (stmt))
1046 2632726 : continue;
1047 :
1048 : /* If we couldn't record a (single) data reference for this
1049 : stmt we have to resort to the alias oracle. */
1050 6919250 : stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
1051 6919250 : data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
1052 6919250 : if (!dr_b)
1053 : {
1054 : /* We are moving a store - this means
1055 : we cannot use TBAA for disambiguation. */
1056 546 : if (!ref_initialized_p)
1057 546 : ao_ref_init (&ref, DR_REF (dr_a));
1058 546 : if (stmt_may_clobber_ref_p_1 (stmt, &ref, false)
1059 546 : || ref_maybe_used_by_stmt_p (stmt, &ref, false))
1060 8483 : return false;
1061 542 : continue;
1062 : }
1063 :
1064 6918704 : gcc_assert (!gimple_visited_p (stmt));
1065 :
1066 6918704 : ddr_p ddr = initialize_data_dependence_relation (dr_a,
1067 6918704 : dr_b, vNULL);
1068 6918704 : bool dependent = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1069 6918704 : free_dependence_relation (ddr);
1070 6918704 : if (dependent)
1071 : return false;
1072 : }
1073 : }
1074 : return true;
1075 : }
1076 :
1077 : /* Analyze dependences involved in the transform of a load SLP NODE. STORES
1078 : contains the vector of scalar stores of this instance if we are
1079 : disambiguating the loads. */
1080 :
1081 : static bool
1082 154777 : vect_slp_analyze_load_dependences (vec_info *vinfo, slp_tree node,
1083 : vec<stmt_vec_info> stores,
1084 : stmt_vec_info last_store_info)
1085 : {
1086 : /* This walks over all stmts involved in the SLP load done
1087 : in NODE verifying we can hoist them up to the first stmt in the
1088 : group. */
1089 154777 : stmt_vec_info first_access_info = vect_find_first_scalar_stmt_in_slp (node);
1090 154777 : gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (first_access_info)));
1091 :
1092 542903 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (node).length (); ++k)
1093 : {
1094 388162 : if (! SLP_TREE_SCALAR_STMTS (node)[k])
1095 161874 : continue;
1096 388162 : stmt_vec_info access_info
1097 388162 : = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (node)[k]);
1098 388162 : if (access_info == first_access_info)
1099 161874 : continue;
1100 226288 : data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
1101 226288 : ao_ref ref;
1102 226288 : bool ref_initialized_p = false;
1103 226288 : hash_set<stmt_vec_info> grp_visited;
1104 226288 : for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
1105 4484742 : gsi_stmt (gsi) != first_access_info->stmt; gsi_prev (&gsi))
1106 : {
1107 2129263 : gimple *stmt = gsi_stmt (gsi);
1108 3462856 : if (! gimple_vdef (stmt))
1109 2073677 : continue;
1110 :
1111 279598 : stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
1112 :
1113 : /* If we run into a store of this same instance (we've just
1114 : marked those) then delay dependence checking until we run
1115 : into the last store because this is where it will have
1116 : been sunk to (and we verified that we can do that already). */
1117 279598 : if (gimple_visited_p (stmt))
1118 : {
1119 224012 : if (stmt_info != last_store_info)
1120 224010 : continue;
1121 :
1122 10 : for (stmt_vec_info &store_info : stores)
1123 : {
1124 4 : data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
1125 4 : ddr_p ddr = initialize_data_dependence_relation
1126 4 : (dr_a, store_dr, vNULL);
1127 4 : bool dependent
1128 4 : = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1129 4 : free_dependence_relation (ddr);
1130 4 : if (dependent)
1131 36 : return false;
1132 : }
1133 2 : continue;
1134 2 : }
1135 :
1136 114073 : auto check_hoist = [&] (stmt_vec_info stmt_info) -> bool
1137 : {
1138 : /* We are hoisting a load - this means we can use TBAA for
1139 : disambiguation. */
1140 58487 : if (!ref_initialized_p)
1141 58487 : ao_ref_init (&ref, DR_REF (dr_a));
1142 58487 : if (stmt_may_clobber_ref_p_1 (stmt_info->stmt, &ref, true))
1143 : {
1144 : /* If we couldn't record a (single) data reference for this
1145 : stmt we have to give up now. */
1146 216 : data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
1147 216 : if (!dr_b)
1148 : return false;
1149 216 : ddr_p ddr = initialize_data_dependence_relation (dr_a,
1150 216 : dr_b, vNULL);
1151 216 : bool dependent
1152 216 : = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1153 216 : free_dependence_relation (ddr);
1154 216 : if (dependent)
1155 : return false;
1156 : }
1157 : /* No dependence. */
1158 : return true;
1159 55586 : };
1160 55586 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1161 : {
1162 : /* When we run into a store group we have to honor
1163 : that earlier stores might be moved here. We don't
1164 : know exactly which stores, nor where to, since we lack a
1165 : back-mapping from DR to SLP node, so assume all
1166 : earlier stores are sunk here. It's enough to
1167 : consider the last stmt of a group for this.
1168 : ??? Both this and the fact that we disregard that
1169 : the conflicting instance might be removed later
1170 : is overly conservative. */
1171 55136 : if (!grp_visited.add (DR_GROUP_FIRST_ELEMENT (stmt_info)))
1172 10686 : for (auto store_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1173 128970 : store_info != NULL;
1174 118284 : store_info = DR_GROUP_NEXT_ELEMENT (store_info))
1175 118320 : if ((store_info == stmt_info
1176 107643 : || get_later_stmt (store_info, stmt_info) == stmt_info)
1177 165680 : && !check_hoist (store_info))
1178 : return false;
1179 : }
1180 : else
1181 : {
1182 450 : if (!check_hoist (stmt_info))
1183 : return false;
1184 : }
1185 : }
1186 226288 : }
1187 : return true;
1188 : }
1189 :
1190 :
1191 : /* Function vect_slp_analyze_instance_dependence.
1192 :
1193 : Examine all the data references in the SLP instance, and make sure there
1194 : do not exist any data dependences between them that would prevent the
1195 : transform. Return TRUE if no such dependences were found. */
1196 :
1197 : bool
1198 785090 : vect_slp_analyze_instance_dependence (vec_info *vinfo, slp_instance instance)
1199 : {
1200 785090 : DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
1201 :
1202 : /* The stores of this instance are at the root of the SLP tree. */
1203 785090 : slp_tree store = NULL;
1204 785090 : if (SLP_INSTANCE_KIND (instance) == slp_inst_kind_store)
1205 657300 : store = SLP_INSTANCE_TREE (instance);
1206 :
1207 : /* Verify we can sink stores to the vectorized stmt insert location. */
1208 657300 : stmt_vec_info last_store_info = NULL;
1209 657300 : if (store)
1210 : {
1211 657300 : if (! vect_slp_analyze_store_dependences (vinfo, store))
1212 : return false;
1213 :
1214 : /* Mark stores in this instance and remember the last one. */
1215 648817 : last_store_info = vect_find_last_scalar_stmt_in_slp (store);
1216 2379601 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (store).length (); ++k)
1217 1730784 : gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
1218 : }
1219 :
1220 776607 : bool res = true;
1221 :
1222 : /* Verify we can sink loads to the vectorized stmt insert location,
1223 : special-casing stores of this instance. */
1224 1173862 : for (slp_tree &load : SLP_INSTANCE_LOADS (instance))
1225 154777 : if (! vect_slp_analyze_load_dependences (vinfo, load,
1226 : store
1227 : ? SLP_TREE_SCALAR_STMTS (store)
1228 : : vNULL, last_store_info))
1229 : {
1230 : res = false;
1231 : break;
1232 : }
1233 :
1234 : /* Unset the visited flag. */
1235 776607 : if (store)
1236 2379601 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (store).length (); ++k)
1237 1730784 : gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
1238 :
1239 : /* If this is an SLP instance with a store, check if there's a dependent
1240 : load that cannot be forwarded from a previous iteration of a loop
1241 : both are in. This is to avoid situations like the one in PR115777. */
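/* For example (an illustrative sketch): in

     for (i = 0; i < n; i++)
       {
         b[i] = a[i] + a[i+1];
         a[i+1] = c[i];
       }

   the load of a[i+1] reads bytes the previous iteration stored; when
   the overlap does not line up with the load vector size, the CPU's
   store-to-load forwarding fails and the vectorized loop may stall.  */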
1242 776607 : if (res && store)
1243 : {
1244 648793 : stmt_vec_info store_info
1245 648793 : = DR_GROUP_FIRST_ELEMENT (SLP_TREE_SCALAR_STMTS (store)[0]);
1246 648793 : class loop *store_loop = gimple_bb (store_info->stmt)->loop_father;
1247 648793 : if (! loop_outer (store_loop))
1248 553013 : return res;
1249 95780 : vec<loop_p> loop_nest;
1250 95780 : loop_nest.create (1);
1251 95780 : loop_nest.quick_push (store_loop);
1252 95780 : data_reference *drs = nullptr;
1253 179065 : for (slp_tree &load : SLP_INSTANCE_LOADS (instance))
1254 : {
1255 36998 : if (! STMT_VINFO_GROUPED_ACCESS (SLP_TREE_SCALAR_STMTS (load)[0]))
1256 0 : continue;
1257 36998 : stmt_vec_info load_info
1258 36998 : = DR_GROUP_FIRST_ELEMENT (SLP_TREE_SCALAR_STMTS (load)[0]);
1259 36998 : if (gimple_bb (load_info->stmt)->loop_father != store_loop)
1260 5171 : continue;
1261 :
1262 : /* For now concern ourselves with write-after-read as we also
1263 : only look for re-use of the store within the same SLP instance.
1264 : We can still get a RAW here when the instance contains a PHI
1265 : with a backedge though, thus this test. */
1266 31827 : if (! vect_stmt_dominates_stmt_p (STMT_VINFO_STMT (load_info),
1267 : STMT_VINFO_STMT (store_info)))
1268 11932 : continue;
1269 :
1270 19895 : if (! drs)
1271 : {
1272 19037 : drs = create_data_ref (loop_preheader_edge (store_loop),
1273 : store_loop,
1274 19037 : DR_REF (STMT_VINFO_DATA_REF (store_info)),
1275 : store_info->stmt, false, false);
1276 19037 : if (! DR_BASE_ADDRESS (drs)
1277 16147 : || TREE_CODE (DR_STEP (drs)) != INTEGER_CST)
1278 : break;
1279 : }
1280 16702 : data_reference *drl
1281 16702 : = create_data_ref (loop_preheader_edge (store_loop),
1282 : store_loop,
1283 16702 : DR_REF (STMT_VINFO_DATA_REF (load_info)),
1284 : load_info->stmt, true, false);
1285 :
1286 : /* See whether the DRs have a known constant distance throughout
1287 : the containing loop iteration. */
1288 31689 : if (! DR_BASE_ADDRESS (drl)
1289 14596 : || ! operand_equal_p (DR_STEP (drs), DR_STEP (drl))
1290 8617 : || ! operand_equal_p (DR_BASE_ADDRESS (drs),
1291 8617 : DR_BASE_ADDRESS (drl))
1292 18421 : || ! operand_equal_p (DR_OFFSET (drs), DR_OFFSET (drl)))
1293 : {
1294 14987 : free_data_ref (drl);
1295 14987 : continue;
1296 : }
1297 :
1298 : /* If the next iteration's load overlaps the store at a non-power-of-two
1299 : offset we will surely fail any store-to-load forwarding (STLF) attempt. */
1300 1715 : HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (drl));
1301 1715 : unsigned HOST_WIDE_INT sizes
1302 1715 : = (TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drs))))
1303 1715 : * DR_GROUP_SIZE (store_info));
1304 1715 : unsigned HOST_WIDE_INT sizel
1305 1715 : = (TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drl))))
1306 1715 : * DR_GROUP_SIZE (load_info));
1307 1715 : if (ranges_overlap_p (TREE_INT_CST_LOW (DR_INIT (drl)) + step, sizel,
1308 1715 : TREE_INT_CST_LOW (DR_INIT (drs)), sizes))
1309 : {
1310 831 : unsigned HOST_WIDE_INT dist
1311 831 : = absu_hwi (TREE_INT_CST_LOW (DR_INIT (drl)) + step
1312 831 : - TREE_INT_CST_LOW (DR_INIT (drs)));
1313 831 : poly_uint64 loadsz = tree_to_poly_uint64
1314 831 : (TYPE_SIZE_UNIT (SLP_TREE_VECTYPE (load)));
1315 831 : poly_uint64 storesz = tree_to_poly_uint64
1316 831 : (TYPE_SIZE_UNIT (SLP_TREE_VECTYPE (store)));
1317 : /* When the overlap aligns with vector sizes used for the loads
1318 : and the vector stores are at least as large as the loads,
1319 : forwarding should work. */
1320 1662 : if (maybe_gt (loadsz, storesz) || ! multiple_p (dist, loadsz))
1321 70 : load->avoid_stlf_fail = true;
1322 : }
1323 1715 : free_data_ref (drl);
1324 : }
1325 95780 : if (drs)
1326 19037 : free_data_ref (drs);
1327 95780 : loop_nest.release ();
1328 : }
1329 :
1330 : return res;
1331 : }
1332 :
1333 : /* Return the misalignment of DR_INFO accessed in VECTYPE with OFFSET
1334 : applied. */
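
/* For example (an illustrative sketch): with a 16-byte target alignment,
   a group start that is 8 bytes off that alignment and an access 4 bytes
   into the group, the returned misalignment is (8 + 4) % 16 == 12.  */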
1335 :
1336 : int
1337 6588092 : dr_misalignment (dr_vec_info *dr_info, tree vectype, poly_int64 offset)
1338 : {
1339 6588092 : HOST_WIDE_INT diff = 0;
1340 : /* Alignment is only analyzed for the first element of a DR group,
1341 : use that but adjust misalignment by the offset of the access. */
1342 6588092 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
1343 : {
1344 2289224 : dr_vec_info *first_dr
1345 2289224 : = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
1346 : /* vect_analyze_data_ref_accesses guarantees that DR_INIT are
1347 : INTEGER_CSTs and the first element in the group has the lowest
1348 : address. */
1349 2289224 : diff = (TREE_INT_CST_LOW (DR_INIT (dr_info->dr))
1350 2289224 : - TREE_INT_CST_LOW (DR_INIT (first_dr->dr)));
1351 2289224 : gcc_assert (diff >= 0);
1352 : dr_info = first_dr;
1353 : }
1354 :
1355 6588092 : int misalign = dr_info->misalignment;
1356 6588092 : gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
1357 6588092 : if (misalign == DR_MISALIGNMENT_UNKNOWN)
1358 : return misalign;
1359 :
1360 : /* If the access is only aligned for a vector type with smaller alignment
1361 : requirement the access has unknown misalignment. */
1362 4009213 : if (maybe_lt (dr_info->target_alignment * BITS_PER_UNIT,
1363 4009213 : targetm.vectorize.preferred_vector_alignment (vectype)))
1364 : return DR_MISALIGNMENT_UNKNOWN;
1365 :
1366 : /* Apply the offset from the DR group start and the externally supplied
1367 : offset which can for example result from a negative stride access. */
1368 4009210 : poly_int64 misalignment = misalign + diff + offset;
1369 :
1370 : /* Below we reject compile-time non-constant target alignments, but if
1371 : our misalignment is zero, then we are known to already be aligned
1372 : w.r.t. any such possible target alignment. */
1373 4009210 : if (known_eq (misalignment, 0))
1374 : return 0;
1375 :
1376 631218 : unsigned HOST_WIDE_INT target_alignment_c;
1377 631218 : if (!dr_info->target_alignment.is_constant (&target_alignment_c)
1378 631218 : || !known_misalignment (misalignment, target_alignment_c, &misalign))
1379 : return DR_MISALIGNMENT_UNKNOWN;
1380 631218 : return misalign;
1381 : }
1382 :
1383 : /* Record the base alignment guarantee given by DRB, which occurs
1384 : in STMT_INFO. */
1385 :
1386 : static void
1387 4578813 : vect_record_base_alignment (vec_info *vinfo, stmt_vec_info stmt_info,
1388 : innermost_loop_behavior *drb)
1389 : {
1390 4578813 : bool existed;
1391 4578813 : std::pair<stmt_vec_info, innermost_loop_behavior *> &entry
1392 4578813 : = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
1393 4578813 : if (!existed || entry.second->base_alignment < drb->base_alignment)
1394 : {
1395 1417465 : entry = std::make_pair (stmt_info, drb);
1396 1417465 : if (dump_enabled_p ())
1397 32670 : dump_printf_loc (MSG_NOTE, vect_location,
1398 : "recording new base alignment for %T\n"
1399 : " alignment: %d\n"
1400 : " misalignment: %d\n"
1401 : " based on: %G",
1402 : drb->base_address,
1403 : drb->base_alignment,
1404 : drb->base_misalignment,
1405 : stmt_info->stmt);
1406 : }
1407 4578813 : }
1408 :
1409 : /* If the region we're going to vectorize is reached, all unconditional
1410 : data references occur at least once. We can therefore pool the base
1411 : alignment guarantees from each unconditional reference. Do this by
1412 : going through all the data references in VINFO and checking whether
1413 : the containing statement makes the reference unconditionally. If so,
1414 : record the alignment of the base address in VINFO so that it can be
1415 : used for all other references with the same base. */
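
/* For example (an illustrative sketch): if the region unconditionally
   executes

     p[0] = 0;

   and P is known to be 32-byte aligned there, a conditional reference
   based on P elsewhere in the region can reuse that pooled 32-byte
   base alignment guarantee.  */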
1416 :
1417 : void
1418 1020036 : vect_record_base_alignments (vec_info *vinfo)
1419 : {
1420 1020036 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
1421 409936 : class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1422 14964732 : for (data_reference *dr : vinfo->shared->datarefs)
1423 : {
1424 12001388 : dr_vec_info *dr_info = vinfo->lookup_dr (dr);
1425 12001388 : stmt_vec_info stmt_info = dr_info->stmt;
1426 12001388 : if (!DR_IS_CONDITIONAL_IN_STMT (dr)
1427 11991492 : && STMT_VINFO_VECTORIZABLE (stmt_info)
1428 4595796 : && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1429 : {
1430 4577286 : vect_record_base_alignment (vinfo, stmt_info, &DR_INNERMOST (dr));
1431 :
1432 : /* If DR is nested in the loop that is being vectorized, we can also
1433 : record the alignment of the base wrt the outer loop. */
1434 12926058 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
1435 1527 : vect_record_base_alignment
1436 1527 : (vinfo, stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
1437 : }
1438 : }
1439 1020036 : }
1440 :
1441 : /* Function vect_compute_data_ref_alignment
1442 :
1443 : Compute the misalignment of the data reference DR_INFO when vectorizing
1444 : with VECTYPE.
1445 :
1446 : Output:
1447 : 1. initialized misalignment info for DR_INFO
1448 :
1449 : FOR NOW: No analysis is actually performed. Misalignment is calculated
1450 : only for trivial cases. TODO. */
1451 :
1452 : static void
1453 1597573 : vect_compute_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info,
1454 : tree vectype)
1455 : {
1456 1597573 : stmt_vec_info stmt_info = dr_info->stmt;
1457 1597573 : vec_base_alignments *base_alignments = &vinfo->base_alignments;
1458 1597573 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
1459 1597573 : class loop *loop = NULL;
1460 1597573 : tree ref = DR_REF (dr_info->dr);
1461 :
1462 1597573 : if (dump_enabled_p ())
1463 51544 : dump_printf_loc (MSG_NOTE, vect_location,
1464 : "vect_compute_data_ref_alignment:\n");
1465 :
1466 1597573 : if (loop_vinfo)
1467 822730 : loop = LOOP_VINFO_LOOP (loop_vinfo);
1468 :
1469 : /* Initialize misalignment to unknown. */
1470 1597573 : SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
1471 :
1472 1597573 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1473 : return;
1474 :
1475 1577343 : innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
1476 1577343 : bool step_preserves_misalignment_p;
1477 :
1478 1577343 : poly_uint64 vector_alignment
1479 1577343 : = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
1480 : BITS_PER_UNIT);
1481 :
1482 1577343 : if (loop_vinfo
1483 1577343 : && dr_safe_speculative_read_required (stmt_info))
1484 : {
1485 : /* The required target alignment must be a power-of-2 value and is
1486 : computed as the product of vector element size, VF and group size.
1487 : We compute the constant part first as VF may be a variable. For
1488 : We compute the constant part first as VF may be variable. For
1489 305122 : auto align_factor_c
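: /* For example (illustrative numbers): 4-byte elements in a group of 2
:    with VF 4 give a constant factor of 4 * 2 = 8 and a candidate
:    alignment of 4 * 8 = 32 bytes, a power of two, so the target
:    alignment is raised to 32. For a variable VF such as 4 + 4x, only
:    the constant factor 8 is checked for being a power of two here. */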
1490 305122 : = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1491 305122 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1492 89854 : align_factor_c *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
1493 :
1494 305122 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1495 305122 : poly_uint64 new_alignment = vf * align_factor_c;
1496 :
1497 610244 : if ((vf.is_constant () && pow2p_hwi (new_alignment.to_constant ()))
1498 : || (!vf.is_constant () && pow2p_hwi (align_factor_c)))
1499 : {
1500 241893 : if (dump_enabled_p ())
1501 : {
1502 2894 : dump_printf_loc (MSG_NOTE, vect_location,
1503 : "alignment increased due to early break to ");
1504 2894 : dump_dec (MSG_NOTE, new_alignment);
1505 2894 : dump_printf (MSG_NOTE, " bytes.\n");
1506 : }
1507 241893 : vector_alignment = new_alignment;
1508 : }
1509 : }
1510 :
1511 1577343 : SET_DR_TARGET_ALIGNMENT (dr_info, vector_alignment);
1512 :
1513 : /* If the main loop has peeled for alignment we have no way of knowing
1514 : whether the data accesses in the epilogues are aligned. We can't at
1515 : compile time answer the question whether we have entered the main loop or
1516 : not. Fixes PR 92351. */
1517 1577343 : if (loop_vinfo)
1518 : {
1519 802500 : loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
1520 802500 : if (orig_loop_vinfo
1521 32744 : && LOOP_VINFO_PEELING_FOR_ALIGNMENT (orig_loop_vinfo) != 0)
1522 : return;
1523 : }
1524 :
1525 1577126 : unsigned HOST_WIDE_INT vect_align_c;
1526 1577126 : if (!vector_alignment.is_constant (&vect_align_c))
1527 : return;
1528 :
1529 : /* No step for BB vectorization. */
1530 1577126 : if (!loop)
1531 : {
1532 774843 : gcc_assert (integer_zerop (drb->step));
1533 : step_preserves_misalignment_p = true;
1534 : }
1535 :
1536 : else
1537 : {
1538 : /* We can only use base and misalignment information relative to
1539 : an innermost loop if the misalignment stays the same throughout the
1540 : execution of the loop. As above, this is the case if the stride of
1541 : the dataref evenly divides by the alignment. Make sure to check
1542 : the dataref is a multiple of the alignment. Make sure to check
1543 : step_preserves_misalignment_p = true;
1544 : auto lvinfo = loop_vinfo;
1545 1637875 : while (lvinfo)
1546 : {
1547 835592 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (lvinfo);
1548 835592 : step_preserves_misalignment_p
1549 835592 : &= multiple_p (drb->step_alignment * vf, vect_align_c);
1550 835592 : lvinfo = LOOP_VINFO_ORIG_LOOP_INFO (lvinfo);
1551 : }
1552 :
1553 802283 : if (!step_preserves_misalignment_p && dump_enabled_p ())
1554 320 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1555 : "step doesn't divide the vector alignment.\n");
1556 :
1557 : /* In case the dataref is in an inner-loop of the loop that is being
1558 : vectorized (LOOP), we use the base and misalignment information
1559 : relative to the outer-loop (LOOP). This is ok only if the
1560 : misalignment stays the same throughout the execution of the
1561 : inner-loop, which is why we have to check that the stride of the
1562 : dataref in the inner-loop is a multiple of the vector alignment. */
1563 802283 : if (step_preserves_misalignment_p
1564 802283 : && nested_in_vect_loop_p (loop, stmt_info))
1565 : {
1566 1526 : step_preserves_misalignment_p
1567 1526 : = (DR_STEP_ALIGNMENT (dr_info->dr) % vect_align_c) == 0;
1568 :
1569 1526 : if (dump_enabled_p ())
1570 : {
1571 496 : if (step_preserves_misalignment_p)
1572 358 : dump_printf_loc (MSG_NOTE, vect_location,
1573 : "inner step divides the vector alignment.\n");
1574 : else
1575 138 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1576 : "inner step doesn't divide the vector"
1577 : " alignment.\n");
1578 : }
1579 : }
1580 : }
1581 :
1582 1577126 : unsigned int base_alignment = drb->base_alignment;
1583 1577126 : unsigned int base_misalignment = drb->base_misalignment;
1584 :
1585 : /* Calculate the maximum of the pooled base address alignment and the
1586 : alignment that we can compute for DR itself. */
1587 1577126 : std::pair<stmt_vec_info, innermost_loop_behavior *> *entry
1588 1577126 : = base_alignments->get (drb->base_address);
1589 1577126 : if (entry
1590 1572324 : && base_alignment < (*entry).second->base_alignment
1591 1580078 : && (loop_vinfo
1592 2266 : || (dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt_info->stmt),
1593 2266 : gimple_bb (entry->first->stmt))
1594 2166 : && (gimple_bb (stmt_info->stmt) != gimple_bb (entry->first->stmt)
1595 1942 : || (entry->first->dr_aux.group <= dr_info->group)))))
1596 : {
1597 2835 : base_alignment = entry->second->base_alignment;
1598 2835 : base_misalignment = entry->second->base_misalignment;
1599 : }
1600 :
1601 1577126 : if (drb->offset_alignment < vect_align_c
1602 1508379 : || !step_preserves_misalignment_p
1603 : /* We need to know whether the step wrt the vectorized loop is
1604 : negative when computing the starting misalignment below. */
1605 1500194 : || TREE_CODE (drb->step) != INTEGER_CST)
1606 : {
1607 104658 : if (dump_enabled_p ())
1608 3713 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1609 : "Unknown alignment for access: %T\n", ref);
1610 104658 : return;
1611 : }
1612 :
1613 1472468 : if (base_alignment < vect_align_c)
1614 : {
1615 728039 : unsigned int max_alignment;
1616 728039 : tree base = get_base_for_alignment (drb->base_address, &max_alignment);
1617 728039 : if (max_alignment < vect_align_c
1618 725662 : || (loop_vinfo && LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1619 1432798 : || !vect_can_force_dr_alignment_p (base,
1620 704759 : vect_align_c * BITS_PER_UNIT))
1621 : {
1622 531234 : if (dump_enabled_p ())
1623 14282 : dump_printf_loc (MSG_NOTE, vect_location,
1624 : "can't force alignment of ref: %T\n", ref);
1625 531234 : return;
1626 : }
1627 :
1628 : /* Force the alignment of the decl.
1629 : NOTE: This is the only change to the code we make during
1630 : the analysis phase, before deciding to vectorize the loop. */
1631 196805 : if (dump_enabled_p ())
1632 7936 : dump_printf_loc (MSG_NOTE, vect_location,
1633 : "force alignment of %T\n", ref);
1634 :
1635 196805 : dr_info->base_decl = base;
1636 196805 : dr_info->base_misaligned = true;
1637 196805 : base_misalignment = 0;
1638 : }
1639 941234 : poly_int64 misalignment
1640 941234 : = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
1641 :
1642 941234 : unsigned int const_misalignment;
1643 941234 : if (!known_misalignment (misalignment, vect_align_c, &const_misalignment))
1644 : {
1645 : if (dump_enabled_p ())
1646 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1647 : "Non-constant misalignment for access: %T\n", ref);
1648 : return;
1649 : }
1650 :
1651 941234 : SET_DR_MISALIGNMENT (dr_info, const_misalignment);
1652 :
1653 941234 : if (dump_enabled_p ())
1654 32220 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1655 : "misalign = %d bytes of ref %T\n",
1656 : const_misalignment, ref);
1657 :
1658 : return;
1659 : }
1660 :
1661 : /* Return whether DR_INFO, which is related to DR_PEEL_INFO in
1662 : that it only differs in DR_INIT, is aligned if DR_PEEL_INFO
1663 : is made aligned via peeling. */
1664 :
1665 : static bool
1666 1983294 : vect_dr_aligned_if_related_peeled_dr_is (dr_vec_info *dr_info,
1667 : dr_vec_info *dr_peel_info)
1668 : {
1669 1983294 : if (multiple_p (DR_TARGET_ALIGNMENT (dr_peel_info),
1670 1984062 : DR_TARGET_ALIGNMENT (dr_info)))
1671 : {
1672 1982526 : poly_offset_int diff
1673 1982526 : = (wi::to_poly_offset (DR_INIT (dr_peel_info->dr))
1674 1982526 : - wi::to_poly_offset (DR_INIT (dr_info->dr)));
1675 1982526 : if (known_eq (diff, 0)
1676 1982526 : || multiple_p (diff, DR_TARGET_ALIGNMENT (dr_info)))
1677 747865 : return true;
1678 : }
1679 : return false;
1680 : }
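:
: /* For example, two DRs on the same base with DR_TARGET_ALIGNMENT 16 and
:    DR_INIT 16 and 48 differ by 32, a multiple of 16, so aligning one via
:    peeling aligns the other. With DR_INIT 16 and 40 the difference 24 is
:    not a multiple of 16 and the function returns false. */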
1681 :
1682 : /* Return whether DR_INFO is aligned if DR_PEEL_INFO is made
1683 : aligned via peeling. */
1684 :
1685 : static bool
1686 197608 : vect_dr_aligned_if_peeled_dr_is (dr_vec_info *dr_info,
1687 : dr_vec_info *dr_peel_info)
1688 : {
1689 197608 : if (!operand_equal_p (DR_BASE_ADDRESS (dr_info->dr),
1690 197608 : DR_BASE_ADDRESS (dr_peel_info->dr), 0)
1691 48475 : || !operand_equal_p (DR_OFFSET (dr_info->dr),
1692 48475 : DR_OFFSET (dr_peel_info->dr), 0)
1693 245176 : || !operand_equal_p (DR_STEP (dr_info->dr),
1694 47568 : DR_STEP (dr_peel_info->dr), 0))
1695 150438 : return false;
1696 :
1697 47170 : return vect_dr_aligned_if_related_peeled_dr_is (dr_info, dr_peel_info);
1698 : }
1699 :
1700 : /* Compute the value for dr_info->misalign so that the access appears
1701 : aligned. This is used by peeling to compensate for dr_misalignment
1702 : applying the offset for negative step. */
1703 :
1704 : int
1705 20737 : vect_dr_misalign_for_aligned_access (dr_vec_info *dr_info)
1706 : {
1707 20737 : if (tree_int_cst_sgn (DR_STEP (dr_info->dr)) >= 0)
1708 : return 0;
1709 :
1710 201 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
1711 201 : poly_int64 misalignment
1712 201 : = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
1713 201 : * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
1714 :
1715 201 : unsigned HOST_WIDE_INT target_alignment_c;
1716 201 : int misalign;
1717 201 : if (!dr_info->target_alignment.is_constant (&target_alignment_c)
1718 201 : || !known_misalignment (misalignment, target_alignment_c, &misalign))
1719 : return DR_MISALIGNMENT_UNKNOWN;
1720 201 : return misalign;
1721 : }
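:
: /* For example, for a negative-step access with vector type V4SI (four
:    4-byte elements) the computed value is (4 - 1) * 4 = 12 bytes, so
:    with a 16-byte target alignment the access is recorded at
:    misalignment 12 rather than 0, matching the offset dr_misalignment
:    applies for negative strides. */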
1722 :
1723 : /* Function vect_update_misalignment_for_peel.
1724 : Sets DR_INFO's misalignment
1725 : - to 0 if it has the same alignment as DR_PEEL_INFO,
1726 : - to the misalignment computed using NPEEL if DR_INFO's misalignment is known,
1727 : - to -1 (unknown) otherwise.
1728 :
1729 : DR_INFO - the data reference whose misalignment is to be adjusted.
1730 : DR_PEEL_INFO - the data reference whose misalignment is being made
1731 : zero in the vector loop by the peel.
1732 : NPEEL - the number of iterations in the peel loop if the misalignment
1733 : of DR_PEEL_INFO is known at compile time. */
1734 :
1735 : static void
1736 2791 : vect_update_misalignment_for_peel (dr_vec_info *dr_info,
1737 : dr_vec_info *dr_peel_info, int npeel)
1738 : {
1739 : /* If dr_info is aligned whenever dr_peel_info is, then mark it so. */
1740 2791 : if (vect_dr_aligned_if_peeled_dr_is (dr_info, dr_peel_info))
1741 : {
1742 444 : SET_DR_MISALIGNMENT (dr_info,
1743 : vect_dr_misalign_for_aligned_access (dr_peel_info));
1744 444 : return;
1745 : }
1746 :
1747 2347 : unsigned HOST_WIDE_INT alignment;
1748 2347 : if (DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment)
1749 2347 : && known_alignment_for_access_p (dr_info,
1750 2347 : STMT_VINFO_VECTYPE (dr_info->stmt))
1751 250 : && known_alignment_for_access_p (dr_peel_info,
1752 250 : STMT_VINFO_VECTYPE (dr_peel_info->stmt)))
1753 : {
1754 202 : int misal = dr_info->misalignment;
1755 202 : misal += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
1756 202 : misal &= alignment - 1;
1757 202 : set_dr_misalignment (dr_info, misal);
1758 202 : return;
1759 : }
1760 :
1761 2145 : if (dump_enabled_p ())
1762 36 : dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
1763 : "to unknown (-1).\n");
1764 2145 : SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
1765 : }
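:
: /* Worked example for the known-misalignment case above (illustrative
:    numbers): misalignment 8, DR_STEP 4, NPEEL 2 and a 16-byte target
:    alignment give 8 + 2 * 4 = 16 and 16 & 15 = 0, i.e. the access
:    becomes aligned by the same peel. */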
1766 :
1767 : /* Return true if alignment is relevant for DR_INFO. */
1768 :
1769 : static bool
1770 1795466 : vect_relevant_for_alignment_p (dr_vec_info *dr_info)
1771 : {
1772 1795466 : stmt_vec_info stmt_info = dr_info->stmt;
1773 :
1774 1795466 : if (!STMT_VINFO_RELEVANT_P (stmt_info))
1775 : return false;
1776 :
1777 : /* For interleaving, only the alignment of the first access matters. */
1778 1794016 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1779 2036434 : && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1780 : return false;
1781 :
1782 : /* Scatter-gather and invariant accesses continue to address individual
1783 : scalars, so vector-level alignment is irrelevant. */
1784 1687142 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
1785 1687142 : || integer_zerop (DR_STEP (dr_info->dr)))
1786 54783 : return false;
1787 :
1788 : /* Strided accesses perform only component accesses, alignment is
1789 : irrelevant for them. */
1790 1632359 : if (STMT_VINFO_STRIDED_P (stmt_info)
1791 1632359 : && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1792 : return false;
1793 :
1794 : return true;
1795 : }
1796 :
1797 : /* Given a memory reference EXP return whether its alignment is less
1798 : than its size. */
1799 :
1800 : static bool
1801 1589126 : not_size_aligned (tree exp)
1802 : {
1803 1589126 : if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
1804 : return true;
1805 :
1806 1589126 : return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
1807 1589126 : > get_object_alignment (exp));
1808 : }
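:
: /* For example, a 4-byte int member of a struct declared with
:    __attribute__((packed)) can have a get_object_alignment of only
:    8 bits, smaller than its 32-bit TYPE_SIZE, so the reference is
:    not size-aligned. */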
1809 :
1810 : /* Function vector_alignment_reachable_p
1811 :
1812 : Return true if vector alignment for DR_INFO is reachable by peeling
1813 : a few loop iterations. Return false otherwise. */
1814 :
1815 : static bool
1816 610543 : vector_alignment_reachable_p (dr_vec_info *dr_info, poly_uint64 vf)
1817 : {
1818 610543 : stmt_vec_info stmt_info = dr_info->stmt;
1819 610543 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1820 610543 : poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1821 1221086 : poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
1822 610543 : unsigned elem_size = vector_element_size (vector_size, nelements);
1823 610543 : unsigned group_size = 1;
1824 :
1825 610543 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1826 : {
1827 : /* For interleaved access we peel only if the number of iterations in
1828 : the prolog loop (VF - misalignment) is a multiple of the
1829 : number of the interleaved accesses. */
1830 :
1831 : /* FORNOW: handle only known alignment. */
1832 87343 : if (!known_alignment_for_access_p (dr_info, vectype))
1833 610543 : return false;
1834 :
1835 51951 : unsigned mis_in_elements = dr_misalignment (dr_info, vectype) / elem_size;
1836 64399 : if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
1837 : return false;
1838 :
1839 12448 : group_size = DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
1840 : }
1841 :
1842 : /* If the vectorization factor does not guarantee DR advancement of
1843 : a multiple of the target alignment no peeling will help. */
1844 535648 : if (!multiple_p (elem_size * group_size * vf, dr_target_alignment (dr_info)))
1845 154 : return false;
1846 :
1847 : /* If misalignment is known at the compile time then allow peeling
1848 : only if natural alignment is reachable through peeling. */
1849 535494 : if (known_alignment_for_access_p (dr_info, vectype)
1850 835198 : && !aligned_access_p (dr_info, vectype))
1851 : {
1852 14225 : HOST_WIDE_INT elmsize =
1853 14225 : int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1854 14225 : if (dump_enabled_p ())
1855 : {
1856 768 : dump_printf_loc (MSG_NOTE, vect_location,
1857 : "data size = %wd. misalignment = %d.\n", elmsize,
1858 : dr_misalignment (dr_info, vectype));
1859 : }
1860 14225 : if (dr_misalignment (dr_info, vectype) % elmsize)
1861 : {
1862 72 : if (dump_enabled_p ())
1863 7 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1864 : "data size does not divide the misalignment.\n");
1865 72 : return false;
1866 : }
1867 : }
1868 :
1869 535422 : if (!known_alignment_for_access_p (dr_info, vectype))
1870 : {
1871 235790 : tree type = TREE_TYPE (DR_REF (dr_info->dr));
1872 235790 : bool is_packed = not_size_aligned (DR_REF (dr_info->dr));
1873 235790 : if (dump_enabled_p ())
1874 15957 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1875 : "Unknown misalignment, %snaturally aligned\n",
1876 : is_packed ? "not " : "");
1877 235790 : return targetm.vectorize.vector_alignment_reachable (type, is_packed);
1878 : }
1879 :
1880 : return true;
1881 : }
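:
: /* For example, a V8HI access (8 elements) misaligned by 4 elements in a
:    group of size 4 needs a prolog of 8 - 4 = 4 scalar iterations, a
:    multiple of the group size, so peeling can reach alignment; were the
:    misalignment 3 elements, 8 - 3 = 5 is not, and the function returns
:    false. */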
1882 :
1883 :
1884 : /* Calculate the cost of the memory access represented by DR_INFO. */
1885 :
1886 : static void
1887 729262 : vect_get_data_access_cost (vec_info *vinfo, dr_vec_info *dr_info,
1888 : dr_alignment_support alignment_support_scheme,
1889 : int misalignment,
1890 : unsigned int *inside_cost,
1891 : unsigned int *outside_cost,
1892 : stmt_vector_for_cost *body_cost_vec,
1893 : stmt_vector_for_cost *prologue_cost_vec)
1894 : {
1895 729262 : stmt_vec_info stmt_info = dr_info->stmt;
1896 :
1897 729262 : if (DR_IS_READ (dr_info->dr))
1898 509733 : vect_get_load_cost (vinfo, stmt_info, NULL, 1,
1899 : alignment_support_scheme, misalignment, true,
1900 : inside_cost, outside_cost, prologue_cost_vec,
1901 : body_cost_vec, false);
1902 : else
1903 219529 : vect_get_store_cost (vinfo, stmt_info, NULL, 1,
1904 : alignment_support_scheme, misalignment, inside_cost,
1905 : body_cost_vec);
1906 :
1907 729262 : if (dump_enabled_p ())
1908 29833 : dump_printf_loc (MSG_NOTE, vect_location,
1909 : "vect_get_data_access_cost: inside_cost = %d, "
1910 : "outside_cost = %d.\n", *inside_cost, *outside_cost);
1911 729262 : }
1912 :
1913 :
1914 : typedef struct _vect_peel_info
1915 : {
1916 : dr_vec_info *dr_info;
1917 : int npeel;
1918 : unsigned int count;
1919 : } *vect_peel_info;
1920 :
1921 : typedef struct _vect_peel_extended_info
1922 : {
1923 : vec_info *vinfo;
1924 : struct _vect_peel_info peel_info;
1925 : unsigned int inside_cost;
1926 : unsigned int outside_cost;
1927 : } *vect_peel_extended_info;
1928 :
1929 :
1930 : /* Peeling hashtable helpers. */
1931 :
1932 : struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
1933 : {
1934 : static inline hashval_t hash (const _vect_peel_info *);
1935 : static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
1936 : };
1937 :
1938 : inline hashval_t
1939 744917 : peel_info_hasher::hash (const _vect_peel_info *peel_info)
1940 : {
1941 744917 : return (hashval_t) peel_info->npeel;
1942 : }
1943 :
1944 : inline bool
1945 387320 : peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
1946 : {
1947 387320 : return (a->npeel == b->npeel);
1948 : }
1949 :
1950 :
1951 : /* Insert DR_INFO into peeling hash table with NPEEL as key. */
1952 :
1953 : static void
1954 358261 : vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
1955 : loop_vec_info loop_vinfo, dr_vec_info *dr_info,
1956 : int npeel, bool supportable_if_not_aligned)
1957 : {
1958 358261 : struct _vect_peel_info elem, *slot;
1959 358261 : _vect_peel_info **new_slot;
1960 :
1961 358261 : elem.npeel = npeel;
1962 358261 : slot = peeling_htab->find (&elem);
1963 358261 : if (slot)
1964 157319 : slot->count++;
1965 : else
1966 : {
1967 200942 : slot = XNEW (struct _vect_peel_info);
1968 200942 : slot->npeel = npeel;
1969 200942 : slot->dr_info = dr_info;
1970 200942 : slot->count = 1;
1971 200942 : new_slot = peeling_htab->find_slot (slot, INSERT);
1972 200942 : *new_slot = slot;
1973 : }
1974 :
1975 : /* If this DR is not supported with unknown misalignment then bias
1976 : this slot when the cost model is disabled. */
1977 358261 : if (!supportable_if_not_aligned
1978 358261 : && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1979 4584 : slot->count += VECT_MAX_COST;
1980 358261 : }
1981 :
1982 :
1983 : /* Traverse the peeling hash table to find the peeling option that aligns
1984 : the maximum number of data accesses. */
1985 :
1986 : int
1987 35624 : vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1988 : _vect_peel_extended_info *max)
1989 : {
1990 35624 : vect_peel_info elem = *slot;
1991 :
1992 35624 : if (elem->count > max->peel_info.count
1993 21645 : || (elem->count == max->peel_info.count
1994 16935 : && max->peel_info.npeel > elem->npeel))
1995 : {
1996 13995 : max->peel_info.npeel = elem->npeel;
1997 13995 : max->peel_info.count = elem->count;
1998 13995 : max->peel_info.dr_info = elem->dr_info;
1999 : }
2000 :
2001 35624 : return 1;
2002 : }
2003 :
2004 : /* Get the costs of peeling NPEEL iterations for LOOP_VINFO, checking
2005 : data access costs for all data refs. If NPEEL is nonzero, DR0_INFO is
2006 : the data reference the peeling targets; it and any data ref that
2007 : becomes aligned together with it are costed as aligned. */
2008 :
2009 : static void
2010 399418 : vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo,
2011 : dr_vec_info *dr0_info,
2012 : unsigned int *inside_cost,
2013 : unsigned int *outside_cost,
2014 : stmt_vector_for_cost *body_cost_vec,
2015 : stmt_vector_for_cost *prologue_cost_vec,
2016 : unsigned int npeel)
2017 : {
2018 399418 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2019 :
2020 399418 : bool dr0_alignment_known_p
2021 : = (dr0_info
2022 731645 : && known_alignment_for_access_p (dr0_info,
2023 332227 : STMT_VINFO_VECTYPE (dr0_info->stmt)));
2024 :
2025 1965397 : for (data_reference *dr : datarefs)
2026 : {
2027 767143 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2028 767143 : if (!vect_relevant_for_alignment_p (dr_info))
2029 37881 : continue;
2030 :
2031 729262 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2032 729262 : dr_alignment_support alignment_support_scheme;
2033 729262 : int misalignment;
2034 729262 : unsigned HOST_WIDE_INT alignment;
2035 :
2036 729262 : bool negative = tree_int_cst_compare (DR_STEP (dr_info->dr),
2037 729262 : size_zero_node) < 0;
2038 729262 : poly_int64 off = 0;
2039 729262 : if (negative)
2040 23972 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2041 23972 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2042 :
2043 729262 : if (npeel == 0)
2044 371224 : misalignment = dr_misalignment (dr_info, vectype, off);
2045 358038 : else if (dr_info == dr0_info
2046 358038 : || vect_dr_aligned_if_peeled_dr_is (dr_info, dr0_info))
2047 : misalignment = 0;
2048 125233 : else if (!dr0_alignment_known_p
2049 8095 : || !known_alignment_for_access_p (dr_info, vectype)
2050 133328 : || !DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment))
2051 : misalignment = DR_MISALIGNMENT_UNKNOWN;
2052 : else
2053 : {
2054 7086 : misalignment = dr_misalignment (dr_info, vectype, off);
2055 7086 : misalignment += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
2056 7086 : misalignment &= alignment - 1;
2057 : }
2058 729262 : alignment_support_scheme
2059 729262 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2060 : misalignment);
2061 :
2062 729262 : vect_get_data_access_cost (loop_vinfo, dr_info,
2063 : alignment_support_scheme, misalignment,
2064 : inside_cost, outside_cost,
2065 : body_cost_vec, prologue_cost_vec);
2066 : }
2067 399418 : }
2068 :
2069 : /* Traverse peeling hash table and calculate cost for each peeling option.
2070 : Find the one with the lowest cost. */
2071 :
2072 : int
2073 145701 : vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
2074 : _vect_peel_extended_info *min)
2075 : {
2076 145701 : vect_peel_info elem = *slot;
2077 145701 : unsigned int inside_cost = 0, outside_cost = 0;
2078 145701 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (min->vinfo);
2079 145701 : stmt_vector_for_cost prologue_cost_vec, body_cost_vec;
2080 :
2081 145701 : prologue_cost_vec.create (2);
2082 145701 : body_cost_vec.create (2);
2083 :
2084 145701 : vect_get_peeling_costs_all_drs (loop_vinfo, elem->dr_info, &inside_cost,
2085 : &outside_cost, &body_cost_vec,
2086 145701 : &prologue_cost_vec, elem->npeel);
2087 :
2088 145701 : body_cost_vec.release ();
2089 145701 : prologue_cost_vec.release ();
2090 :
2091 145701 : outside_cost += vect_get_known_peeling_cost (loop_vinfo, elem->npeel);
2092 :
2093 145701 : if (inside_cost < min->inside_cost
2094 1671 : || (inside_cost == min->inside_cost
2095 1247 : && outside_cost < min->outside_cost))
2096 : {
2097 144036 : min->inside_cost = inside_cost;
2098 144036 : min->outside_cost = outside_cost;
2099 144036 : min->peel_info.dr_info = elem->dr_info;
2100 144036 : min->peel_info.npeel = elem->npeel;
2101 144036 : min->peel_info.count = elem->count;
2102 : }
2103 :
2104 145701 : return 1;
2105 : }
2106 :
2107 :
2108 : /* Choose best peeling option by traversing peeling hash table and either
2109 : choosing an option with the lowest cost (if cost model is enabled) or the
2110 : option that aligns as many accesses as possible. */
2111 :
2112 : static struct _vect_peel_extended_info
2113 156652 : vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
2114 : loop_vec_info loop_vinfo)
2115 : {
2116 156652 : struct _vect_peel_extended_info res;
2117 :
2118 156652 : res.peel_info.dr_info = NULL;
2119 156652 : res.vinfo = loop_vinfo;
2120 :
2121 156652 : if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
2122 : {
2123 142726 : res.inside_cost = INT_MAX;
2124 142726 : res.outside_cost = INT_MAX;
2125 142726 : peeling_htab->traverse <_vect_peel_extended_info *,
2126 288427 : vect_peeling_hash_get_lowest_cost> (&res);
2127 : }
2128 : else
2129 : {
2130 13926 : res.peel_info.count = 0;
2131 13926 : peeling_htab->traverse <_vect_peel_extended_info *,
2132 49550 : vect_peeling_hash_get_most_frequent> (&res);
2133 13926 : res.inside_cost = 0;
2134 13926 : res.outside_cost = 0;
2135 : }
2136 :
2137 156652 : return res;
2138 : }
2139 :
2140 : /* Return whether vectorization is definitely, possibly, or unlikely to be
2141 : supportable after loop peeling. */
2142 :
2143 : static enum peeling_support
2144 77270 : vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info,
2145 : unsigned npeel)
2146 : {
2147 77270 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2148 77270 : enum dr_alignment_support supportable_dr_alignment;
2149 :
2150 77270 : bool dr0_alignment_known_p
2151 154540 : = known_alignment_for_access_p (dr0_info,
2152 77270 : STMT_VINFO_VECTYPE (dr0_info->stmt));
2153 77270 : bool has_unsupported_dr_p = false;
2154 77270 : unsigned int dr0_step = tree_to_shwi (DR_STEP (dr0_info->dr));
2155 77270 : int known_unsupported_misalignment = DR_MISALIGNMENT_UNKNOWN;
2156 :
2157 : /* Check if each data ref can be vectorized after peeling. */
2158 330656 : for (data_reference *dr : datarefs)
2159 : {
2160 114622 : if (dr == dr0_info->dr)
2161 76328 : continue;
2162 :
2163 38294 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2164 38294 : if (!vect_relevant_for_alignment_p (dr_info)
2165 38294 : || vect_dr_aligned_if_peeled_dr_is (dr_info, dr0_info))
2166 6675 : continue;
2167 :
2168 31619 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2169 31619 : int misalignment;
2170 31619 : unsigned HOST_WIDE_INT alignment;
2171 31619 : if (!dr0_alignment_known_p
2172 1854 : || !known_alignment_for_access_p (dr_info, vectype)
2173 33473 : || !DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment))
2174 : misalignment = DR_MISALIGNMENT_UNKNOWN;
2175 : else
2176 : {
2177 1840 : misalignment = dr_misalignment (dr_info, vectype);
2178 1840 : misalignment += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
2179 1840 : misalignment &= alignment - 1;
2180 : }
2181 31619 : supportable_dr_alignment
2182 31619 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2183 : misalignment);
2184 31619 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2185 : {
2186 30164 : has_unsupported_dr_p = true;
2187 :
2188 : /* If unaligned unsupported DRs exist, we do following checks to see
2189 : /* If unaligned unsupported DRs exist, we do the following checks to see
2190 : we can try peeling and create a runtime (mutual alignment) check
2191 : to guard the peeled loop. If no, return PEELING_UNSUPPORTED. */
2192 :
2193 : /* 1) If unaligned unsupported DRs have different alignment steps, the
2194 : probability of DRs being mutually aligned is very low, and it's
2195 : quite complex to check mutual alignment at runtime. We return
2196 : PEELING_UNSUPPORTED in this case. */
2197 30164 : if (tree_to_shwi (DR_STEP (dr)) != dr0_step)
2198 77270 : return peeling_unsupported;
2199 :
2200 : /* 2) Based on above same alignment step condition, if one known
2201 : misaligned DR has zero misalignment, or different misalignment
2202 : amount from another known misaligned DR, peeling is unable to
2203 : help make all these DRs aligned together. We won't try peeling
2204 : with versioning anymore. */
2205 25924 : int curr_dr_misalignment = dr_misalignment (dr_info, vectype);
2206 25924 : if (curr_dr_misalignment == 0)
2207 : return peeling_unsupported;
2208 14388 : if (known_unsupported_misalignment != DR_MISALIGNMENT_UNKNOWN)
2209 : {
2210 8 : if (curr_dr_misalignment != DR_MISALIGNMENT_UNKNOWN
2211 8 : && curr_dr_misalignment != known_unsupported_misalignment)
2212 : return peeling_unsupported;
2213 : }
2214 : else
2215 : known_unsupported_misalignment = curr_dr_misalignment;
2216 : }
2217 : }
2218 :
2219 : /* Vectorization is known to be supportable with peeling alone when there is
2220 : no unsupported DR. */
2221 61494 : return has_unsupported_dr_p ? peeling_maybe_supported
2222 : : peeling_known_supported;
2223 : }
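:
: /* For example, two unsupportable DRs that both step by 4 bytes with
:    unknown misalignments yield PEELING_MAYBE_SUPPORTED: a runtime check
:    can verify that they are mutually aligned. If one steps by 4 and the
:    other by 8, mutual alignment is unlikely and expensive to check, so
:    PEELING_UNSUPPORTED is returned. */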
2224 :
2225 : /* Compare two data-references DRA and DRB to group them into chunks
2226 : with related alignment. */
2227 :
2228 : static int
2229 4588389 : dr_align_group_sort_cmp (const void *dra_, const void *drb_)
2230 : {
2231 4588389 : data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2232 4588389 : data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2233 4588389 : int cmp;
2234 :
2235 : /* Stabilize sort. */
2236 4588389 : if (dra == drb)
2237 : return 0;
2238 :
2239 : /* Ordering of DRs according to base. */
2240 4588389 : cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2241 : DR_BASE_ADDRESS (drb));
2242 4588389 : if (cmp != 0)
2243 : return cmp;
2244 :
2245 : /* And according to DR_OFFSET. */
2246 2025395 : cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2247 2025395 : if (cmp != 0)
2248 : return cmp;
2249 :
2250 : /* And after step. */
2251 2011163 : cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
2252 2011163 : if (cmp != 0)
2253 : return cmp;
2254 :
2255 : /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2256 2005936 : cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
2257 2005936 : if (cmp == 0)
2258 236885 : return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2259 : return cmp;
2260 : }
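:
: /* For example, accesses a[i], a[i+1] and b[i] sort so that the two a[]
:    refs (same base, offset and step, differing only in DR_INIT) become
:    neighbors, letting callers walk each alignment-related subgroup as
:    one contiguous run. */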
2261 :
2262 : /* Function vect_enhance_data_refs_alignment
2263 :
2264 : This pass will use loop versioning and loop peeling in order to enhance
2265 : the alignment of data references in the loop.
2266 :
2267 : FOR NOW: we assume that whatever versioning/peeling takes place, only the
2268 : original loop is to be vectorized. Any other loops that are created by
2269 : the transformations performed in this pass - are not supposed to be
2270 : vectorized. This restriction will be relaxed.
2271 :
2272 : This pass will require a cost model to guide it whether to apply peeling
2273 : or versioning or a combination of the two. For example, the scheme that
2274 : Intel uses when given a loop with several memory accesses, is as follows:
2275 : choose one memory access ('p') whose alignment you want to force by doing
2276 : peeling. Then, either (1) generate a loop in which 'p' is aligned and all
2277 : other accesses are not necessarily aligned, or (2) use loop versioning to
2278 : generate one loop in which all accesses are aligned, and another loop in
2279 : which only 'p' is necessarily aligned.
2280 :
2281 : ("Automatic Intra-Register Vectorization for the Intel Architecture",
2282 : Aart J.C. Bik, Milind Girkar, Paul M. Grey and Xinmin Tian, International
2283 : Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
2284 :
2285 : Devising a cost model is the most critical aspect of this work. It will
2286 : guide us on which access to peel for, whether to use loop versioning, how
2287 : many versions to create, etc. The cost model will probably consist of
2288 : generic considerations as well as target specific considerations (on
2289 : powerpc for example, misaligned stores are more painful than misaligned
2290 : loads).
2291 :
2292 : Here are the general steps involved in alignment enhancements:
2293 :
2294 : -- original loop, before alignment analysis:
2295 : for (i=0; i<N; i++){
2296 : x = q[i]; # DR_MISALIGNMENT(q) = unknown
2297 : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2298 : }
2299 :
2300 : -- After vect_compute_data_refs_alignment:
2301 : for (i=0; i<N; i++){
2302 : x = q[i]; # DR_MISALIGNMENT(q) = 3
2303 : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2304 : }
2305 :
2306 : -- Possibility 1: we do loop versioning:
2307 : if (p is aligned) {
2308 : for (i=0; i<N; i++){ # loop 1A
2309 : x = q[i]; # DR_MISALIGNMENT(q) = 3
2310 : p[i] = y; # DR_MISALIGNMENT(p) = 0
2311 : }
2312 : }
2313 : else {
2314 : for (i=0; i<N; i++){ # loop 1B
2315 : x = q[i]; # DR_MISALIGNMENT(q) = 3
2316 : p[i] = y; # DR_MISALIGNMENT(p) = unaligned
2317 : }
2318 : }
2319 :
2320 : -- Possibility 2: we do loop peeling:
2321 : for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
2322 : x = q[i];
2323 : p[i] = y;
2324 : }
2325 : for (i = 3; i < N; i++){ # loop 2A
2326 : x = q[i]; # DR_MISALIGNMENT(q) = 0
2327 : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2328 : }
2329 :
2330 : -- Possibility 3: combination of loop peeling and versioning:
2331 : if (p & q are mutually aligned) {
2332 : for (i=0; i<3; i++){ # (peeled loop iterations).
2333 : x = q[i];
2334 : p[i] = y;
2335 : }
2336 : for (i=3; i<N; i++){ # loop 3A
2337 : x = q[i]; # DR_MISALIGNMENT(q) = 0
2338 : p[i] = y; # DR_MISALIGNMENT(p) = 0
2339 : }
2340 : }
2341 : else {
2342 : for (i=0; i<N; i++){ # (scalar loop, not to be vectorized).
2343 : x = q[i]; # DR_MISALIGNMENT(q) = 3
2344 : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2345 : }
2346 : }
2347 :
2348 : These loops are later passed to loop_transform to be vectorized. The
2349 : vectorizer will use the alignment information to guide the transformation
2350 : (whether to generate regular loads/stores, or with special handling for
2351 : misalignment). */
2352 :
2353 : opt_result
2354 378749 : vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
2355 : {
2356 378749 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2357 378749 : dr_vec_info *first_store = NULL;
2358 378749 : dr_vec_info *dr0_info = NULL;
2359 378749 : struct data_reference *dr;
2360 378749 : unsigned int i;
2361 378749 : bool do_peeling = false;
2362 378749 : bool do_versioning = false;
2363 378749 : bool try_peeling_with_versioning = false;
2364 378749 : unsigned int npeel = 0;
2365 378749 : bool one_misalignment_known = false;
2366 378749 : bool one_misalignment_unknown = false;
2367 378749 : bool one_dr_unsupportable = false;
2368 378749 : dr_vec_info *unsupportable_dr_info = NULL;
2369 378749 : unsigned int dr0_same_align_drs = 0, first_store_same_align_drs = 0;
2370 378749 : hash_table<peel_info_hasher> peeling_htab (1);
2371 :
2372 378749 : DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
2373 :
2374 : /* Reset data so we can safely be called multiple times. */
2375 378749 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
2376 378749 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
2377 :
2378 378749 : if (LOOP_VINFO_DATAREFS (loop_vinfo).is_empty ())
2379 14031 : return opt_result::success ();
2380 :
2381 : /* Sort the vector of datarefs so DRs that have the same or dependent
2382 : alignment are next to each other. */
2383 364718 : auto_vec<data_reference_p> datarefs
2384 364718 : = LOOP_VINFO_DATAREFS (loop_vinfo).copy ();
2385 364718 : datarefs.qsort (dr_align_group_sort_cmp);
2386 :
2387 : /* Compute the number of DRs that become aligned when we peel
2388 : a dataref so it becomes aligned. */
2389 729436 : auto_vec<unsigned> n_same_align_refs (datarefs.length ());
2390 364718 : n_same_align_refs.quick_grow_cleared (datarefs.length ());
2391 364718 : unsigned i0;
2392 749115 : for (i0 = 0; i0 < datarefs.length (); ++i0)
2393 377785 : if (DR_BASE_ADDRESS (datarefs[i0]))
2394 : break;
2395 2374254 : for (i = i0 + 1; i <= datarefs.length (); ++i)
2396 : {
2397 822409 : if (i == datarefs.length ()
2398 464303 : || !operand_equal_p (DR_BASE_ADDRESS (datarefs[i0]),
2399 464303 : DR_BASE_ADDRESS (datarefs[i]), 0)
2400 217966 : || !operand_equal_p (DR_OFFSET (datarefs[i0]),
2401 217966 : DR_OFFSET (datarefs[i]), 0)
2402 1039066 : || !operand_equal_p (DR_STEP (datarefs[i0]),
2403 216657 : DR_STEP (datarefs[i]), 0))
2404 : {
2405 : /* The subgroup [i0, i-1] now only differs in DR_INIT and
2406 : possibly DR_TARGET_ALIGNMENT. Still the whole subgroup
2407 : will get known misalignment if we align one of the refs
2408 : with the largest DR_TARGET_ALIGNMENT. */
2409 1428705 : for (unsigned j = i0; j < i; ++j)
2410 : {
2411 822409 : dr_vec_info *dr_infoj = loop_vinfo->lookup_dr (datarefs[j]);
2412 3580942 : for (unsigned k = i0; k < i; ++k)
2413 : {
2414 2758533 : if (k == j)
2415 822409 : continue;
2416 1936124 : dr_vec_info *dr_infok = loop_vinfo->lookup_dr (datarefs[k]);
2417 1936124 : if (vect_dr_aligned_if_related_peeled_dr_is (dr_infok,
2418 : dr_infoj))
2419 709456 : n_same_align_refs[j]++;
2420 : }
2421 : }
2422 : i0 = i;
2423 : }
2424 : }
2425 :
2426 : /* While cost model enhancements are expected in the future, the high level
2427 : view of the code at this time is as follows:
2428 :
2429 : A) If there is a misaligned access then see if doing peeling alone can
2430 : make all data references satisfy vect_supportable_dr_alignment. If so,
2431 : update data structures and return.
2432 :
2433 : B) If peeling alone wasn't possible and there is a data reference with an
2434 : unknown misalignment that does not satisfy vect_supportable_dr_alignment
2435 : then we may use either of the following two approaches.
2436 :
2437 : B1) Try peeling with versioning: Add a runtime loop versioning check to
2438 : see if all unsupportable data references are mutually aligned, which
2439 : means they will be uniformly aligned after a certain amount of loop
2440 : peeling. If peeling and versioning can be used together, set
2441 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT_P to TRUE and return.
2442 :
2443 : B2) Try versioning alone: Add a runtime loop versioning check to see if
2444 : all unsupportable data references are already uniformly aligned
2445 : without loop peeling. If versioning can be applied alone, set
2446 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT_P to FALSE and return.
2447 :
2448 : Above B1 is more powerful and more likely to be adopted than B2. But B2
2449 : is still available and useful in some cases, for example, the cost model
2450 : does not allow much peeling.
2451 :
2452 : C) If none of above was successful then the alignment was not enhanced,
2453 : just return. */
2454 :
2455 : /* (1) Peeling to force alignment. */
2456 :
2457 : /* (1.1) Decide whether to perform peeling, how many iterations to peel, and
2458 : if vectorization may be supported by peeling with versioning.
2459 : Considerations:
2460 : - How many accesses will become aligned due to the peeling
2461 : - How many accesses will become unaligned due to the peeling,
2462 : and the cost of misaligned accesses.
2463 : - The cost of peeling (the extra runtime checks, the increase
2464 : in code size). */
2465 :
2466 364718 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2467 1038794 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2468 : {
2469 719314 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2470 719314 : if (!vect_relevant_for_alignment_p (dr_info))
2471 108771 : continue;
2472 :
2473 610543 : stmt_vec_info stmt_info = dr_info->stmt;
2474 610543 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2475 :
2476 : /* With variable VF, unsafe speculative read can be avoided for known
2477 : inbounds DRs as long as partial vectors are used. */
2478 610543 : if (!vf.is_constant ()
2479 : && dr_safe_speculative_read_required (stmt_info)
2480 : && DR_SCALAR_KNOWN_BOUNDS (dr_info))
2481 : {
2482 : dr_set_safe_speculative_read_required (stmt_info, false);
2483 : LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true;
2484 : }
2485 :
2486 610543 : do_peeling = vector_alignment_reachable_p (dr_info, vf);
2487 610543 : if (do_peeling)
2488 : {
2489 533234 : if (known_alignment_for_access_p (dr_info, vectype))
2490 : {
2491 299632 : unsigned int npeel_tmp = 0;
2492 299632 : bool negative = tree_int_cst_compare (DR_STEP (dr),
2493 299632 : size_zero_node) < 0;
2494 :
2495 : /* If known_alignment_for_access_p then we have set
2496 : DR_MISALIGNMENT which is only done if we know it at compile
2497 : time, so it is safe to assume target alignment is constant.
2498 : */
2499 299632 : unsigned int target_align =
2500 299632 : DR_TARGET_ALIGNMENT (dr_info).to_constant ();
2501 299632 : unsigned HOST_WIDE_INT dr_size = vect_get_scalar_dr_size (dr_info);
2502 299632 : poly_int64 off = 0;
2503 299632 : if (negative)
2504 2552 : off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size;
2505 299632 : unsigned int mis = dr_misalignment (dr_info, vectype, off);
2506 299632 : mis = negative ? mis : -mis;
2507 299632 : if (mis != 0)
2508 13198 : npeel_tmp = (mis & (target_align - 1)) / dr_size;
2509 :
2510 : /* For multiple types, it is possible that the bigger type access
2511 : will have more than one peeling option. E.g., a loop with two
2512 : types: one of size (vector size / 4), and the other one of
2513 : size (vector size / 8). Vectorization factor will 8. If both
2514 : size (vector size / 8). The vectorization factor will be 8. If both
2515 : iteration to be aligned, and the second one needs 5. But the
2516 : first one will be aligned also by peeling 5 scalar
2517 : iterations, and in that case both accesses will be aligned.
2518 : Hence, except for the immediate peeling amount, we also want
2519 : to try to add full vector size, while we don't exceed
2520 : vectorization factor.
2521 : We do this automatically for cost model, since we calculate
2522 : cost for every peeling option. */
2523 299632 : poly_uint64 nscalars = npeel_tmp;
2524 299632 : if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
2525 : {
2526 39630 : unsigned group_size = 1;
2527 39630 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2528 1909 : group_size = DR_GROUP_SIZE (stmt_info);
2529 39630 : nscalars = vf * group_size;
2530 : }
2531 :
2532 : /* Save info about DR in the hash table. Also include peeling
2533 : amounts according to the explanation above. Indicate
2534 : the alignment status when the ref is not aligned.
2535 : ??? Rather than using unknown alignment here we should
2536 : prune all entries from the peeling hashtable which cause
2537 : DRs to be not supported. */
2538 299632 : bool supportable_if_not_aligned
2539 : = vect_supportable_dr_alignment
2540 299632 : (loop_vinfo, dr_info, vectype, DR_MISALIGNMENT_UNKNOWN);
2541 657893 : while (known_le (npeel_tmp, nscalars))
2542 : {
2543 358261 : vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
2544 : dr_info, npeel_tmp,
2545 : supportable_if_not_aligned);
2546 358261 : npeel_tmp += MAX (1, target_align / dr_size);
2547 : }
2548 :
2549 299632 : one_misalignment_known = true;
2550 : }
2551 : else
2552 : {
2553 : /* If we don't know any misalignment values, we prefer
2554 : peeling for data-ref that has the maximum number of data-refs
2555 : with the same alignment, unless the target prefers to align
2556 : stores over loads. */
2557 233602 : unsigned same_align_drs = n_same_align_refs[i];
2558 233602 : if (!dr0_info
2559 233602 : || dr0_same_align_drs < same_align_drs)
2560 : {
2561 : dr0_same_align_drs = same_align_drs;
2562 : dr0_info = dr_info;
2563 : }
2564 : /* For data-refs with the same number of related
2565 : accesses prefer the one where the misalign
2566 : computation will be invariant in the outermost loop. */
2567 75828 : else if (dr0_same_align_drs == same_align_drs)
2568 : {
2569 74393 : class loop *ivloop0, *ivloop;
2570 74393 : ivloop0 = outermost_invariant_loop_for_expr
2571 74393 : (loop, DR_BASE_ADDRESS (dr0_info->dr));
2572 74393 : ivloop = outermost_invariant_loop_for_expr
2573 74393 : (loop, DR_BASE_ADDRESS (dr));
2574 74393 : if ((ivloop && !ivloop0)
2575 74393 : || (ivloop && ivloop0
2576 74385 : && flow_loop_nested_p (ivloop, ivloop0)))
2577 : dr0_info = dr_info;
2578 : }
2579 :
2580 233602 : one_misalignment_unknown = true;
2581 :
2582 : /* Check for data refs with unsupportable alignment that
2583 : can be peeled. */
2584 233602 : enum dr_alignment_support supportable_dr_alignment
2585 233602 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2586 : DR_MISALIGNMENT_UNKNOWN);
2587 233602 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2588 : {
2589 95401 : one_dr_unsupportable = true;
2590 95401 : unsupportable_dr_info = dr_info;
2591 : }
2592 :
2593 233602 : if (!first_store && DR_IS_WRITE (dr))
2594 : {
2595 50714 : first_store = dr_info;
2596 50714 : first_store_same_align_drs = same_align_drs;
2597 : }
2598 : }
2599 : }
2600 : else
2601 : {
2602 77309 : if (!aligned_access_p (dr_info, vectype))
2603 : {
2604 45238 : if (dump_enabled_p ())
2605 2080 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2606 : "vector alignment may not be reachable\n");
2607 : break;
2608 : }
2609 : }
2610 : }
2611 :
2612 : /* Check if we can possibly peel the loop. */
2613 364718 : if (!vect_can_advance_ivs_p (loop_vinfo)
2614 361261 : || !slpeel_can_duplicate_loop_p (loop, LOOP_VINFO_MAIN_EXIT (loop_vinfo),
2615 361261 : loop_preheader_edge (loop))
2616 361261 : || loop->inner
2617 : /* We don't currently maintain the LCSSA form for prologue-peeled inverted
2618 : loops. */
2619 724384 : || (LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo)
2620 28401 : && !LOOP_VINFO_NITERS_UNCOUNTED_P (loop_vinfo)))
2621 : do_peeling = false;
2622 :
2623 364718 : struct _vect_peel_extended_info peel_for_known_alignment;
2624 364718 : struct _vect_peel_extended_info peel_for_unknown_alignment;
2625 364718 : struct _vect_peel_extended_info best_peel;
2626 :
2627 364718 : peel_for_unknown_alignment.inside_cost = INT_MAX;
2628 364718 : peel_for_unknown_alignment.outside_cost = INT_MAX;
2629 364718 : peel_for_unknown_alignment.peel_info.count = 0;
2630 :
2631 364718 : if (do_peeling
2632 364718 : && one_misalignment_unknown)
2633 : {
2634 : /* Check if the target requires to prefer stores over loads, i.e., if
2635 : misaligned stores are more expensive than misaligned loads (taking
2636 : drs with same alignment into account). */
2637 142572 : unsigned int load_inside_cost = 0;
2638 142572 : unsigned int load_outside_cost = 0;
2639 142572 : unsigned int store_inside_cost = 0;
2640 142572 : unsigned int store_outside_cost = 0;
2641 142572 : unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
2642 :
2643 142572 : stmt_vector_for_cost dummy;
2644 142572 : dummy.create (2);
2645 142572 : vect_get_peeling_costs_all_drs (loop_vinfo, dr0_info,
2646 : &load_inside_cost,
2647 : &load_outside_cost,
2648 : &dummy, &dummy, estimated_npeels);
2649 142572 : dummy.release ();
2650 :
2651 142572 : if (first_store)
2652 : {
2653 43954 : dummy.create (2);
2654 43954 : vect_get_peeling_costs_all_drs (loop_vinfo, first_store,
2655 : &store_inside_cost,
2656 : &store_outside_cost,
2657 : &dummy, &dummy,
2658 : estimated_npeels);
2659 43954 : dummy.release ();
2660 : }
2661 : else
2662 : {
2663 98618 : store_inside_cost = INT_MAX;
2664 98618 : store_outside_cost = INT_MAX;
2665 : }
2666 :
2667 142572 : if (load_inside_cost > store_inside_cost
2668 142572 : || (load_inside_cost == store_inside_cost
2669 43385 : && load_outside_cost > store_outside_cost))
2670 : {
2671 142572 : dr0_info = first_store;
2672 142572 : dr0_same_align_drs = first_store_same_align_drs;
2673 142572 : peel_for_unknown_alignment.inside_cost = store_inside_cost;
2674 142572 : peel_for_unknown_alignment.outside_cost = store_outside_cost;
2675 : }
2676 : else
2677 : {
2678 142572 : peel_for_unknown_alignment.inside_cost = load_inside_cost;
2679 142572 : peel_for_unknown_alignment.outside_cost = load_outside_cost;
2680 : }
2681 :
2682 142572 : peel_for_unknown_alignment.outside_cost
2683 142572 : += vect_get_known_peeling_cost (loop_vinfo, estimated_npeels);
2684 :
2685 142572 : peel_for_unknown_alignment.peel_info.count = dr0_same_align_drs + 1;
2686 : }
2687 :
2688 364718 : peel_for_unknown_alignment.peel_info.npeel = 0;
2689 364718 : peel_for_unknown_alignment.peel_info.dr_info = dr0_info;
2690 :
2691 364718 : best_peel = peel_for_unknown_alignment;
2692 :
2693 364718 : peel_for_known_alignment.inside_cost = INT_MAX;
2694 364718 : peel_for_known_alignment.outside_cost = INT_MAX;
2695 364718 : peel_for_known_alignment.peel_info.count = 0;
2696 364718 : peel_for_known_alignment.peel_info.dr_info = NULL;
2697 :
2698 364718 : if (do_peeling && one_misalignment_known)
2699 : {
2700 : /* Peeling is possible, but there is no data access that is not supported
2701 : unless aligned. So we try to choose the best possible peeling from
2702 : the hash table. */
2703 156652 : peel_for_known_alignment = vect_peeling_hash_choose_best_peeling
2704 156652 : (&peeling_htab, loop_vinfo);
2705 : }
2706 :
2707 : /* Compare costs of peeling for known and unknown alignment. */
2708 364718 : if (peel_for_known_alignment.peel_info.dr_info != NULL
2709 156652 : && peel_for_unknown_alignment.inside_cost
2710 : >= peel_for_known_alignment.inside_cost)
2711 : {
2712 142514 : best_peel = peel_for_known_alignment;
2713 :
2714 : /* If the best peeling for known alignment has NPEEL == 0, perform no
2715 : peeling at all except if there is an unsupportable dr that we can
2716 : align. */
2717 142514 : if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable)
2718 : do_peeling = false;
2719 : }
2720 :
2721 : /* If there is an unsupportable data ref, prefer this over all choices so far
2722 : since we'd have to discard a chosen peeling except when it accidentally
2723 : aligned the unsupportable data ref. */
2724 229876 : if (one_dr_unsupportable)
2725 : dr0_info = unsupportable_dr_info;
2726 286931 : else if (do_peeling)
2727 : {
2728 : /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
2729 : TODO: Use nopeel_outside_cost or get rid of it? */
2730 67191 : unsigned nopeel_inside_cost = 0;
2731 67191 : unsigned nopeel_outside_cost = 0;
2732 :
2733 67191 : stmt_vector_for_cost dummy;
2734 67191 : dummy.create (2);
2735 67191 : vect_get_peeling_costs_all_drs (loop_vinfo, NULL, &nopeel_inside_cost,
2736 : &nopeel_outside_cost, &dummy, &dummy, 0);
2737 67191 : dummy.release ();
2738 :
2739 : /* Add epilogue costs. As we do not peel for alignment here, no prologue
2740 : costs will be recorded. */
2741 67191 : nopeel_outside_cost += vect_get_known_peeling_cost (loop_vinfo, 0);
2742 :
2743 67191 : npeel = best_peel.peel_info.npeel;
2744 67191 : dr0_info = best_peel.peel_info.dr_info;
2745 : /* If not peeling is no more expensive than the best peeling found
2746 : so far, don't perform any peeling. */
2747 : have so far, don't perform any peeling. */
2748 67191 : if (nopeel_inside_cost <= best_peel.inside_cost)
2749 60673 : do_peeling = false;
2750 : }
2751 :
2752 144978 : if (do_peeling)
2753 : {
2754 77270 : stmt_vec_info stmt_info = dr0_info->stmt;
2755 77270 : if (known_alignment_for_access_p (dr0_info,
2756 : STMT_VINFO_VECTYPE (stmt_info)))
2757 : {
2758 6495 : bool negative = tree_int_cst_compare (DR_STEP (dr0_info->dr),
2759 6495 : size_zero_node) < 0;
2760 6495 : if (!npeel)
2761 : {
2762 : /* Since it's known at compile time, compute the number of
2763 : iterations in the peeled loop (the peeling factor) for use in
2764 : updating DR_MISALIGNMENT values. The peeling factor is the
2765 : vectorization factor minus the misalignment as an element
2766 : count. */
2767 0 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2768 0 : poly_int64 off = 0;
2769 0 : if (negative)
2770 0 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2771 0 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2772 0 : unsigned int mis
2773 0 : = dr_misalignment (dr0_info, vectype, off);
2774 0 : mis = negative ? mis : -mis;
2775 : /* If known_alignment_for_access_p then we have set
2776 : DR_MISALIGNMENT which is only done if we know it at compile
2777 : time, so it is safe to assume target alignment is constant.
2778 : */
2779 0 : unsigned int target_align =
2780 0 : DR_TARGET_ALIGNMENT (dr0_info).to_constant ();
2781 0 : npeel = ((mis & (target_align - 1))
2782 0 : / vect_get_scalar_dr_size (dr0_info));
2783 : }
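:
: /* For example (illustrative numbers): with a positive step,
:    misalignment 4, a 16-byte target alignment and 4-byte elements,
:    mis = -4, -4 & 15 = 12 and 12 / 4 = 3 peeled iterations, after
:    which the access is 16-byte aligned. */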
2784 :
2785 : /* For interleaved data access every iteration accesses all the
2786 : members of the group, therefore we divide the number of iterations
2787 : by the group size. */
2788 6495 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2789 281 : npeel /= DR_GROUP_SIZE (stmt_info);
2790 :
2791 6495 : if (dump_enabled_p ())
2792 284 : dump_printf_loc (MSG_NOTE, vect_location,
2793 : "Try peeling by %d\n", npeel);
2794 : }
2795 :
2796 : /* Check how peeling for alignment can support vectorization. Function
2797 : vect_peeling_supportable returns one of the three possible values:
2798 : - PEELING_KNOWN_SUPPORTED: indicates that we know all unsupported
2799 : datarefs can be aligned after peeling. We can use peeling alone.
2800 : - PEELING_MAYBE_SUPPORTED: indicates that peeling may be able to make
2801 : these datarefs aligned but we are not sure about it at compile time.
2802 : We will try peeling with versioning to add a runtime check to guard
2803 : the peeled loop.
2804 : - PEELING_UNSUPPORTED: indicates that peeling is almost impossible to
2805 : support vectorization. We will stop trying peeling. */
2806 77270 : switch (vect_peeling_supportable (loop_vinfo, dr0_info, npeel))
2807 : {
2808 : case peeling_known_supported:
2809 : break;
2810 13194 : case peeling_maybe_supported:
2811 13194 : try_peeling_with_versioning = true;
2812 13194 : break;
2813 15776 : case peeling_unsupported:
2814 15776 : do_peeling = false;
2815 15776 : break;
2816 : }
2817 :
2818 : /* Check if all datarefs are supportable and log. */
2819 77270 : if (do_peeling
2820 77270 : && npeel == 0
2821 77270 : && known_alignment_for_access_p (dr0_info,
2822 : STMT_VINFO_VECTYPE (stmt_info)))
2823 3 : return opt_result::success ();
2824 :
2825 : /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
2826 77267 : if (do_peeling)
2827 : {
2828 61491 : unsigned max_allowed_peel
2829 61491 : = param_vect_max_peeling_for_alignment;
2830 61491 : if (loop_cost_model (loop) <= VECT_COST_MODEL_CHEAP)
2831 : max_allowed_peel = 0;
2832 13521 : if (max_allowed_peel != (unsigned)-1)
2833 : {
2834 47991 : unsigned max_peel = npeel;
2835 47991 : if (max_peel == 0)
2836 : {
2837 45236 : poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr0_info);
2838 45236 : unsigned HOST_WIDE_INT target_align_c;
2839 45236 : if (target_align.is_constant (&target_align_c))
2840 90472 : max_peel =
2841 45236 : target_align_c / vect_get_scalar_dr_size (dr0_info) - 1;
2842 : else
2843 : {
2844 : do_peeling = false;
2845 : if (dump_enabled_p ())
2846 : dump_printf_loc (MSG_NOTE, vect_location,
2847 : "Disable peeling, max peels set and vector"
2848 : " alignment unknown\n");
2849 : }
2850 : }
2851 47991 : if (max_peel > max_allowed_peel)
2852 : {
2853 47983 : do_peeling = false;
2854 47983 : if (dump_enabled_p ())
2855 53 : dump_printf_loc (MSG_NOTE, vect_location,
2856 : "Disable peeling, max peels reached: %d\n", max_peel);
2857 : }
2858 : }
2859 : }
2860 :
2861 : /* Cost model #2 - if peeling may result in a remaining loop not
2862 : iterating enough to be vectorized then do not peel. Since this
2863 : is a cost heuristic rather than a correctness decision, use the
2864 : most likely runtime value for variable vectorization factors. */
2865 53 : if (do_peeling
2866 13508 : && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2867 : {
2868 3213 : unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
2869 3213 : unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
2870 3213 : if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
2871 3213 : < assumed_vf + max_peel)
2872 : do_peeling = false;
2873 : }
2874 :
2875 : if (do_peeling)
2876 : {
2877 : /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
2878 : If the misalignment of DR_i is identical to that of dr0 then set
2879 : DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
2880 :          DR_MISALIGNMENT (DR_i) to zero.  If the misalignments of DR_i and
2881 :          dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
2882 : vectorization factor times the size). Otherwise, the
2883 : misalignment of DR_i must be set to unknown. */
2884 28543 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2885 15844 : if (dr != dr0_info->dr)
2886 : {
2887 3145 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2888 3145 : if (!vect_relevant_for_alignment_p (dr_info))
2889 354 : continue;
2890 :
2891 2791 : vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
2892 : }
2893 : }
2894 :
2895 77267 : if (do_peeling && !try_peeling_with_versioning)
2896 : {
2897 : /* Update data structures if peeling will be applied alone. */
2898 11661 : LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
2899 11661 : if (npeel)
2900 2104 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
2901 : else
2902 9557 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = -1;
2903 11661 : SET_DR_MISALIGNMENT (dr0_info,
2904 : vect_dr_misalign_for_aligned_access (dr0_info));
2905 11661 : if (dump_enabled_p ())
2906 : {
2907 344 : dump_printf_loc (MSG_NOTE, vect_location,
2908 : "Alignment of access forced using peeling.\n");
2909 344 : dump_printf_loc (MSG_NOTE, vect_location,
2910 : "Peeling for alignment will be applied.\n");
2911 : }
2912 :
2913 : /* The inside-loop cost will be accounted for in vectorizable_load
2914 : and vectorizable_store correctly with adjusted alignments.
2915 : Drop the body_cst_vec on the floor here. */
2916 11661 : return opt_result::success ();
2917 : }
2918 : }
2919 :
2920 : /* (2) Versioning to force alignment. */
2921 :
2922 : /* Try versioning if:
2923 :      1) the loop is optimized for speed and the cost model is not cheap,
2924 : 2) there is at least one unsupported misaligned data ref with an unknown
2925 : misalignment, and
2926 : 3) all misaligned data refs with a known misalignment are supported, and
2927 : 4) the number of runtime alignment checks is within reason. */
2928 :
2929 353054 : do_versioning
2930 353054 : = (optimize_loop_nest_for_speed_p (loop)
2931 352626 : && !loop->inner /* FORNOW */
2932 704085 : && loop_cost_model (loop) > VECT_COST_MODEL_CHEAP);
2933 :
2934 : if (do_versioning)
2935 : {
2936 355231 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2937 : {
2938 267570 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2939 267570 : if (!vect_relevant_for_alignment_p (dr_info))
2940 188043 : continue;
2941 :
2942 184240 : stmt_vec_info stmt_info = dr_info->stmt;
2943 184240 : if (STMT_VINFO_STRIDED_P (stmt_info))
2944 : {
2945 : do_versioning = false;
2946 4999 : break;
2947 : }
2948 :
2949 183170 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2950 183170 : bool negative = tree_int_cst_compare (DR_STEP (dr),
2951 183170 : size_zero_node) < 0;
2952 183170 : poly_int64 off = 0;
2953 183170 : if (negative)
2954 3388 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2955 3388 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2956 183170 : int misalignment;
2957 183170 : if ((misalignment = dr_misalignment (dr_info, vectype, off)) == 0)
2958 104713 : continue;
2959 :
2960 78457 : enum dr_alignment_support supportable_dr_alignment
2961 78457 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2962 : misalignment);
2963 78457 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2964 : {
2965 15201 : if (misalignment != DR_MISALIGNMENT_UNKNOWN
2966 15201 : || (LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
2967 11796 : >= (unsigned) param_vect_max_version_for_alignment_checks))
2968 : {
2969 : do_versioning = false;
2970 4999 : break;
2971 : }
2972 :
2973 : /* Forcing alignment in the first iteration is no good if
2974 : we don't keep it across iterations. For now, just disable
2975 : versioning in this case.
2976 : ?? We could actually unroll the loop to achieve the required
2977 : overall step alignment, and forcing the alignment could be
2978 : done by doing some iterations of the non-vectorized loop. */
2979 11388 : if (!multiple_p (vf * DR_STEP_ALIGNMENT (dr),
2980 11388 : DR_TARGET_ALIGNMENT (dr_info)))
2981 : {
2982 : do_versioning = false;
2983 : break;
2984 : }
2985 :
2986 : /* Use "mask = DR_TARGET_ALIGNMENT - 1" to test rightmost address
2987 : bits for runtime alignment check. For example, for 16 bytes
2988 : target alignment the mask is 15 = 0xf. */
2989 11388 : poly_uint64 mask = DR_TARGET_ALIGNMENT (dr_info) - 1;
2990 :
2991 : /* FORNOW: use the same mask to test all potentially unaligned
2992 : references in the loop. */
2993 11388 : if (maybe_ne (LOOP_VINFO_PTR_MASK (loop_vinfo), 0U)
2994 11388 : && maybe_ne (LOOP_VINFO_PTR_MASK (loop_vinfo), mask))
2995 : {
2996 : do_versioning = false;
2997 : break;
2998 : }
2999 :
3000 11272 : LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
3001 11272 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
3002 : }
3003 : }
3004 :
3005 : /* Versioning requires at least one misaligned data reference. */
3006 92660 : if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3007 : do_versioning = false;
3008 5564 : else if (!do_versioning)
3009 540 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
3010 : }
3011 :
3012 : /* If we are trying peeling with versioning but versioning is disabled for
3013 :      some reason, peeling must be turned off as well.  */
3014 353054 : if (try_peeling_with_versioning && !do_versioning)
3015 : do_peeling = false;
3016 :
3017 340990 : if (do_versioning)
3018 : {
3019 : const vec<stmt_vec_info> &may_misalign_stmts
3020 : = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
3021 : stmt_vec_info stmt_info;
3022 :
3023 : /* It can now be assumed that the data references in the statements
3024 : in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
3025 : of the loop being vectorized. */
3026 13656 : FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt_info)
3027 : {
3028 8632 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
3029 8632 : SET_DR_MISALIGNMENT (dr_info,
3030 : vect_dr_misalign_for_aligned_access (dr_info));
3031 8632 : if (dump_enabled_p ())
3032 142 : dump_printf_loc (MSG_NOTE, vect_location,
3033 : "Alignment of access forced using versioning.\n");
3034 : }
3035 :
3036 5024 : if (do_peeling)
3037 : {
3038 : /* This point is reached if peeling and versioning are used together
3039 : to ensure alignment. Update data structures to make sure the loop
3040 :          is correctly peeled and the right runtime check is added for loop
3041 : versioning. */
3042 1038 : gcc_assert (try_peeling_with_versioning);
3043 1038 : LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
3044 1038 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = -1;
3045 1038 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (loop_vinfo) = true;
3046 1038 : if (dump_enabled_p ())
3047 9 : dump_printf_loc (MSG_NOTE, vect_location,
3048 : "Both peeling and versioning will be applied.\n");
3049 : }
3050 : else
3051 : {
3052 : /* This point is reached if versioning is used alone. */
3053 3986 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (loop_vinfo) = false;
3054 3986 : if (dump_enabled_p ())
3055 82 : dump_printf_loc (MSG_NOTE, vect_location,
3056 : "Versioning for alignment will be applied.\n");
3057 : }
3058 :
3059 5024 : return opt_result::success ();
3060 : }
3061 :
3062 : /* This point is reached if neither peeling nor versioning is being done. */
3063 348030 : gcc_assert (! (do_peeling || do_versioning));
3064 :
3065 348030 : return opt_result::success ();
3066 743467 : }
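/* [Editor's sketch] The runtime guard that versioning for alignment
   amounts to: every potentially misaligned pointer is tested against the
   one shared mask LOOP_VINFO_PTR_MASK, i.e. DR_TARGET_ALIGNMENT - 1.
   Illustrative only -- the OR-combining trick is an assumption about how
   such a guard can be written compactly, not a quote of GCC's output.  */
#include <cstddef>
#include <cstdint>

static bool
all_aligned_p (const void *const *ptrs, size_t n, uintptr_t target_align)
{
  uintptr_t mask = target_align - 1;  /* e.g. 15 for 16-byte alignment */
  uintptr_t low_bits = 0;
  /* OR the addresses together; a single AND with the mask then tests
     the low bits of every pointer at once.  */
  for (size_t i = 0; i < n; ++i)
    low_bits |= (uintptr_t) ptrs[i];
  return (low_bits & mask) == 0;
}
/* The vectorized loop version runs only when all_aligned_p holds;
   otherwise control falls back to the scalar version.  */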
3067 :
3068 :
3069 : /* Function vect_analyze_data_refs_alignment
3070 :
3071 : Analyze the alignment of the data-references in the loop. */
3072 :
3073 : void
3074 409936 : vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo)
3075 : {
3076 409936 : DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
3077 :
3078 409936 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3079 409936 : struct data_reference *dr;
3080 409936 : unsigned int i;
3081 :
3082 409936 : vect_record_base_alignments (loop_vinfo);
3083 1755917 : FOR_EACH_VEC_ELT (datarefs, i, dr)
3084 : {
3085 950403 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
3086 950403 : if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
3087 : {
3088 950403 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt)
3089 1237697 : && DR_GROUP_FIRST_ELEMENT (dr_info->stmt) != dr_info->stmt)
3090 127673 : continue;
3091 :
3092 822730 : vect_compute_data_ref_alignment (loop_vinfo, dr_info,
3093 : STMT_VINFO_VECTYPE (dr_info->stmt));
3094 : }
3095 : }
3096 409936 : }
3097 :
3098 :
3099 : /* Analyze alignment of DRs of stmts in NODE. */
3100 :
3101 : static bool
3102 815460 : vect_slp_analyze_node_alignment (vec_info *vinfo, slp_tree node)
3103 : {
3104 : /* Alignment is maintained in the first element of the group. */
3105 815460 : stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
3106 815460 : first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
3107 815460 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
3108 815460 : tree vectype = SLP_TREE_VECTYPE (node);
3109 815460 : poly_uint64 vector_alignment
3110 815460 : = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
3111 : BITS_PER_UNIT);
3112 815460 : if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
3113 774776 : vect_compute_data_ref_alignment (vinfo, dr_info, SLP_TREE_VECTYPE (node));
3114 : /* Re-analyze alignment when we're facing a vectorization with a bigger
3115 : alignment requirement. */
3116 40684 : else if (known_lt (dr_info->target_alignment, vector_alignment))
3117 : {
3118 67 : poly_uint64 old_target_alignment = dr_info->target_alignment;
3119 67 : int old_misalignment = dr_info->misalignment;
3120 67 : vect_compute_data_ref_alignment (vinfo, dr_info, SLP_TREE_VECTYPE (node));
3121 : /* But keep knowledge about a smaller alignment. */
3122 67 : if (old_misalignment != DR_MISALIGNMENT_UNKNOWN
3123 38 : && dr_info->misalignment == DR_MISALIGNMENT_UNKNOWN)
3124 : {
3125 1 : dr_info->target_alignment = old_target_alignment;
3126 1 : dr_info->misalignment = old_misalignment;
3127 : }
3128 : }
3129 :   /* If we ever face unordered target alignments the first one analyzed wins
3130 :      and the other becomes unknown in dr_misalignment.  */
3131 815460 : return true;
3132 : }
3133 :
3134 : /* Function vect_slp_analyze_instance_alignment
3135 :
3136 : Analyze the alignment of the data-references in the SLP instance.
3137 : Return FALSE if a data reference is found that cannot be vectorized. */
3138 :
3139 : bool
3140 785090 : vect_slp_analyze_instance_alignment (vec_info *vinfo,
3141 : slp_instance instance)
3142 : {
3143 785090 : DUMP_VECT_SCOPE ("vect_slp_analyze_instance_alignment");
3144 :
3145 785090 : slp_tree node;
3146 785090 : unsigned i;
3147 943250 : FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
3148 158160 : if (! vect_slp_analyze_node_alignment (vinfo, node))
3149 : return false;
3150 :
3151 785090 : if (SLP_INSTANCE_KIND (instance) == slp_inst_kind_store
3152 785090 : && ! vect_slp_analyze_node_alignment
3153 657300 : (vinfo, SLP_INSTANCE_TREE (instance)))
3154 : return false;
3155 :
3156 : return true;
3157 : }
3158 :
3159 :
3160 : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
3161 : accesses of legal size, step, etc. Detect gaps, single element
3162 : interleaving, and other special cases. Set grouped access info.
3163 : Collect groups of strided stores for further use in SLP analysis.
3164 : Worker for vect_analyze_group_access. */
3165 :
3166 : static bool
3167 12494204 : vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info)
3168 : {
3169 12494204 : data_reference *dr = dr_info->dr;
3170 12494204 : tree step = DR_STEP (dr);
3171 12494204 : tree scalar_type = TREE_TYPE (DR_REF (dr));
3172 12494204 : HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
3173 12494204 : stmt_vec_info stmt_info = dr_info->stmt;
3174 12494204 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3175 12494204 : bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
3176 12494204 : HOST_WIDE_INT dr_step = -1;
3177 12494204 : HOST_WIDE_INT groupsize, last_accessed_element = 1;
3178 12494204 : bool slp_impossible = false;
3179 :
3180 : /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
3181 : size of the interleaving group (including gaps). */
3182 12494204 : if (tree_fits_shwi_p (step))
3183 : {
3184 12484790 : dr_step = tree_to_shwi (step);
3185 : /* Check that STEP is a multiple of type size. Otherwise there is
3186 : a non-element-sized gap at the end of the group which we
3187 : cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
3188 : ??? As we can handle non-constant step fine here we should
3189 : simply remove uses of DR_GROUP_GAP between the last and first
3190 : element and instead rely on DR_STEP. DR_GROUP_SIZE then would
3191 : simply not include that gap. */
3192 12484790 : if ((dr_step % type_size) != 0)
3193 : {
3194 498 : if (dump_enabled_p ())
3195 27 : dump_printf_loc (MSG_NOTE, vect_location,
3196 : "Step %T is not a multiple of the element size"
3197 : " for %T\n",
3198 : step, DR_REF (dr));
3199 498 : return false;
3200 : }
3201 12484292 : groupsize = absu_hwi (dr_step) / type_size;
3202 : }
3203 : else
3204 : groupsize = 0;
3205 :
3206 :   /* A non-consecutive access is possible only as part of an interleaving group.  */
3207 12493706 : if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
3208 : {
3209 :       /* Check whether this DR is part of an interleaving group of which
3210 :          only a single element is accessed in the loop.  */
3211 :
3212 : /* Gaps are supported only for loads. STEP must be a multiple of the type
3213 : size. */
3214 8393553 : if (DR_IS_READ (dr)
3215 5012460 : && (dr_step % type_size) == 0
3216 : && groupsize > 0
3217 : /* This could be UINT_MAX but as we are generating code in a very
3218 : inefficient way we have to cap earlier.
3219 : See PR91403 for example. */
3220 5012460 : && groupsize <= 4096)
3221 : {
3222 72624 : DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
3223 72624 : DR_GROUP_SIZE (stmt_info) = groupsize;
3224 72624 : DR_GROUP_GAP (stmt_info) = groupsize - 1;
3225 72624 : if (dump_enabled_p ())
3226 1468 : dump_printf_loc (MSG_NOTE, vect_location,
3227 : "Detected single element interleaving %T"
3228 : " step %T\n",
3229 : DR_REF (dr), step);
3230 :
3231 72624 : return true;
3232 : }
3233 :
3234 8320929 : if (dump_enabled_p ())
3235 3129 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3236 : "not consecutive access %G", stmt_info->stmt);
3237 :
3238 8320929 : if (bb_vinfo)
3239 : {
3240 : /* Mark the statement as unvectorizable. */
3241 8302098 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3242 8302098 : return true;
3243 : }
3244 :
3245 18831 : if (dump_enabled_p ())
3246 305 : dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
3247 18831 : STMT_VINFO_STRIDED_P (stmt_info) = true;
3248 18831 : return true;
3249 : }
3250 :
3251 4100153 : if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
3252 : {
3253 : /* First stmt in the interleaving chain. Check the chain. */
3254 1487945 : stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3255 1487945 : struct data_reference *data_ref = dr;
3256 1487945 : unsigned int count = 1;
3257 1487945 : tree prev_init = DR_INIT (data_ref);
3258 1487945 : HOST_WIDE_INT diff, gaps = 0;
3259 :
3260 : /* By construction, all group members have INTEGER_CST DR_INITs. */
3261 4100162 : while (next)
3262 : {
3263 : /* We never have the same DR multiple times. */
3264 2612279 : gcc_assert (tree_int_cst_compare (DR_INIT (data_ref),
3265 : DR_INIT (STMT_VINFO_DATA_REF (next))) != 0);
3266 :
3267 2612279 : data_ref = STMT_VINFO_DATA_REF (next);
3268 :
3269 : /* All group members have the same STEP by construction. */
3270 2612279 : gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
3271 :
3272 : /* Check that the distance between two accesses is equal to the type
3273 : size. Otherwise, we have gaps. */
3274 2612279 : diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
3275 2612279 : - TREE_INT_CST_LOW (prev_init)) / type_size;
3276 2612279 : if (diff < 1 || diff > UINT_MAX)
3277 : {
3278 : 	      /* For artificial testcases with array accesses that have large
3279 : 		 constant indices we can run into overflow issues, which can
3280 : 		 end up fooling the groupsize constraint below; so check the
3281 : 		 individual gaps (which are represented as unsigned int)
3282 : 		 as well.  */
3283 0 : if (dump_enabled_p ())
3284 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3285 : "interleaved access with gap larger "
3286 : "than representable\n");
3287 0 : return false;
3288 : }
3289 2612279 : if (diff != 1)
3290 : {
3291 : /* FORNOW: SLP of accesses with gaps is not supported. */
3292 100834 : slp_impossible = true;
3293 100834 : if (DR_IS_WRITE (data_ref))
3294 : {
3295 62 : if (dump_enabled_p ())
3296 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3297 : "interleaved store with gaps\n");
3298 62 : return false;
3299 : }
3300 :
3301 100772 : gaps += diff - 1;
3302 : }
3303 :
3304 2612217 : last_accessed_element += diff;
3305 :
3306 : /* Store the gap from the previous member of the group. If there is no
3307 : gap in the access, DR_GROUP_GAP is always 1. */
3308 2612217 : DR_GROUP_GAP (next) = diff;
3309 :
3310 2612217 : prev_init = DR_INIT (data_ref);
3311 2612217 : next = DR_GROUP_NEXT_ELEMENT (next);
3312 : /* Count the number of data-refs in the chain. */
3313 2612217 : count++;
3314 : }
3315 :
3316 1487883 : if (groupsize == 0)
3317 1418233 : groupsize = count + gaps;
3318 :
3319 : /* This could be UINT_MAX but as we are generating code in a very
3320 : inefficient way we have to cap earlier. See PR78699 for example. */
3321 1487883 : if (groupsize > 4096)
3322 : {
3323 1 : if (dump_enabled_p ())
3324 1 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3325 : "group is too large\n");
3326 1 : return false;
3327 : }
3328 :
3329 : /* Check that the size of the interleaving is equal to count for stores,
3330 : i.e., that there are no gaps. */
3331 1487882 : if (groupsize != count
3332 105136 : && !DR_IS_READ (dr))
3333 : {
3334 11420 : groupsize = count;
3335 11420 : STMT_VINFO_STRIDED_P (stmt_info) = true;
3336 : }
3337 :
3338 : /* If there is a gap after the last load in the group it is the
3339 : difference between the groupsize and the last accessed
3340 : element.
3341 : When there is no gap, this difference should be 0. */
3342 1487882 : DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
3343 :
3344 1487882 : DR_GROUP_SIZE (stmt_info) = groupsize;
3345 1487882 : if (dump_enabled_p ())
3346 : {
3347 7978 : dump_printf_loc (MSG_NOTE, vect_location,
3348 : "Detected interleaving ");
3349 7978 : if (DR_IS_READ (dr))
3350 4299 : dump_printf (MSG_NOTE, "load ");
3351 3679 : else if (STMT_VINFO_STRIDED_P (stmt_info))
3352 496 : dump_printf (MSG_NOTE, "strided store ");
3353 : else
3354 3183 : dump_printf (MSG_NOTE, "store ");
3355 7978 : dump_printf (MSG_NOTE, "of size %u\n",
3356 : (unsigned)groupsize);
3357 7978 : dump_printf_loc (MSG_NOTE, vect_location, "\t%G", stmt_info->stmt);
3358 7978 : next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3359 39058 : while (next)
3360 : {
3361 31080 : if (DR_GROUP_GAP (next) != 1)
3362 277 : dump_printf_loc (MSG_NOTE, vect_location,
3363 : "\t<gap of %d elements>\n",
3364 277 : DR_GROUP_GAP (next) - 1);
3365 31080 : dump_printf_loc (MSG_NOTE, vect_location, "\t%G", next->stmt);
3366 31080 : next = DR_GROUP_NEXT_ELEMENT (next);
3367 : }
3368 7978 : if (DR_GROUP_GAP (stmt_info) != 0)
3369 388 : dump_printf_loc (MSG_NOTE, vect_location,
3370 : "\t<gap of %d elements>\n",
3371 388 : DR_GROUP_GAP (stmt_info));
3372 : }
3373 :
3374 :       /* SLP: create an SLP data structure for every interleaving group of
3375 :          stores for further analysis in vect_analyze_slp.  */
3376 1487882 : if (DR_IS_WRITE (dr) && !slp_impossible)
3377 : {
3378 914309 : if (loop_vinfo)
3379 29149 : LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
3380 914309 : if (bb_vinfo)
3381 885160 : BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
3382 : }
3383 : }
3384 :
3385 : return true;
3386 : }
3387 :
3388 : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
3389 : accesses of legal size, step, etc. Detect gaps, single element
3390 : interleaving, and other special cases. Set grouped access info.
3391 : Collect groups of strided stores for further use in SLP analysis. */
3392 :
3393 : static bool
3394 12494204 : vect_analyze_group_access (vec_info *vinfo, dr_vec_info *dr_info)
3395 : {
3396 12494204 : if (!vect_analyze_group_access_1 (vinfo, dr_info))
3397 : {
3398 : /* Dissolve the group if present. */
3399 561 : stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
3400 792 : while (stmt_info)
3401 : {
3402 231 : stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3403 231 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3404 231 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3405 231 : stmt_info = next;
3406 : }
3407 : return false;
3408 : }
3409 : return true;
3410 : }
3411 :
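/* [Editor's sketch] How the group size and gaps fall out of the sorted
   member offsets, mirroring the arithmetic of vect_analyze_group_access_1
   for a constant step.  The byte-offset representation and all names are
   assumptions made for the example.  */
#include <cstdio>
#include <vector>

struct group_shape { long size; long tail_gap; bool inner_gaps; };

static group_shape
analyze_group (const std::vector<long> &offsets, long step, long type_size)
{
  group_shape g = { 0, 0, false };
  long last_accessed = 1;
  for (size_t i = 1; i < offsets.size (); ++i)
    {
      long diff = (offsets[i] - offsets[i - 1]) / type_size;
      if (diff != 1)
        g.inner_gaps = true;           /* gap between two members */
      last_accessed += diff;
    }
  g.size = step / type_size;           /* elements per iteration, gaps included */
  g.tail_gap = g.size - last_accessed; /* gap after the last member */
  return g;
}

int
main ()
{
  /* Loads a[3*i] and a[3*i+1] of 4-byte elements with step 12:
     group size 3 with a one-element gap at the end.  */
  group_shape g = analyze_group ({0, 4}, 12, 4);
  printf ("size %ld, tail gap %ld, inner gaps %d\n",
          g.size, g.tail_gap, (int) g.inner_gaps);
  return 0;
}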
3412 : /* Analyze the access pattern of the data-reference DR_INFO.
3413 : In case of non-consecutive accesses call vect_analyze_group_access() to
3414 : analyze groups of accesses. */
3415 :
3416 : static bool
3417 13243689 : vect_analyze_data_ref_access (vec_info *vinfo, dr_vec_info *dr_info)
3418 : {
3419 13243689 : data_reference *dr = dr_info->dr;
3420 13243689 : tree step = DR_STEP (dr);
3421 13243689 : tree scalar_type = TREE_TYPE (DR_REF (dr));
3422 13243689 : stmt_vec_info stmt_info = dr_info->stmt;
3423 13243689 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3424 13243689 : class loop *loop = NULL;
3425 :
3426 13243689 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
3427 : return true;
3428 :
3429 13144009 : if (loop_vinfo)
3430 961892 : loop = LOOP_VINFO_LOOP (loop_vinfo);
3431 :
3432 13144009 : if (loop_vinfo && !step)
3433 : {
3434 0 : if (dump_enabled_p ())
3435 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3436 : "bad data-ref access in loop\n");
3437 0 : return false;
3438 : }
3439 :
3440 : /* Allow loads with zero step in inner-loop vectorization. */
3441 13144009 : if (loop_vinfo && integer_zerop (step))
3442 : {
3443 14044 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3444 14044 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3445 14044 : if (!nested_in_vect_loop_p (loop, stmt_info))
3446 13783 : return DR_IS_READ (dr);
3447 : /* Allow references with zero step for outer loops marked
3448 : with pragma omp simd only - it guarantees absence of
3449 : loop-carried dependencies between inner loop iterations. */
3450 261 : if (loop->safelen < 2)
3451 : {
3452 225 : if (dump_enabled_p ())
3453 6 : dump_printf_loc (MSG_NOTE, vect_location,
3454 : "zero step in inner loop of nest\n");
3455 225 : return false;
3456 : }
3457 : }
3458 :
3459 13129965 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
3460 : {
3461 : /* Interleaved accesses are not yet supported within outer-loop
3462 : vectorization for references in the inner-loop. */
3463 5807 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3464 5807 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3465 :
3466 : /* For the rest of the analysis we use the outer-loop step. */
3467 5807 : step = STMT_VINFO_DR_STEP (stmt_info);
3468 5807 : if (integer_zerop (step))
3469 : {
3470 1281 : if (dump_enabled_p ())
3471 238 : dump_printf_loc (MSG_NOTE, vect_location,
3472 : "zero step in outer loop.\n");
3473 1281 : return DR_IS_READ (dr);
3474 : }
3475 : }
3476 :
3477 : /* Consecutive? */
3478 13128720 : if (TREE_CODE (step) == INTEGER_CST)
3479 : {
3480 13090375 : HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
3481 13090375 : if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
3482 13090375 : || (dr_step < 0
3483 27104 : && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
3484 : {
3485 : /* Mark that it is not interleaving. */
3486 602710 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3487 602710 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3488 602710 : return true;
3489 : }
3490 : }
3491 :
3492 12526010 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
3493 : {
3494 3334 : if (dump_enabled_p ())
3495 163 : dump_printf_loc (MSG_NOTE, vect_location,
3496 : "grouped access in outer loop.\n");
3497 3334 : return false;
3498 : }
3499 :
3500 :
3501 :   /* Assume this is a DR handled by the non-constant strided load case.  */
3502 12522676 : if (TREE_CODE (step) != INTEGER_CST)
3503 37886 : return (STMT_VINFO_STRIDED_P (stmt_info)
3504 37886 : && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
3505 9414 : || vect_analyze_group_access (vinfo, dr_info)));
3506 :
3507 :   /* Non-consecutive access - check whether it is part of an interleaving group.  */
3508 12484790 : return vect_analyze_group_access (vinfo, dr_info);
3509 : }
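/* [Editor's sketch] The "Consecutive?" test above in isolation: with a
   constant step, an access is consecutive exactly when the step equals
   the scalar type size, or its negation for backward accesses.
   Illustrative only.  */
static bool
consecutive_access_p (long step, long type_size)
{
  return step == type_size || step == -type_size;
}
/* E.g. int loads with step 4 are consecutive; step 8 touches every other
   element and must be handled as strided or interleaved instead.  */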
3510 :
3511 : /* Compare two data-references DRA and DRB, ordering them so that
3512 :    candidates for the same interleaving group become adjacent.  */
3513 :
3514 : static int
3515 345372115 : dr_group_sort_cmp (const void *dra_, const void *drb_)
3516 : {
3517 345372115 : dr_vec_info *dra_info = *(dr_vec_info **)const_cast<void *>(dra_);
3518 345372115 : dr_vec_info *drb_info = *(dr_vec_info **)const_cast<void *>(drb_);
3519 345372115 : data_reference_p dra = dra_info->dr;
3520 345372115 : data_reference_p drb = drb_info->dr;
3521 345372115 : int cmp;
3522 :
3523 : /* Stabilize sort. */
3524 345372115 : if (dra == drb)
3525 : return 0;
3526 :
3527 :   /* DRs with different group IDs never belong to the same group.  */
3528 345372115 : if (dra_info->group != drb_info->group)
3529 377104889 : return dra_info->group < drb_info->group ? -1 : 1;
3530 :
3531 : /* Ordering of DRs according to base. */
3532 97278513 : cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
3533 : DR_BASE_ADDRESS (drb));
3534 97278513 : if (cmp != 0)
3535 : return cmp;
3536 :
3537 : /* And according to DR_OFFSET. */
3538 52734568 : cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
3539 52734568 : if (cmp != 0)
3540 : return cmp;
3541 :
3542 : /* Put reads before writes. */
3543 52380479 : if (DR_IS_READ (dra) != DR_IS_READ (drb))
3544 4217702 : return DR_IS_READ (dra) ? -1 : 1;
3545 :
3546 : /* Then sort after access size. */
3547 49511010 : cmp = data_ref_compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
3548 49511010 : TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
3549 49511010 : if (cmp != 0)
3550 : return cmp;
3551 :
3552 : /* And after step. */
3553 42808119 : cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
3554 42808119 : if (cmp != 0)
3555 : return cmp;
3556 :
3557 : /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
3558 42800745 : cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
3559 42800745 : if (cmp == 0)
3560 489208 : return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
3561 : return cmp;
3562 : }
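/* [Editor's sketch] The same multi-key ordering as dr_group_sort_cmp,
   written with std::sort over a simplified record.  Every field is a
   stand-in for the tree-based key used above.  */
#include <algorithm>
#include <vector>

struct dr_key
{
  int group;      /* caller-assigned group or basic-block index */
  long base;      /* stands in for DR_BASE_ADDRESS and DR_OFFSET */
  bool is_read;   /* reads sort before writes */
  long size;      /* access size */
  long step;
  long init;
  unsigned uid;   /* statement UID; stabilizes the sort */
};

static bool
dr_less (const dr_key &a, const dr_key &b)
{
  if (a.group != b.group) return a.group < b.group;
  if (a.base != b.base) return a.base < b.base;
  if (a.is_read != b.is_read) return a.is_read;   /* reads first */
  if (a.size != b.size) return a.size < b.size;
  if (a.step != b.step) return a.step < b.step;
  if (a.init != b.init) return a.init < b.init;
  return a.uid < b.uid;
}
/* After std::sort (drs.begin (), drs.end (), dr_less) the members of a
   candidate interleaving group are adjacent, which is what lets
   vect_analyze_data_ref_accesses build the chains in one linear scan.  */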
3563 :
3564 : /* If OP is the result of a conversion, return the unconverted value,
3565 : otherwise return null. */
3566 :
3567 : static tree
3568 387 : strip_conversion (tree op)
3569 : {
3570 387 : if (TREE_CODE (op) != SSA_NAME)
3571 : return NULL_TREE;
3572 387 : gimple *stmt = SSA_NAME_DEF_STMT (op);
3573 387 : if (!is_gimple_assign (stmt)
3574 387 : || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
3575 : return NULL_TREE;
3576 186 : return gimple_assign_rhs1 (stmt);
3577 : }
3578 :
3579 : /* Return true if vectorizable_* routines can handle statements STMT1_INFO
3580 : and STMT2_INFO being in a single group. When ALLOW_SLP_P, masked loads can
3581 : be grouped in SLP mode. */
3582 :
3583 : static bool
3584 6986636 : can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info,
3585 : bool allow_slp_p)
3586 : {
3587 6986636 : if (gimple_assign_single_p (stmt1_info->stmt))
3588 6984965 : return gimple_assign_single_p (stmt2_info->stmt);
3589 :
3590 1671 : gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
3591 1671 : if (call1 && gimple_call_internal_p (call1))
3592 : {
3593 : /* Check for two masked loads or two masked stores. */
3594 1909 : gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
3595 1655 : if (!call2 || !gimple_call_internal_p (call2))
3596 : return false;
3597 1655 : internal_fn ifn = gimple_call_internal_fn (call1);
3598 1655 : if (ifn != IFN_MASK_LOAD && ifn != IFN_MASK_STORE)
3599 : return false;
3600 1655 : if (ifn != gimple_call_internal_fn (call2))
3601 : return false;
3602 :
3603 : /* Check that the masks are the same. Cope with casts of masks,
3604 : like those created by build_mask_conversion. */
3605 1655 : tree mask1 = gimple_call_arg (call1, 2);
3606 1655 : tree mask2 = gimple_call_arg (call2, 2);
3607 1655 : if (!operand_equal_p (mask1, mask2, 0) && !allow_slp_p)
3608 : {
3609 294 : mask1 = strip_conversion (mask1);
3610 294 : if (!mask1)
3611 : return false;
3612 93 : mask2 = strip_conversion (mask2);
3613 93 : if (!mask2)
3614 : return false;
3615 93 : if (!operand_equal_p (mask1, mask2, 0))
3616 : return false;
3617 : }
3618 1417 : return true;
3619 : }
3620 :
3621 : return false;
3622 : }
3623 :
3624 : /* Function vect_analyze_data_ref_accesses.
3625 :
3626 : Analyze the access pattern of all the data references in the loop.
3627 :
3628 : FORNOW: the only access pattern that is considered vectorizable is a
3629 : simple step 1 (consecutive) access.
3630 :
3631 : FORNOW: handle only arrays and pointer accesses. */
3632 :
3633 : opt_result
3634 2628447 : vect_analyze_data_ref_accesses (vec_info *vinfo,
3635 : vec<int> *dataref_groups)
3636 : {
3637 2628447 : unsigned int i;
3638 2628447 : vec<data_reference_p> datarefs = vinfo->shared->datarefs;
3639 :
3640 2628447 : DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
3641 :
3642 2628447 : if (datarefs.is_empty ())
3643 1052674 : return opt_result::success ();
3644 :
3645 : /* Sort the array of datarefs to make building the interleaving chains
3646 :      linear.  Don't modify the original vector's order; it is needed for
3647 : determining what dependencies are reversed. */
3648 1575773 : vec<dr_vec_info *> datarefs_copy;
3649 1575773 : datarefs_copy.create (datarefs.length ());
3650 16574529 : for (unsigned i = 0; i < datarefs.length (); i++)
3651 : {
3652 14998756 : dr_vec_info *dr_info = vinfo->lookup_dr (datarefs[i]);
3653 :       /* If the caller computed DR grouping, use that; otherwise group by
3654 : basic blocks. */
3655 14998756 : if (dataref_groups)
3656 13923197 : dr_info->group = (*dataref_groups)[i];
3657 : else
3658 1075559 : dr_info->group = gimple_bb (DR_STMT (datarefs[i]))->index;
3659 14998756 : datarefs_copy.quick_push (dr_info);
3660 : }
3661 1575773 : datarefs_copy.qsort (dr_group_sort_cmp);
3662 1575773 : hash_set<stmt_vec_info> to_fixup;
3663 :
3664 : /* Build the interleaving chains. */
3665 14169126 : for (i = 0; i < datarefs_copy.length () - 1;)
3666 : {
3667 11017580 : dr_vec_info *dr_info_a = datarefs_copy[i];
3668 11017580 : data_reference_p dra = dr_info_a->dr;
3669 11017580 : int dra_group_id = dr_info_a->group;
3670 11017580 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
3671 11017580 : stmt_vec_info lastinfo = NULL;
3672 11017580 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
3673 9412194 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
3674 : {
3675 1669976 : ++i;
3676 1669976 : continue;
3677 : }
3678 24587077 : for (i = i + 1; i < datarefs_copy.length (); ++i)
3679 : {
3680 11753007 : dr_vec_info *dr_info_b = datarefs_copy[i];
3681 11753007 : data_reference_p drb = dr_info_b->dr;
3682 11753007 : int drb_group_id = dr_info_b->group;
3683 11753007 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
3684 11753007 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
3685 11448727 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
3686 : break;
3687 :
3688 : /* ??? Imperfect sorting (non-compatible types, non-modulo
3689 : 	  /* ??? Imperfect sorting (non-compatible types, non-modulo
3690 : 	     accesses, same accesses) can lead to a group being artificially
3691 : 	     split here as we don't just skip over those.  If it really
3692 : 	     matters we can push those to a worklist and re-iterate
3693 : 	     over them.  Then we can just skip ahead to the next DR here.  */
3694 : /* DRs in a different DR group should not be put into the same
3695 : interleaving group. */
3696 11445110 : if (dra_group_id != drb_group_id)
3697 : break;
3698 :
3699 : 	  /* Check that the data-refs have the same first location (except init)
3700 : 	     and that they are both either stores or loads (not one load and one
3701 : 	     store, and not masked loads or stores that cannot be grouped).  */
3702 7259853 : if (DR_IS_READ (dra) != DR_IS_READ (drb)
3703 5969399 : || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
3704 : DR_BASE_ADDRESS (drb)) != 0
3705 4360028 : || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
3706 11600746 : || !can_group_stmts_p (stmtinfo_a, stmtinfo_b, true))
3707 : break;
3708 :
3709 : /* Check that the data-refs have the same constant size. */
3710 4340868 : tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
3711 4340868 : tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
3712 4340868 : if (!tree_fits_uhwi_p (sza)
3713 4340868 : || !tree_fits_uhwi_p (szb)
3714 8681736 : || !tree_int_cst_equal (sza, szb))
3715 : break;
3716 :
3717 : /* Check that the data-refs have the same step. */
3718 3994769 : if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
3719 : break;
3720 :
3721 : /* Check the types are compatible.
3722 : ??? We don't distinguish this during sorting. */
3723 3994049 : if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
3724 3994049 : TREE_TYPE (DR_REF (drb))))
3725 : break;
3726 :
3727 : /* Check that the DR_INITs are compile-time constants. */
3728 2856784 : if (!tree_fits_shwi_p (DR_INIT (dra))
3729 2856784 : || !tree_fits_shwi_p (DR_INIT (drb)))
3730 : break;
3731 :
3732 : /* Different .GOMP_SIMD_LANE calls still give the same lane,
3733 : just hold extra information. */
3734 2856784 : if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_a)
3735 1240 : && STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_b)
3736 2858024 : && data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb)) == 0)
3737 : break;
3738 :
3739 : /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
3740 2855544 : HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
3741 2855544 : HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
3742 2855544 : HOST_WIDE_INT init_prev
3743 2855544 : = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]->dr));
3744 2855544 : gcc_assert (init_a <= init_b
3745 : && init_a <= init_prev
3746 : && init_prev <= init_b);
3747 :
3748 : /* Do not place the same access in the interleaving chain twice. */
3749 2855544 : if (init_b == init_prev)
3750 : {
3751 29955 : gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]->dr))
3752 : < gimple_uid (DR_STMT (drb)));
3753 : /* Simply link in duplicates and fix up the chain below. */
3754 : }
3755 : else
3756 : {
3757 : /* If init_b == init_a + the size of the type * k, we have an
3758 : interleaving, and DRA is accessed before DRB. */
3759 2825589 : unsigned HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
3760 2825589 : if (type_size_a == 0
3761 2825589 : || (((unsigned HOST_WIDE_INT)init_b - init_a)
3762 2825589 : % type_size_a != 0))
3763 : break;
3764 :
3765 : /* If we have a store, the accesses are adjacent. This splits
3766 : groups into chunks we support (we don't support vectorization
3767 : of stores with gaps). */
3768 2823860 : if (!DR_IS_READ (dra)
3769 1855874 : && (((unsigned HOST_WIDE_INT)init_b - init_prev)
3770 : != type_size_a))
3771 : break;
3772 :
3773 : 	      /* For datarefs with a big gap it's better to split them into
3774 : 		 different groups, e.g.
3775 : 		 a[0], a[1], ..., a[7] and a[100], a[101], ..., a[107].  */
3776 2644674 : if ((unsigned HOST_WIDE_INT)(init_b - init_prev)
3777 : > MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT)
3778 : break;
3779 :
3780 : 	      /* If the step (when constant and nonzero) is smaller than the
3781 : 		 difference between the data-refs' inits, this splits groups
3782 : 		 into suitable sizes.  */
3783 2635281 : if (tree_fits_shwi_p (DR_STEP (dra)))
3784 : {
3785 2629071 : unsigned HOST_WIDE_INT step
3786 2629071 : = absu_hwi (tree_to_shwi (DR_STEP (dra)));
3787 2629071 : if (step != 0
3788 163415 : && step <= ((unsigned HOST_WIDE_INT)init_b - init_a))
3789 : break;
3790 : }
3791 : }
3792 :
3793 2646120 : if (dump_enabled_p ())
3794 31985 : dump_printf_loc (MSG_NOTE, vect_location,
3795 31985 : DR_IS_READ (dra)
3796 : ? "Detected interleaving load %T and %T\n"
3797 : : "Detected interleaving store %T and %T\n",
3798 : DR_REF (dra), DR_REF (drb));
3799 :
3800 : /* Link the found element into the group list. */
3801 2646120 : if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3802 : {
3803 1466148 : DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = stmtinfo_a;
3804 1466148 : lastinfo = stmtinfo_a;
3805 : }
3806 2646120 : DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = stmtinfo_a;
3807 2646120 : DR_GROUP_NEXT_ELEMENT (lastinfo) = stmtinfo_b;
3808 2646120 : lastinfo = stmtinfo_b;
3809 :
3810 2646120 : if (! STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
3811 : {
3812 2645743 : STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a)
3813 2645743 : = !can_group_stmts_p (stmtinfo_a, stmtinfo_b, false);
3814 :
3815 2645743 : if (dump_enabled_p () && STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
3816 126 : dump_printf_loc (MSG_NOTE, vect_location,
3817 : "Load suitable for SLP vectorization only.\n");
3818 : }
3819 :
3820 2646120 : if (init_b == init_prev
3821 29955 : && !to_fixup.add (DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3822 2663131 : && dump_enabled_p ())
3823 213 : dump_printf_loc (MSG_NOTE, vect_location,
3824 : "Queuing group with duplicate access for fixup\n");
3825 : }
3826 : }
3827 :
3828 :   /* Fix up groups with duplicate entries by splitting them.  */
3829 1619507 : while (1)
3830 : {
3831 1619507 : hash_set<stmt_vec_info>::iterator it = to_fixup.begin ();
3832 1619507 : if (!(it != to_fixup.end ()))
3833 : break;
3834 43734 : stmt_vec_info grp = *it;
3835 43734 : to_fixup.remove (grp);
3836 :
3837 : /* Find the earliest duplicate group member. */
3838 43734 : unsigned first_duplicate = -1u;
3839 43734 : stmt_vec_info next, g = grp;
3840 275250 : while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3841 : {
3842 187782 : if (tree_int_cst_equal (DR_INIT (STMT_VINFO_DR_INFO (next)->dr),
3843 187782 : DR_INIT (STMT_VINFO_DR_INFO (g)->dr))
3844 187782 : && gimple_uid (STMT_VINFO_STMT (next)) < first_duplicate)
3845 : first_duplicate = gimple_uid (STMT_VINFO_STMT (next));
3846 : g = next;
3847 : }
3848 43734 : if (first_duplicate == -1U)
3849 17011 : continue;
3850 :
3851 : /* Then move all stmts after the first duplicate to a new group.
3852 : Note this is a heuristic but one with the property that *it
3853 : is fixed up completely. */
3854 26723 : g = grp;
3855 26723 : stmt_vec_info newgroup = NULL, ng = grp;
3856 238797 : while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3857 : {
3858 185351 : if (gimple_uid (STMT_VINFO_STMT (next)) >= first_duplicate)
3859 : {
3860 179191 : DR_GROUP_NEXT_ELEMENT (g) = DR_GROUP_NEXT_ELEMENT (next);
3861 179191 : if (!newgroup)
3862 : {
3863 26723 : newgroup = next;
3864 26723 : STMT_VINFO_SLP_VECT_ONLY (newgroup)
3865 26723 : = STMT_VINFO_SLP_VECT_ONLY (grp);
3866 : }
3867 : else
3868 152468 : DR_GROUP_NEXT_ELEMENT (ng) = next;
3869 179191 : ng = next;
3870 179191 : DR_GROUP_FIRST_ELEMENT (ng) = newgroup;
3871 : }
3872 : else
3873 : g = DR_GROUP_NEXT_ELEMENT (g);
3874 : }
3875 26723 : DR_GROUP_NEXT_ELEMENT (ng) = NULL;
3876 :
3877 : /* Fixup the new group which still may contain duplicates. */
3878 26723 : to_fixup.add (newgroup);
3879 : }
3880 :
3881 1575773 : dr_vec_info *dr_info;
3882 16552616 : FOR_EACH_VEC_ELT (datarefs_copy, i, dr_info)
3883 : {
3884 14984769 : if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
3885 14984769 : && !vect_analyze_data_ref_access (vinfo, dr_info))
3886 : {
3887 7980 : if (dump_enabled_p ())
3888 291 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3889 : "not vectorized: complicated access pattern.\n");
3890 :
3891 7980 : if (is_a <bb_vec_info> (vinfo))
3892 : {
3893 : /* Mark the statement as not vectorizable. */
3894 54 : STMT_VINFO_VECTORIZABLE (dr_info->stmt) = false;
3895 54 : continue;
3896 : }
3897 : else
3898 : {
3899 7926 : datarefs_copy.release ();
3900 7926 : return opt_result::failure_at (dr_info->stmt->stmt,
3901 : "not vectorized:"
3902 : " complicated access pattern.\n");
3903 : }
3904 : }
3905 : }
3906 :
3907 1567847 : datarefs_copy.release ();
3908 1567847 : return opt_result::success ();
3909 1575773 : }
3910 :
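/* [Editor's sketch] The duplicate-group fixup above, modeled on a plain
   singly linked list: every member whose UID is at or after the first
   duplicate's UID is moved to a new chain, and the new chain is
   re-queued since it may still contain duplicates.  Illustrative only;
   the struct is an assumption standing in for stmt_vec_info.  */
struct chain_elem
{
  unsigned uid;
  chain_elem *next;
};

/* Detach all elements with uid >= FIRST_DUPLICATE from CHAIN (whose head
   always stays) and return the new chain built from them.  */
static chain_elem *
split_at_first_duplicate (chain_elem *chain, unsigned first_duplicate)
{
  chain_elem *newchain = nullptr, *newtail = nullptr;
  for (chain_elem *g = chain; g && g->next;)
    {
      chain_elem *next = g->next;
      if (next->uid >= first_duplicate)
        {
          g->next = next->next;   /* unlink from the old chain */
          next->next = nullptr;
          if (!newchain)
            newchain = newtail = next;
          else
            newtail = newtail->next = next;
        }
      else
        g = next;
    }
  return newchain;
}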
3911 : /* Function vect_vfa_segment_size.
3912 :
3913 : Input:
3914 : DR_INFO: The data reference.
3915 : LENGTH_FACTOR: segment length to consider.
3916 :
3917 : Return a value suitable for the dr_with_seg_len::seg_len field.
3918 : This is the "distance travelled" by the pointer from the first
3919 : iteration in the segment to the last. Note that it does not include
3920 : the size of the access; in effect it only describes the first byte. */
3921 :
3922 : static tree
3923 139318 : vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor)
3924 : {
3925 139318 : length_factor = size_binop (MINUS_EXPR,
3926 : fold_convert (sizetype, length_factor),
3927 : size_one_node);
3928 139318 : return size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr_info->dr)),
3929 : length_factor);
3930 : }
3931 :
3932 : /* Return a value that, when added to abs (vect_vfa_segment_size (DR_INFO)),
3933 : gives the worst-case number of bytes covered by the segment. */
3934 :
3935 : static unsigned HOST_WIDE_INT
3936 139800 : vect_vfa_access_size (vec_info *vinfo, dr_vec_info *dr_info)
3937 : {
3938 139800 : stmt_vec_info stmt_vinfo = dr_info->stmt;
3939 139800 : tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
3940 139800 : unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
3941 139800 : unsigned HOST_WIDE_INT access_size = ref_size;
3942 139800 : if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
3943 : {
3944 40837 : gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
3945 40837 : access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
3946 : }
3947 139800 : tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
3948 139800 : int misalignment;
3949 279600 : if (((misalignment = dr_misalignment (dr_info, vectype)), true)
3950 139800 : && (vect_supportable_dr_alignment (vinfo, dr_info, vectype, misalignment)
3951 : == dr_explicit_realign_optimized))
3952 : {
3953 : /* We might access a full vector's worth. */
3954 0 : access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
3955 : }
3956 139800 : return access_size;
3957 : }
3958 :
3959 : /* Get the minimum alignment for all the scalar accesses that DR_INFO
3960 : describes. */
3961 :
3962 : static unsigned int
3963 139800 : vect_vfa_align (dr_vec_info *dr_info)
3964 : {
3965 0 : return dr_alignment (dr_info->dr);
3966 : }
3967 :
3968 : /* Function vect_compile_time_alias.
3969 :
3970 : Given data references A and B with equal base and offset, see whether
3971 : the alias relation can be decided at compilation time. Return 1 if
3972 : it can and the references alias, 0 if it can and the references do
3973 : not alias, and -1 if we cannot decide at compile time. SEGMENT_LENGTH_A,
3974 : SEGMENT_LENGTH_B, ACCESS_SIZE_A and ACCESS_SIZE_B are the equivalent
3975 : of dr_with_seg_len::{seg_len,access_size} for A and B. */
3976 :
3977 : static int
3978 4256 : vect_compile_time_alias (dr_vec_info *a, dr_vec_info *b,
3979 : tree segment_length_a, tree segment_length_b,
3980 : unsigned HOST_WIDE_INT access_size_a,
3981 : unsigned HOST_WIDE_INT access_size_b)
3982 : {
3983 4256 : poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a->dr));
3984 4256 : poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b->dr));
3985 4256 : poly_uint64 const_length_a;
3986 4256 : poly_uint64 const_length_b;
3987 :
3988 : /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
3989 : bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
3990 : [a, a+12) */
3991 4256 : if (tree_int_cst_compare (DR_STEP (a->dr), size_zero_node) < 0)
3992 : {
3993 250 : const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
3994 250 : offset_a -= const_length_a;
3995 : }
3996 : else
3997 4006 : const_length_a = tree_to_poly_uint64 (segment_length_a);
3998 4256 : if (tree_int_cst_compare (DR_STEP (b->dr), size_zero_node) < 0)
3999 : {
4000 408 : const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
4001 408 : offset_b -= const_length_b;
4002 : }
4003 : else
4004 3848 : const_length_b = tree_to_poly_uint64 (segment_length_b);
4005 :
4006 4256 : const_length_a += access_size_a;
4007 4256 : const_length_b += access_size_b;
4008 :
4009 4256 : if (ranges_known_overlap_p (offset_a, const_length_a,
4010 : offset_b, const_length_b))
4011 : return 1;
4012 :
4013 536 : if (!ranges_maybe_overlap_p (offset_a, const_length_a,
4014 : offset_b, const_length_b))
4015 536 : return 0;
4016 :
4017 : return -1;
4018 : }
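/* [Editor's sketch] The compile-time aliasing decision reduced to a plain
   interval-overlap test over constants.  Each reference covers
   [offset, offset + |seg_len| + access_size), and a negative-step segment
   is first shifted down, as in the code above.  Unlike the poly-int
   version, with constants there is no "cannot decide" (-1) case.  */
#include <cstdio>

static int
ranges_alias_p (long off_a, long seg_a, long size_a,
                long off_b, long seg_b, long size_b)
{
  if (seg_a < 0) { off_a += seg_a; seg_a = -seg_a; }
  if (seg_b < 0) { off_b += seg_b; seg_b = -seg_b; }
  long end_a = off_a + seg_a + size_a;
  long end_b = off_b + seg_b + size_b;
  return off_a < end_b && off_b < end_a;
}

int
main ()
{
  /* Byte ranges [0, 16) and [32, 48): no alias.  */
  printf ("%d\n", ranges_alias_p (0, 12, 4, 32, 12, 4));  /* prints 0 */
  /* Byte ranges [0, 16) and [8, 24): alias.  */
  printf ("%d\n", ranges_alias_p (0, 12, 4, 8, 12, 4));   /* prints 1 */
  return 0;
}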
4019 :
4020 : /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
4021 : in DDR is >= VF. */
4022 :
4023 : static bool
4024 82800 : dependence_distance_ge_vf (data_dependence_relation *ddr,
4025 : unsigned int loop_depth, poly_uint64 vf)
4026 : {
4027 82800 : if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
4028 87801 : || DDR_NUM_DIST_VECTS (ddr) == 0)
4029 : return false;
4030 :
4031 : /* If the dependence is exact, we should have limited the VF instead. */
4032 5036 : gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr));
4033 :
4034 : unsigned int i;
4035 : lambda_vector dist_v;
4036 10103 : FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
4037 : {
4038 10068 : HOST_WIDE_INT dist = dist_v[loop_depth];
4039 10068 : if (dist != 0
4040 5036 : && !(dist > 0 && DDR_REVERSED_P (ddr))
4041 15104 : && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
4042 : return false;
4043 : }
4044 :
4045 35 : if (dump_enabled_p ())
4046 2 : dump_printf_loc (MSG_NOTE, vect_location,
4047 : "dependence distance between %T and %T is >= VF\n",
4048 2 : DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
4049 :
4050 : return true;
4051 : }
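/* [Editor's sketch] Why a known dependence distance makes the runtime
   alias check unnecessary: one vector iteration touches elements
   [i, i + VF), which cannot reach element i + DIST once DIST >= VF.
   Illustrative only; the real test above also honors DDR_REVERSED_P and
   polynomial VFs.  */
static bool
runtime_check_needed_p (long dist, long vf)
{
  return dist != 0 && dist < vf;
}
/* E.g. for "a[i + 4] = a[i] + 1" the distance is 4, so any VF <= 4
   needs no versioning for alias.  */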
4052 :
4053 : /* Dump LOWER_BOUND using flags DUMP_KIND. Dumps are known to be enabled. */
4054 :
4055 : static void
4056 437 : dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
4057 : {
4058 437 : dump_printf (dump_kind, "%s (%T) >= ",
4059 437 : lower_bound.unsigned_p ? "unsigned" : "abs",
4060 437 : lower_bound.expr);
4061 437 : dump_dec (dump_kind, lower_bound.min_value);
4062 437 : }
4063 :
4064 : /* Record that the vectorized loop requires the vec_lower_bound described
4065 : by EXPR, UNSIGNED_P and MIN_VALUE. */
4066 :
4067 : static void
4068 6524 : vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
4069 : poly_uint64 min_value)
4070 : {
4071 6524 : vec<vec_lower_bound> &lower_bounds
4072 : = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
4073 7496 : for (unsigned int i = 0; i < lower_bounds.length (); ++i)
4074 5874 : if (operand_equal_p (lower_bounds[i].expr, expr, 0))
4075 : {
4076 4902 : unsigned_p &= lower_bounds[i].unsigned_p;
4077 4902 : min_value = upper_bound (lower_bounds[i].min_value, min_value);
4078 4902 : if (lower_bounds[i].unsigned_p != unsigned_p
4079 4902 : || maybe_lt (lower_bounds[i].min_value, min_value))
4080 : {
4081 790 : lower_bounds[i].unsigned_p = unsigned_p;
4082 790 : lower_bounds[i].min_value = min_value;
4083 790 : if (dump_enabled_p ())
4084 : {
4085 250 : dump_printf_loc (MSG_NOTE, vect_location,
4086 : "updating run-time check to ");
4087 250 : dump_lower_bound (MSG_NOTE, lower_bounds[i]);
4088 250 : dump_printf (MSG_NOTE, "\n");
4089 : }
4090 : }
4091 4902 : return;
4092 : }
4093 :
4094 1622 : vec_lower_bound lower_bound (expr, unsigned_p, min_value);
4095 1622 : if (dump_enabled_p ())
4096 : {
4097 187 : dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
4098 187 : dump_lower_bound (MSG_NOTE, lower_bound);
4099 187 : dump_printf (MSG_NOTE, "\n");
4100 : }
4101 1622 : LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
4102 : }
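/* [Editor's sketch] The merging rule above in isolation: when the same
   expression is bounded twice, a single entry is kept whose minimum is
   the larger of the two and whose signedness is only what both requests
   guarantee.  Field names are assumptions for the example.  */
#include <algorithm>

struct lower_bound_req
{
  bool unsigned_p;  /* true: EXPR >= min as unsigned; false: abs (EXPR) >= min */
  long min_value;
};

static void
merge_lower_bound (lower_bound_req &existing, bool unsigned_p, long min_value)
{
  existing.unsigned_p &= unsigned_p;
  existing.min_value = std::max (existing.min_value, min_value);
}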
4103 :
4104 : /* Return true if it's unlikely that the step of the vectorized form of DR_INFO
4105 : will span fewer than GAP bytes. */
4106 :
4107 : static bool
4108 5312 : vect_small_gap_p (loop_vec_info loop_vinfo, dr_vec_info *dr_info,
4109 : poly_int64 gap)
4110 : {
4111 5312 : stmt_vec_info stmt_info = dr_info->stmt;
4112 5312 : HOST_WIDE_INT count
4113 5312 : = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
4114 5312 : if (DR_GROUP_FIRST_ELEMENT (stmt_info))
4115 4552 : count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
4116 5312 : return (estimated_poly_value (gap)
4117 5312 : <= count * vect_get_scalar_dr_size (dr_info));
4118 : }
4119 :
4120 : /* Return true if we know that there is no alias between DR_INFO_A and
4121 : DR_INFO_B when abs (DR_STEP (DR_INFO_A->dr)) >= N for some N.
4122 : When returning true, set *LOWER_BOUND_OUT to this N. */
4123 :
4124 : static bool
4125 19428 : vectorizable_with_step_bound_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b,
4126 : poly_uint64 *lower_bound_out)
4127 : {
4128 : /* Check that there is a constant gap of known sign between DR_A
4129 : and DR_B. */
4130 19428 : data_reference *dr_a = dr_info_a->dr;
4131 19428 : data_reference *dr_b = dr_info_b->dr;
4132 19428 : poly_int64 init_a, init_b;
4133 19428 : if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
4134 8792 : || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
4135 8106 : || !operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)
4136 8096 : || !poly_int_tree_p (DR_INIT (dr_a), &init_a)
4137 8096 : || !poly_int_tree_p (DR_INIT (dr_b), &init_b)
4138 19428 : || !ordered_p (init_a, init_b))
4139 11332 : return false;
4140 :
4141 : /* Sort DR_A and DR_B by the address they access. */
4142 8096 : if (maybe_lt (init_b, init_a))
4143 : {
4144 116 : std::swap (init_a, init_b);
4145 116 : std::swap (dr_info_a, dr_info_b);
4146 116 : std::swap (dr_a, dr_b);
4147 : }
4148 :
4149 : /* If the two accesses could be dependent within a scalar iteration,
4150 : make sure that we'd retain their order. */
4151 8096 : if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_info_a), init_b)
4152 8096 : && !vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
4153 : return false;
4154 :
4155 : /* There is no alias if abs (DR_STEP) is greater than or equal to
4156 : the bytes spanned by the combination of the two accesses. */
4157 8096 : *lower_bound_out = init_b + vect_get_scalar_dr_size (dr_info_b) - init_a;
4158 8096 : return true;
4159 : }
4160 :
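/* [Editor's sketch] The bound computed above, for constant inits: once
   the two references are sorted by start offset, any |DR_STEP| of at
   least init_b + size_b - init_a bytes steps over both accesses and so
   rules out cross-iteration aliasing.  Illustrative only.  */
#include <utility>

static long
no_alias_step_lower_bound (long init_a, long size_a,
                           long init_b, long size_b)
{
  if (init_b < init_a)
    {
      std::swap (init_a, init_b);
      std::swap (size_a, size_b);
    }
  /* Bytes spanned by the combination of the two accesses.  */
  return init_b + size_b - init_a;
}
/* E.g. two 8-byte accesses whose inits are 8 bytes apart give a bound of
   16: a runtime check that |step| >= 16 proves the pair independent.  */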
4161 : /* Function vect_prune_runtime_alias_test_list.
4162 :
4163 : Prune a list of ddrs to be tested at run-time by versioning for alias.
4164 : Merge several alias checks into one if possible.
4165 :    Return FALSE if the resulting list of ddrs is longer than allowed by
4166 : PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
4167 :
4168 : opt_result
4169 409936 : vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
4170 : {
4171 409936 : typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
4172 409936 : hash_set <tree_pair_hash> compared_objects;
4173 :
4174 409936 : const vec<ddr_p> &may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
4175 409936 : vec<dr_with_seg_len_pair_t> &comp_alias_ddrs
4176 : = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
4177 409936 : const vec<vec_object_pair> &check_unequal_addrs
4178 : = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
4179 409936 : poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4180 409936 : tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
4181 :
4182 409936 : ddr_p ddr;
4183 409936 : unsigned int i;
4184 409936 : tree length_factor;
4185 :
4186 409936 : DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
4187 :
4188 : /* Step values are irrelevant for aliasing if the number of vector
4189 : iterations is equal to the number of scalar iterations (which can
4190 : happen for fully-SLP loops). */
4191 409936 : bool vf_one_p = known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1U);
4192 :
4193 409936 : if (!vf_one_p)
4194 : {
4195 : /* Convert the checks for nonzero steps into bound tests. */
4196 : tree value;
4197 406600 : FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
4198 1569 : vect_check_lower_bound (loop_vinfo, value, true, 1);
4199 : }
4200 :
4201 409936 : if (may_alias_ddrs.is_empty ())
4202 383793 : return opt_result::success ();
4203 :
4204 26143 : comp_alias_ddrs.create (may_alias_ddrs.length ());
4205 :
4206 26143 : unsigned int loop_depth
4207 26143 : = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num,
4208 26143 : LOOP_VINFO_LOOP_NEST (loop_vinfo));
4209 :
4210 : /* First, we collect all data ref pairs for aliasing checks. */
4211 105211 : FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
4212 : {
4213 82800 : poly_uint64 lower_bound;
4214 82800 : tree segment_length_a, segment_length_b;
4215 82800 : unsigned HOST_WIDE_INT access_size_a, access_size_b;
4216 82800 : unsigned HOST_WIDE_INT align_a, align_b;
4217 :
4218 : /* Ignore the alias if the VF we chose ended up being no greater
4219 : than the dependence distance. */
4220 82800 : if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor))
4221 13424 : continue;
4222 :
4223 82765 : if (DDR_OBJECT_A (ddr))
4224 : {
4225 106 : vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
4226 106 : if (!compared_objects.add (new_pair))
4227 : {
4228 22 : if (dump_enabled_p ())
4229 16 : dump_printf_loc (MSG_NOTE, vect_location,
4230 : "checking that %T and %T"
4231 : " have different addresses\n",
4232 : new_pair.first, new_pair.second);
4233 22 : LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
4234 : }
4235 106 : continue;
4236 106 : }
4237 :
4238 82659 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
4239 82659 : stmt_vec_info stmt_info_a = dr_info_a->stmt;
4240 :
4241 82659 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
4242 82659 : stmt_vec_info stmt_info_b = dr_info_b->stmt;
4243 :
4244 82659 : bool preserves_scalar_order_p
4245 82659 : = vect_preserves_scalar_order_p (dr_info_a, dr_info_b);
4246 82659 : bool ignore_step_p
4247 : = (vf_one_p
4248 82659 : && (preserves_scalar_order_p
4249 4037 : || operand_equal_p (DR_STEP (dr_info_a->dr),
4250 4037 : DR_STEP (dr_info_b->dr))));
4251 :
4252 : /* Skip the pair if inter-iteration dependencies are irrelevant
4253 : and intra-iteration dependencies are guaranteed to be honored. */
4254 15825 : if (ignore_step_p
4255 8033 : && (preserves_scalar_order_p
4256 3318 : || vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
4257 : &lower_bound)))
4258 : {
4259 7792 : if (dump_enabled_p ())
4260 2528 : dump_printf_loc (MSG_NOTE, vect_location,
4261 : "no need for alias check between "
4262 : "%T and %T when VF is 1\n",
4263 2528 : DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
4264 7792 : continue;
4265 : }
4266 :
4267 : /* See whether we can handle the alias using a bounds check on
4268 : the step, and whether that's likely to be the best approach.
4269 : (It might not be, for example, if the minimum step is much larger
4270 : than the number of bytes handled by one vector iteration.) */
4271 74867 : if (!ignore_step_p
4272 74626 : && TREE_CODE (DR_STEP (dr_info_a->dr)) != INTEGER_CST
4273 16110 : && vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
4274 : &lower_bound)
4275 79886 : && (vect_small_gap_p (loop_vinfo, dr_info_a, lower_bound)
4276 293 : || vect_small_gap_p (loop_vinfo, dr_info_b, lower_bound)))
4277 : {
4278 4955 : bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
4279 4955 : if (dump_enabled_p ())
4280 : {
4281 3384 : dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
4282 : "%T and %T when the step %T is outside ",
4283 : DR_REF (dr_info_a->dr),
4284 1692 : DR_REF (dr_info_b->dr),
4285 1692 : DR_STEP (dr_info_a->dr));
4286 1692 : if (unsigned_p)
4287 504 : dump_printf (MSG_NOTE, "[0");
4288 : else
4289 : {
4290 1188 : dump_printf (MSG_NOTE, "(");
4291 1188 : dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
4292 : }
4293 1692 : dump_printf (MSG_NOTE, ", ");
4294 1692 : dump_dec (MSG_NOTE, lower_bound);
4295 1692 : dump_printf (MSG_NOTE, ")\n");
4296 : }
4297 4955 : vect_check_lower_bound (loop_vinfo, DR_STEP (dr_info_a->dr),
4298 : unsigned_p, lower_bound);
4299 4955 : continue;
4300 4955 : }
4301 :
4302 69912 : stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
4303 69912 : if (dr_group_first_a)
4304 : {
4305 20043 : stmt_info_a = dr_group_first_a;
4306 20043 : dr_info_a = STMT_VINFO_DR_INFO (stmt_info_a);
4307 : }
4308 :
4309 69912 : stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
4310 69912 : if (dr_group_first_b)
4311 : {
4312 20794 : stmt_info_b = dr_group_first_b;
4313 20794 : dr_info_b = STMT_VINFO_DR_INFO (stmt_info_b);
4314 : }
4315 :
4316 69912 : if (ignore_step_p)
4317 : {
4318 241 : segment_length_a = size_zero_node;
4319 241 : segment_length_b = size_zero_node;
4320 : }
4321 : else
4322 : {
4323 69671 : if (!operand_equal_p (DR_STEP (dr_info_a->dr),
4324 69671 : DR_STEP (dr_info_b->dr), 0))
4325 : {
4326 14477 : length_factor = scalar_loop_iters;
4327 14477 : if (TREE_CODE (length_factor) == SCEV_NOT_KNOWN)
4328 12 : return opt_result::failure_at (vect_location,
4329 : "Unsupported alias check on"
4330 : " uncounted loop\n");
4331 : }
4332 : else
4333 55194 : length_factor = size_int (vect_factor);
4334 69659 : segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
4335 69659 : segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
4336 : }
4337 69900 : access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a);
4338 69900 : access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b);
4339 69900 : align_a = vect_vfa_align (dr_info_a);
4340 69900 : align_b = vect_vfa_align (dr_info_b);
4341 :
4342 : /* See whether the alias is known at compilation time. */
4343 69900 : if (operand_equal_p (DR_BASE_ADDRESS (dr_info_a->dr),
4344 69900 : DR_BASE_ADDRESS (dr_info_b->dr), 0)
4345 6090 : && operand_equal_p (DR_OFFSET (dr_info_a->dr),
4346 6090 : DR_OFFSET (dr_info_b->dr), 0)
4347 4398 : && TREE_CODE (DR_STEP (dr_info_a->dr)) == INTEGER_CST
4348 4324 : && TREE_CODE (DR_STEP (dr_info_b->dr)) == INTEGER_CST
4349 4314 : && poly_int_tree_p (segment_length_a)
4350 74172 : && poly_int_tree_p (segment_length_b))
4351 : {
4352 4256 : int res = vect_compile_time_alias (dr_info_a, dr_info_b,
4353 : segment_length_a,
4354 : segment_length_b,
4355 : access_size_a,
4356 : access_size_b);
4357 4256 : if (res >= 0 && dump_enabled_p ())
4358 : {
4359 208 : dump_printf_loc (MSG_NOTE, vect_location,
4360 : "can tell at compile time that %T and %T",
4361 104 : DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
4362 104 : if (res == 0)
4363 57 : dump_printf (MSG_NOTE, " do not alias\n");
4364 : else
4365 47 : dump_printf (MSG_NOTE, " alias\n");
4366 : }
4367 :
4368 4256 : if (res == 0)
4369 536 : continue;
4370 :
4371 3720 : if (res == 1)
4372 3720 : return opt_result::failure_at (stmt_info_b->stmt,
4373 : "not vectorized:"
4374 : " compilation time alias: %G%G",
4375 : stmt_info_a->stmt,
4376 : stmt_info_b->stmt);
4377 : }
4378 :
4379 : /* dr_with_seg_len requires the alignment to apply to the segment length
4380 : and access size, not just the start address. The access size can be
4381 : smaller than the pointer alignment for grouped accesses and bitfield
4382 : references; see PR115192 and PR116125 respectively. */
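     :       /* E.g. a 1-byte access through a 4-byte-aligned pointer must
     :          be treated as having alignment 1 here: least_bit_hwi (1)
     :          is 1, so the std::min below caps the recorded alignment.  */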
4383 65644 : align_a = std::min (align_a, least_bit_hwi (access_size_a));
4384 65644 : align_b = std::min (align_b, least_bit_hwi (access_size_b));
4385 :
4386 65644 : dr_with_seg_len dr_a (dr_info_a->dr, segment_length_a,
4387 65644 : access_size_a, align_a);
4388 65644 : dr_with_seg_len dr_b (dr_info_b->dr, segment_length_b,
4389 65644 : access_size_b, align_b);
4390 : /* Canonicalize the order to be the one that's needed for accurate
4391 : RAW, WAR and WAW flags, in cases where the data references are
4392 : well-ordered. The order doesn't really matter otherwise,
4393 : but we might as well be consistent. */
4394 65644 : if (get_later_stmt (stmt_info_a, stmt_info_b) == stmt_info_a)
4395 4884 : std::swap (dr_a, dr_b);
4396 :
4397 65644 : dr_with_seg_len_pair_t dr_with_seg_len_pair
4398 : (dr_a, dr_b, (preserves_scalar_order_p
4399 : ? dr_with_seg_len_pair_t::WELL_ORDERED
4400 72173 : : dr_with_seg_len_pair_t::REORDERED));
4401 :
4402 65644 : comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
4403 : }
4404 :
4405 22411 : prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
4406 :
4407 44822 : unsigned int count = (comp_alias_ddrs.length ()
4408 22411 : + check_unequal_addrs.length ());
4409 :
4410 22411 : if (count
4411 22411 : && (loop_cost_model (LOOP_VINFO_LOOP (loop_vinfo))
4412 : == VECT_COST_MODEL_VERY_CHEAP))
4413 12615 : return opt_result::failure_at
4414 12615 : (vect_location, "would need a runtime alias check\n");
4415 :
4416 9796 : if (dump_enabled_p ())
4417 1946 : dump_printf_loc (MSG_NOTE, vect_location,
4418 : "improved number of alias checks from %d to %d\n",
4419 : may_alias_ddrs.length (), count);
4420 9796 : unsigned limit = param_vect_max_version_for_alias_checks;
4421 9796 : if (loop_cost_model (LOOP_VINFO_LOOP (loop_vinfo)) == VECT_COST_MODEL_CHEAP)
4422 934 : limit = param_vect_max_version_for_alias_checks * 6 / 10;
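     :   /* Editorial note: assuming the default of 10 for
     :      --param vect-max-version-for-alias-checks, the cheap cost model
     :      thus allows at most 6 run-time checks.  */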
4423 9796 : if (count > limit)
4424 162 : return opt_result::failure_at
4425 162 : (vect_location,
4426 : "number of versioning for alias run-time tests exceeds %d "
4427 : "(--param vect-max-version-for-alias-checks)\n", limit);
4428 :
4429 9634 : return opt_result::success ();
4430 409936 : }
4431 :
4432 : /* Structure to hold information about a supported gather/scatter
4433 : configuration. */
4434 : struct gather_scatter_config
4435 : {
4436 : internal_fn ifn;
4437 : tree offset_vectype;
4438 : int scale;
4439 : vec<int> elsvals;
4440 : };
4441 :
4442 : /* Determine which gather/scatter IFN is supported for the given parameters.
4443 : IFN_MASK_GATHER_LOAD, IFN_GATHER_LOAD, and IFN_MASK_LEN_GATHER_LOAD
4444 : are mutually exclusive, so we only need to find one. Return the
4445 : supported IFN or IFN_LAST if none are supported. */
4446 :
4447 : static internal_fn
4448 1177948 : vect_gather_scatter_which_ifn (bool read_p, bool masked_p,
4449 : tree vectype, tree memory_type,
4450 : tree offset_vectype, int scale,
4451 : vec<int> *elsvals)
4452 : {
4453 : /* Work out which functions to try. */
4454 1177948 : internal_fn ifn, alt_ifn, alt_ifn2;
4455 1177948 : if (read_p)
4456 : {
4457 875806 : ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
4458 : alt_ifn = IFN_MASK_GATHER_LOAD;
4459 : alt_ifn2 = IFN_MASK_LEN_GATHER_LOAD;
4460 : }
4461 : else
4462 : {
4463 302142 : ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
4464 : alt_ifn = IFN_MASK_SCATTER_STORE;
4465 : alt_ifn2 = IFN_MASK_LEN_SCATTER_STORE;
4466 : }
4467 :
4468 1177948 : if (!offset_vectype)
4469 : return IFN_LAST;
4470 :
4471 1177948 : if (internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
4472 : offset_vectype, scale, elsvals))
4473 : return ifn;
4474 1177948 : if (internal_gather_scatter_fn_supported_p (alt_ifn, vectype, memory_type,
4475 : offset_vectype, scale, elsvals))
4476 : return alt_ifn;
4477 1177948 : if (internal_gather_scatter_fn_supported_p (alt_ifn2, vectype, memory_type,
4478 : offset_vectype, scale, elsvals))
4479 : return alt_ifn2;
4480 :
4481 : return IFN_LAST;
4482 : }
4483 :
4484 : /* Collect all supported offset vector types for a gather load or scatter
4485 : store. READ_P is true for loads and false for stores. MASKED_P is true
4486 : if the load or store is conditional. VECTYPE is the data vector type.
4487 : MEMORY_TYPE is the type of the memory elements being loaded or stored,
4488 : and OFFSET_TYPE is the type of the offset.
4489 : SCALE is the amount by which the offset should be multiplied.
4490 :
4491 : Return a vector of all configurations the target supports (which
4492 : may be empty). */
4493 :
4494 : static auto_vec<gather_scatter_config>
4495 84997 : vect_gather_scatter_get_configs (vec_info *vinfo, bool read_p, bool masked_p,
4496 : tree vectype, tree memory_type,
4497 : tree offset_type, int scale)
4498 : {
4499 84997 : auto_vec<gather_scatter_config> configs;
4500 :
4501 84997 : auto_vec<tree, 8> offset_types_to_try;
4502 :
4503 : /* Try all sizes from the offset type's precision up to POINTER_SIZE. */
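     :   /* E.g. a 16-bit offset type with 64-bit pointers yields the
     :      candidates int16/uint16, int32/uint32 and int64/uint64,
     :      signed before unsigned at each width.  */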
4504 84997 : for (unsigned int bits = TYPE_PRECISION (offset_type);
4505 397820 : bits <= POINTER_SIZE;
4506 299351 : bits *= 2)
4507 : {
4508 : /* Signed variant. */
4509 299351 : offset_types_to_try.safe_push
4510 299351 : (build_nonstandard_integer_type (bits, 0));
4511 : /* Unsigned variant. */
4512 299351 : offset_types_to_try.safe_push
4513 299351 : (build_nonstandard_integer_type (bits, 1));
4514 : }
4515 :
4516 : /* Once we find which IFN works for one offset type, we know that it
4517 : will work for other offset types as well. Then we can perform
4518 : the checks for the remaining offset types with only that IFN.
4519 : However, we might need to try different offset types to find which
4520 : IFN is supported, since the check is offset-type-specific. */
4521 : internal_fn ifn = IFN_LAST;
4522 :
4523 : /* Try each offset type. */
4524 683699 : for (unsigned int i = 0; i < offset_types_to_try.length (); i++)
4525 : {
4526 598702 : tree offset_type = offset_types_to_try[i];
4527 598702 : tree offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
4528 598702 : if (!offset_vectype)
4529 10194 : continue;
4530 :
4531 : /* Try multiple scale values. Start with exact match, then try
4532 : smaller common scales that a target might support. */
4533 588508 : int scales_to_try[] = {scale, 1, 2, 4, 8};
4534 :
4535 3531048 : for (unsigned int j = 0;
4536 3531048 : j < sizeof (scales_to_try) / sizeof (*scales_to_try);
4537 : j++)
4538 : {
4539 2942540 : int try_scale = scales_to_try[j];
4540 :
4541 : /* Skip scales >= requested scale (except for exact match). */
4542 2942540 : if (j > 0 && try_scale >= scale)
4543 1764592 : continue;
4544 :
4545 : /* Skip if requested scale is not a multiple of this scale. */
4546 1178092 : if (j > 0 && scale % try_scale != 0)
4547 144 : continue;
4548 :
4549 1177948 : vec<int> elsvals = vNULL;
4550 :
4551 : /* If we haven't determined which IFN is supported yet, try all three
4552 : to find which one the target supports. */
4553 1177948 : if (ifn == IFN_LAST)
4554 : {
4555 1177948 : ifn = vect_gather_scatter_which_ifn (read_p, masked_p,
4556 : vectype, memory_type,
4557 : offset_vectype, try_scale,
4558 : &elsvals);
4559 1177948 : if (ifn != IFN_LAST)
4560 : {
4561 : /* Found which IFN is supported. Save this configuration. */
4562 0 : gather_scatter_config config;
4563 0 : config.ifn = ifn;
4564 0 : config.offset_vectype = offset_vectype;
4565 0 : config.scale = try_scale;
4566 0 : config.elsvals = elsvals;
4567 0 : configs.safe_push (config);
4568 : }
4569 : }
4570 : else
4571 : {
4572 : /* We already know which IFN is supported, just check if this
4573 : offset type and scale work with it. */
4574 0 : if (internal_gather_scatter_fn_supported_p (ifn, vectype,
4575 : memory_type,
4576 : offset_vectype,
4577 : try_scale,
4578 : &elsvals))
4579 : {
4580 0 : gather_scatter_config config;
4581 0 : config.ifn = ifn;
4582 0 : config.offset_vectype = offset_vectype;
4583 0 : config.scale = try_scale;
4584 0 : config.elsvals = elsvals;
4585 0 : configs.safe_push (config);
4586 : }
4587 : }
4588 : }
4589 : }
4590 :
4591 84997 : return configs;
4592 84997 : }
4593 :
4594 : /* Check whether we can use an internal function for a gather load
4595 : or scatter store. READ_P is true for loads and false for stores.
4596 : MASKED_P is true if the load or store is conditional. MEMORY_TYPE is
4597 : the type of the memory elements being loaded or stored. OFFSET_TYPE
4598 : is the type of the offset that is being applied to the invariant
4599 : base address. If OFFSET_TYPE is scalar the function chooses an
4600 : appropriate vector type for it. SCALE is the amount by which the
4601 : offset should be multiplied *after* it has been converted to address width.
4602 : If the target does not support the requested SCALE, SUPPORTED_SCALE
4603 : will contain the scale that is actually supported
4604 : (which may be smaller, requiring additional multiplication).
4605 : Otherwise SUPPORTED_SCALE is 0.
4606 :
4607 : Return true if the function is supported, storing the function id in
4608 : *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT.
4609 : If we support an offset vector type with different signedness than
4610 : OFFSET_TYPE store it in SUPPORTED_OFFSET_VECTYPE.
4611 :
4612 : If we can use gather/scatter and ELSVALS is nonzero, store the possible
4613 : else values in ELSVALS. */
4614 :
4615 : bool
4616 84997 : vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
4617 : tree vectype, tree memory_type, tree offset_type,
4618 : int scale, int *supported_scale,
4619 : internal_fn *ifn_out,
4620 : tree *offset_vectype_out,
4621 : tree *supported_offset_vectype,
4622 : vec<int> *elsvals)
4623 : {
4624 84997 : *supported_offset_vectype = NULL_TREE;
4625 84997 : *supported_scale = 0;
4626 84997 : unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
4627 84997 : unsigned int element_bits = vector_element_bits (vectype);
4628 84997 : if (element_bits != memory_bits)
4629 : /* For now the vector elements must be the same width as the
4630 : memory elements. */
4631 : return false;
4632 :
4633 : /* Get the original offset vector type for comparison. */
4634 84997 : tree offset_vectype = VECTOR_TYPE_P (offset_type)
4635 84997 : ? offset_type : get_vectype_for_scalar_type (vinfo, offset_type);
4636 :
4637 : /* If there is no offset vectype, bail. */
4638 70814 : if (!offset_vectype)
4639 : return false;
4640 :
4641 84997 : offset_type = TREE_TYPE (offset_vectype);
4642 :
4643 : /* Get all supported configurations for this data vector type. */
4644 84997 : auto_vec<gather_scatter_config> configs
4645 : = vect_gather_scatter_get_configs (vinfo, read_p, masked_p, vectype,
4646 84997 : memory_type, offset_type, scale);
4647 :
4648 84997 : if (configs.is_empty ())
4649 : return false;
4650 :
4651 : /* Selection priority:
4652 : 1 - Exact scale match + offset type match
4653 : 2 - Exact scale match + sign-swapped offset
4654 : 3 - Smaller scale + offset type match
4655 : 4 - Smaller scale + sign-swapped offset
4656 : Within each category, prefer smaller offset types. */
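     :   /* A worked example (editorial): if the request is scale 8 with a
     :      signed 32-bit offset but the target only supports unsigned
     :      offsets at scale 8 and signed offsets at scale 1, category 2
     :      applies: keep scale 8 and convert the offset rather than fall
     :      back to scale 1 plus an explicit multiplication.  */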
4657 :
4658 : /* First pass: exact scale match with no conversion. */
4659 0 : for (unsigned int i = 0; i < configs.length (); i++)
4660 : {
4661 0 : if (configs[i].scale == scale
4662 0 : && TYPE_SIGN (configs[i].offset_vectype)
4663 0 : == TYPE_SIGN (offset_vectype))
4664 : {
4665 0 : *ifn_out = configs[i].ifn;
4666 0 : *offset_vectype_out = configs[i].offset_vectype;
4667 0 : if (elsvals)
4668 0 : *elsvals = configs[i].elsvals;
4669 0 : return true;
4670 : }
4671 : }
4672 :
4673 : /* No direct match. This means we try to find either
4674 : - a sign-swapped offset vectype or
4675 : - a different scale and 2x larger offset type
4676 : - a different scale and larger sign-swapped offset vectype. */
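     :   /* The precision requirement below keeps the sign swap
     :      value-preserving: an unsigned P-bit offset always fits in a
     :      signed offset of at least 2*P bits, while a signed offset can
     :      only be reinterpreted as unsigned at full pointer width, where
     :      address arithmetic wraps anyway (editorial gloss).  */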
4677 0 : unsigned int offset_precision = TYPE_PRECISION (TREE_TYPE (offset_vectype));
4678 0 : unsigned int needed_precision
4679 0 : = TYPE_UNSIGNED (offset_vectype) ? offset_precision * 2 : POINTER_SIZE;
4680 0 : needed_precision = std::min (needed_precision, (unsigned) POINTER_SIZE);
4681 :
4682 : /* Second pass: No direct match. This means we try to find a sign-swapped
4683 : offset vectype. */
4684 0 : enum tree_code tmp;
4685 0 : for (unsigned int i = 0; i < configs.length (); i++)
4686 : {
4687 0 : unsigned int precision
4688 0 : = TYPE_PRECISION (TREE_TYPE (configs[i].offset_vectype));
4689 0 : if (configs[i].scale == scale
4690 0 : && precision >= needed_precision
4691 0 : && (supportable_convert_operation (CONVERT_EXPR,
4692 0 : configs[i].offset_vectype,
4693 : offset_vectype, &tmp)
4694 0 : || (needed_precision == offset_precision
4695 0 : && tree_nop_conversion_p (configs[i].offset_vectype,
4696 : offset_vectype))))
4697 : {
4698 0 : *ifn_out = configs[i].ifn;
4699 0 : *offset_vectype_out = offset_vectype;
4700 0 : *supported_offset_vectype = configs[i].offset_vectype;
4701 0 : if (elsvals)
4702 0 : *elsvals = configs[i].elsvals;
4703 0 : return true;
4704 : }
4705 : }
4706 :
4707 : /* Third pass: Try a smaller scale with the same signedness. */
4708 0 : needed_precision = offset_precision * 2;
4709 0 : needed_precision = std::min (needed_precision, (unsigned) POINTER_SIZE);
4710 :
4711 0 : for (unsigned int i = 0; i < configs.length (); i++)
4712 : {
4713 0 : unsigned int precision
4714 0 : = TYPE_PRECISION (TREE_TYPE (configs[i].offset_vectype));
4715 0 : if (configs[i].scale < scale
4716 0 : && TYPE_SIGN (configs[i].offset_vectype)
4717 0 : == TYPE_SIGN (offset_vectype)
4718 0 : && precision >= needed_precision)
4719 : {
4720 0 : *ifn_out = configs[i].ifn;
4721 0 : *offset_vectype_out = configs[i].offset_vectype;
4722 0 : *supported_scale = configs[i].scale;
4723 0 : if (elsvals)
4724 0 : *elsvals = configs[i].elsvals;
4725 0 : return true;
4726 : }
4727 : }
4728 :
4729 : /* Fourth pass: Try a smaller scale and sign-swapped offset vectype. */
4730 0 : needed_precision
4731 0 : = TYPE_UNSIGNED (offset_vectype) ? offset_precision * 2 : POINTER_SIZE;
4732 0 : needed_precision = std::min (needed_precision, (unsigned) POINTER_SIZE);
4733 :
4734 0 : for (unsigned int i = 0; i < configs.length (); i++)
4735 : {
4736 0 : unsigned int precision
4737 0 : = TYPE_PRECISION (TREE_TYPE (configs[i].offset_vectype));
4738 0 : if (configs[i].scale < scale
4739 0 : && precision >= needed_precision
4740 0 : && (supportable_convert_operation (CONVERT_EXPR,
4741 0 : configs[i].offset_vectype,
4742 : offset_vectype, &tmp)
4743 0 : || (needed_precision == offset_precision
4744 0 : && tree_nop_conversion_p (configs[i].offset_vectype,
4745 : offset_vectype))))
4746 : {
4747 0 : *ifn_out = configs[i].ifn;
4748 0 : *offset_vectype_out = offset_vectype;
4749 0 : *supported_offset_vectype = configs[i].offset_vectype;
4750 0 : *supported_scale = configs[i].scale;
4751 0 : if (elsvals)
4752 0 : *elsvals = configs[i].elsvals;
4753 0 : return true;
4754 : }
4755 : }
4756 :
4757 : return false;
4758 84997 : }
4759 :
4760 : /* STMT_INFO is a call to an internal gather load or scatter store function.
4761 : Describe the operation in INFO. */
4762 :
4763 : void
4764 0 : vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
4765 : gather_scatter_info *info)
4766 : {
4767 0 : gcall *call = as_a <gcall *> (stmt_info->stmt);
4768 0 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4769 0 : data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4770 :
4771 0 : info->ifn = gimple_call_internal_fn (call);
4772 0 : info->decl = NULL_TREE;
4773 0 : info->base = gimple_call_arg (call, 0);
4774 0 : info->alias_ptr = gimple_call_arg
4775 0 : (call, internal_fn_alias_ptr_index (info->ifn));
4776 0 : info->offset = gimple_call_arg
4777 0 : (call, internal_fn_offset_index (info->ifn));
4778 0 : info->offset_vectype = NULL_TREE;
4779 0 : info->scale = TREE_INT_CST_LOW (gimple_call_arg
4780 : (call, internal_fn_scale_index (info->ifn)));
4781 0 : info->element_type = TREE_TYPE (vectype);
4782 0 : info->memory_type = TREE_TYPE (DR_REF (dr));
4783 0 : }
4784 :
4785 : /* Return true if a non-affine read or write in STMT_INFO is suitable for a
4786 : gather load or scatter store with VECTYPE. Describe the operation in *INFO
4787 : if so. If it is suitable and ELSVALS is nonzero store the supported else
4788 : values in the vector it points to. */
4789 :
4790 : bool
4791 349804 : vect_check_gather_scatter (stmt_vec_info stmt_info, tree vectype,
4792 : loop_vec_info loop_vinfo,
4793 : gather_scatter_info *info, vec<int> *elsvals)
4794 : {
4795 349804 : HOST_WIDE_INT scale = 1;
4796 349804 : poly_int64 pbitpos, pbitsize;
4797 349804 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4798 349804 : struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4799 349804 : tree offtype = NULL_TREE;
4800 349804 : tree decl = NULL_TREE, base, off;
4801 349804 : tree memory_type = TREE_TYPE (DR_REF (dr));
4802 349804 : machine_mode pmode;
4803 349804 : int punsignedp, reversep, pvolatilep = 0;
4804 349804 : internal_fn ifn;
4805 349804 : tree offset_vectype;
4806 349804 : bool masked_p = false;
4807 :
4808 : /* See whether this is already a call to a gather/scatter internal function.
4809 : If not, see whether it's a masked load or store. */
4810 349804 : gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
4811 6306 : if (call && gimple_call_internal_p (call))
4812 : {
4813 6306 : ifn = gimple_call_internal_fn (call);
4814 6306 : if (internal_gather_scatter_fn_p (ifn))
4815 : {
4816 0 : vect_describe_gather_scatter_call (stmt_info, info);
4817 :
4818 : /* In pattern recog we simply used a ZERO else value that
4819 : we need to correct here. To that end just re-use the
4820 : (already successful) check if we support a gather IFN
4821 : and have it populate the else values. */
4822 0 : if (DR_IS_READ (dr) && internal_fn_mask_index (ifn) >= 0 && elsvals)
4823 0 : supports_vec_gather_load_p (TYPE_MODE (vectype), elsvals);
4824 0 : return true;
4825 : }
4826 6306 : masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
4827 : }
4828 :
4829 : /* True if we should aim to use internal functions rather than
4830 : built-in functions. */
4831 349804 : bool use_ifn_p = (DR_IS_READ (dr)
4832 349804 : ? supports_vec_gather_load_p (TYPE_MODE (vectype),
4833 : elsvals)
4834 349804 : : supports_vec_scatter_store_p (TYPE_MODE (vectype)));
4835 :
4836 349804 : base = DR_REF (dr);
4837 : /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF;
4838 : see if we can use the def stmt of the address. */
4839 349804 : if (masked_p
4840 6306 : && TREE_CODE (base) == MEM_REF
4841 6306 : && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
4842 6306 : && integer_zerop (TREE_OPERAND (base, 1))
4843 356110 : && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
4844 : {
4845 6306 : gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
4846 6306 : if (is_gimple_assign (def_stmt)
4847 6306 : && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
4848 639 : base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
4849 : }
4850 :
4851 : /* The gather and scatter builtins need address of the form
4852 : loop_invariant + vector * {1, 2, 4, 8}
4853 : or
4854 : loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
4855 : Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
4856 : of loop invariants/SSA_NAMEs defined in the loop, with casts,
4857 : multiplications and additions in it. To get a vector, we need
4858 : a single SSA_NAME that will be defined in the loop and will
4859 : contain everything that is not loop invariant and that can be
4860 : vectorized. The following code attempts to find such a preexisting
4861 : SSA_NAME OFF and put the loop invariants into a tree BASE
4862 : that can be gimplified before the loop. */
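     :   /* A sketch (editorial): for a gather like a[b[i]] with loop
     :      invariant &a, the decomposition below should end up with
     :      BASE = &a, OFF = the SSA_NAME computing b[i] and
     :      SCALE = sizeof (a[0]).  */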
4863 349804 : base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
4864 : &punsignedp, &reversep, &pvolatilep);
4865 349804 : if (reversep)
4866 : return false;
4867 :
4868 : /* PR 107346. Packed structs can have fields at offsets that are not
4869 : multiples of BITS_PER_UNIT. Do not use gather/scatters in such cases. */
4870 349804 : if (!multiple_p (pbitpos, BITS_PER_UNIT))
4871 : return false;
4872 :
4873 : /* We need to be able to form an address to the base which for example
4874 : isn't possible for hard registers. */
4875 349804 : if (may_be_nonaddressable_p (base))
4876 : return false;
4877 :
4878 349796 : poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
4879 :
4880 349796 : if (TREE_CODE (base) == MEM_REF)
4881 : {
4882 283960 : if (!integer_zerop (TREE_OPERAND (base, 1)))
4883 : {
4884 33697 : if (off == NULL_TREE)
4885 33380 : off = wide_int_to_tree (sizetype, mem_ref_offset (base));
4886 : else
4887 317 : off = size_binop (PLUS_EXPR, off,
4888 : fold_convert (sizetype, TREE_OPERAND (base, 1)));
4889 : }
4890 283960 : base = TREE_OPERAND (base, 0);
4891 : }
4892 : else
4893 65836 : base = build_fold_addr_expr (base);
4894 :
4895 349796 : if (off == NULL_TREE)
4896 225489 : off = size_zero_node;
4897 :
4898 : /* BASE must be loop invariant. If it is not invariant, but OFF is,
4899 : then we can fix that by swapping BASE and OFF. */
4900 349796 : if (!expr_invariant_in_loop_p (loop, base))
4901 : {
4902 259402 : if (!expr_invariant_in_loop_p (loop, off))
4903 : return false;
4904 :
4905 259127 : std::swap (base, off);
4906 : }
4907 :
4908 349521 : base = fold_convert (sizetype, base);
4909 349521 : base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
4910 349521 : int tmp_scale;
4911 349521 : tree tmp_offset_vectype;
4912 :
4913 : /* OFF at this point may be either a SSA_NAME or some tree expression
4914 : from get_inner_reference. Try to peel off loop invariants from it
4915 : into BASE as long as possible. */
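     :   /* E.g. (editorial sketch) OFF = _inv + _i with loop-invariant
     :      _inv peels to BASE += _inv * SCALE with OFF = _i, and
     :      OFF = _i * 4 with SCALE == 1 becomes SCALE = 4, OFF = _i.  */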
4916 349521 : STRIP_NOPS (off);
4917 916256 : while (offtype == NULL_TREE)
4918 : {
4919 796194 : enum tree_code code;
4920 796194 : tree op0, op1, add = NULL_TREE;
4921 :
4922 796194 : if (TREE_CODE (off) == SSA_NAME)
4923 : {
4924 610507 : gimple *def_stmt = SSA_NAME_DEF_STMT (off);
4925 :
4926 610507 : if (expr_invariant_in_loop_p (loop, off))
4927 0 : return false;
4928 :
4929 610507 : if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
4930 : break;
4931 :
4932 479660 : op0 = gimple_assign_rhs1 (def_stmt);
4933 479660 : code = gimple_assign_rhs_code (def_stmt);
4934 479660 : op1 = gimple_assign_rhs2 (def_stmt);
4935 : }
4936 : else
4937 : {
4938 185687 : if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
4939 : return false;
4940 185687 : code = TREE_CODE (off);
4941 185687 : extract_ops_from_tree (off, &code, &op0, &op1);
4942 : }
4943 665347 : switch (code)
4944 : {
4945 203307 : case POINTER_PLUS_EXPR:
4946 203307 : case PLUS_EXPR:
4947 203307 : if (expr_invariant_in_loop_p (loop, op0))
4948 : {
4949 134466 : add = op0;
4950 134466 : off = op1;
4951 187587 : do_add:
4952 187587 : add = fold_convert (sizetype, add);
4953 187587 : if (scale != 1)
4954 46376 : add = size_binop (MULT_EXPR, add, size_int (scale));
4955 187587 : base = size_binop (PLUS_EXPR, base, add);
4956 566735 : continue;
4957 : }
4958 68841 : if (expr_invariant_in_loop_p (loop, op1))
4959 : {
4960 52921 : add = op1;
4961 52921 : off = op0;
4962 52921 : goto do_add;
4963 : }
4964 : break;
4965 396 : case MINUS_EXPR:
4966 396 : if (expr_invariant_in_loop_p (loop, op1))
4967 : {
4968 200 : add = fold_convert (sizetype, op1);
4969 200 : add = size_binop (MINUS_EXPR, size_zero_node, add);
4970 200 : off = op0;
4971 200 : goto do_add;
4972 : }
4973 : break;
4974 202823 : case MULT_EXPR:
4975 202823 : if (scale == 1 && tree_fits_shwi_p (op1))
4976 : {
4977 170132 : int new_scale = tree_to_shwi (op1);
4978 : /* Only treat this as a scaling operation if the target
4979 : supports it for at least some offset type. */
4980 170132 : if (use_ifn_p
4981 0 : && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
4982 : masked_p, vectype, memory_type,
4983 : signed_char_type_node,
4984 : new_scale, &tmp_scale,
4985 : &ifn,
4986 : &offset_vectype,
4987 : &tmp_offset_vectype,
4988 : elsvals)
4989 170132 : && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
4990 : masked_p, vectype, memory_type,
4991 : unsigned_char_type_node,
4992 : new_scale, &tmp_scale,
4993 : &ifn,
4994 : &offset_vectype,
4995 : &tmp_offset_vectype,
4996 : elsvals))
4997 : break;
4998 170132 : scale = new_scale;
4999 170132 : off = op0;
5000 170132 : continue;
5001 170132 : }
5002 : break;
5003 0 : case SSA_NAME:
5004 0 : off = op0;
5005 0 : continue;
5006 214966 : CASE_CONVERT:
5007 429916 : if (!POINTER_TYPE_P (TREE_TYPE (op0))
5008 429916 : && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
5009 : break;
5010 :
5011 : /* Don't include the conversion if the target is happy with
5012 : the current offset type. */
5013 214966 : if (use_ifn_p
5014 0 : && TREE_CODE (off) == SSA_NAME
5015 0 : && !POINTER_TYPE_P (TREE_TYPE (off))
5016 214966 : && vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
5017 : masked_p, vectype, memory_type,
5018 0 : TREE_TYPE (off),
5019 : scale, &tmp_scale,
5020 : &ifn,
5021 : &offset_vectype,
5022 : &tmp_offset_vectype,
5023 : elsvals))
5024 : break;
5025 :
5026 214966 : if (TYPE_PRECISION (TREE_TYPE (op0))
5027 214966 : == TYPE_PRECISION (TREE_TYPE (off)))
5028 : {
5029 88954 : off = op0;
5030 88954 : continue;
5031 : }
5032 :
5033 : /* Include the conversion if it is widening and we're using
5034 : the IFN path, or the target can handle the converted-from
5035 : offset type, or the current size is not already the same as
5036 : the data vector element size. */
5037 126012 : if ((TYPE_PRECISION (TREE_TYPE (op0))
5038 126012 : < TYPE_PRECISION (TREE_TYPE (off)))
5039 126012 : && (use_ifn_p
5040 125254 : || (DR_IS_READ (dr)
5041 81462 : ? (targetm.vectorize.builtin_gather
5042 81462 : && targetm.vectorize.builtin_gather (vectype,
5043 81462 : TREE_TYPE (op0),
5044 : scale))
5045 43792 : : (targetm.vectorize.builtin_scatter
5046 43792 : && targetm.vectorize.builtin_scatter (vectype,
5047 43792 : TREE_TYPE (op0),
5048 : scale)))
5049 124158 : || !operand_equal_p (TYPE_SIZE (TREE_TYPE (off)),
5050 124158 : TYPE_SIZE (TREE_TYPE (vectype)), 0)))
5051 : {
5052 120062 : off = op0;
5053 120062 : offtype = TREE_TYPE (off);
5054 120062 : STRIP_NOPS (off);
5055 120062 : continue;
5056 : }
5057 : break;
5058 : default:
5059 : break;
5060 0 : }
5061 : break;
5062 : }
5063 :
5064 : /* If at the end OFF still isn't a SSA_NAME or isn't
5065 : defined in the loop, punt. */
5066 349521 : if (TREE_CODE (off) != SSA_NAME
5067 349521 : || expr_invariant_in_loop_p (loop, off))
5068 6377 : return false;
5069 :
5070 343144 : if (offtype == NULL_TREE)
5071 223436 : offtype = TREE_TYPE (off);
5072 :
5073 343144 : if (use_ifn_p)
5074 : {
5075 0 : if (!vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr), masked_p,
5076 : vectype, memory_type, offtype,
5077 : scale, &tmp_scale,
5078 : &ifn, &offset_vectype,
5079 : &tmp_offset_vectype,
5080 : elsvals))
5081 0 : ifn = IFN_LAST;
5082 : decl = NULL_TREE;
5083 : }
5084 : else
5085 : {
5086 343144 : if (DR_IS_READ (dr))
5087 : {
5088 258462 : if (targetm.vectorize.builtin_gather)
5089 258462 : decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
5090 : }
5091 : else
5092 : {
5093 84682 : if (targetm.vectorize.builtin_scatter)
5094 84682 : decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
5095 : }
5096 343144 : ifn = IFN_LAST;
5097 : /* The offset vector type will be read from DECL when needed. */
5098 343144 : offset_vectype = NULL_TREE;
5099 : }
5100 :
5101 343144 : gcc_checking_assert (expr_invariant_in_loop_p (loop, base));
5102 343144 : gcc_checking_assert (!expr_invariant_in_loop_p (loop, off));
5103 :
5104 343144 : info->ifn = ifn;
5105 343144 : info->decl = decl;
5106 343144 : info->base = base;
5107 :
5108 686288 : info->alias_ptr = build_int_cst
5109 343144 : (reference_alias_ptr_type (DR_REF (dr)),
5110 343144 : get_object_alignment (DR_REF (dr)));
5111 :
5112 343144 : info->offset = off;
5113 343144 : info->offset_vectype = offset_vectype;
5114 343144 : info->scale = scale;
5115 343144 : info->element_type = TREE_TYPE (vectype);
5116 343144 : info->memory_type = memory_type;
5117 343144 : return true;
5118 : }
5119 :
5120 : /* Find the data references in STMT, analyze them with respect to LOOP and
5121 : append them to DATAREFS. Return false if datarefs in this stmt cannot
5122 : be handled. */
5123 :
5124 : opt_result
5125 31952228 : vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
5126 : vec<data_reference_p> *datarefs,
5127 : vec<int> *dataref_groups, int group_id)
5128 : {
5129 : /* We can ignore clobbers for dataref analysis - they are removed during
5130 : loop vectorization and BB vectorization checks dependences with a
5131 : stmt walk. */
5132 31952228 : if (gimple_clobber_p (stmt))
5133 1087926 : return opt_result::success ();
5134 :
5135 57412285 : if (gimple_has_volatile_ops (stmt))
5136 320405 : return opt_result::failure_at (stmt, "not vectorized: volatile type: %G",
5137 : stmt);
5138 :
5139 30543897 : if (stmt_can_throw_internal (cfun, stmt))
5140 683519 : return opt_result::failure_at (stmt,
5141 : "not vectorized:"
5142 : " statement can throw an exception: %G",
5143 : stmt);
5144 :
5145 29860378 : auto_vec<data_reference_p, 2> refs;
5146 29860378 : opt_result res = find_data_references_in_stmt (loop, stmt, &refs);
5147 29860378 : if (!res)
5148 3652321 : return res;
5149 :
5150 26208057 : if (refs.is_empty ())
5151 15033258 : return opt_result::success ();
5152 :
5153 11174799 : if (refs.length () > 1)
5154 : {
5155 1241833 : while (!refs.is_empty ())
5156 828191 : free_data_ref (refs.pop ());
5157 413642 : return opt_result::failure_at (stmt,
5158 : "not vectorized: more than one "
5159 : "data ref in stmt: %G", stmt);
5160 : }
5161 :
5162 10761157 : data_reference_p dr = refs.pop ();
5163 10761157 : if (gcall *call = dyn_cast <gcall *> (stmt))
5164 20385 : if (!gimple_call_internal_p (call)
5165 20385 : || (gimple_call_internal_fn (call) != IFN_MASK_LOAD
5166 17314 : && gimple_call_internal_fn (call) != IFN_MASK_STORE))
5167 : {
5168 16802 : free_data_ref (dr);
5169 16802 : return opt_result::failure_at (stmt,
5170 : "not vectorized: dr in a call %G", stmt);
5171 : }
5172 :
5173 10744355 : if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
5174 10744355 : && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
5175 : {
5176 53634 : free_data_ref (dr);
5177 53634 : return opt_result::failure_at (stmt,
5178 : "not vectorized:"
5179 : " statement is an unsupported"
5180 : " bitfield access %G", stmt);
5181 : }
5182 :
5183 10690721 : if (DR_BASE_ADDRESS (dr)
5184 10603277 : && TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
5185 : {
5186 987 : free_data_ref (dr);
5187 987 : return opt_result::failure_at (stmt,
5188 : "not vectorized:"
5189 : " base addr of dr is a constant\n");
5190 : }
5191 :
5192 : /* Check whether this may be a SIMD lane access and adjust the
5193 : DR to make it easier for us to handle it. */
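     :   /* Such an access typically looks like a[_1] with
     :      _1 = .GOMP_SIMD_LANE (simduid, ...); the block below rewrites
     :      the DR into an access whose step is the element size and marks
     :      it as a simd-lane access (editorial summary).  */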
5194 10689734 : if (loop
5195 596191 : && loop->simduid
5196 10711 : && (!DR_BASE_ADDRESS (dr)
5197 2960 : || !DR_OFFSET (dr)
5198 2960 : || !DR_INIT (dr)
5199 2960 : || !DR_STEP (dr)))
5200 : {
5201 7751 : struct data_reference *newdr
5202 7751 : = create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt,
5203 7751 : DR_IS_READ (dr), DR_IS_CONDITIONAL_IN_STMT (dr));
5204 7751 : if (DR_BASE_ADDRESS (newdr)
5205 7751 : && DR_OFFSET (newdr)
5206 7751 : && DR_INIT (newdr)
5207 7751 : && DR_STEP (newdr)
5208 7751 : && TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
5209 15502 : && integer_zerop (DR_STEP (newdr)))
5210 : {
5211 7751 : tree base_address = DR_BASE_ADDRESS (newdr);
5212 7751 : tree off = DR_OFFSET (newdr);
5213 7751 : tree step = ssize_int (1);
5214 7751 : if (integer_zerop (off)
5215 7751 : && TREE_CODE (base_address) == POINTER_PLUS_EXPR)
5216 : {
5217 82 : off = TREE_OPERAND (base_address, 1);
5218 82 : base_address = TREE_OPERAND (base_address, 0);
5219 : }
5220 7751 : STRIP_NOPS (off);
5221 7751 : if (TREE_CODE (off) == MULT_EXPR
5222 7751 : && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
5223 : {
5224 7500 : step = TREE_OPERAND (off, 1);
5225 7500 : off = TREE_OPERAND (off, 0);
5226 7500 : STRIP_NOPS (off);
5227 : }
5228 541 : if (CONVERT_EXPR_P (off)
5229 7751 : && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0)))
5230 7210 : < TYPE_PRECISION (TREE_TYPE (off))))
5231 7210 : off = TREE_OPERAND (off, 0);
5232 7751 : if (TREE_CODE (off) == SSA_NAME)
5233 : {
5234 7226 : gimple *def = SSA_NAME_DEF_STMT (off);
5235 : /* Look through widening conversion. */
5236 7226 : if (is_gimple_assign (def)
5237 7226 : && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
5238 : {
5239 0 : tree rhs1 = gimple_assign_rhs1 (def);
5240 0 : if (TREE_CODE (rhs1) == SSA_NAME
5241 0 : && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
5242 0 : && (TYPE_PRECISION (TREE_TYPE (off))
5243 0 : > TYPE_PRECISION (TREE_TYPE (rhs1))))
5244 0 : def = SSA_NAME_DEF_STMT (rhs1);
5245 : }
5246 7226 : if (is_gimple_call (def)
5247 7090 : && gimple_call_internal_p (def)
5248 14316 : && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE))
5249 : {
5250 7090 : tree arg = gimple_call_arg (def, 0);
5251 7090 : tree reft = TREE_TYPE (DR_REF (newdr));
5252 7090 : gcc_assert (TREE_CODE (arg) == SSA_NAME);
5253 7090 : arg = SSA_NAME_VAR (arg);
5254 7090 : if (arg == loop->simduid
5255 : /* For now. */
5256 7090 : && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step))
5257 : {
5258 7065 : DR_BASE_ADDRESS (newdr) = base_address;
5259 7065 : DR_OFFSET (newdr) = ssize_int (0);
5260 7065 : DR_STEP (newdr) = step;
5261 7065 : DR_OFFSET_ALIGNMENT (newdr) = BIGGEST_ALIGNMENT;
5262 7065 : DR_STEP_ALIGNMENT (newdr) = highest_pow2_factor (step);
5263 : /* Mark as simd-lane access. */
5264 7065 : tree arg2 = gimple_call_arg (def, 1);
5265 7065 : newdr->aux = (void *) (-1 - tree_to_uhwi (arg2));
5266 7065 : free_data_ref (dr);
5267 7065 : datarefs->safe_push (newdr);
5268 7065 : if (dataref_groups)
5269 0 : dataref_groups->safe_push (group_id);
5270 7065 : return opt_result::success ();
5271 : }
5272 : }
5273 : }
5274 : }
5275 686 : free_data_ref (newdr);
5276 : }
5277 :
5278 10682669 : datarefs->safe_push (dr);
5279 10682669 : if (dataref_groups)
5280 10093543 : dataref_groups->safe_push (group_id);
5281 10682669 : return opt_result::success ();
5282 29860378 : }
5283 :
5284 : /* Function vect_analyze_data_refs.
5285 :
5286 : Find all the data references in the loop or basic block.
5287 :
5288 : The general structure of the analysis of data refs in the vectorizer is as
5289 : follows:
5290 : 1- vect_analyze_data_refs(loop/bb): call
5291 : compute_data_dependences_for_loop/bb to find and analyze all data-refs
5292 : in the loop/bb and their dependences.
5293 : 2- vect_analyze_dependences(): apply dependence testing using ddrs.
5294 : 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
5295 : 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
5296 :
5297 : */
5298 :
5299 : opt_result
5300 2700239 : vect_analyze_data_refs (vec_info *vinfo, bool *fatal)
5301 : {
5302 2700239 : class loop *loop = NULL;
5303 2700239 : unsigned int i;
5304 2700239 : struct data_reference *dr;
5305 2700239 : tree scalar_type;
5306 :
5307 2700239 : DUMP_VECT_SCOPE ("vect_analyze_data_refs");
5308 :
5309 2700239 : if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
5310 509543 : loop = LOOP_VINFO_LOOP (loop_vinfo);
5311 :
5312 : /* Go through the data-refs, check that the analysis succeeded. Update
5313 : pointer from stmt_vec_info struct to DR and vectype. */
5314 :
5315 2700239 : vec<data_reference_p> datarefs = vinfo->shared->datarefs;
5316 17711137 : FOR_EACH_VEC_ELT (datarefs, i, dr)
5317 : {
5318 15082690 : enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
5319 :
5320 15082690 : gcc_assert (DR_REF (dr));
5321 15082690 : stmt_vec_info stmt_info = vinfo->lookup_stmt (DR_STMT (dr));
5322 15082690 : gcc_assert (!stmt_info->dr_aux.dr);
5323 15082690 : stmt_info->dr_aux.dr = dr;
5324 15082690 : stmt_info->dr_aux.stmt = stmt_info;
5325 :
5326 : /* Check that analysis of the data-ref succeeded. */
5327 15082690 : if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
5328 14965984 : || !DR_STEP (dr))
5329 : {
5330 233412 : bool maybe_gather
5331 116706 : = DR_IS_READ (dr)
5332 116706 : && !TREE_THIS_VOLATILE (DR_REF (dr));
5333 233412 : bool maybe_scatter
5334 : = DR_IS_WRITE (dr)
5335 116706 : && !TREE_THIS_VOLATILE (DR_REF (dr));
5336 :
5337 : /* If the target supports vector gather loads or scatter stores,
5338 : see whether they can be used. */
5339 116706 : if (is_a <loop_vec_info> (vinfo)
5340 116706 : && !nested_in_vect_loop_p (loop, stmt_info))
5341 : {
5342 113335 : if (maybe_gather || maybe_scatter)
5343 : {
5344 113335 : if (maybe_gather)
5345 : gatherscatter = GATHER;
5346 : else
5347 21715 : gatherscatter = SCATTER;
5348 : }
5349 : }
5350 :
5351 21715 : if (gatherscatter == SG_NONE)
5352 : {
5353 3371 : if (dump_enabled_p ())
5354 5 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5355 : "not vectorized: data ref analysis "
5356 : "failed %G", stmt_info->stmt);
5357 3371 : if (is_a <bb_vec_info> (vinfo))
5358 : {
5359 : /* In BB vectorization the ref can still participate
5360 : in dependence analysis; we just can't vectorize it. */
5361 3021 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5362 3021 : continue;
5363 : }
5364 350 : return opt_result::failure_at (stmt_info->stmt,
5365 : "not vectorized:"
5366 : " data ref analysis failed: %G",
5367 : stmt_info->stmt);
5368 : }
5369 : }
5370 :
5371 : /* See if this was detected as SIMD lane access. */
5372 15079319 : if (dr->aux == (void *)-1
5373 15079319 : || dr->aux == (void *)-2
5374 15070417 : || dr->aux == (void *)-3
5375 15069577 : || dr->aux == (void *)-4)
5376 : {
5377 10542 : if (nested_in_vect_loop_p (loop, stmt_info))
5378 0 : return opt_result::failure_at (stmt_info->stmt,
5379 : "not vectorized:"
5380 : " data ref analysis failed: %G",
5381 : stmt_info->stmt);
5382 10542 : STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)
5383 10542 : = -(uintptr_t) dr->aux;
5384 : }
5385 :
5386 15079319 : tree base = get_base_address (DR_REF (dr));
5387 15079319 : if (base && VAR_P (base) && DECL_NONALIASED (base))
5388 : {
5389 8864 : if (dump_enabled_p ())
5390 186 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5391 : "not vectorized: base object not addressable "
5392 : "for stmt: %G", stmt_info->stmt);
5393 8864 : if (is_a <bb_vec_info> (vinfo))
5394 : {
5395 : /* In BB vectorization the ref can still participate
5396 : in dependence analysis; we just can't vectorize it. */
5397 8864 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5398 8864 : continue;
5399 : }
5400 0 : return opt_result::failure_at (stmt_info->stmt,
5401 : "not vectorized: base object not"
5402 : " addressable for stmt: %G",
5403 : stmt_info->stmt);
5404 : }
5405 :
5406 15070455 : if (is_a <loop_vec_info> (vinfo)
5407 1159143 : && DR_STEP (dr)
5408 16116263 : && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
5409 : {
5410 43627 : if (nested_in_vect_loop_p (loop, stmt_info))
5411 372 : return opt_result::failure_at (stmt_info->stmt,
5412 : "not vectorized: "
5413 : "not suitable for strided load %G",
5414 : stmt_info->stmt);
5415 43255 : STMT_VINFO_STRIDED_P (stmt_info) = true;
5416 : }
5417 :
5418 : /* Update DR field in stmt_vec_info struct. */
5419 :
5420 : /* If the dataref is in an inner-loop of the loop that is considered
5421 : for vectorization, we also want to analyze the access relative to
5422 : the outer-loop (DR contains information only relative to the
5423 : inner-most enclosing loop). We do that by building a reference to the
5424 : first location accessed by the inner-loop, and analyze it relative to
5425 : the outer-loop. */
5426 15070083 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
5427 : {
5428 : /* Build a reference to the first location accessed by the
5429 : inner loop: *(BASE + INIT + OFFSET). By construction,
5430 : this address must be invariant in the inner loop, so we
5431 : can consider it as being used in the outer loop. */
5432 11882 : tree base = unshare_expr (DR_BASE_ADDRESS (dr));
5433 11882 : tree offset = unshare_expr (DR_OFFSET (dr));
5434 11882 : tree init = unshare_expr (DR_INIT (dr));
5435 11882 : tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
5436 : init, offset);
5437 11882 : tree init_addr = fold_build_pointer_plus (base, init_offset);
5438 11882 : tree init_ref = build_fold_indirect_ref (init_addr);
5439 :
5440 11882 : if (dump_enabled_p ())
5441 1222 : dump_printf_loc (MSG_NOTE, vect_location,
5442 : "analyze in outer loop: %T\n", init_ref);
5443 :
5444 11882 : opt_result res
5445 11882 : = dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
5446 11882 : init_ref, loop, stmt_info->stmt);
5447 11882 : if (!res)
5448 : /* dr_analyze_innermost already explained the failure. */
5449 161 : return res;
5450 :
5451 11721 : if (dump_enabled_p ())
5452 1218 : dump_printf_loc (MSG_NOTE, vect_location,
5453 : "\touter base_address: %T\n"
5454 : "\touter offset from base address: %T\n"
5455 : "\touter constant offset from base address: %T\n"
5456 : "\touter step: %T\n"
5457 : "\touter base alignment: %d\n\n"
5458 : "\touter base misalignment: %d\n"
5459 : "\touter offset alignment: %d\n"
5460 : "\touter step alignment: %d\n",
5461 : STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
5462 : STMT_VINFO_DR_OFFSET (stmt_info),
5463 : STMT_VINFO_DR_INIT (stmt_info),
5464 : STMT_VINFO_DR_STEP (stmt_info),
5465 : STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
5466 : STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
5467 : STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
5468 : STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
5469 : }
5470 :
5471 : /* Set vectype for STMT. */
5472 15069922 : scalar_type = TREE_TYPE (DR_REF (dr));
5473 15069922 : tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
5474 15069922 : if (!vectype)
5475 : {
5476 1792589 : if (dump_enabled_p ())
5477 : {
5478 2042 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5479 : "not vectorized: no vectype for stmt: %G",
5480 : stmt_info->stmt);
5481 2042 : dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
5482 2042 : dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
5483 : scalar_type);
5484 2042 : dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5485 : }
5486 :
5487 1792589 : if (is_a <bb_vec_info> (vinfo))
5488 : {
5489 : /* Having no vector type is fine; the ref can still participate
5490 : in dependence analysis; we just can't vectorize it. */
5491 1729195 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5492 1729195 : continue;
5493 : }
5494 63394 : if (fatal)
5495 63394 : *fatal = false;
5496 63394 : return opt_result::failure_at (stmt_info->stmt,
5497 : "not vectorized:"
5498 : " no vectype for stmt: %G"
5499 : " scalar_type: %T\n",
5500 : stmt_info->stmt, scalar_type);
5501 : }
5502 : else
5503 : {
5504 13277333 : if (dump_enabled_p ())
5505 82688 : dump_printf_loc (MSG_NOTE, vect_location,
5506 : "got vectype for stmt: %G%T\n",
5507 : stmt_info->stmt, vectype);
5508 : }
5509 :
5510 : /* Leave the BB vectorizer to pick the vector type later, based on
5511 : the final dataref group size and SLP node size. */
5512 13277333 : if (is_a <loop_vec_info> (vinfo))
5513 1095216 : STMT_VINFO_VECTYPE (stmt_info) = vectype;
5514 :
5515 13277333 : if (gatherscatter != SG_NONE)
5516 : {
5517 107619 : gather_scatter_info gs_info;
5518 107619 : if (!vect_check_gather_scatter (stmt_info, vectype,
5519 : as_a <loop_vec_info> (vinfo),
5520 : &gs_info)
5521 211220 : || !get_vectype_for_scalar_type (vinfo,
5522 103601 : TREE_TYPE (gs_info.offset)))
5523 : {
5524 7515 : if (fatal)
5525 7515 : *fatal = false;
5526 7515 : return opt_result::failure_at
5527 7883 : (stmt_info->stmt,
5528 : (gatherscatter == GATHER)
5529 : ? "not vectorized: not suitable for gather load %G"
5530 : : "not vectorized: not suitable for scatter store %G",
5531 : stmt_info->stmt);
5532 : }
5533 100104 : STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
5534 : }
5535 : }
5536 :
5537 : /* We used to stop processing and prune the list here. Verify we no
5538 : longer need to. */
5539 4204220 : gcc_assert (i == datarefs.length ());
5540 :
5541 2628447 : return opt_result::success ();
5542 : }
5543 :
5544 :
5545 : /* Function vect_get_new_vect_var.
5546 :
5547 : Returns a name for a new variable. The current naming scheme prepends
5548 : a prefix ("vect", "vectp", "stmp" or "mask", depending on the value
5549 : of VAR_KIND) and an underscore to NAME if provided, and otherwise
5550 : uses the bare prefix. */
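     : /* For example (editorial): VAR_KIND == vect_pointer_var with NAME
     :    "a" produces a variable named "vectp_a".  */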
5551 :
5552 : tree
5553 1933669 : vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
5554 : {
5555 1933669 : const char *prefix;
5556 1933669 : tree new_vect_var;
5557 :
5558 1933669 : switch (var_kind)
5559 : {
5560 : case vect_simple_var:
5561 : prefix = "vect";
5562 : break;
5563 22790 : case vect_scalar_var:
5564 22790 : prefix = "stmp";
5565 22790 : break;
5566 20095 : case vect_mask_var:
5567 20095 : prefix = "mask";
5568 20095 : break;
5569 1386958 : case vect_pointer_var:
5570 1386958 : prefix = "vectp";
5571 1386958 : break;
5572 0 : default:
5573 0 : gcc_unreachable ();
5574 : }
5575 :
5576 1933669 : if (name)
5577 : {
5578 1091400 : char* tmp = concat (prefix, "_", name, NULL);
5579 1091400 : new_vect_var = create_tmp_reg (type, tmp);
5580 1091400 : free (tmp);
5581 : }
5582 : else
5583 842269 : new_vect_var = create_tmp_reg (type, prefix);
5584 :
5585 1933669 : return new_vect_var;
5586 : }
5587 :
5588 : /* Like vect_get_new_vect_var but return an SSA name. */
5589 :
5590 : tree
5591 6539 : vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
5592 : {
5593 6539 : const char *prefix;
5594 6539 : tree new_vect_var;
5595 :
5596 6539 : switch (var_kind)
5597 : {
5598 : case vect_simple_var:
5599 : prefix = "vect";
5600 : break;
5601 312 : case vect_scalar_var:
5602 312 : prefix = "stmp";
5603 312 : break;
5604 0 : case vect_pointer_var:
5605 0 : prefix = "vectp";
5606 0 : break;
5607 0 : default:
5608 0 : gcc_unreachable ();
5609 : }
5610 :
5611 6539 : if (name)
5612 : {
5613 6062 : char* tmp = concat (prefix, "_", name, NULL);
5614 6062 : new_vect_var = make_temp_ssa_name (type, NULL, tmp);
5615 6062 : free (tmp);
5616 : }
5617 : else
5618 477 : new_vect_var = make_temp_ssa_name (type, NULL, prefix);
5619 :
5620 6539 : return new_vect_var;
5621 : }
5622 :
5623 : /* Duplicate points-to info on NAME from DR_INFO. */
5624 :
5625 : static void
5626 430080 : vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
5627 : {
5628 430080 : if (DR_PTR_INFO (dr_info->dr))
5629 : {
5630 288670 : duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr_info->dr));
5631 : /* DR_PTR_INFO is for a base SSA name, not including constant or
5632 : variable offsets in the ref so its alignment info does not apply. */
5633 288670 : mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
5634 : }
5635 141410 : else if (!SSA_NAME_PTR_INFO (name))
5636 : {
5637 141410 : tree base = get_base_address (dr_info->dr->ref);
5638 141410 : if (VAR_P (base)
5639 : || TREE_CODE (base) == PARM_DECL
5640 : || TREE_CODE (base) == RESULT_DECL)
5641 : {
5642 129557 : struct ptr_info_def *pi = get_ptr_info (name);
5643 129557 : pt_solution_set_var (&pi->pt, base);
5644 : }
5645 : }
5646 430080 : }
5647 :
5648 : /* Function vect_create_addr_base_for_vector_ref.
5649 :
5650 : Create an expression that computes the address of the first memory location
5651 : that will be accessed for a data reference.
5652 :
5653 : Input:
5654 : STMT_INFO: The statement containing the data reference.
5655 : NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
5656 : OFFSET: Optional. If supplied, it is added to the initial address.
5657 : LOOP: Specify relative to which loop-nest should the address be computed.
5658 : For example, when the dataref is in an inner-loop nested in an
5659 : outer-loop that is now being vectorized, LOOP can be either the
5660 : outer-loop, or the inner-loop. The first memory location accessed
5661 : by the following dataref ('in' points to short):
5662 :
5663 : for (i=0; i<N; i++)
5664 : for (j=0; j<M; j++)
5665 : s += in[i+j]
5666 :
5667 : is as follows:
5668 : if LOOP=i_loop: &in (relative to i_loop)
5669 : if LOOP=j_loop: &in+i*2B (relative to j_loop)
5670 :
5671 : Output:
5672 : 1. Return an SSA_NAME whose value is the address of the memory location of
5673 : the first vector of the data reference.
5674 : 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
5675 : these statement(s) which define the returned SSA_NAME.
5676 :
5677 : FORNOW: We are only handling array accesses with step 1. */
5678 :
5679 : tree
5680 693615 : vect_create_addr_base_for_vector_ref (vec_info *vinfo, stmt_vec_info stmt_info,
5681 : gimple_seq *new_stmt_list,
5682 : tree offset)
5683 : {
5684 693615 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
5685 693615 : struct data_reference *dr = dr_info->dr;
5686 693615 : const char *base_name;
5687 693615 : tree addr_base;
5688 693615 : tree dest;
5689 693615 : gimple_seq seq = NULL;
5690 693615 : tree vect_ptr_type;
5691 693615 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5692 693615 : innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
5693 :
5694 693615 : tree data_ref_base = unshare_expr (drb->base_address);
5695 693615 : tree base_offset = unshare_expr (get_dr_vinfo_offset (vinfo, dr_info, true));
5696 693615 : tree init = unshare_expr (drb->init);
5697 :
5698 693615 : if (loop_vinfo)
5699 127961 : base_name = get_name (data_ref_base);
5700 : else
5701 : {
5702 565654 : base_offset = ssize_int (0);
5703 565654 : init = ssize_int (0);
5704 565654 : base_name = get_name (DR_REF (dr));
5705 : }
5706 :
5707 : /* Create base_offset */
5708 693615 : base_offset = size_binop (PLUS_EXPR,
5709 : fold_convert (sizetype, base_offset),
5710 : fold_convert (sizetype, init));
5711 :
5712 693615 : if (offset)
5713 : {
5714 3113 : offset = fold_convert (sizetype, offset);
5715 3113 : base_offset = fold_build2 (PLUS_EXPR, sizetype,
5716 : base_offset, offset);
5717 : }
5718 :
5719 : /* base + base_offset */
5720 693615 : if (loop_vinfo)
5721 127961 : addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
5722 : else
5723 1131308 : addr_base = build1 (ADDR_EXPR,
5724 565654 : build_pointer_type (TREE_TYPE (DR_REF (dr))),
5725 : /* Strip zero offset components since we don't need
5726 : them and they can confuse late diagnostics if
5727 : we CSE them wrongly. See PR106904 for example. */
5728 : unshare_expr (strip_zero_offset_components
5729 : (DR_REF (dr))));
5730 :
5731 693615 : vect_ptr_type = build_pointer_type (TREE_TYPE (DR_REF (dr)));
5732 693615 : dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
5733 693615 : addr_base = force_gimple_operand (addr_base, &seq, true, dest);
5734 693615 : gimple_seq_add_seq (new_stmt_list, seq);
5735 :
5736 693615 : if (TREE_CODE (addr_base) == SSA_NAME
5737 : /* We should only duplicate pointer info to newly created SSA names. */
5738 700212 : && SSA_NAME_VAR (addr_base) == dest)
5739 : {
5740 174046 : gcc_assert (!SSA_NAME_PTR_INFO (addr_base));
5741 174046 : vect_duplicate_ssa_name_ptr_info (addr_base, dr_info);
5742 : }
5743 :
5744 693615 : if (dump_enabled_p ())
5745 25220 : dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
5746 :
5747 693615 : return addr_base;
5748 : }
5749 :
5750 :
5751 : /* Function vect_create_data_ref_ptr.
5752 :
5753 : Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
5754 : location accessed in the loop by STMT_INFO, along with the def-use update
5755 : chain to appropriately advance the pointer through the loop iterations.
5756 : Also set aliasing information for the pointer. This pointer is used by
5757 : the callers to this function to create a memory reference expression for
5758 : vector load/store access.
5759 :
5760 : Input:
5761 : 1. STMT_INFO: a stmt that references memory. Expected to be of the form
5762 : GIMPLE_ASSIGN <name, data-ref> or
5763 : GIMPLE_ASSIGN <data-ref, name>.
5764 : 2. AGGR_TYPE: the type of the reference, which should be either a vector
5765 : or an array.
5766 : 3. AT_LOOP: the loop where the vector memref is to be created.
5767 : 4. OFFSET (optional): a byte offset to be added to the initial address
5768 : accessed by the data-ref in STMT_INFO.
5769 : 5. GSI: location where the new stmts are to be placed if there is no loop
5770 : 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
5771 : pointing to the initial address.
5772 : 7. IV_STEP (optional, defaults to NULL): the amount that should be added
5773 : to the IV during each iteration of the loop. NULL says to move
5774 : by one copy of AGGR_TYPE up or down, depending on the step of the
5775 : data reference.
5776 :
5777 : Output:
5778 : 1. Declare a new ptr to vector_type, and have it point to the base of the
 5779              :       data reference (initial address accessed by the data reference).
 5780              :       For example, for a vector of type V8HI, the following code is generated:
5781 :
5782 : v8hi *ap;
5783 : ap = (v8hi *)initial_address;
5784 :
5785 : if OFFSET is not supplied:
5786 : initial_address = &a[init];
5787 : if OFFSET is supplied:
5788 : initial_address = &a[init] + OFFSET;
5791 :
5792 : Return the initial_address in INITIAL_ADDRESS.
5793 :
5794 : 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
5795 : update the pointer in each iteration of the loop.
5796 :
5797 : Return the increment stmt that updates the pointer in PTR_INCR.
5798 :
5799 : 3. Return the pointer. */
5800 :
5801 : tree
5802 693343 : vect_create_data_ref_ptr (vec_info *vinfo, stmt_vec_info stmt_info,
5803 : tree aggr_type, class loop *at_loop, tree offset,
5804 : tree *initial_address, gimple_stmt_iterator *gsi,
5805 : gimple **ptr_incr, bool only_init,
5806 : tree iv_step)
5807 : {
5808 693343 : const char *base_name;
5809 693343 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5810 693343 : class loop *loop = NULL;
5811 693343 : bool nested_in_vect_loop = false;
5812 693343 : class loop *containing_loop = NULL;
5813 693343 : tree aggr_ptr_type;
5814 693343 : tree aggr_ptr;
5815 693343 : tree new_temp;
5816 693343 : gimple_seq new_stmt_list = NULL;
5817 693343 : edge pe = NULL;
5818 693343 : basic_block new_bb;
5819 693343 : tree aggr_ptr_init;
5820 693343 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
5821 693343 : struct data_reference *dr = dr_info->dr;
5822 693343 : tree aptr;
5823 693343 : gimple_stmt_iterator incr_gsi;
5824 693343 : bool insert_after;
5825 693343 : tree indx_before_incr, indx_after_incr;
5826 693343 : gimple *incr;
5827 693343 : bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
5828 :
5829 693343 : gcc_assert (iv_step != NULL_TREE
5830 : || TREE_CODE (aggr_type) == ARRAY_TYPE
5831 : || TREE_CODE (aggr_type) == VECTOR_TYPE);
5832 :
5833 693343 : if (loop_vinfo)
5834 : {
5835 127689 : loop = LOOP_VINFO_LOOP (loop_vinfo);
5836 127689 : nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
5837 127689 : containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
5838 127689 : pe = loop_preheader_edge (loop);
5839 : }
5840 : else
5841 : {
5842 565654 : gcc_assert (bb_vinfo);
5843 565654 : only_init = true;
5844 565654 : *ptr_incr = NULL;
5845 : }
5846 :
 5847              :   /* Create an expression for the first address accessed by this data-ref
5848 : in LOOP. */
5849 693343 : base_name = get_name (DR_BASE_ADDRESS (dr));
5850 :
5851 693343 : if (dump_enabled_p ())
5852 : {
5853 25125 : tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
5854 25125 : dump_printf_loc (MSG_NOTE, vect_location,
5855 : "create %s-pointer variable to type: %T",
5856 25125 : get_tree_code_name (TREE_CODE (aggr_type)),
5857 : aggr_type);
5858 25125 : if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
5859 13474 : dump_printf (MSG_NOTE, " vectorizing an array ref: ");
5860 11651 : else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
5861 0 : dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
5862 11651 : else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
5863 1638 : dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
5864 : else
5865 10013 : dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
5866 25125 : dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
5867 : }
5868 :
5869 : /* (1) Create the new aggregate-pointer variable.
 5870              :      Vector and array types inherit the alias set of their component
 5871              :      type by default, so we need to use a ref-all pointer if the data
 5872              :      reference does not conflict with the created aggregate data
 5873              :      reference because it is not addressable.  */
5874 693343 : bool need_ref_all = false;
5875 693343 : if (!alias_sets_conflict_p (get_alias_set (aggr_type),
5876 : get_alias_set (DR_REF (dr))))
5877 : need_ref_all = true;
5878 : /* Likewise for any of the data references in the stmt group. */
5879 591857 : else if (DR_GROUP_SIZE (stmt_info) > 1)
5880 : {
5881 478731 : stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
5882 1337745 : do
5883 : {
5884 1337745 : struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
5885 1337745 : if (!alias_sets_conflict_p (get_alias_set (aggr_type),
5886 : get_alias_set (DR_REF (sdr))))
5887 : {
5888 : need_ref_all = true;
5889 : break;
5890 : }
5891 1336666 : sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
5892 : }
5893 1336666 : while (sinfo);
5894 : }
5895 693343 : aggr_ptr_type = build_pointer_type_for_mode (aggr_type, VOIDmode,
5896 : need_ref_all);
5897 693343 : aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
5898 :
5899 :
5900 : /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
5901 : vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
5902 : def-use update cycles for the pointer: one relative to the outer-loop
 5903              :      (LOOP), which is what step (3) below does.  The other is relative
 5904              :      to the inner-loop (which is the inner-most loop containing the dataref),
 5905              :      and this is done by step (4) below.
5906 :
5907 : When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
 5908              :      inner-most loop, and so step (3) works the same, and step (4) is
 5909              :      redundant.  Steps (2),(3) create the following:
5910 :
5911 : vp0 = &base_addr;
5912 : LOOP: vp1 = phi(vp0,vp2)
5913 : ...
5914 : ...
5915 : vp2 = vp1 + step
5916 : goto LOOP
5917 :
 5918              :      If there is an inner-loop nested in LOOP, then step (4) will also be
5919 : applied, and an additional update in the inner-loop will be created:
5920 :
5921 : vp0 = &base_addr;
5922 : LOOP: vp1 = phi(vp0,vp2)
5923 : ...
5924 : inner: vp3 = phi(vp1,vp4)
5925 : vp4 = vp3 + inner_step
5926 : if () goto inner
5927 : ...
5928 : vp2 = vp1 + step
5929 : if () goto LOOP */
5930 :
5931 : /* (2) Calculate the initial address of the aggregate-pointer, and set
5932 : the aggregate-pointer to point to it before the loop. */
5933 :
 5934              :   /* Create: &(base[init_val] + offset) in the loop preheader.  */
5935 :
5936 693343 : new_temp = vect_create_addr_base_for_vector_ref (vinfo,
5937 : stmt_info, &new_stmt_list,
5938 : offset);
5939 693343 : if (new_stmt_list)
5940 : {
5941 173917 : if (pe)
5942 : {
5943 54400 : new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
5944 54400 : gcc_assert (!new_bb);
5945 : }
5946 : else
5947 119517 : gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
5948 : }
5949 :
5950 693343 : *initial_address = new_temp;
5951 693343 : aggr_ptr_init = new_temp;
5952 :
5953 : /* (3) Handle the updating of the aggregate-pointer inside the loop.
5954 : This is needed when ONLY_INIT is false, and also when AT_LOOP is the
5955 : inner-loop nested in LOOP (during outer-loop vectorization). */
5956 :
5957 : /* No update in loop is required. */
5958 693343 : if (only_init && (!loop_vinfo || at_loop == loop))
5959 : aptr = aggr_ptr_init;
5960 : else
5961 : {
5962 : /* Accesses to invariant addresses should be handled specially
5963 : by the caller. */
5964 127681 : tree step = vect_dr_behavior (vinfo, dr_info)->step;
5965 127681 : gcc_assert (!integer_zerop (step));
5966 :
5967 127681 : if (iv_step == NULL_TREE)
5968 : {
5969 : /* The step of the aggregate pointer is the type size,
5970 : negated for downward accesses. */
5971 0 : iv_step = TYPE_SIZE_UNIT (aggr_type);
5972 0 : if (tree_int_cst_sgn (step) == -1)
5973 0 : iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
5974 : }
5975 :
5976 127681 : standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5977 :
5978 127681 : create_iv (aggr_ptr_init, PLUS_EXPR,
5979 : iv_step, aggr_ptr, loop, &incr_gsi, insert_after,
5980 : &indx_before_incr, &indx_after_incr);
5981 127681 : incr = gsi_stmt (incr_gsi);
5982 :
5983 : /* Copy the points-to information if it exists. */
5984 127681 : vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
5985 127681 : vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
5986 127681 : if (ptr_incr)
5987 127681 : *ptr_incr = incr;
5988 :
5989 127681 : aptr = indx_before_incr;
5990 : }
5991 :
5992 693343 : if (!nested_in_vect_loop || only_init)
5993 : return aptr;
5994 :
5995 :
5996 : /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
 5997              :      nested in LOOP, if one exists.  */
5998 :
5999 336 : gcc_assert (nested_in_vect_loop);
6000 336 : if (!only_init)
6001 : {
6002 336 : standard_iv_increment_position (containing_loop, &incr_gsi,
6003 : &insert_after);
6004 336 : create_iv (aptr, PLUS_EXPR, DR_STEP (dr),
6005 : aggr_ptr, containing_loop, &incr_gsi, insert_after,
6006 : &indx_before_incr, &indx_after_incr);
6007 336 : incr = gsi_stmt (incr_gsi);
6008 :
6009 : /* Copy the points-to information if it exists. */
6010 336 : vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
6011 336 : vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
6012 336 : if (ptr_incr)
6013 336 : *ptr_incr = incr;
6014 :
6015 336 : return indx_before_incr;
6016 : }
6017 : else
6018 : gcc_unreachable ();
6019 : }
6020 :
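/* Editorial sketch (not part of GCC): a scalar C model of the pointer
   induction variable that steps (2) and (3) above materialize.  vp1 plays
   the role of the loop-header phi result, and the bump by one copy of
   AGGR_TYPE is modeled as VF elements.  All names are invented for the
   illustration.  */

#include <stdio.h>

#define VF 4                        /* elements per "vector" */

static float a[16];

int
main (void)
{
  for (int i = 0; i < 16; i++)
    a[i] = (float) i;

  float *vp1 = &a[0];               /* vp0 = &base_addr */
  float sum = 0.0f;
  for (int i = 0; i < 16; i += VF)  /* LOOP: vp1 = phi (vp0, vp2) */
    {
      for (int k = 0; k < VF; k++)  /* one "vector" load */
        sum += vp1[k];
      vp1 += VF;                    /* vp2 = vp1 + step */
    }
  printf ("%f\n", sum);             /* 120.000000 */
  return 0;
}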
6021 :
6022 : /* Function bump_vector_ptr
6023 :
6024 : Increment a pointer (to a vector type) by vector-size. If requested,
 6025              :    i.e. if PTR_INCR is given, then also connect the new increment stmt
6026 : to the existing def-use update-chain of the pointer, by modifying
6027 : the PTR_INCR as illustrated below:
6028 :
6029 : The pointer def-use update-chain before this function:
6030 : DATAREF_PTR = phi (p_0, p_2)
6031 : ....
6032 : PTR_INCR: p_2 = DATAREF_PTR + step
6033 :
6034 : The pointer def-use update-chain after this function:
6035 : DATAREF_PTR = phi (p_0, p_2)
6036 : ....
6037 : NEW_DATAREF_PTR = DATAREF_PTR + BUMP
6038 : ....
6039 : PTR_INCR: p_2 = NEW_DATAREF_PTR + step
6040 :
6041 : Input:
6042 : DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
6043 : in the loop.
6044 : PTR_INCR - optional. The stmt that updates the pointer in each iteration of
6045 : the loop. The increment amount across iterations is expected
6046 : to be vector_size.
6047 : BSI - location where the new update stmt is to be placed.
6048 : STMT_INFO - the original scalar memory-access stmt that is being vectorized.
6049 : UPDATE - The offset by which to bump the pointer.
6050 :
6051 : Output: Return NEW_DATAREF_PTR as illustrated above.
6052 :
6053 : */
6054 :
6055 : tree
6056 239204 : bump_vector_ptr (vec_info *vinfo,
6057 : tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
6058 : stmt_vec_info stmt_info, tree update)
6059 : {
6060 239204 : struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
6061 239204 : gimple *incr_stmt;
6062 239204 : ssa_op_iter iter;
6063 239204 : use_operand_p use_p;
6064 239204 : tree new_dataref_ptr;
6065 :
6066 239204 : if (TREE_CODE (dataref_ptr) == SSA_NAME)
6067 111153 : new_dataref_ptr = copy_ssa_name (dataref_ptr);
6068 128051 : else if (is_gimple_min_invariant (dataref_ptr))
 6069              :     /* When possible avoid emitting a separate increment stmt that would
 6070              :        force the addressed object to become addressable.  */
6071 256102 : return build1 (ADDR_EXPR, TREE_TYPE (dataref_ptr),
6072 128051 : fold_build2 (MEM_REF,
6073 : TREE_TYPE (TREE_TYPE (dataref_ptr)),
6074 : dataref_ptr,
6075 128051 : fold_convert (ptr_type_node, update)));
6076 : else
6077 0 : new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
6078 111153 : incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
6079 : dataref_ptr, update);
6080 111153 : vect_finish_stmt_generation (vinfo, stmt_info, incr_stmt, gsi);
 6081              :   /* Fold the increment, avoiding excessive use-def chains of those,
 6082              :      leading to compile-time issues for passes until the next forwprop
 6083              :      pass, which would do this folding as well.  */
6084 111153 : gimple_stmt_iterator fold_gsi = gsi_for_stmt (incr_stmt);
6085 111153 : if (fold_stmt (&fold_gsi, follow_all_ssa_edges))
6086 : {
6087 72099 : incr_stmt = gsi_stmt (fold_gsi);
6088 72099 : update_stmt (incr_stmt);
6089 : }
6090 :
6091 : /* Copy the points-to information if it exists. */
6092 111153 : duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
6093 :
6094 111153 : if (!ptr_incr)
6095 : return new_dataref_ptr;
6096 :
6097 : /* Update the vector-pointer's cross-iteration increment. */
6098 112712 : FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
6099 : {
6100 56356 : tree use = USE_FROM_PTR (use_p);
6101 :
6102 56356 : if (use == dataref_ptr)
6103 56356 : SET_USE (use_p, new_dataref_ptr);
6104 : else
6105 0 : gcc_assert (operand_equal_p (use, update, 0));
6106 : }
6107 :
6108 : return new_dataref_ptr;
6109 : }
6110 :
6111 :
6112 : /* Copy memory reference info such as base/clique from the SRC reference
6113 : to the DEST MEM_REF. */
6114 :
6115 : void
6116 943547 : vect_copy_ref_info (tree dest, tree src)
6117 : {
6118 943547 : if (TREE_CODE (dest) != MEM_REF)
6119 : return;
6120 :
6121 : tree src_base = src;
6122 1892054 : while (handled_component_p (src_base))
6123 953166 : src_base = TREE_OPERAND (src_base, 0);
6124 938888 : if (TREE_CODE (src_base) != MEM_REF
6125 938888 : && TREE_CODE (src_base) != TARGET_MEM_REF)
6126 : return;
6127 :
6128 511907 : MR_DEPENDENCE_CLIQUE (dest) = MR_DEPENDENCE_CLIQUE (src_base);
6129 511907 : MR_DEPENDENCE_BASE (dest) = MR_DEPENDENCE_BASE (src_base);
6130 : }
6131 :
6132 :
6133 : /* Function vect_create_destination_var.
6134 :
 6135              :    Create a new temporary of type VECTYPE, or of SCALAR_DEST's type if
 6136              :    VECTYPE is NULL.  */
6136 :
6137 : tree
6138 529901 : vect_create_destination_var (tree scalar_dest, tree vectype)
6139 : {
6140 529901 : tree vec_dest;
6141 529901 : const char *name;
6142 529901 : char *new_name;
6143 529901 : tree type;
6144 529901 : enum vect_var_kind kind;
6145 :
6146 529901 : kind = vectype
6147 1037012 : ? VECTOR_BOOLEAN_TYPE_P (vectype)
6148 507111 : ? vect_mask_var
6149 : : vect_simple_var
6150 : : vect_scalar_var;
6151 22790 : type = vectype ? vectype : TREE_TYPE (scalar_dest);
6152 :
6153 529901 : gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
6154 :
6155 529901 : name = get_name (scalar_dest);
6156 529901 : if (name)
6157 189588 : new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
6158 : else
6159 340313 : new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
6160 529901 : vec_dest = vect_get_new_vect_var (type, kind, new_name);
6161 529901 : free (new_name);
6162 :
6163 529901 : return vec_dest;
6164 : }
6165 :
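/* Editorial sketch of the naming scheme used above: the vector destination
   borrows the scalar SSA name's base and version, e.g. a scalar SSA name
   with base "sum" and version 5 yields a vector variable whose base name
   is "sum_5".  The concrete values are invented for the illustration.  */

#include <stdio.h>

int
main (void)
{
  const char *name = "sum";         /* models get_name (scalar_dest) */
  unsigned version = 5;             /* models SSA_NAME_VERSION (scalar_dest) */
  char new_name[64];
  if (name)
    snprintf (new_name, sizeof new_name, "%s_%u", name, version);
  else
    snprintf (new_name, sizeof new_name, "_%u", version);
  printf ("%s\n", new_name);        /* sum_5 */
  return 0;
}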
6166 : /* Function vect_grouped_store_supported.
6167 :
6168 : Returns TRUE if interleave high and interleave low permutations
6169 : are supported, and FALSE otherwise. */
6170 :
6171 : bool
6172 2702 : vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
6173 : {
6174 2702 : machine_mode mode = TYPE_MODE (vectype);
6175 :
6176 : /* vect_permute_store_chain requires the group size to be equal to 3 or
6177 : be a power of two. */
6178 2702 : if (count != 3 && exact_log2 (count) == -1)
6179 : {
6180 552 : if (dump_enabled_p ())
6181 11 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6182 : "the size of the group of accesses"
 6183              :                          " is not a power of 2 or not equal to 3\n");
6184 552 : return false;
6185 : }
6186 :
6187 : /* Check that the permutation is supported. */
6188 2150 : if (VECTOR_MODE_P (mode))
6189 : {
6190 2150 : unsigned int i;
6191 2150 : if (count == 3)
6192 : {
6193 948 : unsigned int j0 = 0, j1 = 0, j2 = 0;
6194 948 : unsigned int i, j;
6195 :
6196 948 : unsigned int nelt;
6197 1896 : if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6198 : {
6199 : if (dump_enabled_p ())
6200 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6201 : "cannot handle groups of 3 stores for"
6202 : " variable-length vectors\n");
6203 : return false;
6204 : }
6205 :
6206 948 : vec_perm_builder sel (nelt, nelt, 1);
6207 948 : sel.quick_grow (nelt);
6208 948 : vec_perm_indices indices;
6209 3567 : for (j = 0; j < 3; j++)
6210 : {
6211 2694 : int nelt0 = ((3 - j) * nelt) % 3;
6212 2694 : int nelt1 = ((3 - j) * nelt + 1) % 3;
6213 2694 : int nelt2 = ((3 - j) * nelt + 2) % 3;
6214 9534 : for (i = 0; i < nelt; i++)
6215 : {
6216 6840 : if (3 * i + nelt0 < nelt)
6217 2318 : sel[3 * i + nelt0] = j0++;
6218 6840 : if (3 * i + nelt1 < nelt)
6219 2279 : sel[3 * i + nelt1] = nelt + j1++;
6220 6840 : if (3 * i + nelt2 < nelt)
6221 2243 : sel[3 * i + nelt2] = 0;
6222 : }
6223 2694 : indices.new_vector (sel, 2, nelt);
6224 2694 : if (!can_vec_perm_const_p (mode, mode, indices))
6225 : {
6226 66 : if (dump_enabled_p ())
6227 37 : dump_printf (MSG_MISSED_OPTIMIZATION,
6228 : "permutation op not supported by target.\n");
6229 66 : return false;
6230 : }
6231 :
6232 8892 : for (i = 0; i < nelt; i++)
6233 : {
6234 6264 : if (3 * i + nelt0 < nelt)
6235 2094 : sel[3 * i + nelt0] = 3 * i + nelt0;
6236 6264 : if (3 * i + nelt1 < nelt)
6237 2085 : sel[3 * i + nelt1] = 3 * i + nelt1;
6238 6264 : if (3 * i + nelt2 < nelt)
6239 2085 : sel[3 * i + nelt2] = nelt + j2++;
6240 : }
6241 2628 : indices.new_vector (sel, 2, nelt);
6242 2628 : if (!can_vec_perm_const_p (mode, mode, indices))
6243 : {
6244 9 : if (dump_enabled_p ())
6245 9 : dump_printf (MSG_MISSED_OPTIMIZATION,
6246 : "permutation op not supported by target.\n");
6247 9 : return false;
6248 : }
6249 : }
6250 : return true;
6251 948 : }
6252 : else
6253 : {
6254 : /* If length is not equal to 3 then only power of 2 is supported. */
6255 1202 : gcc_assert (pow2p_hwi (count));
6256 2404 : poly_uint64 nelt = GET_MODE_NUNITS (mode);
6257 :
6258 : /* The encoding has 2 interleaved stepped patterns. */
 6259         2404 :       if (!multiple_p (nelt, 2))
6260 1156 : return false;
6261 1202 : vec_perm_builder sel (nelt, 2, 3);
6262 1202 : sel.quick_grow (6);
6263 6010 : for (i = 0; i < 3; i++)
6264 : {
6265 3606 : sel[i * 2] = i;
6266 3606 : sel[i * 2 + 1] = i + nelt;
6267 : }
6268 1202 : vec_perm_indices indices (sel, 2, nelt);
6269 1202 : if (can_vec_perm_const_p (mode, mode, indices))
6270 : {
6271 8092 : for (i = 0; i < 6; i++)
6272 6936 : sel[i] += exact_div (nelt, 2);
6273 1156 : indices.new_vector (sel, 2, nelt);
6274 1156 : if (can_vec_perm_const_p (mode, mode, indices))
6275 1156 : return true;
6276 : }
6277 1202 : }
6278 : }
6279 :
6280 46 : if (dump_enabled_p ())
6281 3 : dump_printf (MSG_MISSED_OPTIMIZATION,
6282 : "permutation op not supported by target.\n");
6283 : return false;
6284 : }
6285 :
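/* Editorial sketch: the two permutations vect_grouped_store_supported
   queries for a power-of-two group, spelled out for NELT = 4.  The
   "interleave low" selector is { 0, nelt, 1, nelt+1, ... } and the
   "interleave high" selector is the same shifted by nelt/2, matching the
   stepped encoding built above.  All names are invented.  */

#include <stdio.h>

#define NELT 4

static void
apply_perm (const int *v0, const int *v1, const unsigned *sel, int *out)
{
  /* A two-input permute indexes the concatenation v0 ++ v1.  */
  for (unsigned i = 0; i < NELT; i++)
    out[i] = sel[i] < NELT ? v0[sel[i]] : v1[sel[i] - NELT];
}

int
main (void)
{
  int a[NELT] = { 10, 11, 12, 13 }, b[NELT] = { 20, 21, 22, 23 };
  unsigned lo[NELT], hi[NELT];
  for (unsigned i = 0; i < NELT / 2; i++)
    {
      lo[2 * i] = i;                /* interleave low:  { 0, 4, 1, 5 } */
      lo[2 * i + 1] = i + NELT;
      hi[2 * i] = i + NELT / 2;     /* interleave high: { 2, 6, 3, 7 } */
      hi[2 * i + 1] = i + NELT / 2 + NELT;
    }
  int out_lo[NELT], out_hi[NELT];
  apply_perm (a, b, lo, out_lo);    /* { 10, 20, 11, 21 } */
  apply_perm (a, b, hi, out_hi);    /* { 12, 22, 13, 23 } */
  for (unsigned i = 0; i < NELT; i++)
    printf ("%d %d\n", out_lo[i], out_hi[i]);
  return 0;
}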
 6286              : /* Return FN if vec_{,mask_,mask_len_}store_lanes is available for COUNT
 6287              :    vectors of type VECTYPE.  MASKED_P says whether the masked form is needed.  */
6288 :
6289 : internal_fn
6290 39683 : vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
6291 : bool masked_p)
6292 : {
6293 39683 : if (vect_lanes_optab_supported_p ("vec_mask_len_store_lanes",
6294 : vec_mask_len_store_lanes_optab, vectype,
6295 : count))
6296 : return IFN_MASK_LEN_STORE_LANES;
6297 39683 : else if (masked_p)
6298 : {
6299 159 : if (vect_lanes_optab_supported_p ("vec_mask_store_lanes",
6300 : vec_mask_store_lanes_optab, vectype,
6301 : count))
6302 : return IFN_MASK_STORE_LANES;
6303 : }
6304 : else
6305 : {
6306 39524 : if (vect_lanes_optab_supported_p ("vec_store_lanes",
6307 : vec_store_lanes_optab, vectype, count))
6308 : return IFN_STORE_LANES;
6309 : }
6310 : return IFN_LAST;
6311 : }
6312 :
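/* Editorial sketch of what a store-lanes operation (e.g. AArch64 ST3)
   does for COUNT = 3: three vectors of NELT elements are written to
   memory interleaved, turning struct-of-arrays registers into an
   array-of-structs memory layout.  Purely illustrative; names invented.  */

#include <stdio.h>

#define NELT 4
#define COUNT 3

static void
store_lanes (int mem[COUNT * NELT], int vecs[COUNT][NELT])
{
  for (int i = 0; i < NELT; i++)        /* element index */
    for (int j = 0; j < COUNT; j++)     /* lane (vector) index */
      mem[i * COUNT + j] = vecs[j][i];
}

int
main (void)
{
  int v[COUNT][NELT] = { { 0, 1, 2, 3 },       /* e.g. red channel   */
                         { 10, 11, 12, 13 },   /* e.g. green channel */
                         { 20, 21, 22, 23 } }; /* e.g. blue channel  */
  int mem[COUNT * NELT];
  store_lanes (mem, v);
  for (int i = 0; i < COUNT * NELT; i++)
    printf ("%d ", mem[i]);   /* 0 10 20 1 11 21 2 12 22 3 13 23 */
  printf ("\n");
  return 0;
}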
6313 :
6314 : /* Function vect_setup_realignment
6315 :
6316 : This function is called when vectorizing an unaligned load using
6317 : the dr_explicit_realign[_optimized] scheme.
6318 : This function generates the following code at the loop prolog:
6319 :
6320 : p = initial_addr;
6321 : x msq_init = *(floor(p)); # prolog load
6322 : realignment_token = call target_builtin;
6323 : loop:
6324 : x msq = phi (msq_init, ---)
6325 :
6326 : The stmts marked with x are generated only for the case of
6327 : dr_explicit_realign_optimized.
6328 :
6329 : The code above sets up a new (vector) pointer, pointing to the first
6330 : location accessed by STMT_INFO, and a "floor-aligned" load using that
6331 : pointer. It also generates code to compute the "realignment-token"
6332 : (if the relevant target hook was defined), and creates a phi-node at the
6333 : loop-header bb whose arguments are the result of the prolog-load (created
6334 : by this function) and the result of a load that takes place in the loop
6335 : (to be created by the caller to this function).
6336 :
6337 : For the case of dr_explicit_realign_optimized:
6338 : The caller to this function uses the phi-result (msq) to create the
6339 : realignment code inside the loop, and sets up the missing phi argument,
6340 : as follows:
6341 : loop:
6342 : msq = phi (msq_init, lsq)
6343 : lsq = *(floor(p')); # load in loop
6344 : result = realign_load (msq, lsq, realignment_token);
6345 :
6346 : For the case of dr_explicit_realign:
6347 : loop:
6348 : msq = *(floor(p)); # load in loop
6349 : p' = p + (VS-1);
6350 : lsq = *(floor(p')); # load in loop
6351 : result = realign_load (msq, lsq, realignment_token);
6352 :
6353 : Input:
6354 : STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
6355 : a memory location that may be unaligned.
6356 : BSI - place where new code is to be inserted.
6357 : ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
6358 : is used.
6359 :
6360 : Output:
6361 : REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
6362 : target hook, if defined.
6363 : Return value - the result of the loop-header phi node. */
6364 :
6365 : tree
6366 0 : vect_setup_realignment (vec_info *vinfo, stmt_vec_info stmt_info, tree vectype,
6367 : gimple_stmt_iterator *gsi, tree *realignment_token,
6368 : enum dr_alignment_support alignment_support_scheme,
6369 : tree init_addr,
6370 : class loop **at_loop)
6371 : {
6372 0 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6373 0 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
6374 0 : struct data_reference *dr = dr_info->dr;
6375 0 : class loop *loop = NULL;
6376 0 : edge pe = NULL;
6377 0 : tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
6378 0 : tree vec_dest;
6379 0 : gimple *inc;
6380 0 : tree ptr;
6381 0 : tree data_ref;
6382 0 : basic_block new_bb;
6383 0 : tree msq_init = NULL_TREE;
6384 0 : tree new_temp;
6385 0 : gphi *phi_stmt;
6386 0 : tree msq = NULL_TREE;
6387 0 : gimple_seq stmts = NULL;
6388 0 : bool compute_in_loop = false;
6389 0 : bool nested_in_vect_loop = false;
6390 0 : class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
6391 0 : class loop *loop_for_initial_load = NULL;
6392 :
6393 0 : if (loop_vinfo)
6394 : {
6395 0 : loop = LOOP_VINFO_LOOP (loop_vinfo);
6396 0 : nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
6397 : }
6398 :
6399 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign
6400 : || alignment_support_scheme == dr_explicit_realign_optimized);
6401 :
6402 : /* We need to generate three things:
6403 : 1. the misalignment computation
6404 : 2. the extra vector load (for the optimized realignment scheme).
6405 : 3. the phi node for the two vectors from which the realignment is
6406 : done (for the optimized realignment scheme). */
6407 :
6408 : /* 1. Determine where to generate the misalignment computation.
6409 :
6410 : If INIT_ADDR is NULL_TREE, this indicates that the misalignment
6411 : calculation will be generated by this function, outside the loop (in the
 6412              :      preheader).  Otherwise, INIT_ADDR has already been computed for us by the
6413 : caller, inside the loop.
6414 :
6415 : Background: If the misalignment remains fixed throughout the iterations of
6416 : the loop, then both realignment schemes are applicable, and also the
6417 : misalignment computation can be done outside LOOP. This is because we are
6418 : vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
6419 : are a multiple of VS (the Vector Size), and therefore the misalignment in
6420 : different vectorized LOOP iterations is always the same.
6421 : The problem arises only if the memory access is in an inner-loop nested
6422 : inside LOOP, which is now being vectorized using outer-loop vectorization.
6423 : This is the only case when the misalignment of the memory access may not
6424 : remain fixed throughout the iterations of the inner-loop (as explained in
6425 : detail in vect_supportable_dr_alignment). In this case, not only is the
6426 : optimized realignment scheme not applicable, but also the misalignment
6427 : computation (and generation of the realignment token that is passed to
6428 : REALIGN_LOAD) have to be done inside the loop.
6429 :
6430 : In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
6431 : or not, which in turn determines if the misalignment is computed inside
6432 : the inner-loop, or outside LOOP. */
6433 :
6434 0 : if (init_addr != NULL_TREE || !loop_vinfo)
6435 : {
6436 0 : compute_in_loop = true;
6437 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign);
6438 : }
6439 :
6440 :
6441 : /* 2. Determine where to generate the extra vector load.
6442 :
6443 : For the optimized realignment scheme, instead of generating two vector
6444 : loads in each iteration, we generate a single extra vector load in the
6445 : preheader of the loop, and in each iteration reuse the result of the
6446 : vector load from the previous iteration. In case the memory access is in
6447 : an inner-loop nested inside LOOP, which is now being vectorized using
6448 : outer-loop vectorization, we need to determine whether this initial vector
6449 : load should be generated at the preheader of the inner-loop, or can be
6450 : generated at the preheader of LOOP. If the memory access has no evolution
6451 : in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
6452 : to be generated inside LOOP (in the preheader of the inner-loop). */
6453 :
6454 0 : if (nested_in_vect_loop)
6455 : {
6456 0 : tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
6457 0 : bool invariant_in_outerloop =
6458 0 : (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
6459 0 : loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
6460 : }
6461 : else
6462 : loop_for_initial_load = loop;
6463 0 : if (at_loop)
6464 0 : *at_loop = loop_for_initial_load;
6465 :
6466 0 : tree vuse = NULL_TREE;
6467 0 : if (loop_for_initial_load)
6468 : {
6469 0 : pe = loop_preheader_edge (loop_for_initial_load);
6470 0 : if (gphi *vphi = get_virtual_phi (loop_for_initial_load->header))
6471 0 : vuse = PHI_ARG_DEF_FROM_EDGE (vphi, pe);
6472 : }
6473 0 : if (!vuse)
6474 0 : vuse = gimple_vuse (gsi_stmt (*gsi));
6475 :
6476 : /* 3. For the case of the optimized realignment, create the first vector
6477 : load at the loop preheader. */
6478 :
6479 0 : if (alignment_support_scheme == dr_explicit_realign_optimized)
6480 : {
6481 : /* Create msq_init = *(floor(p1)) in the loop preheader */
6482 0 : gassign *new_stmt;
6483 :
6484 0 : gcc_assert (!compute_in_loop);
6485 0 : vec_dest = vect_create_destination_var (scalar_dest, vectype);
6486 0 : ptr = vect_create_data_ref_ptr (vinfo, stmt_info, vectype,
6487 : loop_for_initial_load, NULL_TREE,
6488 : &init_addr, NULL, &inc, true);
6489 0 : if (TREE_CODE (ptr) == SSA_NAME)
6490 0 : new_temp = copy_ssa_name (ptr);
6491 : else
6492 0 : new_temp = make_ssa_name (TREE_TYPE (ptr));
6493 0 : poly_uint64 align = DR_TARGET_ALIGNMENT (dr_info);
6494 0 : tree type = TREE_TYPE (ptr);
6495 0 : new_stmt = gimple_build_assign
6496 0 : (new_temp, BIT_AND_EXPR, ptr,
6497 0 : fold_build2 (MINUS_EXPR, type,
6498 : build_int_cst (type, 0),
6499 : build_int_cst (type, align)));
6500 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6501 0 : gcc_assert (!new_bb);
6502 0 : data_ref
6503 0 : = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
6504 : build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
6505 0 : vect_copy_ref_info (data_ref, DR_REF (dr));
6506 0 : new_stmt = gimple_build_assign (vec_dest, data_ref);
6507 0 : new_temp = make_ssa_name (vec_dest, new_stmt);
6508 0 : gimple_assign_set_lhs (new_stmt, new_temp);
6509 0 : gimple_set_vuse (new_stmt, vuse);
6510 0 : if (pe)
6511 : {
6512 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6513 0 : gcc_assert (!new_bb);
6514 : }
6515 : else
6516 0 : gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6517 :
6518 0 : msq_init = gimple_assign_lhs (new_stmt);
6519 : }
6520 :
6521 : /* 4. Create realignment token using a target builtin, if available.
6522 : It is done either inside the containing loop, or before LOOP (as
6523 : determined above). */
6524 :
6525 0 : if (targetm.vectorize.builtin_mask_for_load)
6526 : {
6527 0 : gcall *new_stmt;
6528 0 : tree builtin_decl;
6529 :
 6530              :       /* Compute INIT_ADDR - the initial address accessed by this memref.  */
6531 0 : if (!init_addr)
6532 : {
6533 : /* Generate the INIT_ADDR computation outside LOOP. */
6534 0 : init_addr = vect_create_addr_base_for_vector_ref (vinfo,
6535 : stmt_info, &stmts,
6536 : NULL_TREE);
6537 0 : if (loop)
6538 : {
6539 0 : pe = loop_preheader_edge (loop);
6540 0 : new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6541 0 : gcc_assert (!new_bb);
6542 : }
6543 : else
6544 0 : gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
6545 : }
6546 :
6547 0 : builtin_decl = targetm.vectorize.builtin_mask_for_load ();
6548 0 : new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
6549 0 : vec_dest =
6550 0 : vect_create_destination_var (scalar_dest,
6551 : gimple_call_return_type (new_stmt));
6552 0 : new_temp = make_ssa_name (vec_dest, new_stmt);
6553 0 : gimple_call_set_lhs (new_stmt, new_temp);
6554 :
6555 0 : if (compute_in_loop)
6556 0 : gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6557 : else
6558 : {
6559 : /* Generate the misalignment computation outside LOOP. */
6560 0 : pe = loop_preheader_edge (loop);
6561 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6562 0 : gcc_assert (!new_bb);
6563 : }
6564 :
6565 0 : *realignment_token = gimple_call_lhs (new_stmt);
6566 :
6567 : /* The result of the CALL_EXPR to this builtin is determined from
6568 : the value of the parameter and no global variables are touched
6569 : which makes the builtin a "const" function. Requiring the
6570 : builtin to have the "const" attribute makes it unnecessary
6571 : to call mark_call_clobbered. */
6572 0 : gcc_assert (TREE_READONLY (builtin_decl));
6573 : }
6574 :
6575 0 : if (alignment_support_scheme == dr_explicit_realign)
6576 : return msq;
6577 :
6578 0 : gcc_assert (!compute_in_loop);
6579 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
6580 :
6581 :
6582 : /* 5. Create msq = phi <msq_init, lsq> in loop */
6583 :
6584 0 : pe = loop_preheader_edge (containing_loop);
6585 0 : vec_dest = vect_create_destination_var (scalar_dest, vectype);
6586 0 : msq = make_ssa_name (vec_dest);
6587 0 : phi_stmt = create_phi_node (msq, containing_loop->header);
6588 0 : add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
6589 :
6590 0 : return msq;
6591 : }
6592 :
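/* Editorial sketch of the REALIGN_LOAD idiom set up above: two aligned
   vector loads bracket the desired unaligned vector, and the realignment
   token (modeled here simply as the element offset OFS) selects the
   window out of their concatenation.  Names and the token encoding are
   invented; real targets encode the token as a permute mask.  */

#include <stdio.h>

#define VS 4   /* elements per vector */

static void
realign_load (const int *msq, const int *lsq, unsigned ofs, int *out)
{
  for (unsigned i = 0; i < VS; i++)
    out[i] = (i + ofs < VS) ? msq[i + ofs] : lsq[i + ofs - VS];
}

int
main (void)
{
  int a[12];
  for (int i = 0; i < 12; i++)
    a[i] = i;

  unsigned ofs = 3;                   /* misaligned access: want &a[3] */
  const int *p = &a[ofs - ofs % VS];  /* floor (p): &a[0] */
  int out[VS];
  realign_load (p, p + VS, ofs % VS, out);
  for (unsigned i = 0; i < VS; i++)
    printf ("%d ", out[i]);           /* 3 4 5 6 */
  printf ("\n");
  return 0;
}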
6593 :
6594 : /* Function vect_grouped_load_supported.
6595 :
6596 : COUNT is the size of the load group (the number of statements plus the
6597 : number of gaps). SINGLE_ELEMENT_P is true if there is actually
6598 : only one statement, with a gap of COUNT - 1.
6599 :
6600 : Returns true if a suitable permute exists. */
6601 :
6602 : bool
6603 1925 : vect_grouped_load_supported (tree vectype, bool single_element_p,
6604 : unsigned HOST_WIDE_INT count)
6605 : {
6606 1925 : machine_mode mode = TYPE_MODE (vectype);
6607 :
6608 : /* If this is single-element interleaving with an element distance
6609 : that leaves unused vector loads around punt - we at least create
 6610              :      that leaves unused vector loads around, punt - we at least create
6611 : see PR65518). */
6612 1925 : if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
6613 : {
6614 24 : if (dump_enabled_p ())
6615 3 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6616 : "single-element interleaving not supported "
6617 : "for not adjacent vector loads\n");
6618 24 : return false;
6619 : }
6620 :
6621 : /* vect_permute_load_chain requires the group size to be equal to 3 or
6622 : be a power of two. */
6623 1901 : if (count != 3 && exact_log2 (count) == -1)
6624 : {
6625 226 : if (dump_enabled_p ())
6626 14 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6627 : "the size of the group of accesses"
6628 : " is not a power of 2 or not equal to 3\n");
6629 226 : return false;
6630 : }
6631 :
6632 : /* Check that the permutation is supported. */
6633 1675 : if (VECTOR_MODE_P (mode))
6634 : {
6635 1675 : unsigned int i, j;
6636 1675 : if (count == 3)
6637 : {
6638 835 : unsigned int nelt;
6639 1670 : if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6640 : {
6641 : if (dump_enabled_p ())
6642 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6643 : "cannot handle groups of 3 loads for"
6644 : " variable-length vectors\n");
6645 : return false;
6646 : }
6647 :
6648 835 : vec_perm_builder sel (nelt, nelt, 1);
6649 835 : sel.quick_grow (nelt);
6650 835 : vec_perm_indices indices;
6651 835 : unsigned int k;
6652 3304 : for (k = 0; k < 3; k++)
6653 : {
6654 8825 : for (i = 0; i < nelt; i++)
6655 6344 : if (3 * i + k < 2 * nelt)
6656 4235 : sel[i] = 3 * i + k;
6657 : else
6658 2109 : sel[i] = 0;
6659 2481 : indices.new_vector (sel, 2, nelt);
6660 2481 : if (!can_vec_perm_const_p (mode, mode, indices))
6661 : {
6662 12 : if (dump_enabled_p ())
6663 4 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6664 : "shuffle of 3 loads is not supported by"
6665 : " target\n");
6666 12 : return false;
6667 : }
6668 8661 : for (i = 0, j = 0; i < nelt; i++)
6669 6192 : if (3 * i + k < 2 * nelt)
6670 4128 : sel[i] = i;
6671 : else
6672 2064 : sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
6673 2469 : indices.new_vector (sel, 2, nelt);
6674 2469 : if (!can_vec_perm_const_p (mode, mode, indices))
6675 : {
6676 0 : if (dump_enabled_p ())
6677 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6678 : "shuffle of 3 loads is not supported by"
6679 : " target\n");
6680 0 : return false;
6681 : }
6682 : }
6683 : return true;
6684 835 : }
6685 : else
6686 : {
6687 : /* If length is not equal to 3 then only power of 2 is supported. */
6688 840 : gcc_assert (pow2p_hwi (count));
6689 1680 : poly_uint64 nelt = GET_MODE_NUNITS (mode);
6690 :
6691 : /* The encoding has a single stepped pattern. */
6692 840 : vec_perm_builder sel (nelt, 1, 3);
6693 840 : sel.quick_grow (3);
6694 4200 : for (i = 0; i < 3; i++)
6695 2520 : sel[i] = i * 2;
6696 840 : vec_perm_indices indices (sel, 2, nelt);
6697 840 : if (can_vec_perm_const_p (mode, mode, indices))
6698 : {
6699 3348 : for (i = 0; i < 3; i++)
6700 2511 : sel[i] = i * 2 + 1;
6701 837 : indices.new_vector (sel, 2, nelt);
6702 837 : if (can_vec_perm_const_p (mode, mode, indices))
6703 837 : return true;
6704 : }
6705 840 : }
6706 : }
6707 :
6708 3 : if (dump_enabled_p ())
6709 2 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6710 : "extract even/odd not supported by target\n");
6711 : return false;
6712 : }
6713 :
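/* Editorial sketch: the extract-even / extract-odd permutations that
   vect_grouped_load_supported queries for a power-of-two group, spelled
   out for NELT = 4.  Two loaded vectors are de-interleaved by selecting
   { 0, 2, 4, 6 } and { 1, 3, 5, 7 } from their concatenation.  Names and
   data are invented for the illustration.  */

#include <stdio.h>

#define NELT 4

int
main (void)
{
  /* Two consecutive vector loads of interleaved { re, im } data.  */
  int v0[NELT] = { 0, 100, 1, 101 }, v1[NELT] = { 2, 102, 3, 103 };
  int even[NELT], odd[NELT];
  for (unsigned i = 0; i < NELT; i++)
    {
      unsigned e = 2 * i, o = 2 * i + 1;   /* sel[i] = i*2 (+1) */
      even[i] = e < NELT ? v0[e] : v1[e - NELT];
      odd[i] = o < NELT ? v0[o] : v1[o - NELT];
    }
  for (unsigned i = 0; i < NELT; i++)
    printf ("%d %d\n", even[i], odd[i]);   /* re: 0..3; im: 100..103 */
  return 0;
}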
 6714              : /* Return FN if vec_{,mask_,mask_len_}load_lanes is available for COUNT vectors
6715 : of type VECTYPE. MASKED_P says whether the masked form is needed.
6716 : If it is available and ELSVALS is nonzero store the possible else values
6717 : in the vector it points to. */
6718 :
6719 : internal_fn
6720 144045 : vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
6721 : bool masked_p, vec<int> *elsvals)
6722 : {
6723 144045 : if (vect_lanes_optab_supported_p ("vec_mask_len_load_lanes",
6724 : vec_mask_len_load_lanes_optab, vectype,
6725 : count, elsvals))
6726 : return IFN_MASK_LEN_LOAD_LANES;
6727 144045 : else if (masked_p)
6728 : {
6729 30 : if (vect_lanes_optab_supported_p ("vec_mask_load_lanes",
6730 : vec_mask_load_lanes_optab, vectype,
6731 : count, elsvals))
6732 : return IFN_MASK_LOAD_LANES;
6733 : }
6734 : else
6735 : {
6736 144015 : if (vect_lanes_optab_supported_p ("vec_load_lanes", vec_load_lanes_optab,
6737 : vectype, count, elsvals))
6738 : return IFN_LOAD_LANES;
6739 : }
6740 : return IFN_LAST;
6741 : }
6742 :
 6743              : /* Function vect_can_force_dr_alignment_p.
 6744              :
 6745              :    Returns whether the alignment of a DECL can be forced to be aligned
 6746              :    on an ALIGNMENT-bit boundary.  */
6747 :
6748 : bool
6749 704759 : vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment)
6750 : {
6751 704759 : if (!VAR_P (decl))
6752 : return false;
6753 :
6754 210237 : if (decl_in_symtab_p (decl)
6755 210237 : && (!symtab_node::get (decl)
6756 22097 : || !symtab_node::get (decl)->can_increase_alignment_p ()))
6757 13432 : return false;
6758 :
6759 196805 : if (TREE_STATIC (decl))
6760 8665 : return (known_le (alignment,
6761 8665 : (unsigned HOST_WIDE_INT) MAX_OFILE_ALIGNMENT));
6762 : else
6763 188140 : return (known_le (alignment, (unsigned HOST_WIDE_INT) MAX_STACK_ALIGNMENT));
6764 : }
6765 :
6766 : /* Return whether the data reference DR_INFO is supported with respect to its
6767 : alignment.
 6768              :    MISALIGNMENT is the data reference's misalignment in bytes, or
 6769              :    DR_MISALIGNMENT_UNKNOWN when it is not known at compile time.
 6770              :    If IS_GATHER_SCATTER is true we are dealing with a
 6771              :    gather/scatter.  */
6772 :
6773 : enum dr_alignment_support
6774 2826535 : vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
6775 : tree vectype, int misalignment,
6776 : bool is_gather_scatter)
6777 : {
6778 2826535 : data_reference *dr = dr_info->dr;
6779 2826535 : stmt_vec_info stmt_info = dr_info->stmt;
6780 2826535 : machine_mode mode = TYPE_MODE (vectype);
6781 2826535 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6782 2826535 : class loop *vect_loop = NULL;
6783 2826535 : bool nested_in_vect_loop = false;
6784 :
6785 2826535 : if (misalignment == 0)
6786 : return dr_aligned;
6787 1739417 : else if (dr_safe_speculative_read_required (stmt_info))
6788 : return dr_unaligned_unsupported;
6789 :
6790 1353336 : if (loop_vinfo)
6791 : {
6792 946289 : vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
6793 946289 : nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
6794 : }
6795 :
6796 : /* Possibly unaligned access. */
6797 :
6798 : /* We can choose between using the implicit realignment scheme (generating
6799 : a misaligned_move stmt) and the explicit realignment scheme (generating
6800 : aligned loads with a REALIGN_LOAD). There are two variants to the
6801 : explicit realignment scheme: optimized, and unoptimized.
6802 : We can optimize the realignment only if the step between consecutive
6803 : vector loads is equal to the vector size. Since the vector memory
6804 : accesses advance in steps of VS (Vector Size) in the vectorized loop, it
6805 : is guaranteed that the misalignment amount remains the same throughout the
6806 : execution of the vectorized loop. Therefore, we can create the
6807 : "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
6808 : at the loop preheader.
6809 :
6810 : However, in the case of outer-loop vectorization, when vectorizing a
6811 : memory access in the inner-loop nested within the LOOP that is now being
6812 : vectorized, while it is guaranteed that the misalignment of the
6813 : vectorized memory access will remain the same in different outer-loop
6814 : iterations, it is *not* guaranteed that is will remain the same throughout
 6815              :    iterations, it is *not* guaranteed that it will remain the same throughout
6816 : with the original scalar step (and not in steps of VS). If the inner-loop
6817 : step happens to be a multiple of VS, then the misalignment remains fixed
6818 : and we can use the optimized realignment scheme. For example:
6819 :
6820 : for (i=0; i<N; i++)
6821 : for (j=0; j<M; j++)
6822 : s += a[i+j];
6823 :
6824 : When vectorizing the i-loop in the above example, the step between
6825 : consecutive vector loads is 1, and so the misalignment does not remain
6826 : fixed across the execution of the inner-loop, and the realignment cannot
6827 : be optimized (as illustrated in the following pseudo vectorized loop):
6828 :
6829 : for (i=0; i<N; i+=4)
6830 : for (j=0; j<M; j++){
6831 : vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
6832 : // when j is {0,1,2,3,4,5,6,7,...} respectively.
6833 : // (assuming that we start from an aligned address).
6834 : }
6835 :
6836 : We therefore have to use the unoptimized realignment scheme:
6837 :
6838 : for (i=0; i<N; i+=4)
6839 : for (j=k; j<M; j+=4)
6840 : vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
6841 : // that the misalignment of the initial address is
6842 : // 0).
6843 :
6844 : The loop can then be vectorized as follows:
6845 :
6846 : for (k=0; k<4; k++){
6847 : rt = get_realignment_token (&vp[k]);
6848 : for (i=0; i<N; i+=4){
6849 : v1 = vp[i+k];
6850 : for (j=k; j<M; j+=4){
6851 : v2 = vp[i+j+VS-1];
6852 : va = REALIGN_LOAD <v1,v2,rt>;
6853 : vs += va;
6854 : v1 = v2;
6855 : }
6856 : }
6857 : } */
6858 :
6859 1353336 : if (DR_IS_READ (dr) && !is_gather_scatter)
6860 : {
6861 609809 : if (can_implement_p (vec_realign_load_optab, mode)
6862 609809 : && (!targetm.vectorize.builtin_mask_for_load
6863 0 : || targetm.vectorize.builtin_mask_for_load ()))
6864 : {
6865 : /* If we are doing SLP then the accesses need not have the
6866 : same alignment, instead it depends on the SLP group size. */
6867 0 : if (loop_vinfo
6868 0 : && STMT_VINFO_GROUPED_ACCESS (stmt_info)
6869 0 : && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6870 0 : * (DR_GROUP_SIZE
6871 0 : (DR_GROUP_FIRST_ELEMENT (stmt_info))),
6872 0 : TYPE_VECTOR_SUBPARTS (vectype)))
6873 : ;
6874 0 : else if (!loop_vinfo
6875 0 : || (nested_in_vect_loop
6876 0 : && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
6877 0 : GET_MODE_SIZE (TYPE_MODE (vectype)))))
6878 0 : return dr_explicit_realign;
6879 : else
6880 0 : return dr_explicit_realign_optimized;
6881 : }
6882 : }
6883 :
6884 1353336 : bool is_packed = not_size_aligned (DR_REF (dr));
6885 1353336 : if (misalignment == DR_MISALIGNMENT_UNKNOWN
6886 1353336 : && is_gather_scatter)
6887 3234 : misalignment = (get_object_alignment (DR_REF (dr))
6888 3234 : % (GET_MODE_BITSIZE (GET_MODE_INNER (mode))))
6889 3234 : / BITS_PER_UNIT;
6890 1353336 : if (targetm.vectorize.support_vector_misalignment (mode, misalignment,
6891 : is_packed,
6892 : is_gather_scatter))
6893 : return dr_unaligned_supported;
6894 :
6895 : /* Unsupported. */
6896 : return dr_unaligned_unsupported;
6897 : }
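/* Editorial sketch backing the outer-loop discussion above: when the
   i-loop is vectorized with VF = 4 but the inner j-loop still advances by
   the scalar step, the element misalignment of &vp[i+j] cycles through
   { 0, 1, 2, 3, 0, ... } within the inner loop, so no single realignment
   token computed in the preheader can cover it.  Purely illustrative;
   names invented.  */

#include <stdio.h>

#define VF 4

int
main (void)
{
  int i = 0;                      /* one vectorized outer iteration */
  for (int j = 0; j < 8; j++)
    /* Misalignment, in elements, of the vector load at &vp[i + j],
       assuming vp itself is aligned.  */
    printf ("j=%d misalign=%d\n", j, (i + j) % VF);
  return 0;
}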