Branch data Line data Source code
1 : : /* Data References Analysis and Manipulation Utilities for Vectorization.
2 : : Copyright (C) 2003-2025 Free Software Foundation, Inc.
3 : : Contributed by Dorit Naishlos <dorit@il.ibm.com>
4 : : and Ira Rosen <irar@il.ibm.com>
5 : :
6 : : This file is part of GCC.
7 : :
8 : : GCC is free software; you can redistribute it and/or modify it under
9 : : the terms of the GNU General Public License as published by the Free
10 : : Software Foundation; either version 3, or (at your option) any later
11 : : version.
12 : :
13 : : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 : : WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 : : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 : : for more details.
17 : :
18 : : You should have received a copy of the GNU General Public License
19 : : along with GCC; see the file COPYING3. If not see
20 : : <http://www.gnu.org/licenses/>. */
21 : :
22 : : #define INCLUDE_ALGORITHM
23 : : #include "config.h"
24 : : #include "system.h"
25 : : #include "coretypes.h"
26 : : #include "backend.h"
27 : : #include "target.h"
28 : : #include "rtl.h"
29 : : #include "tree.h"
30 : : #include "gimple.h"
31 : : #include "predict.h"
32 : : #include "memmodel.h"
33 : : #include "tm_p.h"
34 : : #include "ssa.h"
35 : : #include "optabs-tree.h"
36 : : #include "cgraph.h"
37 : : #include "dumpfile.h"
38 : : #include "pretty-print.h"
39 : : #include "alias.h"
40 : : #include "fold-const.h"
41 : : #include "stor-layout.h"
42 : : #include "tree-eh.h"
43 : : #include "gimplify.h"
44 : : #include "gimple-iterator.h"
45 : : #include "gimplify-me.h"
46 : : #include "tree-ssa-loop-ivopts.h"
47 : : #include "tree-ssa-loop-manip.h"
48 : : #include "tree-ssa-loop.h"
49 : : #include "cfgloop.h"
50 : : #include "tree-scalar-evolution.h"
51 : : #include "tree-vectorizer.h"
52 : : #include "expr.h"
53 : : #include "builtins.h"
54 : : #include "tree-cfg.h"
55 : : #include "tree-hash-traits.h"
56 : : #include "vec-perm-indices.h"
57 : : #include "internal-fn.h"
58 : : #include "gimple-fold.h"
59 : : #include "optabs-query.h"
60 : :
61 : : /* Return true if load- or store-lanes optab OPTAB is implemented for
62 : : COUNT vectors of type VECTYPE. NAME is the name of OPTAB.
63 : :
64 : : If it is implemented and ELSVALS is nonzero store the possible else
65 : : values in the vector it points to. */
66 : :
67 : : static bool
68 : 310338 : vect_lanes_optab_supported_p (const char *name, convert_optab optab,
69 : : tree vectype, unsigned HOST_WIDE_INT count,
70 : : vec<int> *elsvals = nullptr)
71 : : {
72 : 310338 : machine_mode mode, array_mode;
73 : 310338 : bool limit_p;
74 : :
75 : 310338 : mode = TYPE_MODE (vectype);
76 : 310338 : if (!targetm.array_mode (mode, count).exists (&array_mode))
77 : : {
78 : 620676 : poly_uint64 bits = count * GET_MODE_BITSIZE (mode);
79 : 310338 : limit_p = !targetm.array_mode_supported_p (mode, count);
80 : 310338 : if (!int_mode_for_size (bits, limit_p).exists (&array_mode))
81 : : {
82 : 276020 : if (dump_enabled_p ())
83 : 11832 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
84 : : "no array mode for %s[%wu]\n",
85 : 11832 : GET_MODE_NAME (mode), count);
86 : 276020 : return false;
87 : : }
88 : : }
89 : :
90 : 34318 : enum insn_code icode;
91 : 34318 : if ((icode = convert_optab_handler (optab, array_mode, mode))
92 : : == CODE_FOR_nothing)
93 : : {
94 : 34318 : if (dump_enabled_p ())
95 : 3962 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
96 : : "cannot use %s<%s><%s>\n", name,
97 : 3962 : GET_MODE_NAME (array_mode), GET_MODE_NAME (mode));
98 : 34318 : return false;
99 : : }
100 : :
101 : 0 : if (dump_enabled_p ())
102 : 0 : dump_printf_loc (MSG_NOTE, vect_location,
103 : 0 : "can use %s<%s><%s>\n", name, GET_MODE_NAME (array_mode),
104 : 0 : GET_MODE_NAME (mode));
105 : :
106 : 0 : if (elsvals)
107 : 0 : get_supported_else_vals (icode,
108 : 0 : internal_fn_else_index (IFN_MASK_LEN_LOAD_LANES),
109 : : *elsvals);
110 : :
111 : : return true;
112 : : }
113 : :
114 : : /* Helper function to identify a simd clone call. If this is a call to a
115 : : function with simd clones then return the corresponding cgraph_node,
116 : : otherwise return NULL. */
117 : :
118 : : static cgraph_node*
119 : 781779 : simd_clone_call_p (gimple *stmt)
120 : : {
121 : 845822 : gcall *call = dyn_cast <gcall *> (stmt);
122 : 65597 : if (!call)
123 : : return NULL;
124 : :
125 : 65597 : tree fndecl = NULL_TREE;
126 : 65597 : if (gimple_call_internal_p (call, IFN_MASK_CALL))
127 : 191 : fndecl = TREE_OPERAND (gimple_call_arg (stmt, 0), 0);
128 : : else
129 : 65406 : fndecl = gimple_call_fndecl (stmt);
130 : :
131 : 65597 : if (fndecl == NULL_TREE)
132 : : return NULL;
133 : :
134 : 31875 : cgraph_node *node = cgraph_node::get (fndecl);
135 : 31875 : if (node && node->simd_clones != NULL)
136 : : return node;
137 : :
138 : : return NULL;
139 : : }
140 : :
141 : :
142 : :
143 : : /* Return the smallest scalar part of STMT_INFO.
144 : : This is used to determine the vectype of the stmt. We generally set the
145 : : vectype according to the type of the result (lhs). For stmts whose
146 : : result-type is different than the type of the arguments (e.g., demotion,
147 : : promotion), vectype will be reset appropriately (later). Note that we have
148 : : to visit the smallest datatype in this function, because that determines the
149 : : VF. If the smallest datatype in the loop is present only as the rhs of a
150 : : promotion operation - we'd miss it.
151 : : Such a case, where a variable of this datatype does not appear in the lhs
152 : : anywhere in the loop, can only occur if it's an invariant: e.g.:
153 : : 'int_x = (int) short_inv', which we'd expect to have been optimized away by
154 : : invariant motion. However, we cannot rely on invariant motion to always
155 : : take invariants out of the loop, and so in the case of promotion we also
156 : : have to check the rhs.
157 : : LHS_SIZE_UNIT and RHS_SIZE_UNIT contain the sizes of the corresponding
158 : : types. */
159 : :
160 : : tree
161 : 5023347 : vect_get_smallest_scalar_type (stmt_vec_info stmt_info, tree scalar_type)
162 : : {
163 : 5023347 : HOST_WIDE_INT lhs, rhs;
164 : :
165 : : /* During the analysis phase, this function is called on arbitrary
166 : : statements that might not have scalar results. */
167 : 5023347 : if (!tree_fits_uhwi_p (TYPE_SIZE_UNIT (scalar_type)))
168 : : return scalar_type;
169 : :
170 : 5023347 : lhs = rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
171 : :
172 : 5023347 : gassign *assign = dyn_cast <gassign *> (stmt_info->stmt);
173 : 5023347 : if (assign)
174 : : {
175 : 4241568 : scalar_type = TREE_TYPE (gimple_assign_lhs (assign));
176 : 4241568 : if (gimple_assign_cast_p (assign)
177 : 3886897 : || gimple_assign_rhs_code (assign) == DOT_PROD_EXPR
178 : 3886530 : || gimple_assign_rhs_code (assign) == WIDEN_SUM_EXPR
179 : 3886530 : || gimple_assign_rhs_code (assign) == SAD_EXPR
180 : 3886410 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_EXPR
181 : 3882593 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_PLUS_EXPR
182 : 3882593 : || gimple_assign_rhs_code (assign) == WIDEN_MULT_MINUS_EXPR
183 : 3882593 : || gimple_assign_rhs_code (assign) == WIDEN_LSHIFT_EXPR
184 : 8124161 : || gimple_assign_rhs_code (assign) == FLOAT_EXPR)
185 : : {
186 : 370064 : tree rhs_type = TREE_TYPE (gimple_assign_rhs1 (assign));
187 : :
188 : 370064 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
189 : 370064 : if (rhs < lhs)
190 : 5023347 : scalar_type = rhs_type;
191 : : }
192 : : }
193 : 781779 : else if (cgraph_node *node = simd_clone_call_p (stmt_info->stmt))
194 : : {
195 : 1554 : auto clone = node->simd_clones->simdclone;
196 : 4763 : for (unsigned int i = 0; i < clone->nargs; ++i)
197 : : {
198 : 3209 : if (clone->args[i].arg_type == SIMD_CLONE_ARG_TYPE_VECTOR)
199 : : {
200 : 1830 : tree arg_scalar_type = TREE_TYPE (clone->args[i].vector_type);
201 : 1830 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (arg_scalar_type));
202 : 1830 : if (rhs < lhs)
203 : : {
204 : 3209 : scalar_type = arg_scalar_type;
205 : 3209 : lhs = rhs;
206 : : }
207 : : }
208 : : }
209 : : }
210 : 780225 : else if (gcall *call = dyn_cast <gcall *> (stmt_info->stmt))
211 : : {
212 : 64043 : unsigned int i = 0;
213 : 64043 : if (gimple_call_internal_p (call))
214 : : {
215 : 31431 : internal_fn ifn = gimple_call_internal_fn (call);
216 : 31431 : if (internal_load_fn_p (ifn))
217 : : /* For loads the LHS type does the trick. */
218 : : i = ~0U;
219 : 27997 : else if (internal_store_fn_p (ifn))
220 : : {
221 : : /* For stores use the tyep of the stored value. */
222 : 1502 : i = internal_fn_stored_value_index (ifn);
223 : 1502 : scalar_type = TREE_TYPE (gimple_call_arg (call, i));
224 : 1502 : i = ~0U;
225 : : }
226 : 26495 : else if (internal_fn_mask_index (ifn) == 0)
227 : 5090 : i = 1;
228 : : }
229 : 64043 : if (i < gimple_call_num_args (call))
230 : : {
231 : 54578 : tree rhs_type = TREE_TYPE (gimple_call_arg (call, i));
232 : 54578 : if (tree_fits_uhwi_p (TYPE_SIZE_UNIT (rhs_type)))
233 : : {
234 : 54578 : rhs = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (rhs_type));
235 : 54578 : if (rhs < lhs)
236 : 5023347 : scalar_type = rhs_type;
237 : : }
238 : : }
239 : : }
240 : :
241 : : return scalar_type;
242 : : }
243 : :
244 : :
245 : : /* Insert DDR into LOOP_VINFO list of ddrs that may alias and need to be
246 : : tested at run-time. Return TRUE if DDR was successfully inserted.
247 : : Return false if versioning is not supported. */
248 : :
249 : : static opt_result
250 : 132422 : vect_mark_for_runtime_alias_test (ddr_p ddr, loop_vec_info loop_vinfo)
251 : : {
252 : 132422 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
253 : :
254 : 132422 : if ((unsigned) param_vect_max_version_for_alias_checks == 0)
255 : 54 : return opt_result::failure_at (vect_location,
256 : : "will not create alias checks, as"
257 : : " --param vect-max-version-for-alias-checks"
258 : : " == 0\n");
259 : :
260 : 132368 : opt_result res
261 : 132368 : = runtime_alias_check_p (ddr, loop,
262 : 132368 : optimize_loop_nest_for_speed_p (loop));
263 : 132368 : if (!res)
264 : 125 : return res;
265 : :
266 : 132243 : LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo).safe_push (ddr);
267 : 132243 : return opt_result::success ();
268 : : }
269 : :
270 : : /* Record that loop LOOP_VINFO needs to check that VALUE is nonzero. */
271 : :
272 : : static void
273 : 1214 : vect_check_nonzero_value (loop_vec_info loop_vinfo, tree value)
274 : : {
275 : 1214 : const vec<tree> &checks = LOOP_VINFO_CHECK_NONZERO (loop_vinfo);
276 : 1865 : for (unsigned int i = 0; i < checks.length(); ++i)
277 : 657 : if (checks[i] == value)
278 : : return;
279 : :
280 : 1208 : if (dump_enabled_p ())
281 : 428 : dump_printf_loc (MSG_NOTE, vect_location,
282 : : "need run-time check that %T is nonzero\n",
283 : : value);
284 : 1208 : LOOP_VINFO_CHECK_NONZERO (loop_vinfo).safe_push (value);
285 : : }
286 : :
287 : : /* Return true if we know that the order of vectorized DR_INFO_A and
288 : : vectorized DR_INFO_B will be the same as the order of DR_INFO_A and
289 : : DR_INFO_B. At least one of the accesses is a write. */
290 : :
291 : : static bool
292 : 108658 : vect_preserves_scalar_order_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b)
293 : : {
294 : 108658 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
295 : 108658 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
296 : :
297 : : /* Single statements are always kept in their original order. */
298 : 108658 : if (!STMT_VINFO_GROUPED_ACCESS (stmtinfo_a)
299 : 176075 : && !STMT_VINFO_GROUPED_ACCESS (stmtinfo_b))
300 : : return true;
301 : :
302 : : /* If there is a loop invariant read involved we might vectorize it in
303 : : the prologue, breaking scalar oder with respect to the in-loop store. */
304 : 21155 : if ((DR_IS_READ (dr_info_a->dr) && integer_zerop (DR_STEP (dr_info_a->dr)))
305 : 66272 : || (DR_IS_READ (dr_info_b->dr) && integer_zerop (DR_STEP (dr_info_b->dr))))
306 : 1222 : return false;
307 : :
308 : : /* STMT_A and STMT_B belong to overlapping groups. All loads are
309 : : emitted at the position of the first scalar load.
310 : : Stores in a group are emitted at the position of the last scalar store.
311 : : Compute that position and check whether the resulting order matches
312 : : the current one. */
313 : 44821 : stmt_vec_info il_a = DR_GROUP_FIRST_ELEMENT (stmtinfo_a);
314 : 44821 : if (il_a)
315 : : {
316 : 40945 : if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_a)))
317 : 161867 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
318 : 140526 : s = DR_GROUP_NEXT_ELEMENT (s))
319 : 140526 : il_a = get_later_stmt (il_a, s);
320 : : else /* DR_IS_READ */
321 : 78693 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_a); s;
322 : 59089 : s = DR_GROUP_NEXT_ELEMENT (s))
323 : 59089 : if (get_later_stmt (il_a, s) == il_a)
324 : 1574 : il_a = s;
325 : : }
326 : : else
327 : : il_a = stmtinfo_a;
328 : 44821 : stmt_vec_info il_b = DR_GROUP_FIRST_ELEMENT (stmtinfo_b);
329 : 44821 : if (il_b)
330 : : {
331 : 39943 : if (DR_IS_WRITE (STMT_VINFO_DATA_REF (stmtinfo_b)))
332 : 210656 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
333 : 179028 : s = DR_GROUP_NEXT_ELEMENT (s))
334 : 179028 : il_b = get_later_stmt (il_b, s);
335 : : else /* DR_IS_READ */
336 : 38819 : for (stmt_vec_info s = DR_GROUP_NEXT_ELEMENT (il_b); s;
337 : 30504 : s = DR_GROUP_NEXT_ELEMENT (s))
338 : 30504 : if (get_later_stmt (il_b, s) == il_b)
339 : 153 : il_b = s;
340 : : }
341 : : else
342 : : il_b = stmtinfo_b;
343 : 44821 : bool a_after_b = (get_later_stmt (stmtinfo_a, stmtinfo_b) == stmtinfo_a);
344 : 44821 : return (get_later_stmt (il_a, il_b) == il_a) == a_after_b;
345 : : }
346 : :
347 : : /* A subroutine of vect_analyze_data_ref_dependence. Handle
348 : : DDR_COULD_BE_INDEPENDENT_P ddr DDR that has a known set of dependence
349 : : distances. These distances are conservatively correct but they don't
350 : : reflect a guaranteed dependence.
351 : :
352 : : Return true if this function does all the work necessary to avoid
353 : : an alias or false if the caller should use the dependence distances
354 : : to limit the vectorization factor in the usual way. LOOP_DEPTH is
355 : : the depth of the loop described by LOOP_VINFO and the other arguments
356 : : are as for vect_analyze_data_ref_dependence. */
357 : :
358 : : static bool
359 : 7887 : vect_analyze_possibly_independent_ddr (data_dependence_relation *ddr,
360 : : loop_vec_info loop_vinfo,
361 : : int loop_depth, unsigned int *max_vf)
362 : : {
363 : 7887 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
364 : 31566 : for (lambda_vector &dist_v : DDR_DIST_VECTS (ddr))
365 : : {
366 : 15624 : int dist = dist_v[loop_depth];
367 : 15624 : if (dist != 0 && !(dist > 0 && DDR_REVERSED_P (ddr)))
368 : : {
369 : : /* If the user asserted safelen >= DIST consecutive iterations
370 : : can be executed concurrently, assume independence.
371 : :
372 : : ??? An alternative would be to add the alias check even
373 : : in this case, and vectorize the fallback loop with the
374 : : maximum VF set to safelen. However, if the user has
375 : : explicitly given a length, it's less likely that that
376 : : would be a win. */
377 : 7751 : if (loop->safelen >= 2 && abs_hwi (dist) <= loop->safelen)
378 : : {
379 : 32 : if ((unsigned int) loop->safelen < *max_vf)
380 : 2 : *max_vf = loop->safelen;
381 : 32 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
382 : 32 : continue;
383 : : }
384 : :
385 : : /* For dependence distances of 2 or more, we have the option
386 : : of limiting VF or checking for an alias at runtime.
387 : : Prefer to check at runtime if we can, to avoid limiting
388 : : the VF unnecessarily when the bases are in fact independent.
389 : :
390 : : Note that the alias checks will be removed if the VF ends up
391 : : being small enough. */
392 : 7719 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
393 : 7719 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
394 : 7719 : return (!STMT_VINFO_GATHER_SCATTER_P (dr_info_a->stmt)
395 : 7719 : && !STMT_VINFO_GATHER_SCATTER_P (dr_info_b->stmt)
396 : 15446 : && vect_mark_for_runtime_alias_test (ddr, loop_vinfo));
397 : : }
398 : : }
399 : : return true;
400 : : }
401 : :
402 : :
403 : : /* Function vect_analyze_data_ref_dependence.
404 : :
405 : : FIXME: I needed to change the sense of the returned flag.
406 : :
407 : : Return FALSE if there (might) exist a dependence between a memory-reference
408 : : DRA and a memory-reference DRB. When versioning for alias may check a
409 : : dependence at run-time, return TRUE. Adjust *MAX_VF according to
410 : : the data dependence. */
411 : :
412 : : static opt_result
413 : 972534 : vect_analyze_data_ref_dependence (struct data_dependence_relation *ddr,
414 : : loop_vec_info loop_vinfo,
415 : : unsigned int *max_vf)
416 : : {
417 : 972534 : unsigned int i;
418 : 972534 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
419 : 972534 : struct data_reference *dra = DDR_A (ddr);
420 : 972534 : struct data_reference *drb = DDR_B (ddr);
421 : 972534 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (dra);
422 : 972534 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (drb);
423 : 972534 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
424 : 972534 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
425 : 972534 : lambda_vector dist_v;
426 : 972534 : unsigned int loop_depth;
427 : :
428 : : /* If user asserted safelen consecutive iterations can be
429 : : executed concurrently, assume independence. */
430 : 1112529 : auto apply_safelen = [&]()
431 : : {
432 : 139995 : if (loop->safelen >= 2)
433 : : {
434 : 7457 : if ((unsigned int) loop->safelen < *max_vf)
435 : 1896 : *max_vf = loop->safelen;
436 : 7457 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = false;
437 : 7457 : return true;
438 : : }
439 : : return false;
440 : 972534 : };
441 : :
442 : : /* In loop analysis all data references should be vectorizable. */
443 : 972534 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
444 : 972534 : || !STMT_VINFO_VECTORIZABLE (stmtinfo_b))
445 : 0 : gcc_unreachable ();
446 : :
447 : : /* Independent data accesses. */
448 : 972534 : if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
449 : 769151 : return opt_result::success ();
450 : :
451 : 203383 : if (dra == drb
452 : 203383 : || (DR_IS_READ (dra) && DR_IS_READ (drb)))
453 : 0 : return opt_result::success ();
454 : :
455 : : /* We do not have to consider dependences between accesses that belong
456 : : to the same group, unless the stride could be smaller than the
457 : : group size. */
458 : 203383 : if (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
459 : 90789 : && (DR_GROUP_FIRST_ELEMENT (stmtinfo_a)
460 : 90789 : == DR_GROUP_FIRST_ELEMENT (stmtinfo_b))
461 : 217072 : && !STMT_VINFO_STRIDED_P (stmtinfo_a))
462 : 1841 : return opt_result::success ();
463 : :
464 : : /* Even if we have an anti-dependence then, as the vectorized loop covers at
465 : : least two scalar iterations, there is always also a true dependence.
466 : : As the vectorizer does not re-order loads and stores we can ignore
467 : : the anti-dependence if TBAA can disambiguate both DRs similar to the
468 : : case with known negative distance anti-dependences (positive
469 : : distance anti-dependences would violate TBAA constraints). */
470 : 98328 : if (((DR_IS_READ (dra) && DR_IS_WRITE (drb))
471 : 103214 : || (DR_IS_WRITE (dra) && DR_IS_READ (drb)))
472 : 314190 : && !alias_sets_conflict_p (get_alias_set (DR_REF (dra)),
473 : : get_alias_set (DR_REF (drb))))
474 : 5378 : return opt_result::success ();
475 : :
476 : 196164 : if (STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a)
477 : 188816 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
478 : : {
479 : 9229 : if (apply_safelen ())
480 : 1394 : return opt_result::success ();
481 : :
482 : 7835 : return opt_result::failure_at
483 : 7835 : (stmtinfo_a->stmt,
484 : : "possible alias involving gather/scatter between %T and %T\n",
485 : : DR_REF (dra), DR_REF (drb));
486 : : }
487 : :
488 : : /* Unknown data dependence. */
489 : 186935 : if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
490 : : {
491 : 130267 : if (apply_safelen ())
492 : 6063 : return opt_result::success ();
493 : :
494 : 124204 : if (dump_enabled_p ())
495 : 7041 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
496 : : "versioning for alias required: "
497 : : "can't determine dependence between %T and %T\n",
498 : : DR_REF (dra), DR_REF (drb));
499 : :
500 : : /* Add to list of ddrs that need to be tested at run-time. */
501 : 124204 : return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
502 : : }
503 : :
504 : : /* Known data dependence. */
505 : 56668 : if (DDR_NUM_DIST_VECTS (ddr) == 0)
506 : : {
507 : 499 : if (apply_safelen ())
508 : 0 : return opt_result::success ();
509 : :
510 : 499 : if (dump_enabled_p ())
511 : 114 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, stmtinfo_a->stmt,
512 : : "versioning for alias required: "
513 : : "bad dist vector for %T and %T\n",
514 : : DR_REF (dra), DR_REF (drb));
515 : : /* Add to list of ddrs that need to be tested at run-time. */
516 : 499 : return vect_mark_for_runtime_alias_test (ddr, loop_vinfo);
517 : : }
518 : :
519 : 56169 : loop_depth = index_in_loop_nest (loop->num, DDR_LOOP_NEST (ddr));
520 : :
521 : 56169 : if (DDR_COULD_BE_INDEPENDENT_P (ddr)
522 : 56169 : && vect_analyze_possibly_independent_ddr (ddr, loop_vinfo,
523 : : loop_depth, max_vf))
524 : 7879 : return opt_result::success ();
525 : :
526 : 90655 : FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
527 : : {
528 : 48310 : int dist = dist_v[loop_depth];
529 : :
530 : 48310 : if (dump_enabled_p ())
531 : 3644 : dump_printf_loc (MSG_NOTE, vect_location,
532 : : "dependence distance = %d.\n", dist);
533 : :
534 : 48310 : if (dist == 0)
535 : : {
536 : 38763 : if (dump_enabled_p ())
537 : 3256 : dump_printf_loc (MSG_NOTE, vect_location,
538 : : "dependence distance == 0 between %T and %T\n",
539 : : DR_REF (dra), DR_REF (drb));
540 : :
541 : : /* When we perform grouped accesses and perform implicit CSE
542 : : by detecting equal accesses and doing disambiguation with
543 : : runtime alias tests like for
544 : : .. = a[i];
545 : : .. = a[i+1];
546 : : a[i] = ..;
547 : : a[i+1] = ..;
548 : : *p = ..;
549 : : .. = a[i];
550 : : .. = a[i+1];
551 : : where we will end up loading { a[i], a[i+1] } once, make
552 : : sure that inserting group loads before the first load and
553 : : stores after the last store will do the right thing.
554 : : Similar for groups like
555 : : a[i] = ...;
556 : : ... = a[i];
557 : : a[i+1] = ...;
558 : : where loads from the group interleave with the store. */
559 : 38763 : if (!vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
560 : 0 : return opt_result::failure_at (stmtinfo_a->stmt,
561 : : "READ_WRITE dependence"
562 : : " in interleaving.\n");
563 : :
564 : 38763 : if (loop->safelen < 2)
565 : : {
566 : 35023 : tree indicator = dr_zero_step_indicator (dra);
567 : 35023 : if (!indicator || integer_zerop (indicator))
568 : 0 : return opt_result::failure_at (stmtinfo_a->stmt,
569 : : "access also has a zero step\n");
570 : 35023 : else if (TREE_CODE (indicator) != INTEGER_CST)
571 : 1214 : vect_check_nonzero_value (loop_vinfo, indicator);
572 : : }
573 : 38763 : continue;
574 : 38763 : }
575 : :
576 : 9547 : if (dist > 0 && DDR_REVERSED_P (ddr))
577 : : {
578 : : /* If DDR_REVERSED_P the order of the data-refs in DDR was
579 : : reversed (to make distance vector positive), and the actual
580 : : distance is negative. */
581 : 3209 : if (dump_enabled_p ())
582 : 105 : dump_printf_loc (MSG_NOTE, vect_location,
583 : : "dependence distance negative.\n");
584 : : /* When doing outer loop vectorization, we need to check if there is
585 : : a backward dependence at the inner loop level if the dependence
586 : : at the outer loop is reversed. See PR81740. */
587 : 3209 : if (nested_in_vect_loop_p (loop, stmtinfo_a)
588 : 3197 : || nested_in_vect_loop_p (loop, stmtinfo_b))
589 : : {
590 : 12 : unsigned inner_depth = index_in_loop_nest (loop->inner->num,
591 : 12 : DDR_LOOP_NEST (ddr));
592 : 12 : if (dist_v[inner_depth] < 0)
593 : 9 : return opt_result::failure_at (stmtinfo_a->stmt,
594 : : "not vectorized, dependence "
595 : : "between data-refs %T and %T\n",
596 : : DR_REF (dra), DR_REF (drb));
597 : : }
598 : : /* Record a negative dependence distance to later limit the
599 : : amount of stmt copying / unrolling we can perform.
600 : : Only need to handle read-after-write dependence. */
601 : 3200 : if (DR_IS_READ (drb)
602 : 76 : && (STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) == 0
603 : 12 : || STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) > (unsigned)dist))
604 : 76 : STMT_VINFO_MIN_NEG_DIST (stmtinfo_b) = dist;
605 : 3200 : continue;
606 : 3200 : }
607 : :
608 : 6338 : unsigned int abs_dist = abs (dist);
609 : 6338 : if (abs_dist >= 2 && abs_dist < *max_vf)
610 : : {
611 : : /* The dependence distance requires reduction of the maximal
612 : : vectorization factor. */
613 : 309 : *max_vf = abs_dist;
614 : 309 : if (dump_enabled_p ())
615 : 26 : dump_printf_loc (MSG_NOTE, vect_location,
616 : : "adjusting maximal vectorization factor to %i\n",
617 : : *max_vf);
618 : : }
619 : :
620 : 6338 : if (abs_dist >= *max_vf)
621 : : {
622 : : /* Dependence distance does not create dependence, as far as
623 : : vectorization is concerned, in this case. */
624 : 402 : if (dump_enabled_p ())
625 : 37 : dump_printf_loc (MSG_NOTE, vect_location,
626 : : "dependence distance >= VF.\n");
627 : 402 : continue;
628 : : }
629 : :
630 : 5936 : return opt_result::failure_at (stmtinfo_a->stmt,
631 : : "not vectorized, possible dependence "
632 : : "between data-refs %T and %T\n",
633 : : DR_REF (dra), DR_REF (drb));
634 : : }
635 : :
636 : 42345 : return opt_result::success ();
637 : : }
638 : :
639 : : /* Function vect_analyze_early_break_dependences.
640 : :
641 : : Examine all the data references in the loop and make sure that if we have
642 : : multiple exits that we are able to safely move stores such that they become
643 : : safe for vectorization. The function also calculates the place where to move
644 : : the instructions to and computes what the new vUSE chain should be.
645 : :
646 : : This works in tandem with the CFG that will be produced by
647 : : slpeel_tree_duplicate_loop_to_edge_cfg later on.
648 : :
649 : : This function tries to validate whether an early break vectorization
650 : : is possible for the current instruction sequence. Returns True i
651 : : possible, otherwise False.
652 : :
653 : : Requirements:
654 : : - Any memory access must be to a fixed size buffer.
655 : : - There must not be any loads and stores to the same object.
656 : : - Multiple loads are allowed as long as they don't alias.
657 : :
658 : : NOTE:
659 : : This implementation is very conservative. Any overlapping loads/stores
660 : : that take place before the early break statement gets rejected aside from
661 : : WAR dependencies.
662 : :
663 : : i.e.:
664 : :
665 : : a[i] = 8
666 : : c = a[i]
667 : : if (b[i])
668 : : ...
669 : :
670 : : is not allowed, but
671 : :
672 : : c = a[i]
673 : : a[i] = 8
674 : : if (b[i])
675 : : ...
676 : :
677 : : is which is the common case. */
678 : :
679 : : static opt_result
680 : 117790 : vect_analyze_early_break_dependences (loop_vec_info loop_vinfo)
681 : : {
682 : 117790 : DUMP_VECT_SCOPE ("vect_analyze_early_break_dependences");
683 : :
684 : : /* List of all load data references found during traversal. */
685 : 117790 : auto_vec<data_reference *> bases;
686 : 117790 : basic_block dest_bb = NULL;
687 : :
688 : 117790 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
689 : 117790 : class loop *loop_nest = loop_outer (loop);
690 : :
691 : 117790 : if (dump_enabled_p ())
692 : 1318 : dump_printf_loc (MSG_NOTE, vect_location,
693 : : "loop contains multiple exits, analyzing"
694 : : " statement dependencies.\n");
695 : :
696 : 117790 : if (LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo))
697 : 5346 : if (dump_enabled_p ())
698 : 159 : dump_printf_loc (MSG_NOTE, vect_location,
699 : : "alternate exit has been chosen as main exit.\n");
700 : :
701 : : /* Since we don't support general control flow, the location we'll move the
702 : : side-effects to is always the latch connected exit. When we support
703 : : general control flow we can do better but for now this is fine. Move
704 : : side-effects to the in-loop destination of the last early exit. For the
705 : : PEELED case we move the side-effects to the latch block as this is
706 : : guaranteed to be the last block to be executed when a vector iteration
707 : : finished. */
708 : 117790 : if (LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo))
709 : 5346 : dest_bb = loop->latch;
710 : : else
711 : 112444 : dest_bb = single_pred (loop->latch);
712 : :
713 : : /* We start looking from dest_bb, for the non-PEELED case we don't want to
714 : : move any stores already present, but we do want to read and validate the
715 : : loads. */
716 : 117790 : basic_block bb = dest_bb;
717 : :
718 : : /* We move stores across all loads to the beginning of dest_bb, so
719 : : the first block processed below doesn't need dependence checking. */
720 : 117790 : bool check_deps = false;
721 : :
722 : 434618 : do
723 : : {
724 : 276204 : gimple_stmt_iterator gsi = gsi_last_bb (bb);
725 : :
726 : : /* Now analyze all the remaining statements and try to determine which
727 : : instructions are allowed/needed to be moved. */
728 : 2149972 : while (!gsi_end_p (gsi))
729 : : {
730 : 1873921 : gimple *stmt = gsi_stmt (gsi);
731 : 1873921 : gsi_prev (&gsi);
732 : 1873921 : if (is_gimple_debug (stmt))
733 : 1653787 : continue;
734 : :
735 : 1003016 : stmt_vec_info stmt_vinfo
736 : 1003016 : = vect_stmt_to_vectorize (loop_vinfo->lookup_stmt (stmt));
737 : 1003016 : auto dr_ref = STMT_VINFO_DATA_REF (stmt_vinfo);
738 : 1003016 : if (!dr_ref)
739 : 771288 : continue;
740 : :
741 : : /* We know everything below dest_bb is safe since we know we
742 : : had a full vector iteration when reaching it. Either by
743 : : the loop entry / IV exit test being last or because this
744 : : is the loop latch itself. */
745 : 231728 : if (!check_deps)
746 : 11594 : continue;
747 : :
748 : : /* Check if vector accesses to the object will be within bounds.
749 : : must be a constant or assume loop will be versioned or niters
750 : : bounded by VF so accesses are within range. We only need to check
751 : : the reads since writes are moved to a safe place where if we get
752 : : there we know they are safe to perform. */
753 : 220134 : if (DR_IS_READ (dr_ref))
754 : : {
755 : 206682 : dr_set_safe_speculative_read_required (stmt_vinfo, true);
756 : 206682 : bool inbounds = ref_within_array_bound (stmt, DR_REF (dr_ref));
757 : 206682 : DR_SCALAR_KNOWN_BOUNDS (STMT_VINFO_DR_INFO (stmt_vinfo)) = inbounds;
758 : :
759 : 206682 : if (dump_enabled_p ())
760 : 2184 : dump_printf_loc (MSG_NOTE, vect_location,
761 : : "marking DR (read) as possibly needing peeling "
762 : : "for alignment at %G", stmt);
763 : : }
764 : :
765 : 220134 : if (DR_IS_READ (dr_ref))
766 : 206682 : bases.safe_push (dr_ref);
767 : 13452 : else if (DR_IS_WRITE (dr_ref))
768 : : {
769 : : /* We are moving writes down in the CFG. To be sure that this
770 : : is valid after vectorization we have to check all the loads
771 : : we are sinking the stores past to see if any of them may
772 : : alias or are the same object.
773 : :
774 : : Same objects will not be an issue because unless the store
775 : : is marked volatile the value can be forwarded. If the
776 : : store is marked volatile we don't vectorize the loop
777 : : anyway.
778 : :
779 : : That leaves the check for aliasing. We don't really need
780 : : to care about the stores aliasing with each other since the
781 : : stores are moved in order so the effects are still observed
782 : : correctly. This leaves the check for WAR dependencies
783 : : which we would be introducing here if the DR can alias.
784 : : The check is quadratic in loads/stores but I have not found
785 : : a better API to do this. I believe all loads and stores
786 : : must be checked. We also must check them when we
787 : : encountered the store, since we don't care about loads past
788 : : the store. */
789 : :
790 : 45126 : for (auto dr_read : bases)
791 : 14781 : if (dr_may_alias_p (dr_ref, dr_read, loop_nest))
792 : : {
793 : 153 : if (dump_enabled_p ())
794 : 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION,
795 : : vect_location,
796 : : "early breaks not supported: "
797 : : "overlapping loads and stores "
798 : : "found before the break "
799 : : "statement.\n");
800 : :
801 : 153 : return opt_result::failure_at (stmt,
802 : : "can't safely apply code motion to dependencies"
803 : : " to vectorize the early exit. %G may alias with"
804 : : " %G\n", stmt, dr_read->stmt);
805 : : }
806 : : }
807 : :
808 : 439962 : if (gimple_vdef (stmt))
809 : : {
810 : 13299 : if (dump_enabled_p ())
811 : 251 : dump_printf_loc (MSG_NOTE, vect_location,
812 : : "==> recording stmt %G", stmt);
813 : :
814 : 13299 : LOOP_VINFO_EARLY_BRK_STORES (loop_vinfo).safe_push (stmt);
815 : : }
816 : 633345 : else if (gimple_vuse (stmt))
817 : : {
818 : 206682 : LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).safe_insert (0, stmt);
819 : 206682 : if (dump_enabled_p ())
820 : 2184 : dump_printf_loc (MSG_NOTE, vect_location,
821 : : "marked statement for vUSE update: %G", stmt);
822 : : }
823 : : }
824 : :
825 : 276051 : if (!single_pred_p (bb))
826 : : {
827 : 117637 : gcc_assert (bb == loop->header);
828 : 117637 : break;
829 : : }
830 : :
831 : : /* If we possibly sink through a virtual PHI make sure to elide that. */
832 : 158414 : if (gphi *vphi = get_virtual_phi (bb))
833 : 35 : LOOP_VINFO_EARLY_BRK_STORES (loop_vinfo).safe_push (vphi);
834 : :
835 : : /* All earlier blocks need dependence checking. */
836 : 158414 : check_deps = true;
837 : 158414 : bb = single_pred (bb);
838 : 158414 : }
839 : : while (1);
840 : :
841 : : /* We don't allow outer -> inner loop transitions which should have been
842 : : trapped already during loop form analysis. */
843 : 117637 : gcc_assert (dest_bb->loop_father == loop);
844 : :
845 : : /* Check that the destination block we picked has only one pred. To relax this we
846 : : have to take special care when moving the statements. We don't currently support
847 : : such control flow however this check is there to simplify how we handle
848 : : labels that may be present anywhere in the IL. This check is to ensure that the
849 : : labels aren't significant for the CFG. */
850 : 117637 : if (!single_pred (dest_bb))
851 : 0 : return opt_result::failure_at (vect_location,
852 : : "chosen loop exit block (BB %d) does not have a "
853 : : "single predecessor which is currently not "
854 : : "supported for early break vectorization.\n",
855 : : dest_bb->index);
856 : :
857 : 117637 : LOOP_VINFO_EARLY_BRK_DEST_BB (loop_vinfo) = dest_bb;
858 : :
859 : 117637 : if (!LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).is_empty ())
860 : : {
861 : : /* All uses shall be updated to that of the first load. Entries are
862 : : stored in reverse order. */
863 : 109342 : tree vuse = gimple_vuse (LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo).last ());
864 : 315869 : for (auto g : LOOP_VINFO_EARLY_BRK_VUSES (loop_vinfo))
865 : : {
866 : 206527 : if (dump_enabled_p ())
867 : 2184 : dump_printf_loc (MSG_NOTE, vect_location,
868 : : "will update use: %T, mem_ref: %G", vuse, g);
869 : : }
870 : : }
871 : :
872 : 117637 : if (dump_enabled_p ())
873 : 1318 : dump_printf_loc (MSG_NOTE, vect_location,
874 : : "recorded statements to be moved to BB %d\n",
875 : 1318 : LOOP_VINFO_EARLY_BRK_DEST_BB (loop_vinfo)->index);
876 : :
877 : 117637 : return opt_result::success ();
878 : 117790 : }
879 : :
880 : : /* Function vect_analyze_data_ref_dependences.
881 : :
882 : : Examine all the data references in the loop, and make sure there do not
883 : : exist any data dependences between them. Set *MAX_VF according to
884 : : the maximum vectorization factor the data dependences allow. */
885 : :
886 : : opt_result
887 : 296873 : vect_analyze_data_ref_dependences (loop_vec_info loop_vinfo,
888 : : unsigned int *max_vf)
889 : : {
890 : 296873 : unsigned int i;
891 : 296873 : struct data_dependence_relation *ddr;
892 : :
893 : 296873 : DUMP_VECT_SCOPE ("vect_analyze_data_ref_dependences");
894 : :
895 : 296873 : if (!LOOP_VINFO_DDRS (loop_vinfo).exists ())
896 : : {
897 : 147848 : LOOP_VINFO_DDRS (loop_vinfo)
898 : 147848 : .create (LOOP_VINFO_DATAREFS (loop_vinfo).length ()
899 : 147848 : * LOOP_VINFO_DATAREFS (loop_vinfo).length ());
900 : : /* We do not need read-read dependences. */
901 : 295696 : bool res = compute_all_dependences (LOOP_VINFO_DATAREFS (loop_vinfo),
902 : : &LOOP_VINFO_DDRS (loop_vinfo),
903 : 147848 : LOOP_VINFO_LOOP_NEST (loop_vinfo),
904 : : false);
905 : 147848 : gcc_assert (res);
906 : : }
907 : :
908 : 296873 : LOOP_VINFO_NO_DATA_DEPENDENCIES (loop_vinfo) = true;
909 : :
910 : : /* For epilogues we either have no aliases or alias versioning
911 : : was applied to original loop. Therefore we may just get max_vf
912 : : using VF of original loop. */
913 : 296873 : if (LOOP_VINFO_EPILOGUE_P (loop_vinfo))
914 : 14404 : *max_vf = LOOP_VINFO_ORIG_MAX_VECT_FACTOR (loop_vinfo);
915 : : else
916 : 1241052 : FOR_EACH_VEC_ELT (LOOP_VINFO_DDRS (loop_vinfo), i, ddr)
917 : : {
918 : 972534 : opt_result res
919 : 972534 : = vect_analyze_data_ref_dependence (ddr, loop_vinfo, max_vf);
920 : 972534 : if (!res)
921 : 13951 : return res;
922 : : }
923 : :
924 : : /* If we have early break statements in the loop, check to see if they
925 : : are of a form we can vectorizer. */
926 : 282922 : if (LOOP_VINFO_EARLY_BREAKS (loop_vinfo))
927 : 117790 : return vect_analyze_early_break_dependences (loop_vinfo);
928 : :
929 : 165132 : return opt_result::success ();
930 : : }
931 : :
932 : :
933 : : /* Function vect_slp_analyze_data_ref_dependence.
934 : :
935 : : Return TRUE if there (might) exist a dependence between a memory-reference
936 : : DRA and a memory-reference DRB for VINFO. When versioning for alias
937 : : may check a dependence at run-time, return FALSE. Adjust *MAX_VF
938 : : according to the data dependence. */
939 : :
940 : : static bool
941 : 6389577 : vect_slp_analyze_data_ref_dependence (vec_info *vinfo,
942 : : struct data_dependence_relation *ddr)
943 : : {
944 : 6389577 : struct data_reference *dra = DDR_A (ddr);
945 : 6389577 : struct data_reference *drb = DDR_B (ddr);
946 : 6389577 : dr_vec_info *dr_info_a = vinfo->lookup_dr (dra);
947 : 6389577 : dr_vec_info *dr_info_b = vinfo->lookup_dr (drb);
948 : :
949 : : /* We need to check dependences of statements marked as unvectorizable
950 : : as well, they still can prohibit vectorization. */
951 : :
952 : : /* Independent data accesses. */
953 : 6389577 : if (DDR_ARE_DEPENDENT (ddr) == chrec_known)
954 : : return false;
955 : :
956 : 1101261 : if (dra == drb)
957 : : return false;
958 : :
959 : : /* Read-read is OK. */
960 : 7858 : if (DR_IS_READ (dra) && DR_IS_READ (drb))
961 : : return false;
962 : :
963 : : /* If dra and drb are part of the same interleaving chain consider
964 : : them independent. */
965 : 7858 : if (STMT_VINFO_GROUPED_ACCESS (dr_info_a->stmt)
966 : 7858 : && (DR_GROUP_FIRST_ELEMENT (dr_info_a->stmt)
967 : 7858 : == DR_GROUP_FIRST_ELEMENT (dr_info_b->stmt)))
968 : : return false;
969 : :
970 : : /* Unknown data dependence. */
971 : 7858 : if (DDR_ARE_DEPENDENT (ddr) == chrec_dont_know)
972 : : {
973 : 7858 : if (dump_enabled_p ())
974 : 4 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
975 : : "can't determine dependence between %T and %T\n",
976 : : DR_REF (dra), DR_REF (drb));
977 : : }
978 : 0 : else if (dump_enabled_p ())
979 : 0 : dump_printf_loc (MSG_NOTE, vect_location,
980 : : "determined dependence between %T and %T\n",
981 : : DR_REF (dra), DR_REF (drb));
982 : :
983 : : return true;
984 : : }
985 : :
986 : :
987 : : /* Analyze dependences involved in the transform of a store SLP NODE. */
988 : :
989 : : static bool
990 : 662799 : vect_slp_analyze_store_dependences (vec_info *vinfo, slp_tree node)
991 : : {
992 : : /* This walks over all stmts involved in the SLP store done
993 : : in NODE verifying we can sink them up to the last stmt in the
994 : : group. */
995 : 662799 : stmt_vec_info last_access_info = vect_find_last_scalar_stmt_in_slp (node);
996 : 662799 : gcc_assert (DR_IS_WRITE (STMT_VINFO_DATA_REF (last_access_info)));
997 : :
998 : 2403951 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (node).length (); ++k)
999 : : {
1000 : 1748977 : stmt_vec_info access_info
1001 : 1748977 : = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (node)[k]);
1002 : 1748977 : if (access_info == last_access_info)
1003 : 655574 : continue;
1004 : 1093403 : data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
1005 : 1093403 : ao_ref ref;
1006 : 1093403 : bool ref_initialized_p = false;
1007 : 1093403 : for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
1008 : 10397846 : gsi_stmt (gsi) != last_access_info->stmt; gsi_next (&gsi))
1009 : : {
1010 : 9312268 : gimple *stmt = gsi_stmt (gsi);
1011 : 16280706 : if (! gimple_vuse (stmt))
1012 : 2840308 : continue;
1013 : :
1014 : : /* If we couldn't record a (single) data reference for this
1015 : : stmt we have to resort to the alias oracle. */
1016 : 6471960 : stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
1017 : 6471960 : data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
1018 : 6471960 : if (!dr_b)
1019 : : {
1020 : : /* We are moving a store - this means
1021 : : we cannot use TBAA for disambiguation. */
1022 : 82601 : if (!ref_initialized_p)
1023 : 82601 : ao_ref_init (&ref, DR_REF (dr_a));
1024 : 82601 : if (stmt_may_clobber_ref_p_1 (stmt, &ref, false)
1025 : 82601 : || ref_maybe_used_by_stmt_p (stmt, &ref, false))
1026 : 7825 : return false;
1027 : 82600 : continue;
1028 : : }
1029 : :
1030 : 6389359 : gcc_assert (!gimple_visited_p (stmt));
1031 : :
1032 : 6389359 : ddr_p ddr = initialize_data_dependence_relation (dr_a,
1033 : 6389359 : dr_b, vNULL);
1034 : 6389359 : bool dependent = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1035 : 6389359 : free_dependence_relation (ddr);
1036 : 6389359 : if (dependent)
1037 : : return false;
1038 : : }
1039 : : }
1040 : : return true;
1041 : : }
1042 : :
1043 : : /* Analyze dependences involved in the transform of a load SLP NODE. STORES
1044 : : contain the vector of scalar stores of this instance if we are
1045 : : disambiguating the loads. */
1046 : :
1047 : : static bool
1048 : 155147 : vect_slp_analyze_load_dependences (vec_info *vinfo, slp_tree node,
1049 : : vec<stmt_vec_info> stores,
1050 : : stmt_vec_info last_store_info)
1051 : : {
1052 : : /* This walks over all stmts involved in the SLP load done
1053 : : in NODE verifying we can hoist them up to the first stmt in the
1054 : : group. */
1055 : 155147 : stmt_vec_info first_access_info = vect_find_first_scalar_stmt_in_slp (node);
1056 : 155147 : gcc_assert (DR_IS_READ (STMT_VINFO_DATA_REF (first_access_info)));
1057 : :
1058 : 540925 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (node).length (); ++k)
1059 : : {
1060 : 385838 : if (! SLP_TREE_SCALAR_STMTS (node)[k])
1061 : 162424 : continue;
1062 : 385838 : stmt_vec_info access_info
1063 : 385838 : = vect_orig_stmt (SLP_TREE_SCALAR_STMTS (node)[k]);
1064 : 385838 : if (access_info == first_access_info)
1065 : 162424 : continue;
1066 : 223414 : data_reference *dr_a = STMT_VINFO_DATA_REF (access_info);
1067 : 223414 : ao_ref ref;
1068 : 223414 : bool ref_initialized_p = false;
1069 : 223414 : hash_set<stmt_vec_info> grp_visited;
1070 : 223414 : for (gimple_stmt_iterator gsi = gsi_for_stmt (access_info->stmt);
1071 : 4783386 : gsi_stmt (gsi) != first_access_info->stmt; gsi_prev (&gsi))
1072 : : {
1073 : 2280046 : gimple *stmt = gsi_stmt (gsi);
1074 : 3670029 : if (! gimple_vdef (stmt))
1075 : 2194662 : continue;
1076 : :
1077 : 335945 : stmt_vec_info stmt_info = vinfo->lookup_stmt (stmt);
1078 : :
1079 : : /* If we run into a store of this same instance (we've just
1080 : : marked those) then delay dependence checking until we run
1081 : : into the last store because this is where it will have
1082 : : been sunk to (and we verified that we can do that already). */
1083 : 335945 : if (gimple_visited_p (stmt))
1084 : : {
1085 : 250561 : if (stmt_info != last_store_info)
1086 : 250559 : continue;
1087 : :
1088 : 10 : for (stmt_vec_info &store_info : stores)
1089 : : {
1090 : 4 : data_reference *store_dr = STMT_VINFO_DATA_REF (store_info);
1091 : 4 : ddr_p ddr = initialize_data_dependence_relation
1092 : 4 : (dr_a, store_dr, vNULL);
1093 : 4 : bool dependent
1094 : 4 : = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1095 : 4 : free_dependence_relation (ddr);
1096 : 4 : if (dependent)
1097 : 60 : return false;
1098 : : }
1099 : 2 : continue;
1100 : 2 : }
1101 : :
1102 : 174114 : auto check_hoist = [&] (stmt_vec_info stmt_info) -> bool
1103 : : {
1104 : : /* We are hoisting a load - this means we can use TBAA for
1105 : : disambiguation. */
1106 : 88730 : if (!ref_initialized_p)
1107 : 88730 : ao_ref_init (&ref, DR_REF (dr_a));
1108 : 88730 : if (stmt_may_clobber_ref_p_1 (stmt_info->stmt, &ref, true))
1109 : : {
1110 : : /* If we couldn't record a (single) data reference for this
1111 : : stmt we have to give up now. */
1112 : 240 : data_reference *dr_b = STMT_VINFO_DATA_REF (stmt_info);
1113 : 240 : if (!dr_b)
1114 : : return false;
1115 : 214 : ddr_p ddr = initialize_data_dependence_relation (dr_a,
1116 : 214 : dr_b, vNULL);
1117 : 214 : bool dependent
1118 : 214 : = vect_slp_analyze_data_ref_dependence (vinfo, ddr);
1119 : 214 : free_dependence_relation (ddr);
1120 : 214 : if (dependent)
1121 : : return false;
1122 : : }
1123 : : /* No dependence. */
1124 : : return true;
1125 : 85384 : };
1126 : 85384 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1127 : : {
1128 : : /* When we run into a store group we have to honor
1129 : : that earlier stores might be moved here. We don't
1130 : : know exactly which and where to since we lack a
1131 : : back-mapping from DR to SLP node, so assume all
1132 : : earlier stores are sunk here. It's enough to
1133 : : consider the last stmt of a group for this.
1134 : : ??? Both this and the fact that we disregard that
1135 : : the conflicting instance might be removed later
1136 : : is overly conservative. */
1137 : 66177 : if (!grp_visited.add (DR_GROUP_FIRST_ELEMENT (stmt_info)))
1138 : 12497 : for (auto store_info = DR_GROUP_FIRST_ELEMENT (stmt_info);
1139 : 153297 : store_info != NULL;
1140 : 140800 : store_info = DR_GROUP_NEXT_ELEMENT (store_info))
1141 : 140834 : if ((store_info == stmt_info
1142 : 128346 : || get_later_stmt (store_info, stmt_info) == stmt_info)
1143 : 197869 : && !check_hoist (store_info))
1144 : : return false;
1145 : : }
1146 : : else
1147 : : {
1148 : 19207 : if (!check_hoist (stmt_info))
1149 : : return false;
1150 : : }
1151 : : }
1152 : 223414 : }
1153 : : return true;
1154 : : }
1155 : :
1156 : :
1157 : : /* Function vect_analyze_data_ref_dependences.
1158 : :
1159 : : Examine all the data references in the basic-block, and make sure there
1160 : : do not exist any data dependences between them. Set *MAX_VF according to
1161 : : the maximum vectorization factor the data dependences allow. */
1162 : :
1163 : : bool
1164 : 792728 : vect_slp_analyze_instance_dependence (vec_info *vinfo, slp_instance instance)
1165 : : {
1166 : 792728 : DUMP_VECT_SCOPE ("vect_slp_analyze_instance_dependence");
1167 : :
1168 : : /* The stores of this instance are at the root of the SLP tree. */
1169 : 792728 : slp_tree store = NULL;
1170 : 792728 : if (SLP_INSTANCE_KIND (instance) == slp_inst_kind_store)
1171 : 662799 : store = SLP_INSTANCE_TREE (instance);
1172 : :
1173 : : /* Verify we can sink stores to the vectorized stmt insert location. */
1174 : 662799 : stmt_vec_info last_store_info = NULL;
1175 : 662799 : if (store)
1176 : : {
1177 : 662799 : if (! vect_slp_analyze_store_dependences (vinfo, store))
1178 : : return false;
1179 : :
1180 : : /* Mark stores in this instance and remember the last one. */
1181 : 654974 : last_store_info = vect_find_last_scalar_stmt_in_slp (store);
1182 : 2395496 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (store).length (); ++k)
1183 : 1740522 : gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, true);
1184 : : }
1185 : :
1186 : 784903 : bool res = true;
1187 : :
1188 : : /* Verify we can sink loads to the vectorized stmt insert location,
1189 : : special-casing stores of this instance. */
1190 : 1181460 : for (slp_tree &load : SLP_INSTANCE_LOADS (instance))
1191 : 155147 : if (! vect_slp_analyze_load_dependences (vinfo, load,
1192 : : store
1193 : : ? SLP_TREE_SCALAR_STMTS (store)
1194 : : : vNULL, last_store_info))
1195 : : {
1196 : : res = false;
1197 : : break;
1198 : : }
1199 : :
1200 : : /* Unset the visited flag. */
1201 : 784903 : if (store)
1202 : 2395496 : for (unsigned k = 0; k < SLP_TREE_SCALAR_STMTS (store).length (); ++k)
1203 : 1740522 : gimple_set_visited (SLP_TREE_SCALAR_STMTS (store)[k]->stmt, false);
1204 : :
1205 : : /* If this is a SLP instance with a store check if there's a dependent
1206 : : load that cannot be forwarded from a previous iteration of a loop
1207 : : both are in. This is to avoid situations like that in PR115777. */
1208 : 784903 : if (res && store)
1209 : : {
1210 : 654925 : stmt_vec_info store_info
1211 : 654925 : = DR_GROUP_FIRST_ELEMENT (SLP_TREE_SCALAR_STMTS (store)[0]);
1212 : 654925 : class loop *store_loop = gimple_bb (store_info->stmt)->loop_father;
1213 : 654925 : if (! loop_outer (store_loop))
1214 : 558275 : return res;
1215 : 96650 : vec<loop_p> loop_nest;
1216 : 96650 : loop_nest.create (1);
1217 : 96650 : loop_nest.quick_push (store_loop);
1218 : 96650 : data_reference *drs = nullptr;
1219 : 182122 : for (slp_tree &load : SLP_INSTANCE_LOADS (instance))
1220 : : {
1221 : 38088 : if (! STMT_VINFO_GROUPED_ACCESS (SLP_TREE_SCALAR_STMTS (load)[0]))
1222 : 0 : continue;
1223 : 38088 : stmt_vec_info load_info
1224 : 38088 : = DR_GROUP_FIRST_ELEMENT (SLP_TREE_SCALAR_STMTS (load)[0]);
1225 : 38088 : if (gimple_bb (load_info->stmt)->loop_father != store_loop)
1226 : 4711 : continue;
1227 : :
1228 : : /* For now concern ourselves with write-after-read as we also
1229 : : only look for re-use of the store within the same SLP instance.
1230 : : We can still get a RAW here when the instance contais a PHI
1231 : : with a backedge though, thus this test. */
1232 : 33377 : if (! vect_stmt_dominates_stmt_p (STMT_VINFO_STMT (load_info),
1233 : : STMT_VINFO_STMT (store_info)))
1234 : 12003 : continue;
1235 : :
1236 : 21374 : if (! drs)
1237 : : {
1238 : 20384 : drs = create_data_ref (loop_preheader_edge (store_loop),
1239 : : store_loop,
1240 : 20384 : DR_REF (STMT_VINFO_DATA_REF (store_info)),
1241 : : store_info->stmt, false, false);
1242 : 20384 : if (! DR_BASE_ADDRESS (drs)
1243 : 16757 : || TREE_CODE (DR_STEP (drs)) != INTEGER_CST)
1244 : : break;
1245 : : }
1246 : 17446 : data_reference *drl
1247 : 17446 : = create_data_ref (loop_preheader_edge (store_loop),
1248 : : store_loop,
1249 : 17446 : DR_REF (STMT_VINFO_DATA_REF (load_info)),
1250 : : load_info->stmt, true, false);
1251 : :
1252 : : /* See whether the DRs have a known constant distance throughout
1253 : : the containing loop iteration. */
1254 : 33161 : if (! DR_BASE_ADDRESS (drl)
1255 : 14412 : || ! operand_equal_p (DR_STEP (drs), DR_STEP (drl))
1256 : 8685 : || ! operand_equal_p (DR_BASE_ADDRESS (drs),
1257 : 8685 : DR_BASE_ADDRESS (drl))
1258 : 19181 : || ! operand_equal_p (DR_OFFSET (drs), DR_OFFSET (drl)))
1259 : : {
1260 : 15715 : free_data_ref (drl);
1261 : 15715 : continue;
1262 : : }
1263 : :
1264 : : /* If the next iteration load overlaps with a non-power-of-two offset
1265 : : we are surely failing any STLF attempt. */
1266 : 1731 : HOST_WIDE_INT step = TREE_INT_CST_LOW (DR_STEP (drl));
1267 : 1731 : unsigned HOST_WIDE_INT sizes
1268 : 1731 : = (TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drs))))
1269 : 1731 : * DR_GROUP_SIZE (store_info));
1270 : 1731 : unsigned HOST_WIDE_INT sizel
1271 : 1731 : = (TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drl))))
1272 : 1731 : * DR_GROUP_SIZE (load_info));
1273 : 1731 : if (ranges_overlap_p (TREE_INT_CST_LOW (DR_INIT (drl)) + step, sizel,
1274 : 1731 : TREE_INT_CST_LOW (DR_INIT (drs)), sizes))
1275 : : {
1276 : 839 : unsigned HOST_WIDE_INT dist
1277 : 839 : = absu_hwi (TREE_INT_CST_LOW (DR_INIT (drl)) + step
1278 : 839 : - TREE_INT_CST_LOW (DR_INIT (drs)));
1279 : 839 : poly_uint64 loadsz = tree_to_poly_uint64
1280 : 839 : (TYPE_SIZE_UNIT (SLP_TREE_VECTYPE (load)));
1281 : 839 : poly_uint64 storesz = tree_to_poly_uint64
1282 : 839 : (TYPE_SIZE_UNIT (SLP_TREE_VECTYPE (store)));
1283 : : /* When the overlap aligns with vector sizes used for the loads
1284 : : and the vector stores are larger or equal to the loads
1285 : : forwarding should work. */
1286 : 1678 : if (maybe_gt (loadsz, storesz) || ! multiple_p (dist, loadsz))
1287 : 72 : load->avoid_stlf_fail = true;
1288 : : }
1289 : 1731 : free_data_ref (drl);
1290 : : }
1291 : 96650 : if (drs)
1292 : 20384 : free_data_ref (drs);
1293 : 96650 : loop_nest.release ();
1294 : : }
1295 : :
1296 : : return res;
1297 : : }
1298 : :
1299 : : /* Return the misalignment of DR_INFO accessed in VECTYPE with OFFSET
1300 : : applied. */
1301 : :
1302 : : int
1303 : 5549440 : dr_misalignment (dr_vec_info *dr_info, tree vectype, poly_int64 offset)
1304 : : {
1305 : 5549440 : HOST_WIDE_INT diff = 0;
1306 : : /* Alignment is only analyzed for the first element of a DR group,
1307 : : use that but adjust misalignment by the offset of the access. */
1308 : 5549440 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt))
1309 : : {
1310 : 2218431 : dr_vec_info *first_dr
1311 : 2218431 : = STMT_VINFO_DR_INFO (DR_GROUP_FIRST_ELEMENT (dr_info->stmt));
1312 : : /* vect_analyze_data_ref_accesses guarantees that DR_INIT are
1313 : : INTEGER_CSTs and the first element in the group has the lowest
1314 : : address. */
1315 : 2218431 : diff = (TREE_INT_CST_LOW (DR_INIT (dr_info->dr))
1316 : 2218431 : - TREE_INT_CST_LOW (DR_INIT (first_dr->dr)));
1317 : 2218431 : gcc_assert (diff >= 0);
1318 : : dr_info = first_dr;
1319 : : }
1320 : :
1321 : 5549440 : int misalign = dr_info->misalignment;
1322 : 5549440 : gcc_assert (misalign != DR_MISALIGNMENT_UNINITIALIZED);
1323 : 5549440 : if (misalign == DR_MISALIGNMENT_UNKNOWN)
1324 : : return misalign;
1325 : :
1326 : : /* If the access is only aligned for a vector type with smaller alignment
1327 : : requirement the access has unknown misalignment. */
1328 : 3468142 : if (maybe_lt (dr_info->target_alignment * BITS_PER_UNIT,
1329 : 3468142 : targetm.vectorize.preferred_vector_alignment (vectype)))
1330 : : return DR_MISALIGNMENT_UNKNOWN;
1331 : :
1332 : : /* Apply the offset from the DR group start and the externally supplied
1333 : : offset which can for example result from a negative stride access. */
1334 : 3468139 : poly_int64 misalignment = misalign + diff + offset;
1335 : :
1336 : : /* Below we reject compile-time non-constant target alignments, but if
1337 : : our misalignment is zero, then we are known to already be aligned
1338 : : w.r.t. any such possible target alignment. */
1339 : 3468139 : if (known_eq (misalignment, 0))
1340 : : return 0;
1341 : :
1342 : 606882 : unsigned HOST_WIDE_INT target_alignment_c;
1343 : 606882 : if (!dr_info->target_alignment.is_constant (&target_alignment_c)
1344 : 606882 : || !known_misalignment (misalignment, target_alignment_c, &misalign))
1345 : : return DR_MISALIGNMENT_UNKNOWN;
1346 : 606882 : return misalign;
1347 : : }
1348 : :
1349 : : /* Record the base alignment guarantee given by DRB, which occurs
1350 : : in STMT_INFO. */
1351 : :
1352 : : static void
1353 : 4448584 : vect_record_base_alignment (vec_info *vinfo, stmt_vec_info stmt_info,
1354 : : innermost_loop_behavior *drb)
1355 : : {
1356 : 4448584 : bool existed;
1357 : 4448584 : std::pair<stmt_vec_info, innermost_loop_behavior *> &entry
1358 : 4448584 : = vinfo->base_alignments.get_or_insert (drb->base_address, &existed);
1359 : 4448584 : if (!existed || entry.second->base_alignment < drb->base_alignment)
1360 : : {
1361 : 1341183 : entry = std::make_pair (stmt_info, drb);
1362 : 1341183 : if (dump_enabled_p ())
1363 : 30620 : dump_printf_loc (MSG_NOTE, vect_location,
1364 : : "recording new base alignment for %T\n"
1365 : : " alignment: %d\n"
1366 : : " misalignment: %d\n"
1367 : : " based on: %G",
1368 : : drb->base_address,
1369 : : drb->base_alignment,
1370 : : drb->base_misalignment,
1371 : : stmt_info->stmt);
1372 : : }
1373 : 4448584 : }
1374 : :
1375 : : /* If the region we're going to vectorize is reached, all unconditional
1376 : : data references occur at least once. We can therefore pool the base
1377 : : alignment guarantees from each unconditional reference. Do this by
1378 : : going through all the data references in VINFO and checking whether
1379 : : the containing statement makes the reference unconditionally. If so,
1380 : : record the alignment of the base address in VINFO so that it can be
1381 : : used for all other references with the same base. */
1382 : :
1383 : : void
1384 : 973295 : vect_record_base_alignments (vec_info *vinfo)
1385 : : {
1386 : 973295 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
1387 : 339510 : class loop *loop = loop_vinfo ? LOOP_VINFO_LOOP (loop_vinfo) : NULL;
1388 : 14729752 : for (data_reference *dr : vinfo->shared->datarefs)
1389 : : {
1390 : 11902273 : dr_vec_info *dr_info = vinfo->lookup_dr (dr);
1391 : 11902273 : stmt_vec_info stmt_info = dr_info->stmt;
1392 : 11902273 : if (!DR_IS_CONDITIONAL_IN_STMT (dr)
1393 : 11895645 : && STMT_VINFO_VECTORIZABLE (stmt_info)
1394 : 4460503 : && !STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1395 : : {
1396 : 4447179 : vect_record_base_alignment (vinfo, stmt_info, &DR_INNERMOST (dr));
1397 : :
1398 : : /* If DR is nested in the loop that is being vectorized, we can also
1399 : : record the alignment of the base wrt the outer loop. */
1400 : 12677100 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
1401 : 1405 : vect_record_base_alignment
1402 : 1405 : (vinfo, stmt_info, &STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info));
1403 : : }
1404 : : }
1405 : 973295 : }
1406 : :
1407 : : /* Function vect_compute_data_ref_alignment
1408 : :
1409 : : Compute the misalignment of the data reference DR_INFO when vectorizing
1410 : : with VECTYPE.
1411 : :
1412 : : Output:
1413 : : 1. initialized misalignment info for DR_INFO
1414 : :
1415 : : FOR NOW: No analysis is actually performed. Misalignment is calculated
1416 : : only for trivial cases. TODO. */
1417 : :
1418 : : static void
1419 : 1462225 : vect_compute_data_ref_alignment (vec_info *vinfo, dr_vec_info *dr_info,
1420 : : tree vectype)
1421 : : {
1422 : 1462225 : stmt_vec_info stmt_info = dr_info->stmt;
1423 : 1462225 : vec_base_alignments *base_alignments = &vinfo->base_alignments;
1424 : 1462225 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
1425 : 1462225 : class loop *loop = NULL;
1426 : 1462225 : tree ref = DR_REF (dr_info->dr);
1427 : :
1428 : 1462225 : if (dump_enabled_p ())
1429 : 48263 : dump_printf_loc (MSG_NOTE, vect_location,
1430 : : "vect_compute_data_ref_alignment:\n");
1431 : :
1432 : 1462225 : if (loop_vinfo)
1433 : 679859 : loop = LOOP_VINFO_LOOP (loop_vinfo);
1434 : :
1435 : : /* Initialize misalignment to unknown. */
1436 : 1462225 : SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
1437 : :
1438 : 1462225 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
1439 : : return;
1440 : :
1441 : 1447349 : innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
1442 : 1447349 : bool step_preserves_misalignment_p;
1443 : :
1444 : 1447349 : poly_uint64 vector_alignment
1445 : 1447349 : = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
1446 : : BITS_PER_UNIT);
1447 : :
1448 : 1447349 : if (loop_vinfo
1449 : 1447349 : && dr_safe_speculative_read_required (stmt_info))
1450 : : {
1451 : : /* The required target alignment must be a power-of-2 value and is
1452 : : computed as the product of vector element size, VF and group size.
1453 : : We compute the constant part first as VF may be a variable. For
1454 : : variable VF, the power-of-2 check of VF is deferred to runtime. */
1455 : 274485 : auto align_factor_c
1456 : 274485 : = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1457 : 274485 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1458 : 80906 : align_factor_c *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
1459 : :
1460 : 274485 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
1461 : 274485 : poly_uint64 new_alignment = vf * align_factor_c;
1462 : :
1463 : 548970 : if ((vf.is_constant () && pow2p_hwi (new_alignment.to_constant ()))
1464 : : || (!vf.is_constant () && pow2p_hwi (align_factor_c)))
1465 : : {
1466 : 222359 : if (dump_enabled_p ())
1467 : : {
1468 : 3066 : dump_printf_loc (MSG_NOTE, vect_location,
1469 : : "alignment increased due to early break to ");
1470 : 3066 : dump_dec (MSG_NOTE, new_alignment);
1471 : 3066 : dump_printf (MSG_NOTE, " bytes.\n");
1472 : : }
1473 : 222359 : vector_alignment = new_alignment;
1474 : : }
1475 : : }
1476 : :
1477 : 1447349 : SET_DR_TARGET_ALIGNMENT (dr_info, vector_alignment);
1478 : :
1479 : : /* If the main loop has peeled for alignment we have no way of knowing
1480 : : whether the data accesses in the epilogues are aligned. We can't at
1481 : : compile time answer the question whether we have entered the main loop or
1482 : : not. Fixes PR 92351. */
1483 : 1447349 : if (loop_vinfo)
1484 : : {
1485 : 664983 : loop_vec_info orig_loop_vinfo = LOOP_VINFO_ORIG_LOOP_INFO (loop_vinfo);
1486 : 664983 : if (orig_loop_vinfo
1487 : 30217 : && LOOP_VINFO_PEELING_FOR_ALIGNMENT (orig_loop_vinfo) != 0)
1488 : : return;
1489 : : }
1490 : :
1491 : 1447132 : unsigned HOST_WIDE_INT vect_align_c;
1492 : 1447132 : if (!vector_alignment.is_constant (&vect_align_c))
1493 : : return;
1494 : :
1495 : : /* No step for BB vectorization. */
1496 : 1447132 : if (!loop)
1497 : : {
1498 : 782366 : gcc_assert (integer_zerop (drb->step));
1499 : : step_preserves_misalignment_p = true;
1500 : : }
1501 : :
1502 : : else
1503 : : {
1504 : : /* We can only use base and misalignment information relative to
1505 : : an innermost loop if the misalignment stays the same throughout the
1506 : : execution of the loop. As above, this is the case if the stride of
1507 : : the dataref evenly divides by the alignment. Make sure to check
1508 : : previous epilogues and the main loop. */
1509 : : step_preserves_misalignment_p = true;
1510 : : auto lvinfo = loop_vinfo;
1511 : 1360222 : while (lvinfo)
1512 : : {
1513 : 695456 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (lvinfo);
1514 : 695456 : step_preserves_misalignment_p
1515 : 695456 : &= multiple_p (drb->step_alignment * vf, vect_align_c);
1516 : 695456 : lvinfo = LOOP_VINFO_ORIG_LOOP_INFO (lvinfo);
1517 : : }
1518 : :
1519 : 664766 : if (!step_preserves_misalignment_p && dump_enabled_p ())
1520 : 271 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1521 : : "step doesn't divide the vector alignment.\n");
1522 : :
1523 : : /* In case the dataref is in an inner-loop of the loop that is being
1524 : : vectorized (LOOP), we use the base and misalignment information
1525 : : relative to the outer-loop (LOOP). This is ok only if the
1526 : : misalignment stays the same throughout the execution of the
1527 : : inner-loop, which is why we have to check that the stride of the
1528 : : dataref in the inner-loop evenly divides by the vector alignment. */
1529 : 664766 : if (step_preserves_misalignment_p
1530 : 664766 : && nested_in_vect_loop_p (loop, stmt_info))
1531 : : {
1532 : 1404 : step_preserves_misalignment_p
1533 : 1404 : = (DR_STEP_ALIGNMENT (dr_info->dr) % vect_align_c) == 0;
1534 : :
1535 : 1404 : if (dump_enabled_p ())
1536 : : {
1537 : 495 : if (step_preserves_misalignment_p)
1538 : 355 : dump_printf_loc (MSG_NOTE, vect_location,
1539 : : "inner step divides the vector alignment.\n");
1540 : : else
1541 : 140 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1542 : : "inner step doesn't divide the vector"
1543 : : " alignment.\n");
1544 : : }
1545 : : }
1546 : : }
1547 : :
1548 : 1447132 : unsigned int base_alignment = drb->base_alignment;
1549 : 1447132 : unsigned int base_misalignment = drb->base_misalignment;
1550 : :
1551 : : /* Calculate the maximum of the pooled base address alignment and the
1552 : : alignment that we can compute for DR itself. */
1553 : 1447132 : std::pair<stmt_vec_info, innermost_loop_behavior *> *entry
1554 : 1447132 : = base_alignments->get (drb->base_address);
1555 : 1447132 : if (entry
1556 : 1444360 : && base_alignment < (*entry).second->base_alignment
1557 : 1449119 : && (loop_vinfo
1558 : 1369 : || (dominated_by_p (CDI_DOMINATORS, gimple_bb (stmt_info->stmt),
1559 : 1369 : gimple_bb (entry->first->stmt))
1560 : 1090 : && (gimple_bb (stmt_info->stmt) != gimple_bb (entry->first->stmt)
1561 : 796 : || (entry->first->dr_aux.group <= dr_info->group)))))
1562 : : {
1563 : 1691 : base_alignment = entry->second->base_alignment;
1564 : 1691 : base_misalignment = entry->second->base_misalignment;
1565 : : }
1566 : :
1567 : 1447132 : if (drb->offset_alignment < vect_align_c
1568 : 1379593 : || !step_preserves_misalignment_p
1569 : : /* We need to know whether the step wrt the vectorized loop is
1570 : : negative when computing the starting misalignment below. */
1571 : 1369882 : || TREE_CODE (drb->step) != INTEGER_CST)
1572 : : {
1573 : 103887 : if (dump_enabled_p ())
1574 : 3440 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1575 : : "Unknown alignment for access: %T\n", ref);
1576 : 103887 : return;
1577 : : }
1578 : :
1579 : 1343245 : if (base_alignment < vect_align_c)
1580 : : {
1581 : 658848 : unsigned int max_alignment;
1582 : 658848 : tree base = get_base_for_alignment (drb->base_address, &max_alignment);
1583 : 658848 : if (max_alignment < vect_align_c
1584 : 656894 : || (loop_vinfo && LOOP_VINFO_EPILOGUE_P (loop_vinfo))
1585 : 1298225 : || !vect_can_force_dr_alignment_p (base,
1586 : 639377 : vect_align_c * BITS_PER_UNIT))
1587 : : {
1588 : 460677 : if (dump_enabled_p ())
1589 : 12466 : dump_printf_loc (MSG_NOTE, vect_location,
1590 : : "can't force alignment of ref: %T\n", ref);
1591 : 460677 : return;
1592 : : }
1593 : :
1594 : : /* Force the alignment of the decl.
1595 : : NOTE: This is the only change to the code we make during
1596 : : the analysis phase, before deciding to vectorize the loop. */
1597 : 198171 : if (dump_enabled_p ())
1598 : 8016 : dump_printf_loc (MSG_NOTE, vect_location,
1599 : : "force alignment of %T\n", ref);
1600 : :
1601 : 198171 : dr_info->base_decl = base;
1602 : 198171 : dr_info->base_misaligned = true;
1603 : 198171 : base_misalignment = 0;
1604 : : }
1605 : 882568 : poly_int64 misalignment
1606 : 882568 : = base_misalignment + wi::to_poly_offset (drb->init).force_shwi ();
1607 : :
1608 : 882568 : unsigned int const_misalignment;
1609 : 882568 : if (!known_misalignment (misalignment, vect_align_c, &const_misalignment))
1610 : : {
1611 : : if (dump_enabled_p ())
1612 : : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1613 : : "Non-constant misalignment for access: %T\n", ref);
1614 : : return;
1615 : : }
1616 : :
1617 : 882568 : SET_DR_MISALIGNMENT (dr_info, const_misalignment);
1618 : :
1619 : 882568 : if (dump_enabled_p ())
1620 : 31189 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1621 : : "misalign = %d bytes of ref %T\n",
1622 : : const_misalignment, ref);
1623 : :
1624 : : return;
1625 : : }
1626 : :
1627 : : /* Return whether DR_INFO, which is related to DR_PEEL_INFO in
1628 : : that it only differs in DR_INIT, is aligned if DR_PEEL_INFO
1629 : : is made aligned via peeling. */
1630 : :
1631 : : static bool
1632 : 1484172 : vect_dr_aligned_if_related_peeled_dr_is (dr_vec_info *dr_info,
1633 : : dr_vec_info *dr_peel_info)
1634 : : {
1635 : 1484172 : if (multiple_p (DR_TARGET_ALIGNMENT (dr_peel_info),
1636 : 1484971 : DR_TARGET_ALIGNMENT (dr_info)))
1637 : : {
1638 : 1483373 : poly_offset_int diff
1639 : 1483373 : = (wi::to_poly_offset (DR_INIT (dr_peel_info->dr))
1640 : 1483373 : - wi::to_poly_offset (DR_INIT (dr_info->dr)));
1641 : 1483373 : if (known_eq (diff, 0)
1642 : 1483373 : || multiple_p (diff, DR_TARGET_ALIGNMENT (dr_info)))
1643 : 507387 : return true;
1644 : : }
1645 : : return false;
1646 : : }
1647 : :
1648 : : /* Return whether DR_INFO is aligned if DR_PEEL_INFO is made
1649 : : aligned via peeling. */
1650 : :
1651 : : static bool
1652 : 157575 : vect_dr_aligned_if_peeled_dr_is (dr_vec_info *dr_info,
1653 : : dr_vec_info *dr_peel_info)
1654 : : {
1655 : 157575 : if (!operand_equal_p (DR_BASE_ADDRESS (dr_info->dr),
1656 : 157575 : DR_BASE_ADDRESS (dr_peel_info->dr), 0)
1657 : 45025 : || !operand_equal_p (DR_OFFSET (dr_info->dr),
1658 : 45025 : DR_OFFSET (dr_peel_info->dr), 0)
1659 : 201796 : || !operand_equal_p (DR_STEP (dr_info->dr),
1660 : 44221 : DR_STEP (dr_peel_info->dr), 0))
1661 : 113711 : return false;
1662 : :
1663 : 43864 : return vect_dr_aligned_if_related_peeled_dr_is (dr_info, dr_peel_info);
1664 : : }
1665 : :
1666 : : /* Compute the value for dr_info->misalign so that the access appears
1667 : : aligned. This is used by peeling to compensate for dr_misalignment
1668 : : applying the offset for negative step. */
1669 : :
1670 : : int
1671 : 15632 : vect_dr_misalign_for_aligned_access (dr_vec_info *dr_info)
1672 : : {
1673 : 15632 : if (tree_int_cst_sgn (DR_STEP (dr_info->dr)) >= 0)
1674 : : return 0;
1675 : :
1676 : 152 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
1677 : 152 : poly_int64 misalignment
1678 : 152 : = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
1679 : 152 : * TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
1680 : :
1681 : 152 : unsigned HOST_WIDE_INT target_alignment_c;
1682 : 152 : int misalign;
1683 : 152 : if (!dr_info->target_alignment.is_constant (&target_alignment_c)
1684 : 152 : || !known_misalignment (misalignment, target_alignment_c, &misalign))
1685 : : return DR_MISALIGNMENT_UNKNOWN;
1686 : 152 : return misalign;
1687 : : }
1688 : :
1689 : : /* Function vect_update_misalignment_for_peel.
1690 : : Sets DR_INFO's misalignment
1691 : : - to 0 if it has the same alignment as DR_PEEL_INFO,
1692 : : - to the misalignment computed using NPEEL if DR_INFO's salignment is known,
1693 : : - to -1 (unknown) otherwise.
1694 : :
1695 : : DR_INFO - the data reference whose misalignment is to be adjusted.
1696 : : DR_PEEL_INFO - the data reference whose misalignment is being made
1697 : : zero in the vector loop by the peel.
1698 : : NPEEL - the number of iterations in the peel loop if the misalignment
1699 : : of DR_PEEL_INFO is known at compile time. */
1700 : :
1701 : : static void
1702 : 2421 : vect_update_misalignment_for_peel (dr_vec_info *dr_info,
1703 : : dr_vec_info *dr_peel_info, int npeel)
1704 : : {
1705 : : /* If dr_info is aligned of dr_peel_info is, then mark it so. */
1706 : 2421 : if (vect_dr_aligned_if_peeled_dr_is (dr_info, dr_peel_info))
1707 : : {
1708 : 456 : SET_DR_MISALIGNMENT (dr_info,
1709 : : vect_dr_misalign_for_aligned_access (dr_peel_info));
1710 : 456 : return;
1711 : : }
1712 : :
1713 : 1965 : unsigned HOST_WIDE_INT alignment;
1714 : 1965 : if (DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment)
1715 : 1965 : && known_alignment_for_access_p (dr_info,
1716 : 1965 : STMT_VINFO_VECTYPE (dr_info->stmt))
1717 : 197 : && known_alignment_for_access_p (dr_peel_info,
1718 : 197 : STMT_VINFO_VECTYPE (dr_peel_info->stmt)))
1719 : : {
1720 : 185 : int misal = dr_info->misalignment;
1721 : 185 : misal += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
1722 : 185 : misal &= alignment - 1;
1723 : 185 : set_dr_misalignment (dr_info, misal);
1724 : 185 : return;
1725 : : }
1726 : :
1727 : 1780 : if (dump_enabled_p ())
1728 : 29 : dump_printf_loc (MSG_NOTE, vect_location, "Setting misalignment " \
1729 : : "to unknown (-1).\n");
1730 : 1780 : SET_DR_MISALIGNMENT (dr_info, DR_MISALIGNMENT_UNKNOWN);
1731 : : }
1732 : :
1733 : : /* Return true if alignment is relevant for DR_INFO. */
1734 : :
1735 : : static bool
1736 : 1415354 : vect_relevant_for_alignment_p (dr_vec_info *dr_info)
1737 : : {
1738 : 1415354 : stmt_vec_info stmt_info = dr_info->stmt;
1739 : :
1740 : 1415354 : if (!STMT_VINFO_RELEVANT_P (stmt_info))
1741 : : return false;
1742 : :
1743 : : /* For interleaving, only the alignment of the first access matters. */
1744 : 1414288 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info)
1745 : 1622949 : && DR_GROUP_FIRST_ELEMENT (stmt_info) != stmt_info)
1746 : : return false;
1747 : :
1748 : : /* Scatter-gather and invariant accesses continue to address individual
1749 : : scalars, so vector-level alignment is irrelevant. */
1750 : 1326234 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info)
1751 : 1326234 : || integer_zerop (DR_STEP (dr_info->dr)))
1752 : 40702 : return false;
1753 : :
1754 : : /* Strided accesses perform only component accesses, alignment is
1755 : : irrelevant for them. */
1756 : 1285532 : if (STMT_VINFO_STRIDED_P (stmt_info)
1757 : 1285532 : && !STMT_VINFO_GROUPED_ACCESS (stmt_info))
1758 : : return false;
1759 : :
1760 : : return true;
1761 : : }
1762 : :
1763 : : /* Given an memory reference EXP return whether its alignment is less
1764 : : than its size. */
1765 : :
1766 : : static bool
1767 : 1057101 : not_size_aligned (tree exp)
1768 : : {
1769 : 1057101 : if (!tree_fits_uhwi_p (TYPE_SIZE (TREE_TYPE (exp))))
1770 : : return true;
1771 : :
1772 : 1057101 : return (tree_to_uhwi (TYPE_SIZE (TREE_TYPE (exp)))
1773 : 1057101 : > get_object_alignment (exp));
1774 : : }
1775 : :
1776 : : /* Function vector_alignment_reachable_p
1777 : :
1778 : : Return true if vector alignment for DR_INFO is reachable by peeling
1779 : : a few loop iterations. Return false otherwise. */
1780 : :
1781 : : static bool
1782 : 484898 : vector_alignment_reachable_p (dr_vec_info *dr_info, poly_uint64 vf)
1783 : : {
1784 : 484898 : stmt_vec_info stmt_info = dr_info->stmt;
1785 : 484898 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
1786 : 484898 : poly_uint64 nelements = TYPE_VECTOR_SUBPARTS (vectype);
1787 : 969796 : poly_uint64 vector_size = GET_MODE_SIZE (TYPE_MODE (vectype));
1788 : 484898 : unsigned elem_size = vector_element_size (vector_size, nelements);
1789 : 484898 : unsigned group_size = 1;
1790 : :
1791 : 484898 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
1792 : : {
1793 : : /* For interleaved access we peel only if number of iterations in
1794 : : the prolog loop ({VF - misalignment}), is a multiple of the
1795 : : number of the interleaved accesses. */
1796 : :
1797 : : /* FORNOW: handle only known alignment. */
1798 : 75983 : if (!known_alignment_for_access_p (dr_info, vectype))
1799 : 484898 : return false;
1800 : :
1801 : 44310 : unsigned mis_in_elements = dr_misalignment (dr_info, vectype) / elem_size;
1802 : 55797 : if (!multiple_p (nelements - mis_in_elements, DR_GROUP_SIZE (stmt_info)))
1803 : : return false;
1804 : :
1805 : 11487 : group_size = DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
1806 : : }
1807 : :
1808 : : /* If the vectorization factor does not guarantee DR advancement of
1809 : : a multiple of the target alignment no peeling will help. */
1810 : 420402 : if (!multiple_p (elem_size * group_size * vf, dr_target_alignment (dr_info)))
1811 : 74 : return false;
1812 : :
1813 : : /* If misalignment is known at the compile time then allow peeling
1814 : : only if natural alignment is reachable through peeling. */
1815 : 420328 : if (known_alignment_for_access_p (dr_info, vectype)
1816 : 659623 : && !aligned_access_p (dr_info, vectype))
1817 : : {
1818 : 12259 : HOST_WIDE_INT elmsize =
1819 : 12259 : int_cst_value (TYPE_SIZE_UNIT (TREE_TYPE (vectype)));
1820 : 12259 : if (dump_enabled_p ())
1821 : : {
1822 : 748 : dump_printf_loc (MSG_NOTE, vect_location,
1823 : : "data size = %wd. misalignment = %d.\n", elmsize,
1824 : : dr_misalignment (dr_info, vectype));
1825 : : }
1826 : 12259 : if (dr_misalignment (dr_info, vectype) % elmsize)
1827 : : {
1828 : 53 : if (dump_enabled_p ())
1829 : 7 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1830 : : "data size does not divide the misalignment.\n");
1831 : 53 : return false;
1832 : : }
1833 : : }
1834 : :
1835 : 420275 : if (!known_alignment_for_access_p (dr_info, vectype))
1836 : : {
1837 : 181033 : tree type = TREE_TYPE (DR_REF (dr_info->dr));
1838 : 181033 : bool is_packed = not_size_aligned (DR_REF (dr_info->dr));
1839 : 181033 : if (dump_enabled_p ())
1840 : 13845 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
1841 : : "Unknown misalignment, %snaturally aligned\n",
1842 : : is_packed ? "not " : "");
1843 : 181033 : return targetm.vectorize.vector_alignment_reachable (type, is_packed);
1844 : : }
1845 : :
1846 : : return true;
1847 : : }
1848 : :
1849 : :
1850 : : /* Calculate the cost of the memory access represented by DR_INFO. */
1851 : :
1852 : : static void
1853 : 544564 : vect_get_data_access_cost (vec_info *vinfo, dr_vec_info *dr_info,
1854 : : dr_alignment_support alignment_support_scheme,
1855 : : int misalignment,
1856 : : unsigned int *inside_cost,
1857 : : unsigned int *outside_cost,
1858 : : stmt_vector_for_cost *body_cost_vec,
1859 : : stmt_vector_for_cost *prologue_cost_vec)
1860 : : {
1861 : 544564 : stmt_vec_info stmt_info = dr_info->stmt;
1862 : :
1863 : 544564 : if (DR_IS_READ (dr_info->dr))
1864 : 390194 : vect_get_load_cost (vinfo, stmt_info, NULL, 1,
1865 : : alignment_support_scheme, misalignment, true,
1866 : : inside_cost, outside_cost, prologue_cost_vec,
1867 : : body_cost_vec, false);
1868 : : else
1869 : 154370 : vect_get_store_cost (vinfo,stmt_info, NULL, 1,
1870 : : alignment_support_scheme, misalignment, inside_cost,
1871 : : body_cost_vec);
1872 : :
1873 : 544564 : if (dump_enabled_p ())
1874 : 26425 : dump_printf_loc (MSG_NOTE, vect_location,
1875 : : "vect_get_data_access_cost: inside_cost = %d, "
1876 : : "outside_cost = %d.\n", *inside_cost, *outside_cost);
1877 : 544564 : }
1878 : :
1879 : :
1880 : : typedef struct _vect_peel_info
1881 : : {
1882 : : dr_vec_info *dr_info;
1883 : : int npeel;
1884 : : unsigned int count;
1885 : : } *vect_peel_info;
1886 : :
1887 : : typedef struct _vect_peel_extended_info
1888 : : {
1889 : : vec_info *vinfo;
1890 : : struct _vect_peel_info peel_info;
1891 : : unsigned int inside_cost;
1892 : : unsigned int outside_cost;
1893 : : } *vect_peel_extended_info;
1894 : :
1895 : :
1896 : : /* Peeling hashtable helpers. */
1897 : :
1898 : : struct peel_info_hasher : free_ptr_hash <_vect_peel_info>
1899 : : {
1900 : : static inline hashval_t hash (const _vect_peel_info *);
1901 : : static inline bool equal (const _vect_peel_info *, const _vect_peel_info *);
1902 : : };
1903 : :
1904 : : inline hashval_t
1905 : 654812 : peel_info_hasher::hash (const _vect_peel_info *peel_info)
1906 : : {
1907 : 654812 : return (hashval_t) peel_info->npeel;
1908 : : }
1909 : :
1910 : : inline bool
1911 : 357659 : peel_info_hasher::equal (const _vect_peel_info *a, const _vect_peel_info *b)
1912 : : {
1913 : 357659 : return (a->npeel == b->npeel);
1914 : : }
1915 : :
1916 : :
1917 : : /* Insert DR_INFO into peeling hash table with NPEEL as key. */
1918 : :
1919 : : static void
1920 : 297877 : vect_peeling_hash_insert (hash_table<peel_info_hasher> *peeling_htab,
1921 : : loop_vec_info loop_vinfo, dr_vec_info *dr_info,
1922 : : int npeel, bool supportable_if_not_aligned)
1923 : : {
1924 : 297877 : struct _vect_peel_info elem, *slot;
1925 : 297877 : _vect_peel_info **new_slot;
1926 : :
1927 : 297877 : elem.npeel = npeel;
1928 : 297877 : slot = peeling_htab->find (&elem);
1929 : 297877 : if (slot)
1930 : 127449 : slot->count++;
1931 : : else
1932 : : {
1933 : 170428 : slot = XNEW (struct _vect_peel_info);
1934 : 170428 : slot->npeel = npeel;
1935 : 170428 : slot->dr_info = dr_info;
1936 : 170428 : slot->count = 1;
1937 : 170428 : new_slot = peeling_htab->find_slot (slot, INSERT);
1938 : 170428 : *new_slot = slot;
1939 : : }
1940 : :
1941 : : /* If this DR is not supported with unknown misalignment then bias
1942 : : this slot when the cost model is disabled. */
1943 : 297877 : if (!supportable_if_not_aligned
1944 : 297877 : && unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
1945 : 4794 : slot->count += VECT_MAX_COST;
1946 : 297877 : }
1947 : :
1948 : :
1949 : : /* Traverse peeling hash table to find peeling option that aligns maximum
1950 : : number of data accesses. */
1951 : :
1952 : : int
1953 : 35422 : vect_peeling_hash_get_most_frequent (_vect_peel_info **slot,
1954 : : _vect_peel_extended_info *max)
1955 : : {
1956 : 35422 : vect_peel_info elem = *slot;
1957 : :
1958 : 35422 : if (elem->count > max->peel_info.count
1959 : 21634 : || (elem->count == max->peel_info.count
1960 : 16911 : && max->peel_info.npeel > elem->npeel))
1961 : : {
1962 : 13802 : max->peel_info.npeel = elem->npeel;
1963 : 13802 : max->peel_info.count = elem->count;
1964 : 13802 : max->peel_info.dr_info = elem->dr_info;
1965 : : }
1966 : :
1967 : 35422 : return 1;
1968 : : }
1969 : :
1970 : : /* Get the costs of peeling NPEEL iterations for LOOP_VINFO, checking
1971 : : data access costs for all data refs. If UNKNOWN_MISALIGNMENT is true,
1972 : : npeel is computed at runtime but DR0_INFO's misalignment will be zero
1973 : : after peeling. */
1974 : :
1975 : : static void
1976 : 303644 : vect_get_peeling_costs_all_drs (loop_vec_info loop_vinfo,
1977 : : dr_vec_info *dr0_info,
1978 : : unsigned int *inside_cost,
1979 : : unsigned int *outside_cost,
1980 : : stmt_vector_for_cost *body_cost_vec,
1981 : : stmt_vector_for_cost *prologue_cost_vec,
1982 : : unsigned int npeel)
1983 : : {
1984 : 303644 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
1985 : :
1986 : 303644 : bool dr0_alignment_known_p
1987 : : = (dr0_info
1988 : 558505 : && known_alignment_for_access_p (dr0_info,
1989 : 254861 : STMT_VINFO_VECTYPE (dr0_info->stmt)));
1990 : :
1991 : 1488464 : for (data_reference *dr : datarefs)
1992 : : {
1993 : 577532 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
1994 : 577532 : if (!vect_relevant_for_alignment_p (dr_info))
1995 : 32968 : continue;
1996 : :
1997 : 544564 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
1998 : 544564 : dr_alignment_support alignment_support_scheme;
1999 : 544564 : int misalignment;
2000 : 544564 : unsigned HOST_WIDE_INT alignment;
2001 : :
2002 : 544564 : bool negative = tree_int_cst_compare (DR_STEP (dr_info->dr),
2003 : 544564 : size_zero_node) < 0;
2004 : 544564 : poly_int64 off = 0;
2005 : 544564 : if (negative)
2006 : 19952 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2007 : 19952 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2008 : :
2009 : 544564 : if (npeel == 0)
2010 : 272726 : misalignment = dr_misalignment (dr_info, vectype, off);
2011 : 271838 : else if (dr_info == dr0_info
2012 : 271838 : || vect_dr_aligned_if_peeled_dr_is (dr_info, dr0_info))
2013 : : misalignment = 0;
2014 : 90456 : else if (!dr0_alignment_known_p
2015 : 7319 : || !known_alignment_for_access_p (dr_info, vectype)
2016 : 97775 : || !DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment))
2017 : : misalignment = DR_MISALIGNMENT_UNKNOWN;
2018 : : else
2019 : : {
2020 : 6313 : misalignment = dr_misalignment (dr_info, vectype, off);
2021 : 6313 : misalignment += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
2022 : 6313 : misalignment &= alignment - 1;
2023 : : }
2024 : 544564 : alignment_support_scheme
2025 : 544564 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2026 : : misalignment);
2027 : :
2028 : 544564 : vect_get_data_access_cost (loop_vinfo, dr_info,
2029 : : alignment_support_scheme, misalignment,
2030 : : inside_cost, outside_cost,
2031 : : body_cost_vec, prologue_cost_vec);
2032 : : }
2033 : 303644 : }
2034 : :
2035 : : /* Traverse peeling hash table and calculate cost for each peeling option.
2036 : : Find the one with the lowest cost. */
2037 : :
2038 : : int
2039 : 115621 : vect_peeling_hash_get_lowest_cost (_vect_peel_info **slot,
2040 : : _vect_peel_extended_info *min)
2041 : : {
2042 : 115621 : vect_peel_info elem = *slot;
2043 : 115621 : int dummy;
2044 : 115621 : unsigned int inside_cost = 0, outside_cost = 0;
2045 : 115621 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (min->vinfo);
2046 : 115621 : stmt_vector_for_cost prologue_cost_vec, body_cost_vec,
2047 : : epilogue_cost_vec;
2048 : :
2049 : 115621 : prologue_cost_vec.create (2);
2050 : 115621 : body_cost_vec.create (2);
2051 : 115621 : epilogue_cost_vec.create (2);
2052 : :
2053 : 115621 : vect_get_peeling_costs_all_drs (loop_vinfo, elem->dr_info, &inside_cost,
2054 : : &outside_cost, &body_cost_vec,
2055 : 115621 : &prologue_cost_vec, elem->npeel);
2056 : :
2057 : 115621 : body_cost_vec.release ();
2058 : :
2059 : 231242 : outside_cost += vect_get_known_peeling_cost
2060 : 115621 : (loop_vinfo, elem->npeel, &dummy,
2061 : : &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
2062 : : &prologue_cost_vec, &epilogue_cost_vec);
2063 : :
2064 : : /* Prologue and epilogue costs are added to the target model later.
2065 : : These costs depend only on the scalar iteration cost, the
2066 : : number of peeling iterations finally chosen, and the number of
2067 : : misaligned statements. So discard the information found here. */
2068 : 115621 : prologue_cost_vec.release ();
2069 : 115621 : epilogue_cost_vec.release ();
2070 : :
2071 : 115621 : if (inside_cost < min->inside_cost
2072 : 1407 : || (inside_cost == min->inside_cost
2073 : 1049 : && outside_cost < min->outside_cost))
2074 : : {
2075 : 114220 : min->inside_cost = inside_cost;
2076 : 114220 : min->outside_cost = outside_cost;
2077 : 114220 : min->peel_info.dr_info = elem->dr_info;
2078 : 114220 : min->peel_info.npeel = elem->npeel;
2079 : 114220 : min->peel_info.count = elem->count;
2080 : : }
2081 : :
2082 : 115621 : return 1;
2083 : : }
2084 : :
2085 : :
2086 : : /* Choose best peeling option by traversing peeling hash table and either
2087 : : choosing an option with the lowest cost (if cost model is enabled) or the
2088 : : option that aligns as many accesses as possible. */
2089 : :
2090 : : static struct _vect_peel_extended_info
2091 : 126639 : vect_peeling_hash_choose_best_peeling (hash_table<peel_info_hasher> *peeling_htab,
2092 : : loop_vec_info loop_vinfo)
2093 : : {
2094 : 126639 : struct _vect_peel_extended_info res;
2095 : :
2096 : 126639 : res.peel_info.dr_info = NULL;
2097 : 126639 : res.vinfo = loop_vinfo;
2098 : :
2099 : 126639 : if (!unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
2100 : : {
2101 : 112904 : res.inside_cost = INT_MAX;
2102 : 112904 : res.outside_cost = INT_MAX;
2103 : 112904 : peeling_htab->traverse <_vect_peel_extended_info *,
2104 : 228525 : vect_peeling_hash_get_lowest_cost> (&res);
2105 : : }
2106 : : else
2107 : : {
2108 : 13735 : res.peel_info.count = 0;
2109 : 13735 : peeling_htab->traverse <_vect_peel_extended_info *,
2110 : 49157 : vect_peeling_hash_get_most_frequent> (&res);
2111 : 13735 : res.inside_cost = 0;
2112 : 13735 : res.outside_cost = 0;
2113 : : }
2114 : :
2115 : 126639 : return res;
2116 : : }
2117 : :
2118 : : /* Return if vectorization is definitely, possibly, or unlikely to be
2119 : : supportable after loop peeling. */
2120 : :
2121 : : static enum peeling_support
2122 : 60865 : vect_peeling_supportable (loop_vec_info loop_vinfo, dr_vec_info *dr0_info,
2123 : : unsigned npeel)
2124 : : {
2125 : 60865 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
2126 : 60865 : enum dr_alignment_support supportable_dr_alignment;
2127 : :
2128 : 60865 : bool dr0_alignment_known_p
2129 : 121730 : = known_alignment_for_access_p (dr0_info,
2130 : 60865 : STMT_VINFO_VECTYPE (dr0_info->stmt));
2131 : 60865 : bool has_unsupported_dr_p = false;
2132 : 60865 : unsigned int dr0_step = tree_to_shwi (DR_STEP (dr0_info->dr));
2133 : 60865 : int known_unsupported_misalignment = DR_MISALIGNMENT_UNKNOWN;
2134 : :
2135 : : /* Check if each data ref can be vectorized after peeling. */
2136 : 262558 : for (data_reference *dr : datarefs)
2137 : : {
2138 : 94438 : if (dr == dr0_info->dr)
2139 : 59854 : continue;
2140 : :
2141 : 34584 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2142 : 34584 : if (!vect_relevant_for_alignment_p (dr_info)
2143 : 34584 : || vect_dr_aligned_if_peeled_dr_is (dr_info, dr0_info))
2144 : 5380 : continue;
2145 : :
2146 : 29204 : tree vectype = STMT_VINFO_VECTYPE (dr_info->stmt);
2147 : 29204 : int misalignment;
2148 : 29204 : unsigned HOST_WIDE_INT alignment;
2149 : 29204 : if (!dr0_alignment_known_p
2150 : 1889 : || !known_alignment_for_access_p (dr_info, vectype)
2151 : 31093 : || !DR_TARGET_ALIGNMENT (dr_info).is_constant (&alignment))
2152 : : misalignment = DR_MISALIGNMENT_UNKNOWN;
2153 : : else
2154 : : {
2155 : 1875 : misalignment = dr_misalignment (dr_info, vectype);
2156 : 1875 : misalignment += npeel * TREE_INT_CST_LOW (DR_STEP (dr_info->dr));
2157 : 1875 : misalignment &= alignment - 1;
2158 : : }
2159 : 29204 : supportable_dr_alignment
2160 : 29204 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2161 : : misalignment);
2162 : 29204 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2163 : : {
2164 : 28292 : has_unsupported_dr_p = true;
2165 : :
2166 : : /* If unaligned unsupported DRs exist, we do following checks to see
2167 : : if they can be mutually aligned to support vectorization. If yes,
2168 : : we can try peeling and create a runtime (mutual alignment) check
2169 : : to guard the peeled loop. If no, return PEELING_UNSUPPORTED. */
2170 : :
2171 : : /* 1) If unaligned unsupported DRs have different alignment steps, the
2172 : : probability of DRs being mutually aligned is very low, and it's
2173 : : quite complex to check mutual alignment at runtime. We return
2174 : : PEELING_UNSUPPORTED in this case. */
2175 : 28292 : if (tree_to_shwi (DR_STEP (dr)) != dr0_step)
2176 : 60865 : return peeling_unsupported;
2177 : :
2178 : : /* 2) Based on above same alignment step condition, if one known
2179 : : misaligned DR has zero misalignment, or different misalignment
2180 : : amount from another known misaligned DR, peeling is unable to
2181 : : help make all these DRs aligned together. We won't try peeling
2182 : : with versioning anymore. */
2183 : 23966 : int curr_dr_misalignment = dr_misalignment (dr_info, vectype);
2184 : 23966 : if (curr_dr_misalignment == 0)
2185 : : return peeling_unsupported;
2186 : 13817 : if (known_unsupported_misalignment != DR_MISALIGNMENT_UNKNOWN)
2187 : : {
2188 : 8 : if (curr_dr_misalignment != DR_MISALIGNMENT_UNKNOWN
2189 : 8 : && curr_dr_misalignment != known_unsupported_misalignment)
2190 : : return peeling_unsupported;
2191 : : }
2192 : : else
2193 : : known_unsupported_misalignment = curr_dr_misalignment;
2194 : : }
2195 : : }
2196 : :
2197 : : /* Vectorization is known to be supportable with peeling alone when there is
2198 : : no unsupported DR. */
2199 : 46390 : return has_unsupported_dr_p ? peeling_maybe_supported
2200 : : : peeling_known_supported;
2201 : : }
2202 : :
2203 : : /* Compare two data-references DRA and DRB to group them into chunks
2204 : : with related alignment. */
2205 : :
2206 : : static int
2207 : 3713395 : dr_align_group_sort_cmp (const void *dra_, const void *drb_)
2208 : : {
2209 : 3713395 : data_reference_p dra = *(data_reference_p *)const_cast<void *>(dra_);
2210 : 3713395 : data_reference_p drb = *(data_reference_p *)const_cast<void *>(drb_);
2211 : 3713395 : int cmp;
2212 : :
2213 : : /* Stabilize sort. */
2214 : 3713395 : if (dra == drb)
2215 : : return 0;
2216 : :
2217 : : /* Ordering of DRs according to base. */
2218 : 3713395 : cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
2219 : : DR_BASE_ADDRESS (drb));
2220 : 3713395 : if (cmp != 0)
2221 : : return cmp;
2222 : :
2223 : : /* And according to DR_OFFSET. */
2224 : 1658398 : cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
2225 : 1658398 : if (cmp != 0)
2226 : : return cmp;
2227 : :
2228 : : /* And after step. */
2229 : 1645043 : cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
2230 : 1645043 : if (cmp != 0)
2231 : : return cmp;
2232 : :
2233 : : /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
2234 : 1640548 : cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
2235 : 1640548 : if (cmp == 0)
2236 : 195312 : return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
2237 : : return cmp;
2238 : : }
2239 : :
2240 : : /* Function vect_enhance_data_refs_alignment
2241 : :
2242 : : This pass will use loop versioning and loop peeling in order to enhance
2243 : : the alignment of data references in the loop.
2244 : :
2245 : : FOR NOW: we assume that whatever versioning/peeling takes place, only the
2246 : : original loop is to be vectorized. Any other loops that are created by
2247 : : the transformations performed in this pass - are not supposed to be
2248 : : vectorized. This restriction will be relaxed.
2249 : :
2250 : : This pass will require a cost model to guide it whether to apply peeling
2251 : : or versioning or a combination of the two. For example, the scheme that
2252 : : intel uses when given a loop with several memory accesses, is as follows:
2253 : : choose one memory access ('p') which alignment you want to force by doing
2254 : : peeling. Then, either (1) generate a loop in which 'p' is aligned and all
2255 : : other accesses are not necessarily aligned, or (2) use loop versioning to
2256 : : generate one loop in which all accesses are aligned, and another loop in
2257 : : which only 'p' is necessarily aligned.
2258 : :
2259 : : ("Automatic Intra-Register Vectorization for the Intel Architecture",
2260 : : Aart J.C. Bik, Milind Girkar, Paul M. Grey and Ximmin Tian, International
2261 : : Journal of Parallel Programming, Vol. 30, No. 2, April 2002.)
2262 : :
2263 : : Devising a cost model is the most critical aspect of this work. It will
2264 : : guide us on which access to peel for, whether to use loop versioning, how
2265 : : many versions to create, etc. The cost model will probably consist of
2266 : : generic considerations as well as target specific considerations (on
2267 : : powerpc for example, misaligned stores are more painful than misaligned
2268 : : loads).
2269 : :
2270 : : Here are the general steps involved in alignment enhancements:
2271 : :
2272 : : -- original loop, before alignment analysis:
2273 : : for (i=0; i<N; i++){
2274 : : x = q[i]; # DR_MISALIGNMENT(q) = unknown
2275 : : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2276 : : }
2277 : :
2278 : : -- After vect_compute_data_refs_alignment:
2279 : : for (i=0; i<N; i++){
2280 : : x = q[i]; # DR_MISALIGNMENT(q) = 3
2281 : : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2282 : : }
2283 : :
2284 : : -- Possibility 1: we do loop versioning:
2285 : : if (p is aligned) {
2286 : : for (i=0; i<N; i++){ # loop 1A
2287 : : x = q[i]; # DR_MISALIGNMENT(q) = 3
2288 : : p[i] = y; # DR_MISALIGNMENT(p) = 0
2289 : : }
2290 : : }
2291 : : else {
2292 : : for (i=0; i<N; i++){ # loop 1B
2293 : : x = q[i]; # DR_MISALIGNMENT(q) = 3
2294 : : p[i] = y; # DR_MISALIGNMENT(p) = unaligned
2295 : : }
2296 : : }
2297 : :
2298 : : -- Possibility 2: we do loop peeling:
2299 : : for (i = 0; i < 3; i++){ # (scalar loop, not to be vectorized).
2300 : : x = q[i];
2301 : : p[i] = y;
2302 : : }
2303 : : for (i = 3; i < N; i++){ # loop 2A
2304 : : x = q[i]; # DR_MISALIGNMENT(q) = 0
2305 : : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2306 : : }
2307 : :
2308 : : -- Possibility 3: combination of loop peeling and versioning:
2309 : : if (p & q are mutually aligned) {
2310 : : for (i=0; i<3; i++){ # (peeled loop iterations).
2311 : : x = q[i];
2312 : : p[i] = y;
2313 : : }
2314 : : for (i=3; i<N; i++){ # loop 3A
2315 : : x = q[i]; # DR_MISALIGNMENT(q) = 0
2316 : : p[i] = y; # DR_MISALIGNMENT(p) = 0
2317 : : }
2318 : : }
2319 : : else {
2320 : : for (i=0; i<N; i++){ # (scalar loop, not to be vectorized).
2321 : : x = q[i]; # DR_MISALIGNMENT(q) = 3
2322 : : p[i] = y; # DR_MISALIGNMENT(p) = unknown
2323 : : }
2324 : : }
2325 : :
2326 : : These loops are later passed to loop_transform to be vectorized. The
2327 : : vectorizer will use the alignment information to guide the transformation
2328 : : (whether to generate regular loads/stores, or with special handling for
2329 : : misalignment). */
2330 : :
2331 : : opt_result
2332 : 309282 : vect_enhance_data_refs_alignment (loop_vec_info loop_vinfo)
2333 : : {
2334 : 309282 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
2335 : 309282 : dr_vec_info *first_store = NULL;
2336 : 309282 : dr_vec_info *dr0_info = NULL;
2337 : 309282 : struct data_reference *dr;
2338 : 309282 : unsigned int i;
2339 : 309282 : bool do_peeling = false;
2340 : 309282 : bool do_versioning = false;
2341 : 309282 : bool try_peeling_with_versioning = false;
2342 : 309282 : unsigned int npeel = 0;
2343 : 309282 : bool one_misalignment_known = false;
2344 : 309282 : bool one_misalignment_unknown = false;
2345 : 309282 : bool one_dr_unsupportable = false;
2346 : 309282 : dr_vec_info *unsupportable_dr_info = NULL;
2347 : 309282 : unsigned int dr0_same_align_drs = 0, first_store_same_align_drs = 0;
2348 : 309282 : hash_table<peel_info_hasher> peeling_htab (1);
2349 : :
2350 : 309282 : DUMP_VECT_SCOPE ("vect_enhance_data_refs_alignment");
2351 : :
2352 : : /* Reset data so we can safely be called multiple times. */
2353 : 309282 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
2354 : 309282 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = 0;
2355 : :
2356 : 309282 : if (LOOP_VINFO_DATAREFS (loop_vinfo).is_empty ())
2357 : 11190 : return opt_result::success ();
2358 : :
2359 : : /* Sort the vector of datarefs so DRs that have the same or dependent
2360 : : alignment are next to each other. */
2361 : 298092 : auto_vec<data_reference_p> datarefs
2362 : 298092 : = LOOP_VINFO_DATAREFS (loop_vinfo).copy ();
2363 : 298092 : datarefs.qsort (dr_align_group_sort_cmp);
2364 : :
2365 : : /* Compute the number of DRs that become aligned when we peel
2366 : : a dataref so it becomes aligned. */
2367 : 596184 : auto_vec<unsigned> n_same_align_refs (datarefs.length ());
2368 : 298092 : n_same_align_refs.quick_grow_cleared (datarefs.length ());
2369 : 298092 : unsigned i0;
2370 : 610407 : for (i0 = 0; i0 < datarefs.length (); ++i0)
2371 : 309310 : if (DR_BASE_ADDRESS (datarefs[i0]))
2372 : : break;
2373 : 1953816 : for (i = i0 + 1; i <= datarefs.length (); ++i)
2374 : : {
2375 : 678816 : if (i == datarefs.length ()
2376 : 383729 : || !operand_equal_p (DR_BASE_ADDRESS (datarefs[i0]),
2377 : 383729 : DR_BASE_ADDRESS (datarefs[i]), 0)
2378 : 187230 : || !operand_equal_p (DR_OFFSET (datarefs[i0]),
2379 : 187230 : DR_OFFSET (datarefs[i]), 0)
2380 : 864860 : || !operand_equal_p (DR_STEP (datarefs[i0]),
2381 : 186044 : DR_STEP (datarefs[i]), 0))
2382 : : {
2383 : : /* The subgroup [i0, i-1] now only differs in DR_INIT and
2384 : : possibly DR_TARGET_ALIGNMENT. Still the whole subgroup
2385 : : will get known misalignment if we align one of the refs
2386 : : with the largest DR_TARGET_ALIGNMENT. */
2387 : 1172091 : for (unsigned j = i0; j < i; ++j)
2388 : : {
2389 : 678816 : dr_vec_info *dr_infoj = loop_vinfo->lookup_dr (datarefs[j]);
2390 : 2797940 : for (unsigned k = i0; k < i; ++k)
2391 : : {
2392 : 2119124 : if (k == j)
2393 : 678816 : continue;
2394 : 1440308 : dr_vec_info *dr_infok = loop_vinfo->lookup_dr (datarefs[k]);
2395 : 1440308 : if (vect_dr_aligned_if_related_peeled_dr_is (dr_infok,
2396 : : dr_infoj))
2397 : 471437 : n_same_align_refs[j]++;
2398 : : }
2399 : : }
2400 : : i0 = i;
2401 : : }
2402 : : }
2403 : :
2404 : : /* While cost model enhancements are expected in the future, the high level
2405 : : view of the code at this time is as follows:
2406 : :
2407 : : A) If there is a misaligned access then see if doing peeling alone can
2408 : : make all data references satisfy vect_supportable_dr_alignment. If so,
2409 : : update data structures and return.
2410 : :
2411 : : B) If peeling alone wasn't possible and there is a data reference with an
2412 : : unknown misalignment that does not satisfy vect_supportable_dr_alignment
2413 : : then we may use either of the following two approaches.
2414 : :
2415 : : B1) Try peeling with versioning: Add a runtime loop versioning check to
2416 : : see if all unsupportable data references are mutually aligned, which
2417 : : means they will be uniformly aligned after a certain amount of loop
2418 : : peeling. If peeling and versioning can be used together, set
2419 : : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT_P to TRUE and return.
2420 : :
2421 : : B2) Try versioning alone: Add a runtime loop versioning check to see if
2422 : : all unsupportable data references are already uniformly aligned
2423 : : without loop peeling. If versioning can be applied alone, set
2424 : : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT_P to FALSE and return.
2425 : :
2426 : : Above B1 is more powerful and more likely to be adopted than B2. But B2
2427 : : is still available and useful in some cases, for example, the cost model
2428 : : does not allow much peeling.
2429 : :
2430 : : C) If none of above was successful then the alignment was not enhanced,
2431 : : just return. */
2432 : :
2433 : : /* (1) Peeling to force alignment. */
2434 : :
2435 : : /* (1.1) Decide whether to perform peeling, how many iterations to peel, and
2436 : : if vectorization may be supported by peeling with versioning.
2437 : : Considerations:
2438 : : - How many accesses will become aligned due to the peeling
2439 : : - How many accesses will become unaligned due to the peeling,
2440 : : and the cost of misaligned accesses.
2441 : : - The cost of peeling (the extra runtime checks, the increase
2442 : : in code size). */
2443 : :
2444 : 298092 : poly_uint64 vf = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
2445 : 839744 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2446 : : {
2447 : 583241 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2448 : 583241 : if (!vect_relevant_for_alignment_p (dr_info))
2449 : 98343 : continue;
2450 : :
2451 : 484898 : stmt_vec_info stmt_info = dr_info->stmt;
2452 : 484898 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2453 : :
2454 : : /* With variable VF, unsafe speculative read can be avoided for known
2455 : : inbounds DRs as long as partial vectors are used. */
2456 : 484898 : if (!vf.is_constant ()
2457 : : && dr_safe_speculative_read_required (stmt_info)
2458 : : && DR_SCALAR_KNOWN_BOUNDS (dr_info))
2459 : : {
2460 : : dr_set_safe_speculative_read_required (stmt_info, false);
2461 : : LOOP_VINFO_MUST_USE_PARTIAL_VECTORS_P (loop_vinfo) = true;
2462 : : }
2463 : :
2464 : 484898 : do_peeling = vector_alignment_reachable_p (dr_info, vf);
2465 : 484898 : if (do_peeling)
2466 : : {
2467 : 418300 : if (known_alignment_for_access_p (dr_info, vectype))
2468 : : {
2469 : 239242 : unsigned int npeel_tmp = 0;
2470 : 239242 : bool negative = tree_int_cst_compare (DR_STEP (dr),
2471 : 239242 : size_zero_node) < 0;
2472 : :
2473 : : /* If known_alignment_for_access_p then we have set
2474 : : DR_MISALIGNMENT which is only done if we know it at compiler
2475 : : time, so it is safe to assume target alignment is constant.
2476 : : */
2477 : 239242 : unsigned int target_align =
2478 : 239242 : DR_TARGET_ALIGNMENT (dr_info).to_constant ();
2479 : 239242 : unsigned HOST_WIDE_INT dr_size = vect_get_scalar_dr_size (dr_info);
2480 : 239242 : poly_int64 off = 0;
2481 : 239242 : if (negative)
2482 : 2066 : off = (TYPE_VECTOR_SUBPARTS (vectype) - 1) * -dr_size;
2483 : 239242 : unsigned int mis = dr_misalignment (dr_info, vectype, off);
2484 : 239242 : mis = negative ? mis : -mis;
2485 : 239242 : if (mis != 0)
2486 : 11424 : npeel_tmp = (mis & (target_align - 1)) / dr_size;
2487 : :
2488 : : /* For multiple types, it is possible that the bigger type access
2489 : : will have more than one peeling option. E.g., a loop with two
2490 : : types: one of size (vector size / 4), and the other one of
2491 : : size (vector size / 8). Vectorization factor will 8. If both
2492 : : accesses are misaligned by 3, the first one needs one scalar
2493 : : iteration to be aligned, and the second one needs 5. But the
2494 : : first one will be aligned also by peeling 5 scalar
2495 : : iterations, and in that case both accesses will be aligned.
2496 : : Hence, except for the immediate peeling amount, we also want
2497 : : to try to add full vector size, while we don't exceed
2498 : : vectorization factor.
2499 : : We do this automatically for cost model, since we calculate
2500 : : cost for every peeling option. */
2501 : 239242 : poly_uint64 nscalars = npeel_tmp;
2502 : 239242 : if (unlimited_cost_model (LOOP_VINFO_LOOP (loop_vinfo)))
2503 : : {
2504 : 39355 : unsigned group_size = 1;
2505 : 39355 : if (STMT_SLP_TYPE (stmt_info)
2506 : 39355 : && STMT_VINFO_GROUPED_ACCESS (stmt_info))
2507 : 1885 : group_size = DR_GROUP_SIZE (stmt_info);
2508 : 39355 : nscalars = vf * group_size;
2509 : : }
2510 : :
2511 : : /* Save info about DR in the hash table. Also include peeling
2512 : : amounts according to the explanation above. Indicate
2513 : : the alignment status when the ref is not aligned.
2514 : : ??? Rather than using unknown alignment here we should
2515 : : prune all entries from the peeling hashtable which cause
2516 : : DRs to be not supported. */
2517 : 239242 : bool supportable_if_not_aligned
2518 : : = vect_supportable_dr_alignment
2519 : 239242 : (loop_vinfo, dr_info, vectype, DR_MISALIGNMENT_UNKNOWN);
2520 : 537119 : while (known_le (npeel_tmp, nscalars))
2521 : : {
2522 : 297877 : vect_peeling_hash_insert (&peeling_htab, loop_vinfo,
2523 : : dr_info, npeel_tmp,
2524 : : supportable_if_not_aligned);
2525 : 297877 : npeel_tmp += MAX (1, target_align / dr_size);
2526 : : }
2527 : :
2528 : 239242 : one_misalignment_known = true;
2529 : : }
2530 : : else
2531 : : {
2532 : : /* If we don't know any misalignment values, we prefer
2533 : : peeling for data-ref that has the maximum number of data-refs
2534 : : with the same alignment, unless the target prefers to align
2535 : : stores over load. */
2536 : 179058 : unsigned same_align_drs = n_same_align_refs[i];
2537 : 179058 : if (!dr0_info
2538 : 179058 : || dr0_same_align_drs < same_align_drs)
2539 : : {
2540 : : dr0_same_align_drs = same_align_drs;
2541 : : dr0_info = dr_info;
2542 : : }
2543 : : /* For data-refs with the same number of related
2544 : : accesses prefer the one where the misalign
2545 : : computation will be invariant in the outermost loop. */
2546 : 56960 : else if (dr0_same_align_drs == same_align_drs)
2547 : : {
2548 : 56019 : class loop *ivloop0, *ivloop;
2549 : 56019 : ivloop0 = outermost_invariant_loop_for_expr
2550 : 56019 : (loop, DR_BASE_ADDRESS (dr0_info->dr));
2551 : 56019 : ivloop = outermost_invariant_loop_for_expr
2552 : 56019 : (loop, DR_BASE_ADDRESS (dr));
2553 : 56019 : if ((ivloop && !ivloop0)
2554 : 56019 : || (ivloop && ivloop0
2555 : 56013 : && flow_loop_nested_p (ivloop, ivloop0)))
2556 : : dr0_info = dr_info;
2557 : : }
2558 : :
2559 : 179058 : one_misalignment_unknown = true;
2560 : :
2561 : : /* Check for data refs with unsupportable alignment that
2562 : : can be peeled. */
2563 : 179058 : enum dr_alignment_support supportable_dr_alignment
2564 : 179058 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2565 : : DR_MISALIGNMENT_UNKNOWN);
2566 : 179058 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2567 : : {
2568 : 75638 : one_dr_unsupportable = true;
2569 : 75638 : unsupportable_dr_info = dr_info;
2570 : : }
2571 : :
2572 : 179058 : if (!first_store && DR_IS_WRITE (dr))
2573 : : {
2574 : 43450 : first_store = dr_info;
2575 : 43450 : first_store_same_align_drs = same_align_drs;
2576 : : }
2577 : : }
2578 : : }
2579 : : else
2580 : : {
2581 : 66598 : if (!aligned_access_p (dr_info, vectype))
2582 : : {
2583 : 41589 : if (dump_enabled_p ())
2584 : 1924 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
2585 : : "vector alignment may not be reachable\n");
2586 : : break;
2587 : : }
2588 : : }
2589 : : }
2590 : :
2591 : : /* Check if we can possibly peel the loop. */
2592 : 298092 : if (!vect_can_advance_ivs_p (loop_vinfo)
2593 : 291555 : || !slpeel_can_duplicate_loop_p (loop, LOOP_VINFO_IV_EXIT (loop_vinfo),
2594 : 291555 : loop_preheader_edge (loop))
2595 : 291555 : || loop->inner
2596 : : /* We don't currently maintaing the LCSSA for prologue peeled inversed
2597 : : loops. */
2598 : 588283 : || LOOP_VINFO_EARLY_BREAKS_VECT_PEELED (loop_vinfo))
2599 : : do_peeling = false;
2600 : :
2601 : 298092 : struct _vect_peel_extended_info peel_for_known_alignment;
2602 : 298092 : struct _vect_peel_extended_info peel_for_unknown_alignment;
2603 : 298092 : struct _vect_peel_extended_info best_peel;
2604 : :
2605 : 298092 : peel_for_unknown_alignment.inside_cost = INT_MAX;
2606 : 298092 : peel_for_unknown_alignment.outside_cost = INT_MAX;
2607 : 298092 : peel_for_unknown_alignment.peel_info.count = 0;
2608 : :
2609 : 298092 : if (do_peeling
2610 : 298092 : && one_misalignment_unknown)
2611 : : {
2612 : : /* Check if the target requires to prefer stores over loads, i.e., if
2613 : : misaligned stores are more expensive than misaligned loads (taking
2614 : : drs with same alignment into account). */
2615 : 105753 : unsigned int load_inside_cost = 0;
2616 : 105753 : unsigned int load_outside_cost = 0;
2617 : 105753 : unsigned int store_inside_cost = 0;
2618 : 105753 : unsigned int store_outside_cost = 0;
2619 : 105753 : unsigned int estimated_npeels = vect_vf_for_cost (loop_vinfo) / 2;
2620 : :
2621 : 105753 : stmt_vector_for_cost dummy;
2622 : 105753 : dummy.create (2);
2623 : 105753 : vect_get_peeling_costs_all_drs (loop_vinfo, dr0_info,
2624 : : &load_inside_cost,
2625 : : &load_outside_cost,
2626 : : &dummy, &dummy, estimated_npeels);
2627 : 105753 : dummy.release ();
2628 : :
2629 : 105753 : if (first_store)
2630 : : {
2631 : 33487 : dummy.create (2);
2632 : 33487 : vect_get_peeling_costs_all_drs (loop_vinfo, first_store,
2633 : : &store_inside_cost,
2634 : : &store_outside_cost,
2635 : : &dummy, &dummy,
2636 : : estimated_npeels);
2637 : 33487 : dummy.release ();
2638 : : }
2639 : : else
2640 : : {
2641 : 72266 : store_inside_cost = INT_MAX;
2642 : 72266 : store_outside_cost = INT_MAX;
2643 : : }
2644 : :
2645 : 105753 : if (load_inside_cost > store_inside_cost
2646 : 105753 : || (load_inside_cost == store_inside_cost
2647 : 33206 : && load_outside_cost > store_outside_cost))
2648 : : {
2649 : 105753 : dr0_info = first_store;
2650 : 105753 : dr0_same_align_drs = first_store_same_align_drs;
2651 : 105753 : peel_for_unknown_alignment.inside_cost = store_inside_cost;
2652 : 105753 : peel_for_unknown_alignment.outside_cost = store_outside_cost;
2653 : : }
2654 : : else
2655 : : {
2656 : 105753 : peel_for_unknown_alignment.inside_cost = load_inside_cost;
2657 : 105753 : peel_for_unknown_alignment.outside_cost = load_outside_cost;
2658 : : }
2659 : :
2660 : 105753 : stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2661 : 105753 : prologue_cost_vec.create (2);
2662 : 105753 : epilogue_cost_vec.create (2);
2663 : :
2664 : 105753 : int dummy2;
2665 : 211506 : peel_for_unknown_alignment.outside_cost += vect_get_known_peeling_cost
2666 : 105753 : (loop_vinfo, estimated_npeels, &dummy2,
2667 : : &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
2668 : : &prologue_cost_vec, &epilogue_cost_vec);
2669 : :
2670 : 105753 : prologue_cost_vec.release ();
2671 : 105753 : epilogue_cost_vec.release ();
2672 : :
2673 : 105753 : peel_for_unknown_alignment.peel_info.count = dr0_same_align_drs + 1;
2674 : : }
2675 : :
2676 : 298092 : peel_for_unknown_alignment.peel_info.npeel = 0;
2677 : 298092 : peel_for_unknown_alignment.peel_info.dr_info = dr0_info;
2678 : :
2679 : 298092 : best_peel = peel_for_unknown_alignment;
2680 : :
2681 : 298092 : peel_for_known_alignment.inside_cost = INT_MAX;
2682 : 298092 : peel_for_known_alignment.outside_cost = INT_MAX;
2683 : 298092 : peel_for_known_alignment.peel_info.count = 0;
2684 : 298092 : peel_for_known_alignment.peel_info.dr_info = NULL;
2685 : :
2686 : 298092 : if (do_peeling && one_misalignment_known)
2687 : : {
2688 : : /* Peeling is possible, but there is no data access that is not supported
2689 : : unless aligned. So we try to choose the best possible peeling from
2690 : : the hash table. */
2691 : 126639 : peel_for_known_alignment = vect_peeling_hash_choose_best_peeling
2692 : 126639 : (&peeling_htab, loop_vinfo);
2693 : : }
2694 : :
2695 : : /* Compare costs of peeling for known and unknown alignment. */
2696 : 298092 : if (peel_for_known_alignment.peel_info.dr_info != NULL
2697 : 126639 : && peel_for_unknown_alignment.inside_cost
2698 : : >= peel_for_known_alignment.inside_cost)
2699 : : {
2700 : 113864 : best_peel = peel_for_known_alignment;
2701 : :
2702 : : /* If the best peeling for known alignment has NPEEL == 0, perform no
2703 : : peeling at all except if there is an unsupportable dr that we can
2704 : : align. */
2705 : 113864 : if (best_peel.peel_info.npeel == 0 && !one_dr_unsupportable)
2706 : : do_peeling = false;
2707 : : }
2708 : :
2709 : : /* If there is an unsupportable data ref, prefer this over all choices so far
2710 : : since we'd have to discard a chosen peeling except when it accidentally
2711 : : aligned the unsupportable data ref. */
2712 : 190473 : if (one_dr_unsupportable)
2713 : : dr0_info = unsupportable_dr_info;
2714 : 237062 : else if (do_peeling)
2715 : : {
2716 : : /* Calculate the penalty for no peeling, i.e. leaving everything as-is.
2717 : : TODO: Use nopeel_outside_cost or get rid of it? */
2718 : 48783 : unsigned nopeel_inside_cost = 0;
2719 : 48783 : unsigned nopeel_outside_cost = 0;
2720 : :
2721 : 48783 : stmt_vector_for_cost dummy;
2722 : 48783 : dummy.create (2);
2723 : 48783 : vect_get_peeling_costs_all_drs (loop_vinfo, NULL, &nopeel_inside_cost,
2724 : : &nopeel_outside_cost, &dummy, &dummy, 0);
2725 : 48783 : dummy.release ();
2726 : :
2727 : : /* Add epilogue costs. As we do not peel for alignment here, no prologue
2728 : : costs will be recorded. */
2729 : 48783 : stmt_vector_for_cost prologue_cost_vec, epilogue_cost_vec;
2730 : 48783 : prologue_cost_vec.create (2);
2731 : 48783 : epilogue_cost_vec.create (2);
2732 : :
2733 : 48783 : int dummy2;
2734 : 97566 : nopeel_outside_cost += vect_get_known_peeling_cost
2735 : 48783 : (loop_vinfo, 0, &dummy2,
2736 : : &LOOP_VINFO_SCALAR_ITERATION_COST (loop_vinfo),
2737 : : &prologue_cost_vec, &epilogue_cost_vec);
2738 : :
2739 : 48783 : prologue_cost_vec.release ();
2740 : 48783 : epilogue_cost_vec.release ();
2741 : :
2742 : 48783 : npeel = best_peel.peel_info.npeel;
2743 : 48783 : dr0_info = best_peel.peel_info.dr_info;
2744 : :
2745 : : /* If no peeling is not more expensive than the best peeling we
2746 : : have so far, don't perform any peeling. */
2747 : 48783 : if (nopeel_inside_cost <= best_peel.inside_cost)
2748 : 43481 : do_peeling = false;
2749 : : }
2750 : :
2751 : 109813 : if (do_peeling)
2752 : : {
2753 : 60865 : stmt_vec_info stmt_info = dr0_info->stmt;
2754 : 60865 : if (known_alignment_for_access_p (dr0_info,
2755 : : STMT_VINFO_VECTYPE (stmt_info)))
2756 : : {
2757 : 5286 : bool negative = tree_int_cst_compare (DR_STEP (dr0_info->dr),
2758 : 5286 : size_zero_node) < 0;
2759 : 5286 : if (!npeel)
2760 : : {
2761 : : /* Since it's known at compile time, compute the number of
2762 : : iterations in the peeled loop (the peeling factor) for use in
2763 : : updating DR_MISALIGNMENT values. The peeling factor is the
2764 : : vectorization factor minus the misalignment as an element
2765 : : count. */
2766 : 0 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2767 : 0 : poly_int64 off = 0;
2768 : 0 : if (negative)
2769 : 0 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2770 : 0 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2771 : 0 : unsigned int mis
2772 : 0 : = dr_misalignment (dr0_info, vectype, off);
2773 : 0 : mis = negative ? mis : -mis;
2774 : : /* If known_alignment_for_access_p then we have set
2775 : : DR_MISALIGNMENT which is only done if we know it at compiler
2776 : : time, so it is safe to assume target alignment is constant.
2777 : : */
2778 : 0 : unsigned int target_align =
2779 : 0 : DR_TARGET_ALIGNMENT (dr0_info).to_constant ();
2780 : 0 : npeel = ((mis & (target_align - 1))
2781 : 0 : / vect_get_scalar_dr_size (dr0_info));
2782 : : }
2783 : :
2784 : : /* For interleaved data access every iteration accesses all the
2785 : : members of the group, therefore we divide the number of iterations
2786 : : by the group size. */
2787 : 5286 : if (STMT_VINFO_GROUPED_ACCESS (stmt_info))
2788 : 319 : npeel /= DR_GROUP_SIZE (stmt_info);
2789 : :
2790 : 5286 : if (dump_enabled_p ())
2791 : 264 : dump_printf_loc (MSG_NOTE, vect_location,
2792 : : "Try peeling by %d\n", npeel);
2793 : : }
2794 : :
2795 : : /* Check how peeling for alignment can support vectorization. Function
2796 : : vect_peeling_supportable returns one of the three possible values:
2797 : : - PEELING_KNOWN_SUPPORTED: indicates that we know all unsupported
2798 : : datarefs can be aligned after peeling. We can use peeling alone.
2799 : : - PEELING_MAYBE_SUPPORTED: indicates that peeling may be able to make
2800 : : these datarefs aligned but we are not sure about it at compile time.
2801 : : We will try peeling with versioning to add a runtime check to guard
2802 : : the peeled loop.
2803 : : - PEELING_UNSUPPORTED: indicates that peeling is almost impossible to
2804 : : support vectorization. We will stop trying peeling. */
2805 : 60865 : switch (vect_peeling_supportable (loop_vinfo, dr0_info, npeel))
2806 : : {
2807 : : case peeling_known_supported:
2808 : : break;
2809 : 12755 : case peeling_maybe_supported:
2810 : 12755 : try_peeling_with_versioning = true;
2811 : 12755 : break;
2812 : 14475 : case peeling_unsupported:
2813 : 14475 : do_peeling = false;
2814 : 14475 : break;
2815 : : }
2816 : :
2817 : : /* Check if all datarefs are supportable and log. */
2818 : 60865 : if (do_peeling
2819 : 60865 : && npeel == 0
2820 : 60865 : && known_alignment_for_access_p (dr0_info,
2821 : : STMT_VINFO_VECTYPE (stmt_info)))
2822 : 3 : return opt_result::success ();
2823 : :
2824 : : /* Cost model #1 - honor --param vect-max-peeling-for-alignment. */
2825 : 60862 : if (do_peeling)
2826 : : {
2827 : 46387 : unsigned max_allowed_peel
2828 : 46387 : = param_vect_max_peeling_for_alignment;
2829 : 46387 : if (loop_cost_model (loop) <= VECT_COST_MODEL_CHEAP)
2830 : : max_allowed_peel = 0;
2831 : 8355 : if (max_allowed_peel != (unsigned)-1)
2832 : : {
2833 : 38035 : unsigned max_peel = npeel;
2834 : 38035 : if (max_peel == 0)
2835 : : {
2836 : 35596 : poly_uint64 target_align = DR_TARGET_ALIGNMENT (dr0_info);
2837 : 35596 : unsigned HOST_WIDE_INT target_align_c;
2838 : 35596 : if (target_align.is_constant (&target_align_c))
2839 : 71192 : max_peel =
2840 : 35596 : target_align_c / vect_get_scalar_dr_size (dr0_info) - 1;
2841 : : else
2842 : : {
2843 : : do_peeling = false;
2844 : : if (dump_enabled_p ())
2845 : : dump_printf_loc (MSG_NOTE, vect_location,
2846 : : "Disable peeling, max peels set and vector"
2847 : : " alignment unknown\n");
2848 : : }
2849 : : }
2850 : 38035 : if (max_peel > max_allowed_peel)
2851 : : {
2852 : 38035 : do_peeling = false;
2853 : 38035 : if (dump_enabled_p ())
2854 : 51 : dump_printf_loc (MSG_NOTE, vect_location,
2855 : : "Disable peeling, max peels reached: %d\n", max_peel);
2856 : : }
2857 : : }
2858 : : }
2859 : :
2860 : : /* Cost model #2 - if peeling may result in a remaining loop not
2861 : : iterating enough to be vectorized then do not peel. Since this
2862 : : is a cost heuristic rather than a correctness decision, use the
2863 : : most likely runtime value for variable vectorization factors. */
2864 : 51 : if (do_peeling
2865 : 8352 : && LOOP_VINFO_NITERS_KNOWN_P (loop_vinfo))
2866 : : {
2867 : 3669 : unsigned int assumed_vf = vect_vf_for_cost (loop_vinfo);
2868 : 3669 : unsigned int max_peel = npeel == 0 ? assumed_vf - 1 : npeel;
2869 : 3669 : if ((unsigned HOST_WIDE_INT) LOOP_VINFO_INT_NITERS (loop_vinfo)
2870 : 3669 : < assumed_vf + max_peel)
2871 : : do_peeling = false;
2872 : : }
2873 : :
2874 : : if (do_peeling)
2875 : : {
2876 : : /* (1.2) Update the DR_MISALIGNMENT of each data reference DR_i.
2877 : : If the misalignment of DR_i is identical to that of dr0 then set
2878 : : DR_MISALIGNMENT (DR_i) to zero. If the misalignment of DR_i and
2879 : : dr0 are known at compile time then increment DR_MISALIGNMENT (DR_i)
2880 : : by the peeling factor times the element size of DR_i (MOD the
2881 : : vectorization factor times the size). Otherwise, the
2882 : : misalignment of DR_i must be set to unknown. */
2883 : 18025 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2884 : 10389 : if (dr != dr0_info->dr)
2885 : : {
2886 : 2753 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2887 : 2753 : if (!vect_relevant_for_alignment_p (dr_info))
2888 : 332 : continue;
2889 : :
2890 : 2421 : vect_update_misalignment_for_peel (dr_info, dr0_info, npeel);
2891 : : }
2892 : : }
2893 : :
2894 : 60862 : if (do_peeling && !try_peeling_with_versioning)
2895 : : {
2896 : : /* Update data structures if peeling will be applied alone. */
2897 : 6349 : LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
2898 : 6349 : if (npeel)
2899 : 1173 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = npeel;
2900 : : else
2901 : 5176 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = -1;
2902 : 6349 : SET_DR_MISALIGNMENT (dr0_info,
2903 : : vect_dr_misalign_for_aligned_access (dr0_info));
2904 : 6349 : if (dump_enabled_p ())
2905 : : {
2906 : 281 : dump_printf_loc (MSG_NOTE, vect_location,
2907 : : "Alignment of access forced using peeling.\n");
2908 : 281 : dump_printf_loc (MSG_NOTE, vect_location,
2909 : : "Peeling for alignment will be applied.\n");
2910 : : }
2911 : :
2912 : : /* The inside-loop cost will be accounted for in vectorizable_load
2913 : : and vectorizable_store correctly with adjusted alignments.
2914 : : Drop the body_cst_vec on the floor here. */
2915 : 6349 : return opt_result::success ();
2916 : : }
2917 : : }
2918 : :
2919 : : /* (2) Versioning to force alignment. */
2920 : :
2921 : : /* Try versioning if:
2922 : : 1) optimize loop for speed and the cost-model is not cheap
2923 : : 2) there is at least one unsupported misaligned data ref with an unknown
2924 : : misalignment, and
2925 : : 3) all misaligned data refs with a known misalignment are supported, and
2926 : : 4) the number of runtime alignment checks is within reason. */
2927 : :
2928 : 291740 : do_versioning
2929 : 291740 : = (optimize_loop_nest_for_speed_p (loop)
2930 : 291305 : && !loop->inner /* FORNOW */
2931 : 581681 : && loop_cost_model (loop) > VECT_COST_MODEL_CHEAP);
2932 : :
2933 : : if (do_versioning)
2934 : : {
2935 : 286602 : FOR_EACH_VEC_ELT (datarefs, i, dr)
2936 : : {
2937 : 217244 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
2938 : 217244 : if (!vect_relevant_for_alignment_p (dr_info))
2939 : 157578 : continue;
2940 : :
2941 : 150447 : stmt_vec_info stmt_info = dr_info->stmt;
2942 : 150447 : if (STMT_VINFO_STRIDED_P (stmt_info))
2943 : : {
2944 : : do_versioning = false;
2945 : 5097 : break;
2946 : : }
2947 : :
2948 : 149672 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
2949 : 149672 : bool negative = tree_int_cst_compare (DR_STEP (dr),
2950 : 149672 : size_zero_node) < 0;
2951 : 149672 : poly_int64 off = 0;
2952 : 149672 : if (negative)
2953 : 2836 : off = ((TYPE_VECTOR_SUBPARTS (vectype) - 1)
2954 : 2836 : * -TREE_INT_CST_LOW (TYPE_SIZE_UNIT (TREE_TYPE (vectype))));
2955 : 149672 : int misalignment;
2956 : 149672 : if ((misalignment = dr_misalignment (dr_info, vectype, off)) == 0)
2957 : 90781 : continue;
2958 : :
2959 : 58891 : enum dr_alignment_support supportable_dr_alignment
2960 : 58891 : = vect_supportable_dr_alignment (loop_vinfo, dr_info, vectype,
2961 : : misalignment);
2962 : 58891 : if (supportable_dr_alignment == dr_unaligned_unsupported)
2963 : : {
2964 : 16177 : if (misalignment != DR_MISALIGNMENT_UNKNOWN
2965 : 16177 : || (LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).length ()
2966 : 12441 : >= (unsigned) param_vect_max_version_for_alignment_checks))
2967 : : {
2968 : : do_versioning = false;
2969 : 5097 : break;
2970 : : }
2971 : :
2972 : : /* Forcing alignment in the first iteration is no good if
2973 : : we don't keep it across iterations. For now, just disable
2974 : : versioning in this case.
2975 : : ?? We could actually unroll the loop to achieve the required
2976 : : overall step alignment, and forcing the alignment could be
2977 : : done by doing some iterations of the non-vectorized loop. */
2978 : 11967 : if (!multiple_p (vf * DR_STEP_ALIGNMENT (dr),
2979 : 11967 : DR_TARGET_ALIGNMENT (dr_info)))
2980 : : {
2981 : : do_versioning = false;
2982 : : break;
2983 : : }
2984 : :
2985 : : /* Use "mask = DR_TARGET_ALIGNMENT - 1" to test rightmost address
2986 : : bits for runtime alignment check. For example, for 16 bytes
2987 : : target alignment the mask is 15 = 0xf. */
2988 : 11967 : poly_uint64 mask = DR_TARGET_ALIGNMENT (dr_info) - 1;
2989 : :
2990 : : /* FORNOW: use the same mask to test all potentially unaligned
2991 : : references in the loop. */
2992 : 11967 : if (maybe_ne (LOOP_VINFO_PTR_MASK (loop_vinfo), 0U)
2993 : 11967 : && maybe_ne (LOOP_VINFO_PTR_MASK (loop_vinfo), mask))
2994 : : {
2995 : : do_versioning = false;
2996 : : break;
2997 : : }
2998 : :
2999 : 11855 : LOOP_VINFO_PTR_MASK (loop_vinfo) = mask;
3000 : 11855 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).safe_push (stmt_info);
3001 : : }
3002 : : }
3003 : :
3004 : : /* Versioning requires at least one misaligned data reference. */
3005 : 74455 : if (!LOOP_REQUIRES_VERSIONING_FOR_ALIGNMENT (loop_vinfo))
3006 : : do_versioning = false;
3007 : 5737 : else if (!do_versioning)
3008 : 598 : LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo).truncate (0);
3009 : : }
3010 : :
3011 : : /* If we are trying peeling with versioning but versioning is disabled for
3012 : : some reason, peeling should be turned off together. */
3013 : 291740 : if (try_peeling_with_versioning && !do_versioning)
3014 : : do_peeling = false;
3015 : :
3016 : 280368 : if (do_versioning)
3017 : : {
3018 : : const vec<stmt_vec_info> &may_misalign_stmts
3019 : : = LOOP_VINFO_MAY_MISALIGN_STMTS (loop_vinfo);
3020 : : stmt_vec_info stmt_info;
3021 : :
3022 : : /* It can now be assumed that the data references in the statements
3023 : : in LOOP_VINFO_MAY_MISALIGN_STMTS will be aligned in the version
3024 : : of the loop being vectorized. */
3025 : 13966 : FOR_EACH_VEC_ELT (may_misalign_stmts, i, stmt_info)
3026 : : {
3027 : 8827 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
3028 : 8827 : SET_DR_MISALIGNMENT (dr_info,
3029 : : vect_dr_misalign_for_aligned_access (dr_info));
3030 : 8827 : if (dump_enabled_p ())
3031 : 139 : dump_printf_loc (MSG_NOTE, vect_location,
3032 : : "Alignment of access forced using versioning.\n");
3033 : : }
3034 : :
3035 : 5139 : if (do_peeling)
3036 : : {
3037 : : /* This point is reached if peeling and versioning are used together
3038 : : to ensure alignment. Update data structures to make sure the loop
3039 : : is correctly peeled and a right runtime check is added for loop
3040 : : versioning. */
3041 : 1287 : gcc_assert (try_peeling_with_versioning);
3042 : 1287 : LOOP_VINFO_UNALIGNED_DR (loop_vinfo) = dr0_info;
3043 : 1287 : LOOP_VINFO_PEELING_FOR_ALIGNMENT (loop_vinfo) = -1;
3044 : 1287 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (loop_vinfo) = true;
3045 : 1287 : if (dump_enabled_p ())
3046 : 6 : dump_printf_loc (MSG_NOTE, vect_location,
3047 : : "Both peeling and versioning will be applied.\n");
3048 : : }
3049 : : else
3050 : : {
3051 : : /* This point is reached if versioning is used alone. */
3052 : 3852 : LOOP_VINFO_ALLOW_MUTUAL_ALIGNMENT (loop_vinfo) = false;
3053 : 3852 : if (dump_enabled_p ())
3054 : 74 : dump_printf_loc (MSG_NOTE, vect_location,
3055 : : "Versioning for alignment will be applied.\n");
3056 : : }
3057 : :
3058 : 5139 : return opt_result::success ();
3059 : : }
3060 : :
3061 : : /* This point is reached if neither peeling nor versioning is being done. */
3062 : 286601 : gcc_assert (! (do_peeling || do_versioning));
3063 : :
3064 : 286601 : return opt_result::success ();
3065 : 607374 : }
3066 : :
3067 : :
3068 : : /* Function vect_analyze_data_refs_alignment
3069 : :
3070 : : Analyze the alignment of the data-references in the loop.
3071 : : Return FALSE if a data reference is found that cannot be vectorized. */
3072 : :
3073 : : opt_result
3074 : 339510 : vect_analyze_data_refs_alignment (loop_vec_info loop_vinfo)
3075 : : {
3076 : 339510 : DUMP_VECT_SCOPE ("vect_analyze_data_refs_alignment");
3077 : :
3078 : 339510 : vec<data_reference_p> datarefs = LOOP_VINFO_DATAREFS (loop_vinfo);
3079 : 339510 : struct data_reference *dr;
3080 : 339510 : unsigned int i;
3081 : :
3082 : 339510 : vect_record_base_alignments (loop_vinfo);
3083 : 1132248 : FOR_EACH_VEC_ELT (datarefs, i, dr)
3084 : : {
3085 : 792738 : dr_vec_info *dr_info = loop_vinfo->lookup_dr (dr);
3086 : 792738 : if (STMT_VINFO_VECTORIZABLE (dr_info->stmt))
3087 : : {
3088 : 792738 : if (STMT_VINFO_GROUPED_ACCESS (dr_info->stmt)
3089 : 1043402 : && DR_GROUP_FIRST_ELEMENT (dr_info->stmt) != dr_info->stmt)
3090 : 112879 : continue;
3091 : :
3092 : 679859 : vect_compute_data_ref_alignment (loop_vinfo, dr_info,
3093 : : STMT_VINFO_VECTYPE (dr_info->stmt));
3094 : : }
3095 : : }
3096 : :
3097 : 339510 : return opt_result::success ();
3098 : : }
3099 : :
3100 : :
3101 : : /* Analyze alignment of DRs of stmts in NODE. */
3102 : :
3103 : : static bool
3104 : 821242 : vect_slp_analyze_node_alignment (vec_info *vinfo, slp_tree node)
3105 : : {
3106 : : /* Alignment is maintained in the first element of the group. */
3107 : 821242 : stmt_vec_info first_stmt_info = SLP_TREE_SCALAR_STMTS (node)[0];
3108 : 821242 : first_stmt_info = DR_GROUP_FIRST_ELEMENT (first_stmt_info);
3109 : 821242 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (first_stmt_info);
3110 : 821242 : tree vectype = SLP_TREE_VECTYPE (node);
3111 : 821242 : poly_uint64 vector_alignment
3112 : 821242 : = exact_div (targetm.vectorize.preferred_vector_alignment (vectype),
3113 : : BITS_PER_UNIT);
3114 : 821242 : if (dr_info->misalignment == DR_MISALIGNMENT_UNINITIALIZED)
3115 : 782286 : vect_compute_data_ref_alignment (vinfo, dr_info, SLP_TREE_VECTYPE (node));
3116 : : /* Re-analyze alignment when we're facing a vectorization with a bigger
3117 : : alignment requirement. */
3118 : 38956 : else if (known_lt (dr_info->target_alignment, vector_alignment))
3119 : : {
3120 : 80 : poly_uint64 old_target_alignment = dr_info->target_alignment;
3121 : 80 : int old_misalignment = dr_info->misalignment;
3122 : 80 : vect_compute_data_ref_alignment (vinfo, dr_info, SLP_TREE_VECTYPE (node));
3123 : : /* But keep knowledge about a smaller alignment. */
3124 : 80 : if (old_misalignment != DR_MISALIGNMENT_UNKNOWN
3125 : 48 : && dr_info->misalignment == DR_MISALIGNMENT_UNKNOWN)
3126 : : {
3127 : 1 : dr_info->target_alignment = old_target_alignment;
3128 : 1 : dr_info->misalignment = old_misalignment;
3129 : : }
3130 : : }
3131 : : /* When we ever face unordered target alignments the first one wins in terms
3132 : : of analyzing and the other will become unknown in dr_misalignment. */
3133 : 821242 : return true;
3134 : : }
3135 : :
3136 : : /* Function vect_slp_analyze_instance_alignment
3137 : :
3138 : : Analyze the alignment of the data-references in the SLP instance.
3139 : : Return FALSE if a data reference is found that cannot be vectorized. */
3140 : :
3141 : : bool
3142 : 792728 : vect_slp_analyze_instance_alignment (vec_info *vinfo,
3143 : : slp_instance instance)
3144 : : {
3145 : 792728 : DUMP_VECT_SCOPE ("vect_slp_analyze_instance_alignment");
3146 : :
3147 : 792728 : slp_tree node;
3148 : 792728 : unsigned i;
3149 : 951171 : FOR_EACH_VEC_ELT (SLP_INSTANCE_LOADS (instance), i, node)
3150 : 158443 : if (! vect_slp_analyze_node_alignment (vinfo, node))
3151 : : return false;
3152 : :
3153 : 792728 : if (SLP_INSTANCE_KIND (instance) == slp_inst_kind_store
3154 : 792728 : && ! vect_slp_analyze_node_alignment
3155 : 662799 : (vinfo, SLP_INSTANCE_TREE (instance)))
3156 : : return false;
3157 : :
3158 : : return true;
3159 : : }
3160 : :
3161 : :
3162 : : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
3163 : : accesses of legal size, step, etc. Detect gaps, single element
3164 : : interleaving, and other special cases. Set grouped access info.
3165 : : Collect groups of strided stores for further use in SLP analysis.
3166 : : Worker for vect_analyze_group_access. */
3167 : :
3168 : : static bool
3169 : 12664065 : vect_analyze_group_access_1 (vec_info *vinfo, dr_vec_info *dr_info)
3170 : : {
3171 : 12664065 : data_reference *dr = dr_info->dr;
3172 : 12664065 : tree step = DR_STEP (dr);
3173 : 12664065 : tree scalar_type = TREE_TYPE (DR_REF (dr));
3174 : 12664065 : HOST_WIDE_INT type_size = TREE_INT_CST_LOW (TYPE_SIZE_UNIT (scalar_type));
3175 : 12664065 : stmt_vec_info stmt_info = dr_info->stmt;
3176 : 12664065 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3177 : 12664065 : bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
3178 : 12664065 : HOST_WIDE_INT dr_step = -1;
3179 : 12664065 : HOST_WIDE_INT groupsize, last_accessed_element = 1;
3180 : 12664065 : bool slp_impossible = false;
3181 : :
3182 : : /* For interleaving, GROUPSIZE is STEP counted in elements, i.e., the
3183 : : size of the interleaving group (including gaps). */
3184 : 12664065 : if (tree_fits_shwi_p (step))
3185 : : {
3186 : 12656444 : dr_step = tree_to_shwi (step);
3187 : : /* Check that STEP is a multiple of type size. Otherwise there is
3188 : : a non-element-sized gap at the end of the group which we
3189 : : cannot represent in DR_GROUP_GAP or DR_GROUP_SIZE.
3190 : : ??? As we can handle non-constant step fine here we should
3191 : : simply remove uses of DR_GROUP_GAP between the last and first
3192 : : element and instead rely on DR_STEP. DR_GROUP_SIZE then would
3193 : : simply not include that gap. */
3194 : 12656444 : if ((dr_step % type_size) != 0)
3195 : : {
3196 : 490 : if (dump_enabled_p ())
3197 : 27 : dump_printf_loc (MSG_NOTE, vect_location,
3198 : : "Step %T is not a multiple of the element size"
3199 : : " for %T\n",
3200 : : step, DR_REF (dr));
3201 : 490 : return false;
3202 : : }
3203 : 12655954 : groupsize = absu_hwi (dr_step) / type_size;
3204 : : }
3205 : : else
3206 : : groupsize = 0;
3207 : :
3208 : : /* Not consecutive access is possible only if it is a part of interleaving. */
3209 : 12663575 : if (!DR_GROUP_FIRST_ELEMENT (stmt_info))
3210 : : {
3211 : : /* Check if it this DR is a part of interleaving, and is a single
3212 : : element of the group that is accessed in the loop. */
3213 : :
3214 : : /* Gaps are supported only for loads. STEP must be a multiple of the type
3215 : : size. */
3216 : 8574055 : if (DR_IS_READ (dr)
3217 : 5141181 : && (dr_step % type_size) == 0
3218 : : && groupsize > 0
3219 : : /* This could be UINT_MAX but as we are generating code in a very
3220 : : inefficient way we have to cap earlier.
3221 : : See PR91403 for example. */
3222 : 5141181 : && groupsize <= 4096)
3223 : : {
3224 : 58368 : DR_GROUP_FIRST_ELEMENT (stmt_info) = stmt_info;
3225 : 58368 : DR_GROUP_SIZE (stmt_info) = groupsize;
3226 : 58368 : DR_GROUP_GAP (stmt_info) = groupsize - 1;
3227 : 58368 : if (dump_enabled_p ())
3228 : 1215 : dump_printf_loc (MSG_NOTE, vect_location,
3229 : : "Detected single element interleaving %T"
3230 : : " step %T\n",
3231 : : DR_REF (dr), step);
3232 : :
3233 : 58368 : return true;
3234 : : }
3235 : :
3236 : 8515687 : if (dump_enabled_p ())
3237 : 3089 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3238 : : "not consecutive access %G", stmt_info->stmt);
3239 : :
3240 : 8515687 : if (bb_vinfo)
3241 : : {
3242 : : /* Mark the statement as unvectorizable. */
3243 : 8498792 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
3244 : 8498792 : return true;
3245 : : }
3246 : :
3247 : 16895 : if (dump_enabled_p ())
3248 : 282 : dump_printf_loc (MSG_NOTE, vect_location, "using strided accesses\n");
3249 : 16895 : STMT_VINFO_STRIDED_P (stmt_info) = true;
3250 : 16895 : return true;
3251 : : }
3252 : :
3253 : 4089520 : if (DR_GROUP_FIRST_ELEMENT (stmt_info) == stmt_info)
3254 : : {
3255 : : /* First stmt in the interleaving chain. Check the chain. */
3256 : 1496497 : stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3257 : 1496497 : struct data_reference *data_ref = dr;
3258 : 1496497 : unsigned int count = 1;
3259 : 1496497 : tree prev_init = DR_INIT (data_ref);
3260 : 1496497 : HOST_WIDE_INT diff, gaps = 0;
3261 : :
3262 : : /* By construction, all group members have INTEGER_CST DR_INITs. */
3263 : 4089529 : while (next)
3264 : : {
3265 : : /* We never have the same DR multiple times. */
3266 : 2593094 : gcc_assert (tree_int_cst_compare (DR_INIT (data_ref),
3267 : : DR_INIT (STMT_VINFO_DATA_REF (next))) != 0);
3268 : :
3269 : 2593094 : data_ref = STMT_VINFO_DATA_REF (next);
3270 : :
3271 : : /* All group members have the same STEP by construction. */
3272 : 2593094 : gcc_checking_assert (operand_equal_p (DR_STEP (data_ref), step, 0));
3273 : :
3274 : : /* Check that the distance between two accesses is equal to the type
3275 : : size. Otherwise, we have gaps. */
3276 : 2593094 : diff = (TREE_INT_CST_LOW (DR_INIT (data_ref))
3277 : 2593094 : - TREE_INT_CST_LOW (prev_init)) / type_size;
3278 : 2593094 : if (diff < 1 || diff > UINT_MAX)
3279 : : {
3280 : : /* For artificial testcases with array accesses with large
3281 : : constant indices we can run into overflow issues which
3282 : : can end up fooling the groupsize constraint below so
3283 : : check the individual gaps (which are represented as
3284 : : unsigned int) as well. */
3285 : 0 : if (dump_enabled_p ())
3286 : 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3287 : : "interleaved access with gap larger "
3288 : : "than representable\n");
3289 : 0 : return false;
3290 : : }
3291 : 2593094 : if (diff != 1)
3292 : : {
3293 : : /* FORNOW: SLP of accesses with gaps is not supported. */
3294 : 102844 : slp_impossible = true;
3295 : 102844 : if (DR_IS_WRITE (data_ref))
3296 : : {
3297 : 62 : if (dump_enabled_p ())
3298 : 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3299 : : "interleaved store with gaps\n");
3300 : 62 : return false;
3301 : : }
3302 : :
3303 : 102782 : gaps += diff - 1;
3304 : : }
3305 : :
3306 : 2593032 : last_accessed_element += diff;
3307 : :
3308 : : /* Store the gap from the previous member of the group. If there is no
3309 : : gap in the access, DR_GROUP_GAP is always 1. */
3310 : 2593032 : DR_GROUP_GAP (next) = diff;
3311 : :
3312 : 2593032 : prev_init = DR_INIT (data_ref);
3313 : 2593032 : next = DR_GROUP_NEXT_ELEMENT (next);
3314 : : /* Count the number of data-refs in the chain. */
3315 : 2593032 : count++;
3316 : : }
3317 : :
3318 : 1496435 : if (groupsize == 0)
3319 : 1436543 : groupsize = count + gaps;
3320 : :
3321 : : /* This could be UINT_MAX but as we are generating code in a very
3322 : : inefficient way we have to cap earlier. See PR78699 for example. */
3323 : 1496435 : if (groupsize > 4096)
3324 : : {
3325 : 1 : if (dump_enabled_p ())
3326 : 1 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3327 : : "group is too large\n");
3328 : 1 : return false;
3329 : : }
3330 : :
3331 : : /* Check that the size of the interleaving is equal to count for stores,
3332 : : i.e., that there are no gaps. */
3333 : 1496434 : if (groupsize != count
3334 : 109343 : && !DR_IS_READ (dr))
3335 : : {
3336 : 8856 : groupsize = count;
3337 : 8856 : STMT_VINFO_STRIDED_P (stmt_info) = true;
3338 : : }
3339 : :
3340 : : /* If there is a gap after the last load in the group it is the
3341 : : difference between the groupsize and the last accessed
3342 : : element.
3343 : : When there is no gap, this difference should be 0. */
3344 : 1496434 : DR_GROUP_GAP (stmt_info) = groupsize - last_accessed_element;
3345 : :
3346 : 1496434 : DR_GROUP_SIZE (stmt_info) = groupsize;
3347 : 1496434 : if (dump_enabled_p ())
3348 : : {
3349 : 7485 : dump_printf_loc (MSG_NOTE, vect_location,
3350 : : "Detected interleaving ");
3351 : 7485 : if (DR_IS_READ (dr))
3352 : 3986 : dump_printf (MSG_NOTE, "load ");
3353 : 3499 : else if (STMT_VINFO_STRIDED_P (stmt_info))
3354 : 472 : dump_printf (MSG_NOTE, "strided store ");
3355 : : else
3356 : 3027 : dump_printf (MSG_NOTE, "store ");
3357 : 7485 : dump_printf (MSG_NOTE, "of size %u\n",
3358 : : (unsigned)groupsize);
3359 : 7485 : dump_printf_loc (MSG_NOTE, vect_location, "\t%G", stmt_info->stmt);
3360 : 7485 : next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3361 : 36582 : while (next)
3362 : : {
3363 : 29097 : if (DR_GROUP_GAP (next) != 1)
3364 : 252 : dump_printf_loc (MSG_NOTE, vect_location,
3365 : : "\t<gap of %d elements>\n",
3366 : 252 : DR_GROUP_GAP (next) - 1);
3367 : 29097 : dump_printf_loc (MSG_NOTE, vect_location, "\t%G", next->stmt);
3368 : 29097 : next = DR_GROUP_NEXT_ELEMENT (next);
3369 : : }
3370 : 7485 : if (DR_GROUP_GAP (stmt_info) != 0)
3371 : 309 : dump_printf_loc (MSG_NOTE, vect_location,
3372 : : "\t<gap of %d elements>\n",
3373 : 309 : DR_GROUP_GAP (stmt_info));
3374 : : }
3375 : :
3376 : : /* SLP: create an SLP data structure for every interleaving group of
3377 : : stores for further analysis in vect_analyse_slp. */
3378 : 1496434 : if (DR_IS_WRITE (dr) && !slp_impossible)
3379 : : {
3380 : 910064 : if (loop_vinfo)
3381 : 22018 : LOOP_VINFO_GROUPED_STORES (loop_vinfo).safe_push (stmt_info);
3382 : 910064 : if (bb_vinfo)
3383 : 888046 : BB_VINFO_GROUPED_STORES (bb_vinfo).safe_push (stmt_info);
3384 : : }
3385 : : }
3386 : :
3387 : : return true;
3388 : : }
3389 : :
3390 : : /* Analyze groups of accesses: check that DR_INFO belongs to a group of
3391 : : accesses of legal size, step, etc. Detect gaps, single element
3392 : : interleaving, and other special cases. Set grouped access info.
3393 : : Collect groups of strided stores for further use in SLP analysis. */
3394 : :
3395 : : static bool
3396 : 12664065 : vect_analyze_group_access (vec_info *vinfo, dr_vec_info *dr_info)
3397 : : {
3398 : 12664065 : if (!vect_analyze_group_access_1 (vinfo, dr_info))
3399 : : {
3400 : : /* Dissolve the group if present. */
3401 : 553 : stmt_vec_info stmt_info = DR_GROUP_FIRST_ELEMENT (dr_info->stmt);
3402 : 1592 : while (stmt_info)
3403 : : {
3404 : 1039 : stmt_vec_info next = DR_GROUP_NEXT_ELEMENT (stmt_info);
3405 : 1039 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3406 : 1039 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3407 : 1039 : stmt_info = next;
3408 : : }
3409 : : return false;
3410 : : }
3411 : : return true;
3412 : : }
3413 : :
3414 : : /* Analyze the access pattern of the data-reference DR_INFO.
3415 : : In case of non-consecutive accesses call vect_analyze_group_access() to
3416 : : analyze groups of accesses. */
3417 : :
3418 : : static bool
3419 : 13180629 : vect_analyze_data_ref_access (vec_info *vinfo, dr_vec_info *dr_info)
3420 : : {
3421 : 13180629 : data_reference *dr = dr_info->dr;
3422 : 13180629 : tree step = DR_STEP (dr);
3423 : 13180629 : tree scalar_type = TREE_TYPE (DR_REF (dr));
3424 : 13180629 : stmt_vec_info stmt_info = dr_info->stmt;
3425 : 13180629 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
3426 : 13180629 : class loop *loop = NULL;
3427 : :
3428 : 13180629 : if (STMT_VINFO_GATHER_SCATTER_P (stmt_info))
3429 : : return true;
3430 : :
3431 : 13146186 : if (loop_vinfo)
3432 : 739251 : loop = LOOP_VINFO_LOOP (loop_vinfo);
3433 : :
3434 : 13146186 : if (loop_vinfo && !step)
3435 : : {
3436 : 0 : if (dump_enabled_p ())
3437 : 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3438 : : "bad data-ref access in loop\n");
3439 : 0 : return false;
3440 : : }
3441 : :
3442 : : /* Allow loads with zero step in inner-loop vectorization. */
3443 : 13146186 : if (loop_vinfo && integer_zerop (step))
3444 : : {
3445 : 13273 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3446 : 13273 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3447 : 13273 : if (!nested_in_vect_loop_p (loop, stmt_info))
3448 : 13006 : return DR_IS_READ (dr);
3449 : : /* Allow references with zero step for outer loops marked
3450 : : with pragma omp simd only - it guarantees absence of
3451 : : loop-carried dependencies between inner loop iterations. */
3452 : 267 : if (loop->safelen < 2)
3453 : : {
3454 : 231 : if (dump_enabled_p ())
3455 : 5 : dump_printf_loc (MSG_NOTE, vect_location,
3456 : : "zero step in inner loop of nest\n");
3457 : 231 : return false;
3458 : : }
3459 : : }
3460 : :
3461 : 13132913 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
3462 : : {
3463 : : /* Interleaved accesses are not yet supported within outer-loop
3464 : : vectorization for references in the inner-loop. */
3465 : 5195 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3466 : 5195 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3467 : :
3468 : : /* For the rest of the analysis we use the outer-loop step. */
3469 : 5195 : step = STMT_VINFO_DR_STEP (stmt_info);
3470 : 5195 : if (integer_zerop (step))
3471 : : {
3472 : 1195 : if (dump_enabled_p ())
3473 : 228 : dump_printf_loc (MSG_NOTE, vect_location,
3474 : : "zero step in outer loop.\n");
3475 : 1195 : return DR_IS_READ (dr);
3476 : : }
3477 : : }
3478 : :
3479 : : /* Consecutive? */
3480 : 13131754 : if (TREE_CODE (step) == INTEGER_CST)
3481 : : {
3482 : 13096594 : HOST_WIDE_INT dr_step = TREE_INT_CST_LOW (step);
3483 : 13096594 : if (!tree_int_cst_compare (step, TYPE_SIZE_UNIT (scalar_type))
3484 : 13096594 : || (dr_step < 0
3485 : 22097 : && !compare_tree_int (TYPE_SIZE_UNIT (scalar_type), -dr_step)))
3486 : : {
3487 : : /* Mark that it is not interleaving. */
3488 : 437668 : DR_GROUP_FIRST_ELEMENT (stmt_info) = NULL;
3489 : 437668 : DR_GROUP_NEXT_ELEMENT (stmt_info) = NULL;
3490 : 437668 : return true;
3491 : : }
3492 : : }
3493 : :
3494 : 12694086 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
3495 : : {
3496 : 2895 : if (dump_enabled_p ())
3497 : 146 : dump_printf_loc (MSG_NOTE, vect_location,
3498 : : "grouped access in outer loop.\n");
3499 : 2895 : return false;
3500 : : }
3501 : :
3502 : :
3503 : : /* Assume this is a DR handled by non-constant strided load case. */
3504 : 12691191 : if (TREE_CODE (step) != INTEGER_CST)
3505 : 34747 : return (STMT_VINFO_STRIDED_P (stmt_info)
3506 : 34747 : && (!STMT_VINFO_GROUPED_ACCESS (stmt_info)
3507 : 7621 : || vect_analyze_group_access (vinfo, dr_info)));
3508 : :
3509 : : /* Not consecutive access - check if it's a part of interleaving group. */
3510 : 12656444 : return vect_analyze_group_access (vinfo, dr_info);
3511 : : }
3512 : :
3513 : : /* Compare two data-references DRA and DRB to group them into chunks
3514 : : suitable for grouping. */
3515 : :
3516 : : static int
3517 : 343603197 : dr_group_sort_cmp (const void *dra_, const void *drb_)
3518 : : {
3519 : 343603197 : dr_vec_info *dra_info = *(dr_vec_info **)const_cast<void *>(dra_);
3520 : 343603197 : dr_vec_info *drb_info = *(dr_vec_info **)const_cast<void *>(drb_);
3521 : 343603197 : data_reference_p dra = dra_info->dr;
3522 : 343603197 : data_reference_p drb = drb_info->dr;
3523 : 343603197 : int cmp;
3524 : :
3525 : : /* Stabilize sort. */
3526 : 343603197 : if (dra == drb)
3527 : : return 0;
3528 : :
3529 : : /* Different group IDs lead never belong to the same group. */
3530 : 343603197 : if (dra_info->group != drb_info->group)
3531 : 376620176 : return dra_info->group < drb_info->group ? -1 : 1;
3532 : :
3533 : : /* Ordering of DRs according to base. */
3534 : 95869907 : cmp = data_ref_compare_tree (DR_BASE_ADDRESS (dra),
3535 : : DR_BASE_ADDRESS (drb));
3536 : 95869907 : if (cmp != 0)
3537 : : return cmp;
3538 : :
3539 : : /* And according to DR_OFFSET. */
3540 : 51567594 : cmp = data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb));
3541 : 51567594 : if (cmp != 0)
3542 : : return cmp;
3543 : :
3544 : : /* Put reads before writes. */
3545 : 51226204 : if (DR_IS_READ (dra) != DR_IS_READ (drb))
3546 : 4200661 : return DR_IS_READ (dra) ? -1 : 1;
3547 : :
3548 : : /* Then sort after access size. */
3549 : 48347063 : cmp = data_ref_compare_tree (TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra))),
3550 : 48347063 : TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb))));
3551 : 48347063 : if (cmp != 0)
3552 : : return cmp;
3553 : :
3554 : : /* And after step. */
3555 : 41907110 : cmp = data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb));
3556 : 41907110 : if (cmp != 0)
3557 : : return cmp;
3558 : :
3559 : : /* Then sort after DR_INIT. In case of identical DRs sort after stmt UID. */
3560 : 41900811 : cmp = data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb));
3561 : 41900811 : if (cmp == 0)
3562 : 396806 : return gimple_uid (DR_STMT (dra)) < gimple_uid (DR_STMT (drb)) ? -1 : 1;
3563 : : return cmp;
3564 : : }
3565 : :
3566 : : /* If OP is the result of a conversion, return the unconverted value,
3567 : : otherwise return null. */
3568 : :
3569 : : static tree
3570 : 203 : strip_conversion (tree op)
3571 : : {
3572 : 203 : if (TREE_CODE (op) != SSA_NAME)
3573 : : return NULL_TREE;
3574 : 203 : gimple *stmt = SSA_NAME_DEF_STMT (op);
3575 : 203 : if (!is_gimple_assign (stmt)
3576 : 203 : || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt)))
3577 : : return NULL_TREE;
3578 : 92 : return gimple_assign_rhs1 (stmt);
3579 : : }
3580 : :
3581 : : /* Return true if vectorizable_* routines can handle statements STMT1_INFO
3582 : : and STMT2_INFO being in a single group. When ALLOW_SLP_P, masked loads can
3583 : : be grouped in SLP mode. */
3584 : :
3585 : : static bool
3586 : 6940293 : can_group_stmts_p (stmt_vec_info stmt1_info, stmt_vec_info stmt2_info,
3587 : : bool allow_slp_p)
3588 : : {
3589 : 6940293 : if (gimple_assign_single_p (stmt1_info->stmt))
3590 : 6939839 : return gimple_assign_single_p (stmt2_info->stmt);
3591 : :
3592 : 454 : gcall *call1 = dyn_cast <gcall *> (stmt1_info->stmt);
3593 : 454 : if (call1 && gimple_call_internal_p (call1))
3594 : : {
3595 : : /* Check for two masked loads or two masked stores. */
3596 : 595 : gcall *call2 = dyn_cast <gcall *> (stmt2_info->stmt);
3597 : 443 : if (!call2 || !gimple_call_internal_p (call2))
3598 : : return false;
3599 : 443 : internal_fn ifn = gimple_call_internal_fn (call1);
3600 : 443 : if (ifn != IFN_MASK_LOAD && ifn != IFN_MASK_STORE)
3601 : : return false;
3602 : 443 : if (ifn != gimple_call_internal_fn (call2))
3603 : : return false;
3604 : :
3605 : : /* Check that the masks are the same. Cope with casts of masks,
3606 : : like those created by build_mask_conversion. */
3607 : 443 : tree mask1 = gimple_call_arg (call1, 2);
3608 : 443 : tree mask2 = gimple_call_arg (call2, 2);
3609 : 443 : if (!operand_equal_p (mask1, mask2, 0) && !allow_slp_p)
3610 : : {
3611 : 157 : mask1 = strip_conversion (mask1);
3612 : 157 : if (!mask1)
3613 : : return false;
3614 : 46 : mask2 = strip_conversion (mask2);
3615 : 46 : if (!mask2)
3616 : : return false;
3617 : 46 : if (!operand_equal_p (mask1, mask2, 0))
3618 : : return false;
3619 : : }
3620 : 302 : return true;
3621 : : }
3622 : :
3623 : : return false;
3624 : : }
3625 : :
3626 : : /* Function vect_analyze_data_ref_accesses.
3627 : :
3628 : : Analyze the access pattern of all the data references in the loop.
3629 : :
3630 : : FORNOW: the only access pattern that is considered vectorizable is a
3631 : : simple step 1 (consecutive) access.
3632 : :
3633 : : FORNOW: handle only arrays and pointer accesses. */
3634 : :
3635 : : opt_result
3636 : 2704325 : vect_analyze_data_ref_accesses (vec_info *vinfo,
3637 : : vec<int> *dataref_groups)
3638 : : {
3639 : 2704325 : unsigned int i;
3640 : 2704325 : vec<data_reference_p> datarefs = vinfo->shared->datarefs;
3641 : :
3642 : 2704325 : DUMP_VECT_SCOPE ("vect_analyze_data_ref_accesses");
3643 : :
3644 : 2704325 : if (datarefs.is_empty ())
3645 : 1198258 : return opt_result::success ();
3646 : :
3647 : : /* Sort the array of datarefs to make building the interleaving chains
3648 : : linear. Don't modify the original vector's order, it is needed for
3649 : : determining what dependencies are reversed. */
3650 : 1506067 : vec<dr_vec_info *> datarefs_copy;
3651 : 1506067 : datarefs_copy.create (datarefs.length ());
3652 : 16422934 : for (unsigned i = 0; i < datarefs.length (); i++)
3653 : : {
3654 : 14916867 : dr_vec_info *dr_info = vinfo->lookup_dr (datarefs[i]);
3655 : : /* If the caller computed DR grouping use that, otherwise group by
3656 : : basic blocks. */
3657 : 14916867 : if (dataref_groups)
3658 : 14129885 : dr_info->group = (*dataref_groups)[i];
3659 : : else
3660 : 786982 : dr_info->group = gimple_bb (DR_STMT (datarefs[i]))->index;
3661 : 14916867 : datarefs_copy.quick_push (dr_info);
3662 : : }
3663 : 1506067 : datarefs_copy.qsort (dr_group_sort_cmp);
3664 : 1506067 : hash_set<stmt_vec_info> to_fixup;
3665 : :
3666 : : /* Build the interleaving chains. */
3667 : 14055273 : for (i = 0; i < datarefs_copy.length () - 1;)
3668 : : {
3669 : 11043139 : dr_vec_info *dr_info_a = datarefs_copy[i];
3670 : 11043139 : data_reference_p dra = dr_info_a->dr;
3671 : 11043139 : int dra_group_id = dr_info_a->group;
3672 : 11043139 : stmt_vec_info stmtinfo_a = dr_info_a->stmt;
3673 : 11043139 : stmt_vec_info lastinfo = NULL;
3674 : 11043139 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_a)
3675 : 9456031 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_a))
3676 : : {
3677 : 1615534 : ++i;
3678 : 1615534 : continue;
3679 : : }
3680 : 24603122 : for (i = i + 1; i < datarefs_copy.length (); ++i)
3681 : : {
3682 : 11795266 : dr_vec_info *dr_info_b = datarefs_copy[i];
3683 : 11795266 : data_reference_p drb = dr_info_b->dr;
3684 : 11795266 : int drb_group_id = dr_info_b->group;
3685 : 11795266 : stmt_vec_info stmtinfo_b = dr_info_b->stmt;
3686 : 11795266 : if (!STMT_VINFO_VECTORIZABLE (stmtinfo_b)
3687 : 11489211 : || STMT_VINFO_GATHER_SCATTER_P (stmtinfo_b))
3688 : : break;
3689 : :
3690 : : /* ??? Imperfect sorting (non-compatible types, non-modulo
3691 : : accesses, same accesses) can lead to a group to be artificially
3692 : : split here as we don't just skip over those. If it really
3693 : : matters we can push those to a worklist and re-iterate
3694 : : over them. The we can just skip ahead to the next DR here. */
3695 : :
3696 : : /* DRs in a different DR group should not be put into the same
3697 : : interleaving group. */
3698 : 11485917 : if (dra_group_id != drb_group_id)
3699 : : break;
3700 : :
3701 : : /* Check that the data-refs have same first location (except init)
3702 : : and they are both either store or load (not load and store,
3703 : : not masked loads or stores). */
3704 : 7229932 : if (DR_IS_READ (dra) != DR_IS_READ (drb)
3705 : 5935297 : || data_ref_compare_tree (DR_BASE_ADDRESS (dra),
3706 : : DR_BASE_ADDRESS (drb)) != 0
3707 : 4335305 : || data_ref_compare_tree (DR_OFFSET (dra), DR_OFFSET (drb)) != 0
3708 : 11543944 : || !can_group_stmts_p (stmtinfo_a, stmtinfo_b, true))
3709 : : break;
3710 : :
3711 : : /* Check that the data-refs have the same constant size. */
3712 : 4313995 : tree sza = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (dra)));
3713 : 4313995 : tree szb = TYPE_SIZE_UNIT (TREE_TYPE (DR_REF (drb)));
3714 : 4313995 : if (!tree_fits_uhwi_p (sza)
3715 : 4313995 : || !tree_fits_uhwi_p (szb)
3716 : 8627990 : || !tree_int_cst_equal (sza, szb))
3717 : : break;
3718 : :
3719 : : /* Check that the data-refs have the same step. */
3720 : 3970322 : if (data_ref_compare_tree (DR_STEP (dra), DR_STEP (drb)) != 0)
3721 : : break;
3722 : :
3723 : : /* Check the types are compatible.
3724 : : ??? We don't distinguish this during sorting. */
3725 : 3969688 : if (!types_compatible_p (TREE_TYPE (DR_REF (dra)),
3726 : 3969688 : TREE_TYPE (DR_REF (drb))))
3727 : : break;
3728 : :
3729 : : /* Check that the DR_INITs are compile-time constants. */
3730 : 2830025 : if (!tree_fits_shwi_p (DR_INIT (dra))
3731 : 2830025 : || !tree_fits_shwi_p (DR_INIT (drb)))
3732 : : break;
3733 : :
3734 : : /* Different .GOMP_SIMD_LANE calls still give the same lane,
3735 : : just hold extra information. */
3736 : 2830025 : if (STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_a)
3737 : 1240 : && STMT_VINFO_SIMD_LANE_ACCESS_P (stmtinfo_b)
3738 : 2831265 : && data_ref_compare_tree (DR_INIT (dra), DR_INIT (drb)) == 0)
3739 : : break;
3740 : :
3741 : : /* Sorting has ensured that DR_INIT (dra) <= DR_INIT (drb). */
3742 : 2828785 : HOST_WIDE_INT init_a = TREE_INT_CST_LOW (DR_INIT (dra));
3743 : 2828785 : HOST_WIDE_INT init_b = TREE_INT_CST_LOW (DR_INIT (drb));
3744 : 2828785 : HOST_WIDE_INT init_prev
3745 : 2828785 : = TREE_INT_CST_LOW (DR_INIT (datarefs_copy[i-1]->dr));
3746 : 2828785 : gcc_assert (init_a <= init_b
3747 : : && init_a <= init_prev
3748 : : && init_prev <= init_b);
3749 : :
3750 : : /* Do not place the same access in the interleaving chain twice. */
3751 : 2828785 : if (init_b == init_prev)
3752 : : {
3753 : 28384 : gcc_assert (gimple_uid (DR_STMT (datarefs_copy[i-1]->dr))
3754 : : < gimple_uid (DR_STMT (drb)));
3755 : : /* Simply link in duplicates and fix up the chain below. */
3756 : : }
3757 : : else
3758 : : {
3759 : : /* If init_b == init_a + the size of the type * k, we have an
3760 : : interleaving, and DRA is accessed before DRB. */
3761 : 2800401 : unsigned HOST_WIDE_INT type_size_a = tree_to_uhwi (sza);
3762 : 2800401 : if (type_size_a == 0
3763 : 2800401 : || (((unsigned HOST_WIDE_INT)init_b - init_a)
3764 : 2800401 : % type_size_a != 0))
3765 : : break;
3766 : :
3767 : : /* If we have a store, the accesses are adjacent. This splits
3768 : : groups into chunks we support (we don't support vectorization
3769 : : of stores with gaps). */
3770 : 2798609 : if (!DR_IS_READ (dra)
3771 : 1832452 : && (((unsigned HOST_WIDE_INT)init_b - init_prev)
3772 : : != type_size_a))
3773 : : break;
3774 : :
3775 : : /* For datarefs with big gap, it's better to split them into different
3776 : : groups.
3777 : : .i.e a[0], a[1], a[2], .. a[7], a[100], a[101],..., a[107] */
3778 : 2621289 : if ((unsigned HOST_WIDE_INT)(init_b - init_prev)
3779 : : > MAX_BITSIZE_MODE_ANY_MODE / BITS_PER_UNIT)
3780 : : break;
3781 : :
3782 : : /* If the step (if not zero or non-constant) is smaller than the
3783 : : difference between data-refs' inits this splits groups into
3784 : : suitable sizes. */
3785 : 2611406 : if (tree_fits_shwi_p (DR_STEP (dra)))
3786 : : {
3787 : 2606590 : unsigned HOST_WIDE_INT step
3788 : 2606590 : = absu_hwi (tree_to_shwi (DR_STEP (dra)));
3789 : 2606590 : if (step != 0
3790 : 131753 : && step <= ((unsigned HOST_WIDE_INT)init_b - init_a))
3791 : : break;
3792 : : }
3793 : : }
3794 : :
3795 : 2626311 : if (dump_enabled_p ())
3796 : 29781 : dump_printf_loc (MSG_NOTE, vect_location,
3797 : 29781 : DR_IS_READ (dra)
3798 : : ? "Detected interleaving load %T and %T\n"
3799 : : : "Detected interleaving store %T and %T\n",
3800 : : DR_REF (dra), DR_REF (drb));
3801 : :
3802 : : /* Link the found element into the group list. */
3803 : 2626311 : if (!DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3804 : : {
3805 : 1475803 : DR_GROUP_FIRST_ELEMENT (stmtinfo_a) = stmtinfo_a;
3806 : 1475803 : lastinfo = stmtinfo_a;
3807 : : }
3808 : 2626311 : DR_GROUP_FIRST_ELEMENT (stmtinfo_b) = stmtinfo_a;
3809 : 2626311 : DR_GROUP_NEXT_ELEMENT (lastinfo) = stmtinfo_b;
3810 : 2626311 : lastinfo = stmtinfo_b;
3811 : :
3812 : 2626311 : if (! STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
3813 : : {
3814 : 2626281 : STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a)
3815 : 2626281 : = !can_group_stmts_p (stmtinfo_a, stmtinfo_b, false);
3816 : :
3817 : 2626281 : if (dump_enabled_p () && STMT_VINFO_SLP_VECT_ONLY (stmtinfo_a))
3818 : 72 : dump_printf_loc (MSG_NOTE, vect_location,
3819 : : "Load suitable for SLP vectorization only.\n");
3820 : : }
3821 : :
3822 : 2626311 : if (init_b == init_prev
3823 : 28384 : && !to_fixup.add (DR_GROUP_FIRST_ELEMENT (stmtinfo_a))
3824 : 2642633 : && dump_enabled_p ())
3825 : 217 : dump_printf_loc (MSG_NOTE, vect_location,
3826 : : "Queuing group with duplicate access for fixup\n");
3827 : : }
3828 : : }
3829 : :
3830 : : /* Fixup groups with duplicate entries by splitting it. */
3831 : 1548076 : while (1)
3832 : : {
3833 : 1548076 : hash_set<stmt_vec_info>::iterator it = to_fixup.begin ();
3834 : 1548076 : if (!(it != to_fixup.end ()))
3835 : : break;
3836 : 42009 : stmt_vec_info grp = *it;
3837 : 42009 : to_fixup.remove (grp);
3838 : :
3839 : : /* Find the earliest duplicate group member. */
3840 : 42009 : unsigned first_duplicate = -1u;
3841 : 42009 : stmt_vec_info next, g = grp;
3842 : 268020 : while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3843 : : {
3844 : 184002 : if (tree_int_cst_equal (DR_INIT (STMT_VINFO_DR_INFO (next)->dr),
3845 : 184002 : DR_INIT (STMT_VINFO_DR_INFO (g)->dr))
3846 : 184002 : && gimple_uid (STMT_VINFO_STMT (next)) < first_duplicate)
3847 : : first_duplicate = gimple_uid (STMT_VINFO_STMT (next));
3848 : : g = next;
3849 : : }
3850 : 42009 : if (first_duplicate == -1U)
3851 : 16322 : continue;
3852 : :
3853 : : /* Then move all stmts after the first duplicate to a new group.
3854 : : Note this is a heuristic but one with the property that *it
3855 : : is fixed up completely. */
3856 : 25687 : g = grp;
3857 : 25687 : stmt_vec_info newgroup = NULL, ng = grp;
3858 : 233317 : while ((next = DR_GROUP_NEXT_ELEMENT (g)))
3859 : : {
3860 : 181943 : if (gimple_uid (STMT_VINFO_STMT (next)) >= first_duplicate)
3861 : : {
3862 : 176394 : DR_GROUP_NEXT_ELEMENT (g) = DR_GROUP_NEXT_ELEMENT (next);
3863 : 176394 : if (!newgroup)
3864 : : {
3865 : 25687 : newgroup = next;
3866 : 25687 : STMT_VINFO_SLP_VECT_ONLY (newgroup)
3867 : 25687 : = STMT_VINFO_SLP_VECT_ONLY (grp);
3868 : : }
3869 : : else
3870 : 150707 : DR_GROUP_NEXT_ELEMENT (ng) = next;
3871 : 176394 : ng = next;
3872 : 176394 : DR_GROUP_FIRST_ELEMENT (ng) = newgroup;
3873 : : }
3874 : : else
3875 : : g = DR_GROUP_NEXT_ELEMENT (g);
3876 : : }
3877 : 25687 : DR_GROUP_NEXT_ELEMENT (ng) = NULL;
3878 : :
3879 : : /* Fixup the new group which still may contain duplicates. */
3880 : 25687 : to_fixup.add (newgroup);
3881 : : }
3882 : :
3883 : 1506067 : dr_vec_info *dr_info;
3884 : 16402848 : FOR_EACH_VEC_ELT (datarefs_copy, i, dr_info)
3885 : : {
3886 : 14903579 : if (STMT_VINFO_VECTORIZABLE (dr_info->stmt)
3887 : 14903579 : && !vect_analyze_data_ref_access (vinfo, dr_info))
3888 : : {
3889 : 6852 : if (dump_enabled_p ())
3890 : 262 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
3891 : : "not vectorized: complicated access pattern.\n");
3892 : :
3893 : 6852 : if (is_a <bb_vec_info> (vinfo))
3894 : : {
3895 : : /* Mark the statement as not vectorizable. */
3896 : 54 : STMT_VINFO_VECTORIZABLE (dr_info->stmt) = false;
3897 : 54 : continue;
3898 : : }
3899 : : else
3900 : : {
3901 : 6798 : datarefs_copy.release ();
3902 : 6798 : return opt_result::failure_at (dr_info->stmt->stmt,
3903 : : "not vectorized:"
3904 : : " complicated access pattern.\n");
3905 : : }
3906 : : }
3907 : : }
3908 : :
3909 : 1499269 : datarefs_copy.release ();
3910 : 1499269 : return opt_result::success ();
3911 : 1506067 : }
3912 : :
3913 : : /* Function vect_vfa_segment_size.
3914 : :
3915 : : Input:
3916 : : DR_INFO: The data reference.
3917 : : LENGTH_FACTOR: segment length to consider.
3918 : :
3919 : : Return a value suitable for the dr_with_seg_len::seg_len field.
3920 : : This is the "distance travelled" by the pointer from the first
3921 : : iteration in the segment to the last. Note that it does not include
3922 : : the size of the access; in effect it only describes the first byte. */
3923 : :
3924 : : static tree
3925 : 118196 : vect_vfa_segment_size (dr_vec_info *dr_info, tree length_factor)
3926 : : {
3927 : 118196 : length_factor = size_binop (MINUS_EXPR,
3928 : : fold_convert (sizetype, length_factor),
3929 : : size_one_node);
3930 : 118196 : return size_binop (MULT_EXPR, fold_convert (sizetype, DR_STEP (dr_info->dr)),
3931 : : length_factor);
3932 : : }
3933 : :
3934 : : /* Return a value that, when added to abs (vect_vfa_segment_size (DR_INFO)),
3935 : : gives the worst-case number of bytes covered by the segment. */
3936 : :
3937 : : static unsigned HOST_WIDE_INT
3938 : 118678 : vect_vfa_access_size (vec_info *vinfo, dr_vec_info *dr_info)
3939 : : {
3940 : 118678 : stmt_vec_info stmt_vinfo = dr_info->stmt;
3941 : 118678 : tree ref_type = TREE_TYPE (DR_REF (dr_info->dr));
3942 : 118678 : unsigned HOST_WIDE_INT ref_size = tree_to_uhwi (TYPE_SIZE_UNIT (ref_type));
3943 : 118678 : unsigned HOST_WIDE_INT access_size = ref_size;
3944 : 118678 : if (DR_GROUP_FIRST_ELEMENT (stmt_vinfo))
3945 : : {
3946 : 37116 : gcc_assert (DR_GROUP_FIRST_ELEMENT (stmt_vinfo) == stmt_vinfo);
3947 : 37116 : access_size *= DR_GROUP_SIZE (stmt_vinfo) - DR_GROUP_GAP (stmt_vinfo);
3948 : : }
3949 : 118678 : tree vectype = STMT_VINFO_VECTYPE (stmt_vinfo);
3950 : 118678 : int misalignment;
3951 : 237356 : if (((misalignment = dr_misalignment (dr_info, vectype)), true)
3952 : 118678 : && (vect_supportable_dr_alignment (vinfo, dr_info, vectype, misalignment)
3953 : : == dr_explicit_realign_optimized))
3954 : : {
3955 : : /* We might access a full vector's worth. */
3956 : 0 : access_size += tree_to_uhwi (TYPE_SIZE_UNIT (vectype)) - ref_size;
3957 : : }
3958 : 118678 : return access_size;
3959 : : }
3960 : :
3961 : : /* Get the minimum alignment for all the scalar accesses that DR_INFO
3962 : : describes. */
3963 : :
3964 : : static unsigned int
3965 : 118678 : vect_vfa_align (dr_vec_info *dr_info)
3966 : : {
3967 : 0 : return dr_alignment (dr_info->dr);
3968 : : }
3969 : :
3970 : : /* Function vect_no_alias_p.
3971 : :
3972 : : Given data references A and B with equal base and offset, see whether
3973 : : the alias relation can be decided at compilation time. Return 1 if
3974 : : it can and the references alias, 0 if it can and the references do
3975 : : not alias, and -1 if we cannot decide at compile time. SEGMENT_LENGTH_A,
3976 : : SEGMENT_LENGTH_B, ACCESS_SIZE_A and ACCESS_SIZE_B are the equivalent
3977 : : of dr_with_seg_len::{seg_len,access_size} for A and B. */
3978 : :
3979 : : static int
3980 : 3516 : vect_compile_time_alias (dr_vec_info *a, dr_vec_info *b,
3981 : : tree segment_length_a, tree segment_length_b,
3982 : : unsigned HOST_WIDE_INT access_size_a,
3983 : : unsigned HOST_WIDE_INT access_size_b)
3984 : : {
3985 : 3516 : poly_offset_int offset_a = wi::to_poly_offset (DR_INIT (a->dr));
3986 : 3516 : poly_offset_int offset_b = wi::to_poly_offset (DR_INIT (b->dr));
3987 : 3516 : poly_uint64 const_length_a;
3988 : 3516 : poly_uint64 const_length_b;
3989 : :
3990 : : /* For negative step, we need to adjust address range by TYPE_SIZE_UNIT
3991 : : bytes, e.g., int a[3] -> a[1] range is [a+4, a+16) instead of
3992 : : [a, a+12) */
3993 : 3516 : if (tree_int_cst_compare (DR_STEP (a->dr), size_zero_node) < 0)
3994 : : {
3995 : 222 : const_length_a = (-wi::to_poly_wide (segment_length_a)).force_uhwi ();
3996 : 222 : offset_a -= const_length_a;
3997 : : }
3998 : : else
3999 : 3294 : const_length_a = tree_to_poly_uint64 (segment_length_a);
4000 : 3516 : if (tree_int_cst_compare (DR_STEP (b->dr), size_zero_node) < 0)
4001 : : {
4002 : 363 : const_length_b = (-wi::to_poly_wide (segment_length_b)).force_uhwi ();
4003 : 363 : offset_b -= const_length_b;
4004 : : }
4005 : : else
4006 : 3153 : const_length_b = tree_to_poly_uint64 (segment_length_b);
4007 : :
4008 : 3516 : const_length_a += access_size_a;
4009 : 3516 : const_length_b += access_size_b;
4010 : :
4011 : 3516 : if (ranges_known_overlap_p (offset_a, const_length_a,
4012 : : offset_b, const_length_b))
4013 : : return 1;
4014 : :
4015 : 422 : if (!ranges_maybe_overlap_p (offset_a, const_length_a,
4016 : : offset_b, const_length_b))
4017 : 422 : return 0;
4018 : :
4019 : : return -1;
4020 : : }
4021 : :
4022 : : /* Return true if the minimum nonzero dependence distance for loop LOOP_DEPTH
4023 : : in DDR is >= VF. */
4024 : :
4025 : : static bool
4026 : 69972 : dependence_distance_ge_vf (data_dependence_relation *ddr,
4027 : : unsigned int loop_depth, poly_uint64 vf)
4028 : : {
4029 : 69972 : if (DDR_ARE_DEPENDENT (ddr) != NULL_TREE
4030 : 74753 : || DDR_NUM_DIST_VECTS (ddr) == 0)
4031 : : return false;
4032 : :
4033 : : /* If the dependence is exact, we should have limited the VF instead. */
4034 : 4812 : gcc_checking_assert (DDR_COULD_BE_INDEPENDENT_P (ddr));
4035 : :
4036 : : unsigned int i;
4037 : : lambda_vector dist_v;
4038 : 9651 : FOR_EACH_VEC_ELT (DDR_DIST_VECTS (ddr), i, dist_v)
4039 : : {
4040 : 9620 : HOST_WIDE_INT dist = dist_v[loop_depth];
4041 : 9620 : if (dist != 0
4042 : 4812 : && !(dist > 0 && DDR_REVERSED_P (ddr))
4043 : 14432 : && maybe_lt ((unsigned HOST_WIDE_INT) abs_hwi (dist), vf))
4044 : : return false;
4045 : : }
4046 : :
4047 : 31 : if (dump_enabled_p ())
4048 : 2 : dump_printf_loc (MSG_NOTE, vect_location,
4049 : : "dependence distance between %T and %T is >= VF\n",
4050 : 2 : DR_REF (DDR_A (ddr)), DR_REF (DDR_B (ddr)));
4051 : :
4052 : : return true;
4053 : : }
4054 : :
4055 : : /* Dump LOWER_BOUND using flags DUMP_KIND. Dumps are known to be enabled. */
4056 : :
4057 : : static void
4058 : 439 : dump_lower_bound (dump_flags_t dump_kind, const vec_lower_bound &lower_bound)
4059 : : {
4060 : 439 : dump_printf (dump_kind, "%s (%T) >= ",
4061 : 439 : lower_bound.unsigned_p ? "unsigned" : "abs",
4062 : 439 : lower_bound.expr);
4063 : 439 : dump_dec (dump_kind, lower_bound.min_value);
4064 : 439 : }
4065 : :
4066 : : /* Record that the vectorized loop requires the vec_lower_bound described
4067 : : by EXPR, UNSIGNED_P and MIN_VALUE. */
4068 : :
4069 : : static void
4070 : 6287 : vect_check_lower_bound (loop_vec_info loop_vinfo, tree expr, bool unsigned_p,
4071 : : poly_uint64 min_value)
4072 : : {
4073 : 6287 : vec<vec_lower_bound> &lower_bounds
4074 : : = LOOP_VINFO_LOWER_BOUNDS (loop_vinfo);
4075 : 7259 : for (unsigned int i = 0; i < lower_bounds.length (); ++i)
4076 : 5809 : if (operand_equal_p (lower_bounds[i].expr, expr, 0))
4077 : : {
4078 : 4837 : unsigned_p &= lower_bounds[i].unsigned_p;
4079 : 4837 : min_value = upper_bound (lower_bounds[i].min_value, min_value);
4080 : 4837 : if (lower_bounds[i].unsigned_p != unsigned_p
4081 : 4837 : || maybe_lt (lower_bounds[i].min_value, min_value))
4082 : : {
4083 : 776 : lower_bounds[i].unsigned_p = unsigned_p;
4084 : 776 : lower_bounds[i].min_value = min_value;
4085 : 776 : if (dump_enabled_p ())
4086 : : {
4087 : 246 : dump_printf_loc (MSG_NOTE, vect_location,
4088 : : "updating run-time check to ");
4089 : 246 : dump_lower_bound (MSG_NOTE, lower_bounds[i]);
4090 : 246 : dump_printf (MSG_NOTE, "\n");
4091 : : }
4092 : : }
4093 : 4837 : return;
4094 : : }
4095 : :
4096 : 1450 : vec_lower_bound lower_bound (expr, unsigned_p, min_value);
4097 : 1450 : if (dump_enabled_p ())
4098 : : {
4099 : 193 : dump_printf_loc (MSG_NOTE, vect_location, "need a run-time check that ");
4100 : 193 : dump_lower_bound (MSG_NOTE, lower_bound);
4101 : 193 : dump_printf (MSG_NOTE, "\n");
4102 : : }
4103 : 1450 : LOOP_VINFO_LOWER_BOUNDS (loop_vinfo).safe_push (lower_bound);
4104 : : }
4105 : :
4106 : : /* Return true if it's unlikely that the step of the vectorized form of DR_INFO
4107 : : will span fewer than GAP bytes. */
4108 : :
4109 : : static bool
4110 : 5069 : vect_small_gap_p (loop_vec_info loop_vinfo, dr_vec_info *dr_info,
4111 : : poly_int64 gap)
4112 : : {
4113 : 5069 : stmt_vec_info stmt_info = dr_info->stmt;
4114 : 5069 : HOST_WIDE_INT count
4115 : 5069 : = estimated_poly_value (LOOP_VINFO_VECT_FACTOR (loop_vinfo));
4116 : 5069 : if (DR_GROUP_FIRST_ELEMENT (stmt_info))
4117 : 4357 : count *= DR_GROUP_SIZE (DR_GROUP_FIRST_ELEMENT (stmt_info));
4118 : 5069 : return (estimated_poly_value (gap)
4119 : 5069 : <= count * vect_get_scalar_dr_size (dr_info));
4120 : : }
4121 : :
4122 : : /* Return true if we know that there is no alias between DR_INFO_A and
4123 : : DR_INFO_B when abs (DR_STEP (DR_INFO_A->dr)) >= N for some N.
4124 : : When returning true, set *LOWER_BOUND_OUT to this N. */
4125 : :
4126 : : static bool
4127 : 18305 : vectorizable_with_step_bound_p (dr_vec_info *dr_info_a, dr_vec_info *dr_info_b,
4128 : : poly_uint64 *lower_bound_out)
4129 : : {
4130 : : /* Check that there is a constant gap of known sign between DR_A
4131 : : and DR_B. */
4132 : 18305 : data_reference *dr_a = dr_info_a->dr;
4133 : 18305 : data_reference *dr_b = dr_info_b->dr;
4134 : 18305 : poly_int64 init_a, init_b;
4135 : 18305 : if (!operand_equal_p (DR_BASE_ADDRESS (dr_a), DR_BASE_ADDRESS (dr_b), 0)
4136 : 7823 : || !operand_equal_p (DR_OFFSET (dr_a), DR_OFFSET (dr_b), 0)
4137 : 7127 : || !operand_equal_p (DR_STEP (dr_a), DR_STEP (dr_b), 0)
4138 : 7117 : || !poly_int_tree_p (DR_INIT (dr_a), &init_a)
4139 : 7117 : || !poly_int_tree_p (DR_INIT (dr_b), &init_b)
4140 : 18305 : || !ordered_p (init_a, init_b))
4141 : 11188 : return false;
4142 : :
4143 : : /* Sort DR_A and DR_B by the address they access. */
4144 : 7117 : if (maybe_lt (init_b, init_a))
4145 : : {
4146 : 128 : std::swap (init_a, init_b);
4147 : 128 : std::swap (dr_info_a, dr_info_b);
4148 : 128 : std::swap (dr_a, dr_b);
4149 : : }
4150 : :
4151 : : /* If the two accesses could be dependent within a scalar iteration,
4152 : : make sure that we'd retain their order. */
4153 : 7117 : if (maybe_gt (init_a + vect_get_scalar_dr_size (dr_info_a), init_b)
4154 : 7117 : && !vect_preserves_scalar_order_p (dr_info_a, dr_info_b))
4155 : : return false;
4156 : :
4157 : : /* There is no alias if abs (DR_STEP) is greater than or equal to
4158 : : the bytes spanned by the combination of the two accesses. */
4159 : 7117 : *lower_bound_out = init_b + vect_get_scalar_dr_size (dr_info_b) - init_a;
4160 : 7117 : return true;
4161 : : }
4162 : :
4163 : : /* Function vect_prune_runtime_alias_test_list.
4164 : :
4165 : : Prune a list of ddrs to be tested at run-time by versioning for alias.
4166 : : Merge several alias checks into one if possible.
4167 : : Return FALSE if resulting list of ddrs is longer then allowed by
4168 : : PARAM_VECT_MAX_VERSION_FOR_ALIAS_CHECKS, otherwise return TRUE. */
4169 : :
4170 : : opt_result
4171 : 339510 : vect_prune_runtime_alias_test_list (loop_vec_info loop_vinfo)
4172 : : {
4173 : 339510 : typedef pair_hash <tree_operand_hash, tree_operand_hash> tree_pair_hash;
4174 : 339510 : hash_set <tree_pair_hash> compared_objects;
4175 : :
4176 : 339510 : const vec<ddr_p> &may_alias_ddrs = LOOP_VINFO_MAY_ALIAS_DDRS (loop_vinfo);
4177 : 339510 : vec<dr_with_seg_len_pair_t> &comp_alias_ddrs
4178 : : = LOOP_VINFO_COMP_ALIAS_DDRS (loop_vinfo);
4179 : 339510 : const vec<vec_object_pair> &check_unequal_addrs
4180 : : = LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo);
4181 : 339510 : poly_uint64 vect_factor = LOOP_VINFO_VECT_FACTOR (loop_vinfo);
4182 : 339510 : tree scalar_loop_iters = LOOP_VINFO_NITERS (loop_vinfo);
4183 : :
4184 : 339510 : ddr_p ddr;
4185 : 339510 : unsigned int i;
4186 : 339510 : tree length_factor;
4187 : :
4188 : 339510 : DUMP_VECT_SCOPE ("vect_prune_runtime_alias_test_list");
4189 : :
4190 : : /* Step values are irrelevant for aliasing if the number of vector
4191 : : iterations is equal to the number of scalar iterations (which can
4192 : : happen for fully-SLP loops). */
4193 : 339510 : bool vf_one_p = known_eq (LOOP_VINFO_VECT_FACTOR (loop_vinfo), 1U);
4194 : :
4195 : 339510 : if (!vf_one_p)
4196 : : {
4197 : : /* Convert the checks for nonzero steps into bound tests. */
4198 : : tree value;
4199 : 337475 : FOR_EACH_VEC_ELT (LOOP_VINFO_CHECK_NONZERO (loop_vinfo), i, value)
4200 : 1527 : vect_check_lower_bound (loop_vinfo, value, true, 1);
4201 : : }
4202 : :
4203 : 339510 : if (may_alias_ddrs.is_empty ())
4204 : 317905 : return opt_result::success ();
4205 : :
4206 : 21605 : comp_alias_ddrs.create (may_alias_ddrs.length ());
4207 : :
4208 : 21605 : unsigned int loop_depth
4209 : 21605 : = index_in_loop_nest (LOOP_VINFO_LOOP (loop_vinfo)->num,
4210 : 21605 : LOOP_VINFO_LOOP_NEST (loop_vinfo));
4211 : :
4212 : : /* First, we collect all data ref pairs for aliasing checks. */
4213 : 88483 : FOR_EACH_VEC_ELT (may_alias_ddrs, i, ddr)
4214 : : {
4215 : 69972 : poly_uint64 lower_bound;
4216 : 69972 : tree segment_length_a, segment_length_b;
4217 : 69972 : unsigned HOST_WIDE_INT access_size_a, access_size_b;
4218 : 69972 : unsigned HOST_WIDE_INT align_a, align_b;
4219 : :
4220 : : /* Ignore the alias if the VF we chose ended up being no greater
4221 : : than the dependence distance. */
4222 : 69972 : if (dependence_distance_ge_vf (ddr, loop_depth, vect_factor))
4223 : 11055 : continue;
4224 : :
4225 : 69941 : if (DDR_OBJECT_A (ddr))
4226 : : {
4227 : 46 : vec_object_pair new_pair (DDR_OBJECT_A (ddr), DDR_OBJECT_B (ddr));
4228 : 46 : if (!compared_objects.add (new_pair))
4229 : : {
4230 : 14 : if (dump_enabled_p ())
4231 : 8 : dump_printf_loc (MSG_NOTE, vect_location,
4232 : : "checking that %T and %T"
4233 : : " have different addresses\n",
4234 : : new_pair.first, new_pair.second);
4235 : 14 : LOOP_VINFO_CHECK_UNEQUAL_ADDRS (loop_vinfo).safe_push (new_pair);
4236 : : }
4237 : 46 : continue;
4238 : 46 : }
4239 : :
4240 : 69895 : dr_vec_info *dr_info_a = loop_vinfo->lookup_dr (DDR_A (ddr));
4241 : 69895 : stmt_vec_info stmt_info_a = dr_info_a->stmt;
4242 : :
4243 : 69895 : dr_vec_info *dr_info_b = loop_vinfo->lookup_dr (DDR_B (ddr));
4244 : 69895 : stmt_vec_info stmt_info_b = dr_info_b->stmt;
4245 : :
4246 : 69895 : bool preserves_scalar_order_p
4247 : 69895 : = vect_preserves_scalar_order_p (dr_info_a, dr_info_b);
4248 : 69895 : bool ignore_step_p
4249 : : = (vf_one_p
4250 : 69895 : && (preserves_scalar_order_p
4251 : 2817 : || operand_equal_p (DR_STEP (dr_info_a->dr),
4252 : 2817 : DR_STEP (dr_info_b->dr))));
4253 : :
4254 : : /* Skip the pair if inter-iteration dependencies are irrelevant
4255 : : and intra-iteration dependencies are guaranteed to be honored. */
4256 : 11833 : if (ignore_step_p
4257 : 6037 : && (preserves_scalar_order_p
4258 : 2558 : || vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
4259 : : &lower_bound)))
4260 : : {
4261 : 5796 : if (dump_enabled_p ())
4262 : 2352 : dump_printf_loc (MSG_NOTE, vect_location,
4263 : : "no need for alias check between "
4264 : : "%T and %T when VF is 1\n",
4265 : 2352 : DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
4266 : 5796 : continue;
4267 : : }
4268 : :
4269 : : /* See whether we can handle the alias using a bounds check on
4270 : : the step, and whether that's likely to be the best approach.
4271 : : (It might not be, for example, if the minimum step is much larger
4272 : : than the number of bytes handled by one vector iteration.) */
4273 : 64099 : if (!ignore_step_p
4274 : 63858 : && TREE_CODE (DR_STEP (dr_info_a->dr)) != INTEGER_CST
4275 : 15747 : && vectorizable_with_step_bound_p (dr_info_a, dr_info_b,
4276 : : &lower_bound)
4277 : 68899 : && (vect_small_gap_p (loop_vinfo, dr_info_a, lower_bound)
4278 : 269 : || vect_small_gap_p (loop_vinfo, dr_info_b, lower_bound)))
4279 : : {
4280 : 4760 : bool unsigned_p = dr_known_forward_stride_p (dr_info_a->dr);
4281 : 4760 : if (dump_enabled_p ())
4282 : : {
4283 : 3312 : dump_printf_loc (MSG_NOTE, vect_location, "no alias between "
4284 : : "%T and %T when the step %T is outside ",
4285 : : DR_REF (dr_info_a->dr),
4286 : 1656 : DR_REF (dr_info_b->dr),
4287 : 1656 : DR_STEP (dr_info_a->dr));
4288 : 1656 : if (unsigned_p)
4289 : 504 : dump_printf (MSG_NOTE, "[0");
4290 : : else
4291 : : {
4292 : 1152 : dump_printf (MSG_NOTE, "(");
4293 : 1152 : dump_dec (MSG_NOTE, poly_int64 (-lower_bound));
4294 : : }
4295 : 1656 : dump_printf (MSG_NOTE, ", ");
4296 : 1656 : dump_dec (MSG_NOTE, lower_bound);
4297 : 1656 : dump_printf (MSG_NOTE, ")\n");
4298 : : }
4299 : 4760 : vect_check_lower_bound (loop_vinfo, DR_STEP (dr_info_a->dr),
4300 : : unsigned_p, lower_bound);
4301 : 4760 : continue;
4302 : 4760 : }
4303 : :
4304 : 59339 : stmt_vec_info dr_group_first_a = DR_GROUP_FIRST_ELEMENT (stmt_info_a);
4305 : 59339 : if (dr_group_first_a)
4306 : : {
4307 : 18190 : stmt_info_a = dr_group_first_a;
4308 : 18190 : dr_info_a = STMT_VINFO_DR_INFO (stmt_info_a);
4309 : : }
4310 : :
4311 : 59339 : stmt_vec_info dr_group_first_b = DR_GROUP_FIRST_ELEMENT (stmt_info_b);
4312 : 59339 : if (dr_group_first_b)
4313 : : {
4314 : 18926 : stmt_info_b = dr_group_first_b;
4315 : 18926 : dr_info_b = STMT_VINFO_DR_INFO (stmt_info_b);
4316 : : }
4317 : :
4318 : 59339 : if (ignore_step_p)
4319 : : {
4320 : 241 : segment_length_a = size_zero_node;
4321 : 241 : segment_length_b = size_zero_node;
4322 : : }
4323 : : else
4324 : : {
4325 : 59098 : if (!operand_equal_p (DR_STEP (dr_info_a->dr),
4326 : 59098 : DR_STEP (dr_info_b->dr), 0))
4327 : : length_factor = scalar_loop_iters;
4328 : : else
4329 : 46714 : length_factor = size_int (vect_factor);
4330 : 59098 : segment_length_a = vect_vfa_segment_size (dr_info_a, length_factor);
4331 : 59098 : segment_length_b = vect_vfa_segment_size (dr_info_b, length_factor);
4332 : : }
4333 : 59339 : access_size_a = vect_vfa_access_size (loop_vinfo, dr_info_a);
4334 : 59339 : access_size_b = vect_vfa_access_size (loop_vinfo, dr_info_b);
4335 : 59339 : align_a = vect_vfa_align (dr_info_a);
4336 : 59339 : align_b = vect_vfa_align (dr_info_b);
4337 : :
4338 : : /* See whether the alias is known at compilation time. */
4339 : 59339 : if (operand_equal_p (DR_BASE_ADDRESS (dr_info_a->dr),
4340 : 59339 : DR_BASE_ADDRESS (dr_info_b->dr), 0)
4341 : 5323 : && operand_equal_p (DR_OFFSET (dr_info_a->dr),
4342 : 5323 : DR_OFFSET (dr_info_b->dr), 0)
4343 : 3644 : && TREE_CODE (DR_STEP (dr_info_a->dr)) == INTEGER_CST
4344 : 3594 : && TREE_CODE (DR_STEP (dr_info_b->dr)) == INTEGER_CST
4345 : 3584 : && poly_int_tree_p (segment_length_a)
4346 : 62874 : && poly_int_tree_p (segment_length_b))
4347 : : {
4348 : 3516 : int res = vect_compile_time_alias (dr_info_a, dr_info_b,
4349 : : segment_length_a,
4350 : : segment_length_b,
4351 : : access_size_a,
4352 : : access_size_b);
4353 : 3516 : if (res >= 0 && dump_enabled_p ())
4354 : : {
4355 : 220 : dump_printf_loc (MSG_NOTE, vect_location,
4356 : : "can tell at compile time that %T and %T",
4357 : 110 : DR_REF (dr_info_a->dr), DR_REF (dr_info_b->dr));
4358 : 110 : if (res == 0)
4359 : 57 : dump_printf (MSG_NOTE, " do not alias\n");
4360 : : else
4361 : 53 : dump_printf (MSG_NOTE, " alias\n");
4362 : : }
4363 : :
4364 : 3516 : if (res == 0)
4365 : 422 : continue;
4366 : :
4367 : 3094 : if (res == 1)
4368 : 3094 : return opt_result::failure_at (stmt_info_b->stmt,
4369 : : "not vectorized:"
4370 : : " compilation time alias: %G%G",
4371 : : stmt_info_a->stmt,
4372 : : stmt_info_b->stmt);
4373 : : }
4374 : :
4375 : : /* dr_with_seg_len requires the alignment to apply to the segment length
4376 : : and access size, not just the start address. The access size can be
4377 : : smaller than the pointer alignment for grouped accesses and bitfield
4378 : : references; see PR115192 and PR116125 respectively. */
4379 : 55823 : align_a = std::min (align_a, least_bit_hwi (access_size_a));
4380 : 55823 : align_b = std::min (align_b, least_bit_hwi (access_size_b));
4381 : :
4382 : 55823 : dr_with_seg_len dr_a (dr_info_a->dr, segment_length_a,
4383 : 55823 : access_size_a, align_a);
4384 : 55823 : dr_with_seg_len dr_b (dr_info_b->dr, segment_length_b,
4385 : 55823 : access_size_b, align_b);
4386 : : /* Canonicalize the order to be the one that's needed for accurate
4387 : : RAW, WAR and WAW flags, in cases where the data references are
4388 : : well-ordered. The order doesn't really matter otherwise,
4389 : : but we might as well be consistent. */
4390 : 55823 : if (get_later_stmt (stmt_info_a, stmt_info_b) == stmt_info_a)
4391 : 4317 : std::swap (dr_a, dr_b);
4392 : :
4393 : 55823 : dr_with_seg_len_pair_t dr_with_seg_len_pair
4394 : : (dr_a, dr_b, (preserves_scalar_order_p
4395 : : ? dr_with_seg_len_pair_t::WELL_ORDERED
4396 : 61438 : : dr_with_seg_len_pair_t::REORDERED));
4397 : :
4398 : 55823 : comp_alias_ddrs.safe_push (dr_with_seg_len_pair);
4399 : : }
4400 : :
4401 : 18511 : prune_runtime_alias_test_list (&comp_alias_ddrs, vect_factor);
4402 : :
4403 : 37022 : unsigned int count = (comp_alias_ddrs.length ()
4404 : 18511 : + check_unequal_addrs.length ());
4405 : :
4406 : 18511 : if (count
4407 : 18511 : && (loop_cost_model (LOOP_VINFO_LOOP (loop_vinfo))
4408 : : == VECT_COST_MODEL_VERY_CHEAP))
4409 : 12778 : return opt_result::failure_at
4410 : 12778 : (vect_location, "would need a runtime alias check\n");
4411 : :
4412 : 5733 : if (dump_enabled_p ())
4413 : 1852 : dump_printf_loc (MSG_NOTE, vect_location,
4414 : : "improved number of alias checks from %d to %d\n",
4415 : : may_alias_ddrs.length (), count);
4416 : 5733 : unsigned limit = param_vect_max_version_for_alias_checks;
4417 : 5733 : if (loop_cost_model (LOOP_VINFO_LOOP (loop_vinfo)) == VECT_COST_MODEL_CHEAP)
4418 : 762 : limit = param_vect_max_version_for_alias_checks * 6 / 10;
4419 : 5733 : if (count > limit)
4420 : 162 : return opt_result::failure_at
4421 : 162 : (vect_location,
4422 : : "number of versioning for alias run-time tests exceeds %d "
4423 : : "(--param vect-max-version-for-alias-checks)\n", limit);
4424 : :
4425 : 5571 : return opt_result::success ();
4426 : 339510 : }
4427 : :
4428 : : /* Check whether we can use an internal function for a gather load
4429 : : or scatter store. READ_P is true for loads and false for stores.
4430 : : MASKED_P is true if the load or store is conditional. MEMORY_TYPE is
4431 : : the type of the memory elements being loaded or stored. OFFSET_TYPE
4432 : : is the type of the offset that is being applied to the invariant
4433 : : base address. If OFFSET_TYPE is scalar the function chooses an
4434 : : appropriate vector type for it. SCALE is the amount by which the
4435 : : offset should be multiplied *after* it has been converted to address width.
4436 : :
4437 : : Return true if the function is supported, storing the function id in
4438 : : *IFN_OUT and the vector type for the offset in *OFFSET_VECTYPE_OUT.
4439 : :
4440 : : If we can use gather and store the possible else values in ELSVALS. */
4441 : :
4442 : : bool
4443 : 66206 : vect_gather_scatter_fn_p (vec_info *vinfo, bool read_p, bool masked_p,
4444 : : tree vectype, tree memory_type, tree offset_type,
4445 : : int scale, internal_fn *ifn_out,
4446 : : tree *offset_vectype_out, vec<int> *elsvals)
4447 : : {
4448 : 66206 : unsigned int memory_bits = tree_to_uhwi (TYPE_SIZE (memory_type));
4449 : 66206 : unsigned int element_bits = vector_element_bits (vectype);
4450 : 66206 : if (element_bits != memory_bits)
4451 : : /* For now the vector elements must be the same width as the
4452 : : memory elements. */
4453 : : return false;
4454 : :
4455 : : /* Work out which function we need. */
4456 : 66206 : internal_fn ifn, alt_ifn, alt_ifn2;
4457 : 66206 : if (read_p)
4458 : : {
4459 : 41645 : ifn = masked_p ? IFN_MASK_GATHER_LOAD : IFN_GATHER_LOAD;
4460 : : alt_ifn = IFN_MASK_GATHER_LOAD;
4461 : : /* When target supports MASK_LEN_GATHER_LOAD, we always
4462 : : use MASK_LEN_GATHER_LOAD regardless whether len and
4463 : : mask are valid or not. */
4464 : : alt_ifn2 = IFN_MASK_LEN_GATHER_LOAD;
4465 : : }
4466 : : else
4467 : : {
4468 : 24561 : ifn = masked_p ? IFN_MASK_SCATTER_STORE : IFN_SCATTER_STORE;
4469 : 66206 : alt_ifn = IFN_MASK_SCATTER_STORE;
4470 : : /* When target supports MASK_LEN_SCATTER_STORE, we always
4471 : : use MASK_LEN_SCATTER_STORE regardless whether len and
4472 : : mask are valid or not. */
4473 : 66206 : alt_ifn2 = IFN_MASK_LEN_SCATTER_STORE;
4474 : : }
4475 : :
4476 : 465254 : for (;;)
4477 : : {
4478 : 265730 : tree offset_vectype;
4479 : 265730 : if (VECTOR_TYPE_P (offset_type))
4480 : : offset_vectype = offset_type;
4481 : : else
4482 : : {
4483 : 258622 : offset_vectype = get_vectype_for_scalar_type (vinfo, offset_type);
4484 : 258622 : if (!offset_vectype)
4485 : : return false;
4486 : : }
4487 : :
4488 : : /* Test whether the target supports this combination. */
4489 : 261502 : if (internal_gather_scatter_fn_supported_p (ifn, vectype, memory_type,
4490 : : offset_vectype, scale,
4491 : : elsvals))
4492 : : {
4493 : 0 : *ifn_out = ifn;
4494 : 0 : *offset_vectype_out = offset_vectype;
4495 : 0 : return true;
4496 : : }
4497 : 261502 : else if (!masked_p
4498 : 261502 : && internal_gather_scatter_fn_supported_p (alt_ifn, vectype,
4499 : : memory_type,
4500 : : offset_vectype,
4501 : : scale, elsvals))
4502 : : {
4503 : 0 : *ifn_out = alt_ifn;
4504 : 0 : *offset_vectype_out = offset_vectype;
4505 : 0 : return true;
4506 : : }
4507 : 261502 : else if (internal_gather_scatter_fn_supported_p (alt_ifn2, vectype,
4508 : : memory_type,
4509 : : offset_vectype, scale,
4510 : : elsvals))
4511 : : {
4512 : 0 : *ifn_out = alt_ifn2;
4513 : 0 : *offset_vectype_out = offset_vectype;
4514 : 0 : return true;
4515 : : }
4516 : :
4517 : : /* For fixed offset vector type we're done. */
4518 : 261502 : if (VECTOR_TYPE_P (offset_type))
4519 : : return false;
4520 : :
4521 : 254394 : if (TYPE_PRECISION (offset_type) >= POINTER_SIZE
4522 : 254394 : && TYPE_PRECISION (offset_type) >= element_bits)
4523 : : return false;
4524 : :
4525 : : /* Try a larger offset vector type. */
4526 : 199524 : offset_type = build_nonstandard_integer_type
4527 : 199524 : (TYPE_PRECISION (offset_type) * 2, TYPE_UNSIGNED (offset_type));
4528 : 199524 : }
4529 : : }
4530 : :
4531 : : /* STMT_INFO is a call to an internal gather load or scatter store function.
4532 : : Describe the operation in INFO. */
4533 : :
4534 : : void
4535 : 0 : vect_describe_gather_scatter_call (stmt_vec_info stmt_info,
4536 : : gather_scatter_info *info)
4537 : : {
4538 : 0 : gcall *call = as_a <gcall *> (stmt_info->stmt);
4539 : 0 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4540 : 0 : data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4541 : :
4542 : 0 : info->ifn = gimple_call_internal_fn (call);
4543 : 0 : info->decl = NULL_TREE;
4544 : 0 : info->base = gimple_call_arg (call, 0);
4545 : 0 : info->alias_ptr = gimple_call_arg
4546 : 0 : (call, internal_fn_alias_ptr_index (info->ifn));
4547 : 0 : info->offset = gimple_call_arg
4548 : 0 : (call, internal_fn_offset_index (info->ifn));
4549 : 0 : info->offset_vectype = NULL_TREE;
4550 : 0 : info->scale = TREE_INT_CST_LOW (gimple_call_arg
4551 : : (call, internal_fn_scale_index (info->ifn)));
4552 : 0 : info->element_type = TREE_TYPE (vectype);
4553 : 0 : info->memory_type = TREE_TYPE (DR_REF (dr));
4554 : 0 : }
4555 : :
4556 : : /* Return true if a non-affine read or write in STMT_INFO is suitable for a
4557 : : gather load or scatter store. Describe the operation in *INFO if so.
4558 : : If it is suitable and ELSVALS is nonzero store the supported else values
4559 : : in the vector it points to. */
4560 : :
4561 : : bool
4562 : 177519 : vect_check_gather_scatter (stmt_vec_info stmt_info, loop_vec_info loop_vinfo,
4563 : : gather_scatter_info *info, vec<int> *elsvals)
4564 : : {
4565 : 177519 : HOST_WIDE_INT scale = 1;
4566 : 177519 : poly_int64 pbitpos, pbitsize;
4567 : 177519 : class loop *loop = LOOP_VINFO_LOOP (loop_vinfo);
4568 : 177519 : struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
4569 : 177519 : tree offtype = NULL_TREE;
4570 : 177519 : tree decl = NULL_TREE, base, off;
4571 : 177519 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
4572 : 177519 : tree memory_type = TREE_TYPE (DR_REF (dr));
4573 : 177519 : machine_mode pmode;
4574 : 177519 : int punsignedp, reversep, pvolatilep = 0;
4575 : 177519 : internal_fn ifn;
4576 : 177519 : tree offset_vectype;
4577 : 177519 : bool masked_p = false;
4578 : :
4579 : : /* See whether this is already a call to a gather/scatter internal function.
4580 : : If not, see whether it's a masked load or store. */
4581 : 177519 : gcall *call = dyn_cast <gcall *> (stmt_info->stmt);
4582 : 5677 : if (call && gimple_call_internal_p (call))
4583 : : {
4584 : 5677 : ifn = gimple_call_internal_fn (call);
4585 : 5677 : if (internal_gather_scatter_fn_p (ifn))
4586 : : {
4587 : 0 : vect_describe_gather_scatter_call (stmt_info, info);
4588 : :
4589 : : /* In pattern recog we simply used a ZERO else value that
4590 : : we need to correct here. To that end just re-use the
4591 : : (already succesful) check if we support a gather IFN
4592 : : and have it populate the else values. */
4593 : 0 : if (DR_IS_READ (dr) && internal_fn_mask_index (ifn) >= 0 && elsvals)
4594 : 0 : supports_vec_gather_load_p (TYPE_MODE (vectype), elsvals);
4595 : 0 : return true;
4596 : : }
4597 : 5677 : masked_p = (ifn == IFN_MASK_LOAD || ifn == IFN_MASK_STORE);
4598 : : }
4599 : :
4600 : : /* True if we should aim to use internal functions rather than
4601 : : built-in functions. */
4602 : 177519 : bool use_ifn_p = (DR_IS_READ (dr)
4603 : 177519 : ? supports_vec_gather_load_p (TYPE_MODE (vectype),
4604 : : elsvals)
4605 : 177519 : : supports_vec_scatter_store_p (TYPE_MODE (vectype)));
4606 : :
4607 : 177519 : base = DR_REF (dr);
4608 : : /* For masked loads/stores, DR_REF (dr) is an artificial MEM_REF,
4609 : : see if we can use the def stmt of the address. */
4610 : 177519 : if (masked_p
4611 : 5677 : && TREE_CODE (base) == MEM_REF
4612 : 5677 : && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME
4613 : 5677 : && integer_zerop (TREE_OPERAND (base, 1))
4614 : 183196 : && !expr_invariant_in_loop_p (loop, TREE_OPERAND (base, 0)))
4615 : : {
4616 : 5677 : gimple *def_stmt = SSA_NAME_DEF_STMT (TREE_OPERAND (base, 0));
4617 : 5677 : if (is_gimple_assign (def_stmt)
4618 : 5677 : && gimple_assign_rhs_code (def_stmt) == ADDR_EXPR)
4619 : 570 : base = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
4620 : : }
4621 : :
4622 : : /* The gather and scatter builtins need address of the form
4623 : : loop_invariant + vector * {1, 2, 4, 8}
4624 : : or
4625 : : loop_invariant + sign_extend (vector) * { 1, 2, 4, 8 }.
4626 : : Unfortunately DR_BASE_ADDRESS/DR_OFFSET can be a mixture
4627 : : of loop invariants/SSA_NAMEs defined in the loop, with casts,
4628 : : multiplications and additions in it. To get a vector, we need
4629 : : a single SSA_NAME that will be defined in the loop and will
4630 : : contain everything that is not loop invariant and that can be
4631 : : vectorized. The following code attempts to find such a preexistng
4632 : : SSA_NAME OFF and put the loop invariants into a tree BASE
4633 : : that can be gimplified before the loop. */
4634 : 177519 : base = get_inner_reference (base, &pbitsize, &pbitpos, &off, &pmode,
4635 : : &punsignedp, &reversep, &pvolatilep);
4636 : 177519 : if (reversep)
4637 : : return false;
4638 : :
4639 : : /* PR 107346. Packed structs can have fields at offsets that are not
4640 : : multiples of BITS_PER_UNIT. Do not use gather/scatters in such cases. */
4641 : 177519 : if (!multiple_p (pbitpos, BITS_PER_UNIT))
4642 : : return false;
4643 : :
4644 : : /* We need to be able to form an address to the base which for example
4645 : : isn't possible for hard registers. */
4646 : 177519 : if (may_be_nonaddressable_p (base))
4647 : : return false;
4648 : :
4649 : 177511 : poly_int64 pbytepos = exact_div (pbitpos, BITS_PER_UNIT);
4650 : :
4651 : 177511 : if (TREE_CODE (base) == MEM_REF)
4652 : : {
4653 : 124305 : if (!integer_zerop (TREE_OPERAND (base, 1)))
4654 : : {
4655 : 4468 : if (off == NULL_TREE)
4656 : 4291 : off = wide_int_to_tree (sizetype, mem_ref_offset (base));
4657 : : else
4658 : 177 : off = size_binop (PLUS_EXPR, off,
4659 : : fold_convert (sizetype, TREE_OPERAND (base, 1)));
4660 : : }
4661 : 124305 : base = TREE_OPERAND (base, 0);
4662 : : }
4663 : : else
4664 : 53206 : base = build_fold_addr_expr (base);
4665 : :
4666 : 177511 : if (off == NULL_TREE)
4667 : 103418 : off = size_zero_node;
4668 : :
4669 : : /* BASE must be loop invariant. If it is not invariant, but OFF is, then we
4670 : : * can fix that by swapping BASE and OFF. */
4671 : 177511 : if (!expr_invariant_in_loop_p (loop, base))
4672 : : {
4673 : 108194 : if (!expr_invariant_in_loop_p (loop, off))
4674 : : return false;
4675 : :
4676 : 107919 : std::swap (base, off);
4677 : : }
4678 : :
4679 : 177236 : base = fold_convert (sizetype, base);
4680 : 177236 : base = size_binop (PLUS_EXPR, base, size_int (pbytepos));
4681 : :
4682 : : /* OFF at this point may be either a SSA_NAME or some tree expression
4683 : : from get_inner_reference. Try to peel off loop invariants from it
4684 : : into BASE as long as possible. */
4685 : 177236 : STRIP_NOPS (off);
4686 : 573058 : while (offtype == NULL_TREE)
4687 : : {
4688 : 502449 : enum tree_code code;
4689 : 502449 : tree op0, op1, add = NULL_TREE;
4690 : :
4691 : 502449 : if (TREE_CODE (off) == SSA_NAME)
4692 : : {
4693 : 351556 : gimple *def_stmt = SSA_NAME_DEF_STMT (off);
4694 : :
4695 : 351556 : if (expr_invariant_in_loop_p (loop, off))
4696 : 0 : return false;
4697 : :
4698 : 351556 : if (gimple_code (def_stmt) != GIMPLE_ASSIGN)
4699 : : break;
4700 : :
4701 : 322383 : op0 = gimple_assign_rhs1 (def_stmt);
4702 : 322383 : code = gimple_assign_rhs_code (def_stmt);
4703 : 322383 : op1 = gimple_assign_rhs2 (def_stmt);
4704 : : }
4705 : : else
4706 : : {
4707 : 150893 : if (get_gimple_rhs_class (TREE_CODE (off)) == GIMPLE_TERNARY_RHS)
4708 : : return false;
4709 : 150893 : code = TREE_CODE (off);
4710 : 150893 : extract_ops_from_tree (off, &code, &op0, &op1);
4711 : : }
4712 : 473276 : switch (code)
4713 : : {
4714 : 137552 : case POINTER_PLUS_EXPR:
4715 : 137552 : case PLUS_EXPR:
4716 : 137552 : if (expr_invariant_in_loop_p (loop, op0))
4717 : : {
4718 : 85038 : add = op0;
4719 : 85038 : off = op1;
4720 : 127299 : do_add:
4721 : 127299 : add = fold_convert (sizetype, add);
4722 : 127299 : if (scale != 1)
4723 : 44235 : add = size_binop (MULT_EXPR, add, size_int (scale));
4724 : 127299 : base = size_binop (PLUS_EXPR, base, add);
4725 : 395822 : continue;
4726 : : }
4727 : 52514 : if (expr_invariant_in_loop_p (loop, op1))
4728 : : {
4729 : 42035 : add = op1;
4730 : 42035 : off = op0;
4731 : 42035 : goto do_add;
4732 : : }
4733 : : break;
4734 : 388 : case MINUS_EXPR:
4735 : 388 : if (expr_invariant_in_loop_p (loop, op1))
4736 : : {
4737 : 226 : add = fold_convert (sizetype, op1);
4738 : 226 : add = size_binop (MINUS_EXPR, size_zero_node, add);
4739 : 226 : off = op0;
4740 : 226 : goto do_add;
4741 : : }
4742 : : break;
4743 : 157318 : case MULT_EXPR:
4744 : 157318 : if (scale == 1 && tree_fits_shwi_p (op1))
4745 : : {
4746 : 124049 : int new_scale = tree_to_shwi (op1);
4747 : : /* Only treat this as a scaling operation if the target
4748 : : supports it for at least some offset type. */
4749 : 124049 : if (use_ifn_p
4750 : 0 : && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
4751 : : masked_p, vectype, memory_type,
4752 : : signed_char_type_node,
4753 : : new_scale, &ifn,
4754 : : &offset_vectype,
4755 : : elsvals)
4756 : 124049 : && !vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
4757 : : masked_p, vectype, memory_type,
4758 : : unsigned_char_type_node,
4759 : : new_scale, &ifn,
4760 : : &offset_vectype,
4761 : : elsvals))
4762 : : break;
4763 : 124049 : scale = new_scale;
4764 : 124049 : off = op0;
4765 : 124049 : continue;
4766 : 124049 : }
4767 : : break;
4768 : 0 : case SSA_NAME:
4769 : 0 : off = op0;
4770 : 0 : continue;
4771 : 149314 : CASE_CONVERT:
4772 : 298612 : if (!POINTER_TYPE_P (TREE_TYPE (op0))
4773 : 298612 : && !INTEGRAL_TYPE_P (TREE_TYPE (op0)))
4774 : : break;
4775 : :
4776 : : /* Don't include the conversion if the target is happy with
4777 : : the current offset type. */
4778 : 149314 : if (use_ifn_p
4779 : 0 : && TREE_CODE (off) == SSA_NAME
4780 : 0 : && !POINTER_TYPE_P (TREE_TYPE (off))
4781 : 149314 : && vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr),
4782 : : masked_p, vectype, memory_type,
4783 : 0 : TREE_TYPE (off), scale, &ifn,
4784 : : &offset_vectype, elsvals))
4785 : : break;
4786 : :
4787 : 149314 : if (TYPE_PRECISION (TREE_TYPE (op0))
4788 : 149314 : == TYPE_PRECISION (TREE_TYPE (off)))
4789 : : {
4790 : 73865 : off = op0;
4791 : 73865 : continue;
4792 : : }
4793 : :
4794 : : /* Include the conversion if it is widening and we're using
4795 : : the IFN path or the target can handle the converted from
4796 : : offset or the current size is not already the same as the
4797 : : data vector element size. */
4798 : 75449 : if ((TYPE_PRECISION (TREE_TYPE (op0))
4799 : 75449 : < TYPE_PRECISION (TREE_TYPE (off)))
4800 : 75449 : && (use_ifn_p
4801 : 75351 : || (DR_IS_READ (dr)
4802 : 47154 : ? (targetm.vectorize.builtin_gather
4803 : 47154 : && targetm.vectorize.builtin_gather (vectype,
4804 : 47154 : TREE_TYPE (op0),
4805 : : scale))
4806 : 28197 : : (targetm.vectorize.builtin_scatter
4807 : 28197 : && targetm.vectorize.builtin_scatter (vectype,
4808 : 28197 : TREE_TYPE (op0),
4809 : : scale)))
4810 : 74687 : || !operand_equal_p (TYPE_SIZE (TREE_TYPE (off)),
4811 : 74687 : TYPE_SIZE (TREE_TYPE (vectype)), 0)))
4812 : : {
4813 : 70609 : off = op0;
4814 : 70609 : offtype = TREE_TYPE (off);
4815 : 70609 : STRIP_NOPS (off);
4816 : 70609 : continue;
4817 : : }
4818 : : break;
4819 : : default:
4820 : : break;
4821 : 0 : }
4822 : : break;
4823 : : }
4824 : :
4825 : : /* If at the end OFF still isn't a SSA_NAME or isn't
4826 : : defined in the loop, punt. */
4827 : 177236 : if (TREE_CODE (off) != SSA_NAME
4828 : 177236 : || expr_invariant_in_loop_p (loop, off))
4829 : 5497 : return false;
4830 : :
4831 : 171739 : if (offtype == NULL_TREE)
4832 : 101352 : offtype = TREE_TYPE (off);
4833 : :
4834 : 171739 : if (use_ifn_p)
4835 : : {
4836 : 0 : if (!vect_gather_scatter_fn_p (loop_vinfo, DR_IS_READ (dr), masked_p,
4837 : : vectype, memory_type, offtype, scale,
4838 : : &ifn, &offset_vectype, elsvals))
4839 : 0 : ifn = IFN_LAST;
4840 : : decl = NULL_TREE;
4841 : : }
4842 : : else
4843 : : {
4844 : 171739 : if (DR_IS_READ (dr))
4845 : : {
4846 : 109614 : if (targetm.vectorize.builtin_gather)
4847 : 109614 : decl = targetm.vectorize.builtin_gather (vectype, offtype, scale);
4848 : : }
4849 : : else
4850 : : {
4851 : 62125 : if (targetm.vectorize.builtin_scatter)
4852 : 62125 : decl = targetm.vectorize.builtin_scatter (vectype, offtype, scale);
4853 : : }
4854 : 171739 : ifn = IFN_LAST;
4855 : : /* The offset vector type will be read from DECL when needed. */
4856 : 171739 : offset_vectype = NULL_TREE;
4857 : : }
4858 : :
4859 : 171739 : gcc_checking_assert (expr_invariant_in_loop_p (loop, base));
4860 : 171739 : gcc_checking_assert (!expr_invariant_in_loop_p (loop, off));
4861 : :
4862 : 171739 : info->ifn = ifn;
4863 : 171739 : info->decl = decl;
4864 : 171739 : info->base = base;
4865 : :
4866 : 343478 : info->alias_ptr = build_int_cst
4867 : 171739 : (reference_alias_ptr_type (DR_REF (dr)),
4868 : 171739 : get_object_alignment (DR_REF (dr)));
4869 : :
4870 : 171739 : info->offset = off;
4871 : 171739 : info->offset_vectype = offset_vectype;
4872 : 171739 : info->scale = scale;
4873 : 171739 : info->element_type = TREE_TYPE (vectype);
4874 : 171739 : info->memory_type = memory_type;
4875 : 171739 : return true;
4876 : : }
4877 : :
4878 : : /* Find the data references in STMT, analyze them with respect to LOOP and
4879 : : append them to DATAREFS. Return false if datarefs in this stmt cannot
4880 : : be handled. */
4881 : :
4882 : : opt_result
4883 : 32127258 : vect_find_stmt_data_reference (loop_p loop, gimple *stmt,
4884 : : vec<data_reference_p> *datarefs,
4885 : : vec<int> *dataref_groups, int group_id)
4886 : : {
4887 : : /* We can ignore clobbers for dataref analysis - they are removed during
4888 : : loop vectorization and BB vectorization checks dependences with a
4889 : : stmt walk. */
4890 : 32127258 : if (gimple_clobber_p (stmt))
4891 : 1355832 : return opt_result::success ();
4892 : :
4893 : 57238482 : if (gimple_has_volatile_ops (stmt))
4894 : 318538 : return opt_result::failure_at (stmt, "not vectorized: volatile type: %G",
4895 : : stmt);
4896 : :
4897 : 30452888 : if (stmt_can_throw_internal (cfun, stmt))
4898 : 804900 : return opt_result::failure_at (stmt,
4899 : : "not vectorized:"
4900 : : " statement can throw an exception: %G",
4901 : : stmt);
4902 : :
4903 : 29647988 : auto_vec<data_reference_p, 2> refs;
4904 : 29647988 : opt_result res = find_data_references_in_stmt (loop, stmt, &refs);
4905 : 29647988 : if (!res)
4906 : 3554174 : return res;
4907 : :
4908 : 26093814 : if (refs.is_empty ())
4909 : 14832413 : return opt_result::success ();
4910 : :
4911 : 11261401 : if (refs.length () > 1)
4912 : : {
4913 : 1242334 : while (!refs.is_empty ())
4914 : 828525 : free_data_ref (refs.pop ());
4915 : 413809 : return opt_result::failure_at (stmt,
4916 : : "not vectorized: more than one "
4917 : : "data ref in stmt: %G", stmt);
4918 : : }
4919 : :
4920 : 10847592 : data_reference_p dr = refs.pop ();
4921 : 10847592 : if (gcall *call = dyn_cast <gcall *> (stmt))
4922 : 4336 : if (!gimple_call_internal_p (call)
4923 : 4336 : || (gimple_call_internal_fn (call) != IFN_MASK_LOAD
4924 : 1440 : && gimple_call_internal_fn (call) != IFN_MASK_STORE))
4925 : : {
4926 : 1316 : free_data_ref (dr);
4927 : 1316 : return opt_result::failure_at (stmt,
4928 : : "not vectorized: dr in a call %G", stmt);
4929 : : }
4930 : :
4931 : 10846276 : if (TREE_CODE (DR_REF (dr)) == COMPONENT_REF
4932 : 10846276 : && DECL_BIT_FIELD (TREE_OPERAND (DR_REF (dr), 1)))
4933 : : {
4934 : 84607 : free_data_ref (dr);
4935 : 84607 : return opt_result::failure_at (stmt,
4936 : : "not vectorized:"
4937 : : " statement is an unsupported"
4938 : : " bitfield access %G", stmt);
4939 : : }
4940 : :
4941 : 10761669 : if (DR_BASE_ADDRESS (dr)
4942 : 10725439 : && TREE_CODE (DR_BASE_ADDRESS (dr)) == INTEGER_CST)
4943 : : {
4944 : 955 : free_data_ref (dr);
4945 : 955 : return opt_result::failure_at (stmt,
4946 : : "not vectorized:"
4947 : : " base addr of dr is a constant\n");
4948 : : }
4949 : :
4950 : : /* Check whether this may be a SIMD lane access and adjust the
4951 : : DR to make it easier for us to handle it. */
4952 : 10760714 : if (loop
4953 : 491636 : && loop->simduid
4954 : 10706 : && (!DR_BASE_ADDRESS (dr)
4955 : 2956 : || !DR_OFFSET (dr)
4956 : 2956 : || !DR_INIT (dr)
4957 : 2956 : || !DR_STEP (dr)))
4958 : : {
4959 : 7750 : struct data_reference *newdr
4960 : 7750 : = create_data_ref (NULL, loop_containing_stmt (stmt), DR_REF (dr), stmt,
4961 : 7750 : DR_IS_READ (dr), DR_IS_CONDITIONAL_IN_STMT (dr));
4962 : 7750 : if (DR_BASE_ADDRESS (newdr)
4963 : 7750 : && DR_OFFSET (newdr)
4964 : 7750 : && DR_INIT (newdr)
4965 : 7750 : && DR_STEP (newdr)
4966 : 7750 : && TREE_CODE (DR_INIT (newdr)) == INTEGER_CST
4967 : 15500 : && integer_zerop (DR_STEP (newdr)))
4968 : : {
4969 : 7750 : tree base_address = DR_BASE_ADDRESS (newdr);
4970 : 7750 : tree off = DR_OFFSET (newdr);
4971 : 7750 : tree step = ssize_int (1);
4972 : 7750 : if (integer_zerop (off)
4973 : 7750 : && TREE_CODE (base_address) == POINTER_PLUS_EXPR)
4974 : : {
4975 : 89 : off = TREE_OPERAND (base_address, 1);
4976 : 89 : base_address = TREE_OPERAND (base_address, 0);
4977 : : }
4978 : 7750 : STRIP_NOPS (off);
4979 : 7750 : if (TREE_CODE (off) == MULT_EXPR
4980 : 7750 : && tree_fits_uhwi_p (TREE_OPERAND (off, 1)))
4981 : : {
4982 : 7500 : step = TREE_OPERAND (off, 1);
4983 : 7500 : off = TREE_OPERAND (off, 0);
4984 : 7500 : STRIP_NOPS (off);
4985 : : }
4986 : 544 : if (CONVERT_EXPR_P (off)
4987 : 7750 : && (TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (off, 0)))
4988 : 7206 : < TYPE_PRECISION (TREE_TYPE (off))))
4989 : 7206 : off = TREE_OPERAND (off, 0);
4990 : 7750 : if (TREE_CODE (off) == SSA_NAME)
4991 : : {
4992 : 7222 : gimple *def = SSA_NAME_DEF_STMT (off);
4993 : : /* Look through widening conversion. */
4994 : 7222 : if (is_gimple_assign (def)
4995 : 7222 : && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
4996 : : {
4997 : 0 : tree rhs1 = gimple_assign_rhs1 (def);
4998 : 0 : if (TREE_CODE (rhs1) == SSA_NAME
4999 : 0 : && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
5000 : 0 : && (TYPE_PRECISION (TREE_TYPE (off))
5001 : 0 : > TYPE_PRECISION (TREE_TYPE (rhs1))))
5002 : 0 : def = SSA_NAME_DEF_STMT (rhs1);
5003 : : }
5004 : 7222 : if (is_gimple_call (def)
5005 : 7083 : && gimple_call_internal_p (def)
5006 : 14305 : && (gimple_call_internal_fn (def) == IFN_GOMP_SIMD_LANE))
5007 : : {
5008 : 7083 : tree arg = gimple_call_arg (def, 0);
5009 : 7083 : tree reft = TREE_TYPE (DR_REF (newdr));
5010 : 7083 : gcc_assert (TREE_CODE (arg) == SSA_NAME);
5011 : 7083 : arg = SSA_NAME_VAR (arg);
5012 : 7083 : if (arg == loop->simduid
5013 : : /* For now. */
5014 : 7083 : && tree_int_cst_equal (TYPE_SIZE_UNIT (reft), step))
5015 : : {
5016 : 7058 : DR_BASE_ADDRESS (newdr) = base_address;
5017 : 7058 : DR_OFFSET (newdr) = ssize_int (0);
5018 : 7058 : DR_STEP (newdr) = step;
5019 : 7058 : DR_OFFSET_ALIGNMENT (newdr) = BIGGEST_ALIGNMENT;
5020 : 7058 : DR_STEP_ALIGNMENT (newdr) = highest_pow2_factor (step);
5021 : : /* Mark as simd-lane access. */
5022 : 7058 : tree arg2 = gimple_call_arg (def, 1);
5023 : 7058 : newdr->aux = (void *) (-1 - tree_to_uhwi (arg2));
5024 : 7058 : free_data_ref (dr);
5025 : 7058 : datarefs->safe_push (newdr);
5026 : 7058 : if (dataref_groups)
5027 : 0 : dataref_groups->safe_push (group_id);
5028 : 7058 : return opt_result::success ();
5029 : : }
5030 : : }
5031 : : }
5032 : : }
5033 : 692 : free_data_ref (newdr);
5034 : : }
5035 : :
5036 : 10753656 : datarefs->safe_push (dr);
5037 : 10753656 : if (dataref_groups)
5038 : 10269078 : dataref_groups->safe_push (group_id);
5039 : 10753656 : return opt_result::success ();
5040 : 29647988 : }
5041 : :
5042 : : /* Function vect_analyze_data_refs.
5043 : :
5044 : : Find all the data references in the loop or basic block.
5045 : :
5046 : : The general structure of the analysis of data refs in the vectorizer is as
5047 : : follows:
5048 : : 1- vect_analyze_data_refs(loop/bb): call
5049 : : compute_data_dependences_for_loop/bb to find and analyze all data-refs
5050 : : in the loop/bb and their dependences.
5051 : : 2- vect_analyze_dependences(): apply dependence testing using ddrs.
5052 : : 3- vect_analyze_drs_alignment(): check that ref_stmt.alignment is ok.
5053 : : 4- vect_analyze_drs_access(): check that ref_stmt.step is ok.
5054 : :
5055 : : */
5056 : :
5057 : : opt_result
5058 : 2754372 : vect_analyze_data_refs (vec_info *vinfo, bool *fatal)
5059 : : {
5060 : 2754372 : class loop *loop = NULL;
5061 : 2754372 : unsigned int i;
5062 : 2754372 : struct data_reference *dr;
5063 : 2754372 : tree scalar_type;
5064 : :
5065 : 2754372 : DUMP_VECT_SCOPE ("vect_analyze_data_refs");
5066 : :
5067 : 2754372 : if (loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo))
5068 : 367429 : loop = LOOP_VINFO_LOOP (loop_vinfo);
5069 : :
5070 : : /* Go through the data-refs, check that the analysis succeeded. Update
5071 : : pointer from stmt_vec_info struct to DR and vectype. */
5072 : :
5073 : 2754372 : vec<data_reference_p> datarefs = vinfo->shared->datarefs;
5074 : 17681895 : FOR_EACH_VEC_ELT (datarefs, i, dr)
5075 : : {
5076 : 14977570 : enum { SG_NONE, GATHER, SCATTER } gatherscatter = SG_NONE;
5077 : :
5078 : 14977570 : gcc_assert (DR_REF (dr));
5079 : 14977570 : stmt_vec_info stmt_info = vinfo->lookup_stmt (DR_STMT (dr));
5080 : 14977570 : gcc_assert (!stmt_info->dr_aux.dr);
5081 : 14977570 : stmt_info->dr_aux.dr = dr;
5082 : 14977570 : stmt_info->dr_aux.stmt = stmt_info;
5083 : :
5084 : : /* Check that analysis of the data-ref succeeded. */
5085 : 14977570 : if (!DR_BASE_ADDRESS (dr) || !DR_OFFSET (dr) || !DR_INIT (dr)
5086 : 14933122 : || !DR_STEP (dr))
5087 : : {
5088 : 88896 : bool maybe_gather
5089 : 44448 : = DR_IS_READ (dr)
5090 : 44448 : && !TREE_THIS_VOLATILE (DR_REF (dr));
5091 : 88896 : bool maybe_scatter
5092 : : = DR_IS_WRITE (dr)
5093 : 44448 : && !TREE_THIS_VOLATILE (DR_REF (dr));
5094 : :
5095 : : /* If target supports vector gather loads or scatter stores,
5096 : : see if they can't be used. */
5097 : 44448 : if (is_a <loop_vec_info> (vinfo)
5098 : 44448 : && !nested_in_vect_loop_p (loop, stmt_info))
5099 : : {
5100 : 40811 : if (maybe_gather || maybe_scatter)
5101 : : {
5102 : 40811 : if (maybe_gather)
5103 : : gatherscatter = GATHER;
5104 : : else
5105 : 12703 : gatherscatter = SCATTER;
5106 : : }
5107 : : }
5108 : :
5109 : 12703 : if (gatherscatter == SG_NONE)
5110 : : {
5111 : 3637 : if (dump_enabled_p ())
5112 : 5 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5113 : : "not vectorized: data ref analysis "
5114 : : "failed %G", stmt_info->stmt);
5115 : 3637 : if (is_a <bb_vec_info> (vinfo))
5116 : : {
5117 : : /* In BB vectorization the ref can still participate
5118 : : in dependence analysis, we just can't vectorize it. */
5119 : 3023 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5120 : 3023 : continue;
5121 : : }
5122 : 614 : return opt_result::failure_at (stmt_info->stmt,
5123 : : "not vectorized:"
5124 : : " data ref analysis failed: %G",
5125 : : stmt_info->stmt);
5126 : : }
5127 : : }
5128 : :
5129 : : /* See if this was detected as SIMD lane access. */
5130 : 14973933 : if (dr->aux == (void *)-1
5131 : 14973933 : || dr->aux == (void *)-2
5132 : 14965024 : || dr->aux == (void *)-3
5133 : 14964184 : || dr->aux == (void *)-4)
5134 : : {
5135 : 10549 : if (nested_in_vect_loop_p (loop, stmt_info))
5136 : 0 : return opt_result::failure_at (stmt_info->stmt,
5137 : : "not vectorized:"
5138 : : " data ref analysis failed: %G",
5139 : : stmt_info->stmt);
5140 : 10549 : STMT_VINFO_SIMD_LANE_ACCESS_P (stmt_info)
5141 : 10549 : = -(uintptr_t) dr->aux;
5142 : : }
5143 : :
5144 : 14973933 : tree base = get_base_address (DR_REF (dr));
5145 : 14973933 : if (base && VAR_P (base) && DECL_NONALIASED (base))
5146 : : {
5147 : 8082 : if (dump_enabled_p ())
5148 : 186 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5149 : : "not vectorized: base object not addressable "
5150 : : "for stmt: %G", stmt_info->stmt);
5151 : 8082 : if (is_a <bb_vec_info> (vinfo))
5152 : : {
5153 : : /* In BB vectorization the ref can still participate
5154 : : in dependence analysis, we just can't vectorize it. */
5155 : 8082 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5156 : 8082 : continue;
5157 : : }
5158 : 0 : return opt_result::failure_at (stmt_info->stmt,
5159 : : "not vectorized: base object not"
5160 : : " addressable for stmt: %G",
5161 : : stmt_info->stmt);
5162 : : }
5163 : :
5164 : 14965851 : if (is_a <loop_vec_info> (vinfo)
5165 : 847071 : && DR_STEP (dr)
5166 : 15772111 : && TREE_CODE (DR_STEP (dr)) != INTEGER_CST)
5167 : : {
5168 : 40065 : if (nested_in_vect_loop_p (loop, stmt_info))
5169 : 381 : return opt_result::failure_at (stmt_info->stmt,
5170 : : "not vectorized: "
5171 : : "not suitable for strided load %G",
5172 : : stmt_info->stmt);
5173 : 39684 : STMT_VINFO_STRIDED_P (stmt_info) = true;
5174 : : }
5175 : :
5176 : : /* Update DR field in stmt_vec_info struct. */
5177 : :
5178 : : /* If the dataref is in an inner-loop of the loop that is considered for
5179 : : for vectorization, we also want to analyze the access relative to
5180 : : the outer-loop (DR contains information only relative to the
5181 : : inner-most enclosing loop). We do that by building a reference to the
5182 : : first location accessed by the inner-loop, and analyze it relative to
5183 : : the outer-loop. */
5184 : 14965470 : if (loop && nested_in_vect_loop_p (loop, stmt_info))
5185 : : {
5186 : : /* Build a reference to the first location accessed by the
5187 : : inner loop: *(BASE + INIT + OFFSET). By construction,
5188 : : this address must be invariant in the inner loop, so we
5189 : : can consider it as being used in the outer loop. */
5190 : 11142 : tree base = unshare_expr (DR_BASE_ADDRESS (dr));
5191 : 11142 : tree offset = unshare_expr (DR_OFFSET (dr));
5192 : 11142 : tree init = unshare_expr (DR_INIT (dr));
5193 : 11142 : tree init_offset = fold_build2 (PLUS_EXPR, TREE_TYPE (offset),
5194 : : init, offset);
5195 : 11142 : tree init_addr = fold_build_pointer_plus (base, init_offset);
5196 : 11142 : tree init_ref = build_fold_indirect_ref (init_addr);
5197 : :
5198 : 11142 : if (dump_enabled_p ())
5199 : 1159 : dump_printf_loc (MSG_NOTE, vect_location,
5200 : : "analyze in outer loop: %T\n", init_ref);
5201 : :
5202 : 11142 : opt_result res
5203 : 11142 : = dr_analyze_innermost (&STMT_VINFO_DR_WRT_VEC_LOOP (stmt_info),
5204 : 11142 : init_ref, loop, stmt_info->stmt);
5205 : 11142 : if (!res)
5206 : : /* dr_analyze_innermost already explained the failure. */
5207 : 160 : return res;
5208 : :
5209 : 10982 : if (dump_enabled_p ())
5210 : 1159 : dump_printf_loc (MSG_NOTE, vect_location,
5211 : : "\touter base_address: %T\n"
5212 : : "\touter offset from base address: %T\n"
5213 : : "\touter constant offset from base address: %T\n"
5214 : : "\touter step: %T\n"
5215 : : "\touter base alignment: %d\n\n"
5216 : : "\touter base misalignment: %d\n"
5217 : : "\touter offset alignment: %d\n"
5218 : : "\touter step alignment: %d\n",
5219 : : STMT_VINFO_DR_BASE_ADDRESS (stmt_info),
5220 : : STMT_VINFO_DR_OFFSET (stmt_info),
5221 : : STMT_VINFO_DR_INIT (stmt_info),
5222 : : STMT_VINFO_DR_STEP (stmt_info),
5223 : : STMT_VINFO_DR_BASE_ALIGNMENT (stmt_info),
5224 : : STMT_VINFO_DR_BASE_MISALIGNMENT (stmt_info),
5225 : : STMT_VINFO_DR_OFFSET_ALIGNMENT (stmt_info),
5226 : : STMT_VINFO_DR_STEP_ALIGNMENT (stmt_info));
5227 : : }
5228 : :
5229 : : /* Set vectype for STMT. */
5230 : 14965310 : scalar_type = TREE_TYPE (DR_REF (dr));
5231 : 14965310 : tree vectype = get_vectype_for_scalar_type (vinfo, scalar_type);
5232 : 14965310 : if (!vectype)
5233 : : {
5234 : 1757006 : if (dump_enabled_p ())
5235 : : {
5236 : 1813 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5237 : : "not vectorized: no vectype for stmt: %G",
5238 : : stmt_info->stmt);
5239 : 1813 : dump_printf (MSG_MISSED_OPTIMIZATION, " scalar_type: ");
5240 : 1813 : dump_generic_expr (MSG_MISSED_OPTIMIZATION, TDF_DETAILS,
5241 : : scalar_type);
5242 : 1813 : dump_printf (MSG_MISSED_OPTIMIZATION, "\n");
5243 : : }
5244 : :
5245 : 1757006 : if (is_a <bb_vec_info> (vinfo))
5246 : : {
5247 : : /* No vector type is fine, the ref can still participate
5248 : : in dependence analysis, we just can't vectorize it. */
5249 : 1711845 : STMT_VINFO_VECTORIZABLE (stmt_info) = false;
5250 : 1711845 : continue;
5251 : : }
5252 : 45161 : if (fatal)
5253 : 45161 : *fatal = false;
5254 : 45161 : return opt_result::failure_at (stmt_info->stmt,
5255 : : "not vectorized:"
5256 : : " no vectype for stmt: %G"
5257 : : " scalar_type: %T\n",
5258 : : stmt_info->stmt, scalar_type);
5259 : : }
5260 : : else
5261 : : {
5262 : 13208304 : if (dump_enabled_p ())
5263 : 76701 : dump_printf_loc (MSG_NOTE, vect_location,
5264 : : "got vectype for stmt: %G%T\n",
5265 : : stmt_info->stmt, vectype);
5266 : : }
5267 : :
5268 : : /* Leave the BB vectorizer to pick the vector type later, based on
5269 : : the final dataref group size and SLP node size. */
5270 : 13208304 : if (is_a <loop_vec_info> (vinfo))
5271 : 801369 : STMT_VINFO_VECTYPE (stmt_info) = vectype;
5272 : :
5273 : 13208304 : if (gatherscatter != SG_NONE)
5274 : : {
5275 : 38505 : gather_scatter_info gs_info;
5276 : 38505 : if (!vect_check_gather_scatter (stmt_info,
5277 : : as_a <loop_vec_info> (vinfo),
5278 : : &gs_info)
5279 : 73886 : || !get_vectype_for_scalar_type (vinfo,
5280 : 35381 : TREE_TYPE (gs_info.offset)))
5281 : : {
5282 : 3731 : if (fatal)
5283 : 3731 : *fatal = false;
5284 : 3731 : return opt_result::failure_at
5285 : 4025 : (stmt_info->stmt,
5286 : : (gatherscatter == GATHER)
5287 : : ? "not vectorized: not suitable for gather load %G"
5288 : : : "not vectorized: not suitable for scatter store %G",
5289 : : stmt_info->stmt);
5290 : : }
5291 : 34774 : STMT_VINFO_GATHER_SCATTER_P (stmt_info) = gatherscatter;
5292 : : }
5293 : : }
5294 : :
5295 : : /* We used to stop processing and prune the list here. Verify we no
5296 : : longer need to. */
5297 : 4210392 : gcc_assert (i == datarefs.length ());
5298 : :
5299 : 2704325 : return opt_result::success ();
5300 : : }
5301 : :
5302 : :
5303 : : /* Function vect_get_new_vect_var.
5304 : :
5305 : : Returns a name for a new variable. The current naming scheme appends the
5306 : : prefix "vect_" or "vect_p" (depending on the value of VAR_KIND) to
5307 : : the name of vectorizer generated variables, and appends that to NAME if
5308 : : provided. */
5309 : :
5310 : : tree
5311 : 1871549 : vect_get_new_vect_var (tree type, enum vect_var_kind var_kind, const char *name)
5312 : : {
5313 : 1871549 : const char *prefix;
5314 : 1871549 : tree new_vect_var;
5315 : :
5316 : 1871549 : switch (var_kind)
5317 : : {
5318 : : case vect_simple_var:
5319 : : prefix = "vect";
5320 : : break;
5321 : 22014 : case vect_scalar_var:
5322 : 22014 : prefix = "stmp";
5323 : 22014 : break;
5324 : 14060 : case vect_mask_var:
5325 : 14060 : prefix = "mask";
5326 : 14060 : break;
5327 : 1384960 : case vect_pointer_var:
5328 : 1384960 : prefix = "vectp";
5329 : 1384960 : break;
5330 : 0 : default:
5331 : 0 : gcc_unreachable ();
5332 : : }
5333 : :
5334 : 1871549 : if (name)
5335 : : {
5336 : 1023504 : char* tmp = concat (prefix, "_", name, NULL);
5337 : 1023504 : new_vect_var = create_tmp_reg (type, tmp);
5338 : 1023504 : free (tmp);
5339 : : }
5340 : : else
5341 : 848045 : new_vect_var = create_tmp_reg (type, prefix);
5342 : :
5343 : 1871549 : return new_vect_var;
5344 : : }
5345 : :
5346 : : /* Like vect_get_new_vect_var but return an SSA name. */
5347 : :
5348 : : tree
5349 : 6672 : vect_get_new_ssa_name (tree type, enum vect_var_kind var_kind, const char *name)
5350 : : {
5351 : 6672 : const char *prefix;
5352 : 6672 : tree new_vect_var;
5353 : :
5354 : 6672 : switch (var_kind)
5355 : : {
5356 : : case vect_simple_var:
5357 : : prefix = "vect";
5358 : : break;
5359 : 313 : case vect_scalar_var:
5360 : 313 : prefix = "stmp";
5361 : 313 : break;
5362 : 0 : case vect_pointer_var:
5363 : 0 : prefix = "vectp";
5364 : 0 : break;
5365 : 0 : default:
5366 : 0 : gcc_unreachable ();
5367 : : }
5368 : :
5369 : 6672 : if (name)
5370 : : {
5371 : 6186 : char* tmp = concat (prefix, "_", name, NULL);
5372 : 6186 : new_vect_var = make_temp_ssa_name (type, NULL, tmp);
5373 : 6186 : free (tmp);
5374 : : }
5375 : : else
5376 : 486 : new_vect_var = make_temp_ssa_name (type, NULL, prefix);
5377 : :
5378 : 6672 : return new_vect_var;
5379 : : }
5380 : :
5381 : : /* Duplicate points-to info on NAME from DR_INFO. */
5382 : :
5383 : : static void
5384 : 286143 : vect_duplicate_ssa_name_ptr_info (tree name, dr_vec_info *dr_info)
5385 : : {
5386 : 286143 : duplicate_ssa_name_ptr_info (name, DR_PTR_INFO (dr_info->dr));
5387 : : /* DR_PTR_INFO is for a base SSA name, not including constant or
5388 : : variable offsets in the ref so its alignment info does not apply. */
5389 : 286143 : mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (name));
5390 : 286143 : }
5391 : :
5392 : : /* Function vect_create_addr_base_for_vector_ref.
5393 : :
5394 : : Create an expression that computes the address of the first memory location
5395 : : that will be accessed for a data reference.
5396 : :
5397 : : Input:
5398 : : STMT_INFO: The statement containing the data reference.
5399 : : NEW_STMT_LIST: Must be initialized to NULL_TREE or a statement list.
5400 : : OFFSET: Optional. If supplied, it is be added to the initial address.
5401 : : LOOP: Specify relative to which loop-nest should the address be computed.
5402 : : For example, when the dataref is in an inner-loop nested in an
5403 : : outer-loop that is now being vectorized, LOOP can be either the
5404 : : outer-loop, or the inner-loop. The first memory location accessed
5405 : : by the following dataref ('in' points to short):
5406 : :
5407 : : for (i=0; i<N; i++)
5408 : : for (j=0; j<M; j++)
5409 : : s += in[i+j]
5410 : :
5411 : : is as follows:
5412 : : if LOOP=i_loop: &in (relative to i_loop)
5413 : : if LOOP=j_loop: &in+i*2B (relative to j_loop)
5414 : :
5415 : : Output:
5416 : : 1. Return an SSA_NAME whose value is the address of the memory location of
5417 : : the first vector of the data reference.
5418 : : 2. If new_stmt_list is not NULL_TREE after return then the caller must insert
5419 : : these statement(s) which define the returned SSA_NAME.
5420 : :
5421 : : FORNOW: We are only handling array accesses with step 1. */
5422 : :
5423 : : tree
5424 : 692595 : vect_create_addr_base_for_vector_ref (vec_info *vinfo, stmt_vec_info stmt_info,
5425 : : gimple_seq *new_stmt_list,
5426 : : tree offset)
5427 : : {
5428 : 692595 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
5429 : 692595 : struct data_reference *dr = dr_info->dr;
5430 : 692595 : const char *base_name;
5431 : 692595 : tree addr_base;
5432 : 692595 : tree dest;
5433 : 692595 : gimple_seq seq = NULL;
5434 : 692595 : tree vect_ptr_type;
5435 : 692595 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5436 : 692595 : innermost_loop_behavior *drb = vect_dr_behavior (vinfo, dr_info);
5437 : :
5438 : 692595 : tree data_ref_base = unshare_expr (drb->base_address);
5439 : 692595 : tree base_offset = unshare_expr (get_dr_vinfo_offset (vinfo, dr_info, true));
5440 : 692595 : tree init = unshare_expr (drb->init);
5441 : :
5442 : 692595 : if (loop_vinfo)
5443 : 122561 : base_name = get_name (data_ref_base);
5444 : : else
5445 : : {
5446 : 570034 : base_offset = ssize_int (0);
5447 : 570034 : init = ssize_int (0);
5448 : 570034 : base_name = get_name (DR_REF (dr));
5449 : : }
5450 : :
5451 : : /* Create base_offset */
5452 : 692595 : base_offset = size_binop (PLUS_EXPR,
5453 : : fold_convert (sizetype, base_offset),
5454 : : fold_convert (sizetype, init));
5455 : :
5456 : 692595 : if (offset)
5457 : : {
5458 : 3042 : offset = fold_convert (sizetype, offset);
5459 : 3042 : base_offset = fold_build2 (PLUS_EXPR, sizetype,
5460 : : base_offset, offset);
5461 : : }
5462 : :
5463 : : /* base + base_offset */
5464 : 692595 : if (loop_vinfo)
5465 : 122561 : addr_base = fold_build_pointer_plus (data_ref_base, base_offset);
5466 : : else
5467 : 1140068 : addr_base = build1 (ADDR_EXPR,
5468 : 570034 : build_pointer_type (TREE_TYPE (DR_REF (dr))),
5469 : : /* Strip zero offset components since we don't need
5470 : : them and they can confuse late diagnostics if
5471 : : we CSE them wrongly. See PR106904 for example. */
5472 : : unshare_expr (strip_zero_offset_components
5473 : : (DR_REF (dr))));
5474 : :
5475 : 692595 : vect_ptr_type = build_pointer_type (TREE_TYPE (DR_REF (dr)));
5476 : 692595 : dest = vect_get_new_vect_var (vect_ptr_type, vect_pointer_var, base_name);
5477 : 692595 : addr_base = force_gimple_operand (addr_base, &seq, true, dest);
5478 : 692595 : gimple_seq_add_seq (new_stmt_list, seq);
5479 : :
5480 : 692595 : if (DR_PTR_INFO (dr)
5481 : 178314 : && TREE_CODE (addr_base) == SSA_NAME
5482 : : /* We should only duplicate pointer info to newly created SSA names. */
5483 : 870131 : && SSA_NAME_VAR (addr_base) == dest)
5484 : : {
5485 : 150515 : gcc_assert (!SSA_NAME_PTR_INFO (addr_base));
5486 : 150515 : vect_duplicate_ssa_name_ptr_info (addr_base, dr_info);
5487 : : }
5488 : :
5489 : 692595 : if (dump_enabled_p ())
5490 : 23885 : dump_printf_loc (MSG_NOTE, vect_location, "created %T\n", addr_base);
5491 : :
5492 : 692595 : return addr_base;
5493 : : }
5494 : :
5495 : :
5496 : : /* Function vect_create_data_ref_ptr.
5497 : :
5498 : : Create a new pointer-to-AGGR_TYPE variable (ap), that points to the first
5499 : : location accessed in the loop by STMT_INFO, along with the def-use update
5500 : : chain to appropriately advance the pointer through the loop iterations.
5501 : : Also set aliasing information for the pointer. This pointer is used by
5502 : : the callers to this function to create a memory reference expression for
5503 : : vector load/store access.
5504 : :
5505 : : Input:
5506 : : 1. STMT_INFO: a stmt that references memory. Expected to be of the form
5507 : : GIMPLE_ASSIGN <name, data-ref> or
5508 : : GIMPLE_ASSIGN <data-ref, name>.
5509 : : 2. AGGR_TYPE: the type of the reference, which should be either a vector
5510 : : or an array.
5511 : : 3. AT_LOOP: the loop where the vector memref is to be created.
5512 : : 4. OFFSET (optional): a byte offset to be added to the initial address
5513 : : accessed by the data-ref in STMT_INFO.
5514 : : 5. BSI: location where the new stmts are to be placed if there is no loop
5515 : : 6. ONLY_INIT: indicate if ap is to be updated in the loop, or remain
5516 : : pointing to the initial address.
5517 : : 8. IV_STEP (optional, defaults to NULL): the amount that should be added
5518 : : to the IV during each iteration of the loop. NULL says to move
5519 : : by one copy of AGGR_TYPE up or down, depending on the step of the
5520 : : data reference.
5521 : :
5522 : : Output:
5523 : : 1. Declare a new ptr to vector_type, and have it point to the base of the
5524 : : data reference (initial addressed accessed by the data reference).
5525 : : For example, for vector of type V8HI, the following code is generated:
5526 : :
5527 : : v8hi *ap;
5528 : : ap = (v8hi *)initial_address;
5529 : :
5530 : : if OFFSET is not supplied:
5531 : : initial_address = &a[init];
5532 : : if OFFSET is supplied:
5533 : : initial_address = &a[init] + OFFSET;
5534 : : if BYTE_OFFSET is supplied:
5535 : : initial_address = &a[init] + BYTE_OFFSET;
5536 : :
5537 : : Return the initial_address in INITIAL_ADDRESS.
5538 : :
5539 : : 2. If ONLY_INIT is true, just return the initial pointer. Otherwise, also
5540 : : update the pointer in each iteration of the loop.
5541 : :
5542 : : Return the increment stmt that updates the pointer in PTR_INCR.
5543 : :
5544 : : 3. Return the pointer. */
5545 : :
5546 : : tree
5547 : 692365 : vect_create_data_ref_ptr (vec_info *vinfo, stmt_vec_info stmt_info,
5548 : : tree aggr_type, class loop *at_loop, tree offset,
5549 : : tree *initial_address, gimple_stmt_iterator *gsi,
5550 : : gimple **ptr_incr, bool only_init,
5551 : : tree iv_step)
5552 : : {
5553 : 692365 : const char *base_name;
5554 : 692365 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
5555 : 692365 : class loop *loop = NULL;
5556 : 692365 : bool nested_in_vect_loop = false;
5557 : 692365 : class loop *containing_loop = NULL;
5558 : 692365 : tree aggr_ptr_type;
5559 : 692365 : tree aggr_ptr;
5560 : 692365 : tree new_temp;
5561 : 692365 : gimple_seq new_stmt_list = NULL;
5562 : 692365 : edge pe = NULL;
5563 : 692365 : basic_block new_bb;
5564 : 692365 : tree aggr_ptr_init;
5565 : 692365 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
5566 : 692365 : struct data_reference *dr = dr_info->dr;
5567 : 692365 : tree aptr;
5568 : 692365 : gimple_stmt_iterator incr_gsi;
5569 : 692365 : bool insert_after;
5570 : 692365 : tree indx_before_incr, indx_after_incr;
5571 : 692365 : gimple *incr;
5572 : 692365 : bb_vec_info bb_vinfo = dyn_cast <bb_vec_info> (vinfo);
5573 : :
5574 : 692365 : gcc_assert (iv_step != NULL_TREE
5575 : : || TREE_CODE (aggr_type) == ARRAY_TYPE
5576 : : || TREE_CODE (aggr_type) == VECTOR_TYPE);
5577 : :
5578 : 692365 : if (loop_vinfo)
5579 : : {
5580 : 122331 : loop = LOOP_VINFO_LOOP (loop_vinfo);
5581 : 122331 : nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
5582 : 122331 : containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
5583 : 122331 : pe = loop_preheader_edge (loop);
5584 : : }
5585 : : else
5586 : : {
5587 : 570034 : gcc_assert (bb_vinfo);
5588 : 570034 : only_init = true;
5589 : 570034 : *ptr_incr = NULL;
5590 : : }
5591 : :
5592 : : /* Create an expression for the first address accessed by this load
5593 : : in LOOP. */
5594 : 692365 : base_name = get_name (DR_BASE_ADDRESS (dr));
5595 : :
5596 : 692365 : if (dump_enabled_p ())
5597 : : {
5598 : 23853 : tree dr_base_type = TREE_TYPE (DR_BASE_OBJECT (dr));
5599 : 23853 : dump_printf_loc (MSG_NOTE, vect_location,
5600 : : "create %s-pointer variable to type: %T",
5601 : 23853 : get_tree_code_name (TREE_CODE (aggr_type)),
5602 : : aggr_type);
5603 : 23853 : if (TREE_CODE (dr_base_type) == ARRAY_TYPE)
5604 : 13081 : dump_printf (MSG_NOTE, " vectorizing an array ref: ");
5605 : 10772 : else if (TREE_CODE (dr_base_type) == VECTOR_TYPE)
5606 : 0 : dump_printf (MSG_NOTE, " vectorizing a vector ref: ");
5607 : 10772 : else if (TREE_CODE (dr_base_type) == RECORD_TYPE)
5608 : 1558 : dump_printf (MSG_NOTE, " vectorizing a record based array ref: ");
5609 : : else
5610 : 9214 : dump_printf (MSG_NOTE, " vectorizing a pointer ref: ");
5611 : 23853 : dump_printf (MSG_NOTE, "%T\n", DR_BASE_OBJECT (dr));
5612 : : }
5613 : :
5614 : : /* (1) Create the new aggregate-pointer variable.
5615 : : Vector and array types inherit the alias set of their component
5616 : : type by default so we need to use a ref-all pointer if the data
5617 : : reference does not conflict with the created aggregated data
5618 : : reference because it is not addressable. */
5619 : 692365 : bool need_ref_all = false;
5620 : 692365 : if (!alias_sets_conflict_p (get_alias_set (aggr_type),
5621 : : get_alias_set (DR_REF (dr))))
5622 : : need_ref_all = true;
5623 : : /* Likewise for any of the data references in the stmt group. */
5624 : 577888 : else if (DR_GROUP_SIZE (stmt_info) > 1)
5625 : : {
5626 : 471012 : stmt_vec_info sinfo = DR_GROUP_FIRST_ELEMENT (stmt_info);
5627 : 1293196 : do
5628 : : {
5629 : 1293196 : struct data_reference *sdr = STMT_VINFO_DATA_REF (sinfo);
5630 : 1293196 : if (!alias_sets_conflict_p (get_alias_set (aggr_type),
5631 : : get_alias_set (DR_REF (sdr))))
5632 : : {
5633 : : need_ref_all = true;
5634 : : break;
5635 : : }
5636 : 1291338 : sinfo = DR_GROUP_NEXT_ELEMENT (sinfo);
5637 : : }
5638 : 1291338 : while (sinfo);
5639 : : }
5640 : 692365 : aggr_ptr_type = build_pointer_type_for_mode (aggr_type, VOIDmode,
5641 : : need_ref_all);
5642 : 692365 : aggr_ptr = vect_get_new_vect_var (aggr_ptr_type, vect_pointer_var, base_name);
5643 : :
5644 : :
5645 : : /* Note: If the dataref is in an inner-loop nested in LOOP, and we are
5646 : : vectorizing LOOP (i.e., outer-loop vectorization), we need to create two
5647 : : def-use update cycles for the pointer: one relative to the outer-loop
5648 : : (LOOP), which is what steps (3) and (4) below do. The other is relative
5649 : : to the inner-loop (which is the inner-most loop containing the dataref),
5650 : : and this is done be step (5) below.
5651 : :
5652 : : When vectorizing inner-most loops, the vectorized loop (LOOP) is also the
5653 : : inner-most loop, and so steps (3),(4) work the same, and step (5) is
5654 : : redundant. Steps (3),(4) create the following:
5655 : :
5656 : : vp0 = &base_addr;
5657 : : LOOP: vp1 = phi(vp0,vp2)
5658 : : ...
5659 : : ...
5660 : : vp2 = vp1 + step
5661 : : goto LOOP
5662 : :
5663 : : If there is an inner-loop nested in loop, then step (5) will also be
5664 : : applied, and an additional update in the inner-loop will be created:
5665 : :
5666 : : vp0 = &base_addr;
5667 : : LOOP: vp1 = phi(vp0,vp2)
5668 : : ...
5669 : : inner: vp3 = phi(vp1,vp4)
5670 : : vp4 = vp3 + inner_step
5671 : : if () goto inner
5672 : : ...
5673 : : vp2 = vp1 + step
5674 : : if () goto LOOP */
5675 : :
5676 : : /* (2) Calculate the initial address of the aggregate-pointer, and set
5677 : : the aggregate-pointer to point to it before the loop. */
5678 : :
5679 : : /* Create: (&(base[init_val]+offset) in the loop preheader. */
5680 : :
5681 : 692365 : new_temp = vect_create_addr_base_for_vector_ref (vinfo,
5682 : : stmt_info, &new_stmt_list,
5683 : : offset);
5684 : 692365 : if (new_stmt_list)
5685 : : {
5686 : 171672 : if (pe)
5687 : : {
5688 : 54662 : new_bb = gsi_insert_seq_on_edge_immediate (pe, new_stmt_list);
5689 : 54662 : gcc_assert (!new_bb);
5690 : : }
5691 : : else
5692 : 117010 : gsi_insert_seq_before (gsi, new_stmt_list, GSI_SAME_STMT);
5693 : : }
5694 : :
5695 : 692365 : *initial_address = new_temp;
5696 : 692365 : aggr_ptr_init = new_temp;
5697 : :
5698 : : /* (3) Handle the updating of the aggregate-pointer inside the loop.
5699 : : This is needed when ONLY_INIT is false, and also when AT_LOOP is the
5700 : : inner-loop nested in LOOP (during outer-loop vectorization). */
5701 : :
5702 : : /* No update in loop is required. */
5703 : 692365 : if (only_init && (!loop_vinfo || at_loop == loop))
5704 : : aptr = aggr_ptr_init;
5705 : : else
5706 : : {
5707 : : /* Accesses to invariant addresses should be handled specially
5708 : : by the caller. */
5709 : 122323 : tree step = vect_dr_behavior (vinfo, dr_info)->step;
5710 : 122323 : gcc_assert (!integer_zerop (step));
5711 : :
5712 : 122323 : if (iv_step == NULL_TREE)
5713 : : {
5714 : : /* The step of the aggregate pointer is the type size,
5715 : : negated for downward accesses. */
5716 : 0 : iv_step = TYPE_SIZE_UNIT (aggr_type);
5717 : 0 : if (tree_int_cst_sgn (step) == -1)
5718 : 0 : iv_step = fold_build1 (NEGATE_EXPR, TREE_TYPE (iv_step), iv_step);
5719 : : }
5720 : :
5721 : 122323 : standard_iv_increment_position (loop, &incr_gsi, &insert_after);
5722 : :
5723 : 122323 : create_iv (aggr_ptr_init, PLUS_EXPR,
5724 : : iv_step, aggr_ptr, loop, &incr_gsi, insert_after,
5725 : : &indx_before_incr, &indx_after_incr);
5726 : 122323 : incr = gsi_stmt (incr_gsi);
5727 : :
5728 : : /* Copy the points-to information if it exists. */
5729 : 122323 : if (DR_PTR_INFO (dr))
5730 : : {
5731 : 67739 : vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
5732 : 67739 : vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
5733 : : }
5734 : 122323 : if (ptr_incr)
5735 : 122323 : *ptr_incr = incr;
5736 : :
5737 : 122323 : aptr = indx_before_incr;
5738 : : }
5739 : :
5740 : 692365 : if (!nested_in_vect_loop || only_init)
5741 : : return aptr;
5742 : :
5743 : :
5744 : : /* (4) Handle the updating of the aggregate-pointer inside the inner-loop
5745 : : nested in LOOP, if exists. */
5746 : :
5747 : 341 : gcc_assert (nested_in_vect_loop);
5748 : 341 : if (!only_init)
5749 : : {
5750 : 341 : standard_iv_increment_position (containing_loop, &incr_gsi,
5751 : : &insert_after);
5752 : 341 : create_iv (aptr, PLUS_EXPR, DR_STEP (dr),
5753 : : aggr_ptr, containing_loop, &incr_gsi, insert_after,
5754 : : &indx_before_incr, &indx_after_incr);
5755 : 341 : incr = gsi_stmt (incr_gsi);
5756 : :
5757 : : /* Copy the points-to information if it exists. */
5758 : 341 : if (DR_PTR_INFO (dr))
5759 : : {
5760 : 75 : vect_duplicate_ssa_name_ptr_info (indx_before_incr, dr_info);
5761 : 75 : vect_duplicate_ssa_name_ptr_info (indx_after_incr, dr_info);
5762 : : }
5763 : 341 : if (ptr_incr)
5764 : 341 : *ptr_incr = incr;
5765 : :
5766 : 341 : return indx_before_incr;
5767 : : }
5768 : : else
5769 : : gcc_unreachable ();
5770 : : }
5771 : :
5772 : :
5773 : : /* Function bump_vector_ptr
5774 : :
5775 : : Increment a pointer (to a vector type) by vector-size. If requested,
5776 : : i.e. if PTR-INCR is given, then also connect the new increment stmt
5777 : : to the existing def-use update-chain of the pointer, by modifying
5778 : : the PTR_INCR as illustrated below:
5779 : :
5780 : : The pointer def-use update-chain before this function:
5781 : : DATAREF_PTR = phi (p_0, p_2)
5782 : : ....
5783 : : PTR_INCR: p_2 = DATAREF_PTR + step
5784 : :
5785 : : The pointer def-use update-chain after this function:
5786 : : DATAREF_PTR = phi (p_0, p_2)
5787 : : ....
5788 : : NEW_DATAREF_PTR = DATAREF_PTR + BUMP
5789 : : ....
5790 : : PTR_INCR: p_2 = NEW_DATAREF_PTR + step
5791 : :
5792 : : Input:
5793 : : DATAREF_PTR - ssa_name of a pointer (to vector type) that is being updated
5794 : : in the loop.
5795 : : PTR_INCR - optional. The stmt that updates the pointer in each iteration of
5796 : : the loop. The increment amount across iterations is expected
5797 : : to be vector_size.
5798 : : BSI - location where the new update stmt is to be placed.
5799 : : STMT_INFO - the original scalar memory-access stmt that is being vectorized.
5800 : : BUMP - optional. The offset by which to bump the pointer. If not given,
5801 : : the offset is assumed to be vector_size.
5802 : :
5803 : : Output: Return NEW_DATAREF_PTR as illustrated above.
5804 : :
5805 : : */
5806 : :
5807 : : tree
5808 : 199428 : bump_vector_ptr (vec_info *vinfo,
5809 : : tree dataref_ptr, gimple *ptr_incr, gimple_stmt_iterator *gsi,
5810 : : stmt_vec_info stmt_info, tree bump)
5811 : : {
5812 : 199428 : struct data_reference *dr = STMT_VINFO_DATA_REF (stmt_info);
5813 : 199428 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
5814 : 199428 : tree update = TYPE_SIZE_UNIT (vectype);
5815 : 199428 : gimple *incr_stmt;
5816 : 199428 : ssa_op_iter iter;
5817 : 199428 : use_operand_p use_p;
5818 : 199428 : tree new_dataref_ptr;
5819 : :
5820 : 199428 : if (bump)
5821 : 199428 : update = bump;
5822 : :
5823 : 199428 : if (TREE_CODE (dataref_ptr) == SSA_NAME)
5824 : 81760 : new_dataref_ptr = copy_ssa_name (dataref_ptr);
5825 : 117668 : else if (is_gimple_min_invariant (dataref_ptr))
5826 : : /* When possible avoid emitting a separate increment stmt that will
5827 : : force the addressed object addressable. */
5828 : 235336 : return build1 (ADDR_EXPR, TREE_TYPE (dataref_ptr),
5829 : 117668 : fold_build2 (MEM_REF,
5830 : : TREE_TYPE (TREE_TYPE (dataref_ptr)),
5831 : : dataref_ptr,
5832 : 117668 : fold_convert (ptr_type_node, update)));
5833 : : else
5834 : 0 : new_dataref_ptr = make_ssa_name (TREE_TYPE (dataref_ptr));
5835 : 81760 : incr_stmt = gimple_build_assign (new_dataref_ptr, POINTER_PLUS_EXPR,
5836 : : dataref_ptr, update);
5837 : 81760 : vect_finish_stmt_generation (vinfo, stmt_info, incr_stmt, gsi);
5838 : : /* Fold the increment, avoiding excessive chains use-def chains of
5839 : : those, leading to compile-time issues for passes until the next
5840 : : forwprop pass which would do this as well. */
5841 : 81760 : gimple_stmt_iterator fold_gsi = gsi_for_stmt (incr_stmt);
5842 : 81760 : if (fold_stmt (&fold_gsi, follow_all_ssa_edges))
5843 : : {
5844 : 43128 : incr_stmt = gsi_stmt (fold_gsi);
5845 : 43128 : update_stmt (incr_stmt);
5846 : : }
5847 : :
5848 : : /* Copy the points-to information if it exists. */
5849 : 81760 : if (DR_PTR_INFO (dr))
5850 : : {
5851 : 60541 : duplicate_ssa_name_ptr_info (new_dataref_ptr, DR_PTR_INFO (dr));
5852 : 60541 : mark_ptr_info_alignment_unknown (SSA_NAME_PTR_INFO (new_dataref_ptr));
5853 : : }
5854 : :
5855 : 81760 : if (!ptr_incr)
5856 : : return new_dataref_ptr;
5857 : :
5858 : : /* Update the vector-pointer's cross-iteration increment. */
5859 : 86584 : FOR_EACH_SSA_USE_OPERAND (use_p, ptr_incr, iter, SSA_OP_USE)
5860 : : {
5861 : 43292 : tree use = USE_FROM_PTR (use_p);
5862 : :
5863 : 43292 : if (use == dataref_ptr)
5864 : 43292 : SET_USE (use_p, new_dataref_ptr);
5865 : : else
5866 : 0 : gcc_assert (operand_equal_p (use, update, 0));
5867 : : }
5868 : :
5869 : : return new_dataref_ptr;
5870 : : }
5871 : :
5872 : :
5873 : : /* Copy memory reference info such as base/clique from the SRC reference
5874 : : to the DEST MEM_REF. */
5875 : :
5876 : : void
5877 : 904267 : vect_copy_ref_info (tree dest, tree src)
5878 : : {
5879 : 904267 : if (TREE_CODE (dest) != MEM_REF)
5880 : : return;
5881 : :
5882 : : tree src_base = src;
5883 : 1863590 : while (handled_component_p (src_base))
5884 : 959848 : src_base = TREE_OPERAND (src_base, 0);
5885 : 903742 : if (TREE_CODE (src_base) != MEM_REF
5886 : 903742 : && TREE_CODE (src_base) != TARGET_MEM_REF)
5887 : : return;
5888 : :
5889 : 478734 : MR_DEPENDENCE_CLIQUE (dest) = MR_DEPENDENCE_CLIQUE (src_base);
5890 : 478734 : MR_DEPENDENCE_BASE (dest) = MR_DEPENDENCE_BASE (src_base);
5891 : : }
5892 : :
5893 : :
5894 : : /* Function vect_create_destination_var.
5895 : :
5896 : : Create a new temporary of type VECTYPE. */
5897 : :
5898 : : tree
5899 : 469940 : vect_create_destination_var (tree scalar_dest, tree vectype)
5900 : : {
5901 : 469940 : tree vec_dest;
5902 : 469940 : const char *name;
5903 : 469940 : char *new_name;
5904 : 469940 : tree type;
5905 : 469940 : enum vect_var_kind kind;
5906 : :
5907 : 469940 : kind = vectype
5908 : 917866 : ? VECTOR_BOOLEAN_TYPE_P (vectype)
5909 : 447926 : ? vect_mask_var
5910 : : : vect_simple_var
5911 : : : vect_scalar_var;
5912 : 22014 : type = vectype ? vectype : TREE_TYPE (scalar_dest);
5913 : :
5914 : 469940 : gcc_assert (TREE_CODE (scalar_dest) == SSA_NAME);
5915 : :
5916 : 469940 : name = get_name (scalar_dest);
5917 : 469940 : if (name)
5918 : 169440 : new_name = xasprintf ("%s_%u", name, SSA_NAME_VERSION (scalar_dest));
5919 : : else
5920 : 300500 : new_name = xasprintf ("_%u", SSA_NAME_VERSION (scalar_dest));
5921 : 469940 : vec_dest = vect_get_new_vect_var (type, kind, new_name);
5922 : 469940 : free (new_name);
5923 : :
5924 : 469940 : return vec_dest;
5925 : : }
5926 : :
5927 : : /* Function vect_grouped_store_supported.
5928 : :
5929 : : Returns TRUE if interleave high and interleave low permutations
5930 : : are supported, and FALSE otherwise. */
5931 : :
5932 : : bool
5933 : 2667 : vect_grouped_store_supported (tree vectype, unsigned HOST_WIDE_INT count)
5934 : : {
5935 : 2667 : machine_mode mode = TYPE_MODE (vectype);
5936 : :
5937 : : /* vect_permute_store_chain requires the group size to be equal to 3 or
5938 : : be a power of two. */
5939 : 2667 : if (count != 3 && exact_log2 (count) == -1)
5940 : : {
5941 : 567 : if (dump_enabled_p ())
5942 : 9 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5943 : : "the size of the group of accesses"
5944 : : " is not a power of 2 or not eqaul to 3\n");
5945 : 567 : return false;
5946 : : }
5947 : :
5948 : : /* Check that the permutation is supported. */
5949 : 2100 : if (VECTOR_MODE_P (mode))
5950 : : {
5951 : 2100 : unsigned int i;
5952 : 2100 : if (count == 3)
5953 : : {
5954 : 958 : unsigned int j0 = 0, j1 = 0, j2 = 0;
5955 : 958 : unsigned int i, j;
5956 : :
5957 : 958 : unsigned int nelt;
5958 : 1916 : if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
5959 : : {
5960 : : if (dump_enabled_p ())
5961 : : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
5962 : : "cannot handle groups of 3 stores for"
5963 : : " variable-length vectors\n");
5964 : : return false;
5965 : : }
5966 : :
5967 : 958 : vec_perm_builder sel (nelt, nelt, 1);
5968 : 958 : sel.quick_grow (nelt);
5969 : 958 : vec_perm_indices indices;
5970 : 3607 : for (j = 0; j < 3; j++)
5971 : : {
5972 : 2724 : int nelt0 = ((3 - j) * nelt) % 3;
5973 : 2724 : int nelt1 = ((3 - j) * nelt + 1) % 3;
5974 : 2724 : int nelt2 = ((3 - j) * nelt + 2) % 3;
5975 : 9684 : for (i = 0; i < nelt; i++)
5976 : : {
5977 : 6960 : if (3 * i + nelt0 < nelt)
5978 : 2358 : sel[3 * i + nelt0] = j0++;
5979 : 6960 : if (3 * i + nelt1 < nelt)
5980 : 2319 : sel[3 * i + nelt1] = nelt + j1++;
5981 : 6960 : if (3 * i + nelt2 < nelt)
5982 : 2283 : sel[3 * i + nelt2] = 0;
5983 : : }
5984 : 2724 : indices.new_vector (sel, 2, nelt);
5985 : 2724 : if (!can_vec_perm_const_p (mode, mode, indices))
5986 : : {
5987 : 66 : if (dump_enabled_p ())
5988 : 37 : dump_printf (MSG_MISSED_OPTIMIZATION,
5989 : : "permutation op not supported by target.\n");
5990 : 66 : return false;
5991 : : }
5992 : :
5993 : 9042 : for (i = 0; i < nelt; i++)
5994 : : {
5995 : 6384 : if (3 * i + nelt0 < nelt)
5996 : 2134 : sel[3 * i + nelt0] = 3 * i + nelt0;
5997 : 6384 : if (3 * i + nelt1 < nelt)
5998 : 2125 : sel[3 * i + nelt1] = 3 * i + nelt1;
5999 : 6384 : if (3 * i + nelt2 < nelt)
6000 : 2125 : sel[3 * i + nelt2] = nelt + j2++;
6001 : : }
6002 : 2658 : indices.new_vector (sel, 2, nelt);
6003 : 2658 : if (!can_vec_perm_const_p (mode, mode, indices))
6004 : : {
6005 : 9 : if (dump_enabled_p ())
6006 : 9 : dump_printf (MSG_MISSED_OPTIMIZATION,
6007 : : "permutation op not supported by target.\n");
6008 : 9 : return false;
6009 : : }
6010 : : }
6011 : : return true;
6012 : 958 : }
6013 : : else
6014 : : {
6015 : : /* If length is not equal to 3 then only power of 2 is supported. */
6016 : 1142 : gcc_assert (pow2p_hwi (count));
6017 : 2284 : poly_uint64 nelt = GET_MODE_NUNITS (mode);
6018 : :
6019 : : /* The encoding has 2 interleaved stepped patterns. */
6020 : 2284 : if(!multiple_p (nelt, 2))
6021 : 1094 : return false;
6022 : 1142 : vec_perm_builder sel (nelt, 2, 3);
6023 : 1142 : sel.quick_grow (6);
6024 : 5710 : for (i = 0; i < 3; i++)
6025 : : {
6026 : 3426 : sel[i * 2] = i;
6027 : 3426 : sel[i * 2 + 1] = i + nelt;
6028 : : }
6029 : 1142 : vec_perm_indices indices (sel, 2, nelt);
6030 : 1142 : if (can_vec_perm_const_p (mode, mode, indices))
6031 : : {
6032 : 7658 : for (i = 0; i < 6; i++)
6033 : 6564 : sel[i] += exact_div (nelt, 2);
6034 : 1094 : indices.new_vector (sel, 2, nelt);
6035 : 1094 : if (can_vec_perm_const_p (mode, mode, indices))
6036 : 1094 : return true;
6037 : : }
6038 : 1142 : }
6039 : : }
6040 : :
6041 : 48 : if (dump_enabled_p ())
6042 : 3 : dump_printf (MSG_MISSED_OPTIMIZATION,
6043 : : "permutation op not supported by target.\n");
6044 : : return false;
6045 : : }
6046 : :
6047 : : /* Return FN if vec_{mask_,mask_len_}store_lanes is available for COUNT vectors
6048 : : of type VECTYPE. MASKED_P says whether the masked form is needed. */
6049 : :
6050 : : internal_fn
6051 : 30843 : vect_store_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
6052 : : bool masked_p)
6053 : : {
6054 : 30843 : if (vect_lanes_optab_supported_p ("vec_mask_len_store_lanes",
6055 : : vec_mask_len_store_lanes_optab, vectype,
6056 : : count))
6057 : : return IFN_MASK_LEN_STORE_LANES;
6058 : 30843 : else if (masked_p)
6059 : : {
6060 : 148 : if (vect_lanes_optab_supported_p ("vec_mask_store_lanes",
6061 : : vec_mask_store_lanes_optab, vectype,
6062 : : count))
6063 : : return IFN_MASK_STORE_LANES;
6064 : : }
6065 : : else
6066 : : {
6067 : 30695 : if (vect_lanes_optab_supported_p ("vec_store_lanes",
6068 : : vec_store_lanes_optab, vectype, count))
6069 : : return IFN_STORE_LANES;
6070 : : }
6071 : : return IFN_LAST;
6072 : : }
6073 : :
6074 : :
6075 : : /* Function vect_setup_realignment
6076 : :
6077 : : This function is called when vectorizing an unaligned load using
6078 : : the dr_explicit_realign[_optimized] scheme.
6079 : : This function generates the following code at the loop prolog:
6080 : :
6081 : : p = initial_addr;
6082 : : x msq_init = *(floor(p)); # prolog load
6083 : : realignment_token = call target_builtin;
6084 : : loop:
6085 : : x msq = phi (msq_init, ---)
6086 : :
6087 : : The stmts marked with x are generated only for the case of
6088 : : dr_explicit_realign_optimized.
6089 : :
6090 : : The code above sets up a new (vector) pointer, pointing to the first
6091 : : location accessed by STMT_INFO, and a "floor-aligned" load using that
6092 : : pointer. It also generates code to compute the "realignment-token"
6093 : : (if the relevant target hook was defined), and creates a phi-node at the
6094 : : loop-header bb whose arguments are the result of the prolog-load (created
6095 : : by this function) and the result of a load that takes place in the loop
6096 : : (to be created by the caller to this function).
6097 : :
6098 : : For the case of dr_explicit_realign_optimized:
6099 : : The caller to this function uses the phi-result (msq) to create the
6100 : : realignment code inside the loop, and sets up the missing phi argument,
6101 : : as follows:
6102 : : loop:
6103 : : msq = phi (msq_init, lsq)
6104 : : lsq = *(floor(p')); # load in loop
6105 : : result = realign_load (msq, lsq, realignment_token);
6106 : :
6107 : : For the case of dr_explicit_realign:
6108 : : loop:
6109 : : msq = *(floor(p)); # load in loop
6110 : : p' = p + (VS-1);
6111 : : lsq = *(floor(p')); # load in loop
6112 : : result = realign_load (msq, lsq, realignment_token);
6113 : :
6114 : : Input:
6115 : : STMT_INFO - (scalar) load stmt to be vectorized. This load accesses
6116 : : a memory location that may be unaligned.
6117 : : BSI - place where new code is to be inserted.
6118 : : ALIGNMENT_SUPPORT_SCHEME - which of the two misalignment handling schemes
6119 : : is used.
6120 : :
6121 : : Output:
6122 : : REALIGNMENT_TOKEN - the result of a call to the builtin_mask_for_load
6123 : : target hook, if defined.
6124 : : Return value - the result of the loop-header phi node. */
6125 : :
6126 : : tree
6127 : 0 : vect_setup_realignment (vec_info *vinfo, stmt_vec_info stmt_info,
6128 : : gimple_stmt_iterator *gsi, tree *realignment_token,
6129 : : enum dr_alignment_support alignment_support_scheme,
6130 : : tree init_addr,
6131 : : class loop **at_loop)
6132 : : {
6133 : 0 : tree vectype = STMT_VINFO_VECTYPE (stmt_info);
6134 : 0 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6135 : 0 : dr_vec_info *dr_info = STMT_VINFO_DR_INFO (stmt_info);
6136 : 0 : struct data_reference *dr = dr_info->dr;
6137 : 0 : class loop *loop = NULL;
6138 : 0 : edge pe = NULL;
6139 : 0 : tree scalar_dest = gimple_assign_lhs (stmt_info->stmt);
6140 : 0 : tree vec_dest;
6141 : 0 : gimple *inc;
6142 : 0 : tree ptr;
6143 : 0 : tree data_ref;
6144 : 0 : basic_block new_bb;
6145 : 0 : tree msq_init = NULL_TREE;
6146 : 0 : tree new_temp;
6147 : 0 : gphi *phi_stmt;
6148 : 0 : tree msq = NULL_TREE;
6149 : 0 : gimple_seq stmts = NULL;
6150 : 0 : bool compute_in_loop = false;
6151 : 0 : bool nested_in_vect_loop = false;
6152 : 0 : class loop *containing_loop = (gimple_bb (stmt_info->stmt))->loop_father;
6153 : 0 : class loop *loop_for_initial_load = NULL;
6154 : :
6155 : 0 : if (loop_vinfo)
6156 : : {
6157 : 0 : loop = LOOP_VINFO_LOOP (loop_vinfo);
6158 : 0 : nested_in_vect_loop = nested_in_vect_loop_p (loop, stmt_info);
6159 : : }
6160 : :
6161 : 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign
6162 : : || alignment_support_scheme == dr_explicit_realign_optimized);
6163 : :
6164 : : /* We need to generate three things:
6165 : : 1. the misalignment computation
6166 : : 2. the extra vector load (for the optimized realignment scheme).
6167 : : 3. the phi node for the two vectors from which the realignment is
6168 : : done (for the optimized realignment scheme). */
6169 : :
6170 : : /* 1. Determine where to generate the misalignment computation.
6171 : :
6172 : : If INIT_ADDR is NULL_TREE, this indicates that the misalignment
6173 : : calculation will be generated by this function, outside the loop (in the
6174 : : preheader). Otherwise, INIT_ADDR had already been computed for us by the
6175 : : caller, inside the loop.
6176 : :
6177 : : Background: If the misalignment remains fixed throughout the iterations of
6178 : : the loop, then both realignment schemes are applicable, and also the
6179 : : misalignment computation can be done outside LOOP. This is because we are
6180 : : vectorizing LOOP, and so the memory accesses in LOOP advance in steps that
6181 : : are a multiple of VS (the Vector Size), and therefore the misalignment in
6182 : : different vectorized LOOP iterations is always the same.
6183 : : The problem arises only if the memory access is in an inner-loop nested
6184 : : inside LOOP, which is now being vectorized using outer-loop vectorization.
6185 : : This is the only case when the misalignment of the memory access may not
6186 : : remain fixed throughout the iterations of the inner-loop (as explained in
6187 : : detail in vect_supportable_dr_alignment). In this case, not only is the
6188 : : optimized realignment scheme not applicable, but also the misalignment
6189 : : computation (and generation of the realignment token that is passed to
6190 : : REALIGN_LOAD) have to be done inside the loop.
6191 : :
6192 : : In short, INIT_ADDR indicates whether we are in a COMPUTE_IN_LOOP mode
6193 : : or not, which in turn determines if the misalignment is computed inside
6194 : : the inner-loop, or outside LOOP. */
6195 : :
6196 : 0 : if (init_addr != NULL_TREE || !loop_vinfo)
6197 : : {
6198 : 0 : compute_in_loop = true;
6199 : 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign);
6200 : : }
6201 : :
6202 : :
6203 : : /* 2. Determine where to generate the extra vector load.
6204 : :
6205 : : For the optimized realignment scheme, instead of generating two vector
6206 : : loads in each iteration, we generate a single extra vector load in the
6207 : : preheader of the loop, and in each iteration reuse the result of the
6208 : : vector load from the previous iteration. In case the memory access is in
6209 : : an inner-loop nested inside LOOP, which is now being vectorized using
6210 : : outer-loop vectorization, we need to determine whether this initial vector
6211 : : load should be generated at the preheader of the inner-loop, or can be
6212 : : generated at the preheader of LOOP. If the memory access has no evolution
6213 : : in LOOP, it can be generated in the preheader of LOOP. Otherwise, it has
6214 : : to be generated inside LOOP (in the preheader of the inner-loop). */
6215 : :
6216 : 0 : if (nested_in_vect_loop)
6217 : : {
6218 : 0 : tree outerloop_step = STMT_VINFO_DR_STEP (stmt_info);
6219 : 0 : bool invariant_in_outerloop =
6220 : 0 : (tree_int_cst_compare (outerloop_step, size_zero_node) == 0);
6221 : 0 : loop_for_initial_load = (invariant_in_outerloop ? loop : loop->inner);
6222 : : }
6223 : : else
6224 : : loop_for_initial_load = loop;
6225 : 0 : if (at_loop)
6226 : 0 : *at_loop = loop_for_initial_load;
6227 : :
6228 : 0 : tree vuse = NULL_TREE;
6229 : 0 : if (loop_for_initial_load)
6230 : : {
6231 : 0 : pe = loop_preheader_edge (loop_for_initial_load);
6232 : 0 : if (gphi *vphi = get_virtual_phi (loop_for_initial_load->header))
6233 : 0 : vuse = PHI_ARG_DEF_FROM_EDGE (vphi, pe);
6234 : : }
6235 : 0 : if (!vuse)
6236 : 0 : vuse = gimple_vuse (gsi_stmt (*gsi));
6237 : :
6238 : : /* 3. For the case of the optimized realignment, create the first vector
6239 : : load at the loop preheader. */
6240 : :
6241 : 0 : if (alignment_support_scheme == dr_explicit_realign_optimized)
6242 : : {
6243 : : /* Create msq_init = *(floor(p1)) in the loop preheader */
6244 : 0 : gassign *new_stmt;
6245 : :
6246 : 0 : gcc_assert (!compute_in_loop);
6247 : 0 : vec_dest = vect_create_destination_var (scalar_dest, vectype);
6248 : 0 : ptr = vect_create_data_ref_ptr (vinfo, stmt_info, vectype,
6249 : : loop_for_initial_load, NULL_TREE,
6250 : : &init_addr, NULL, &inc, true);
6251 : 0 : if (TREE_CODE (ptr) == SSA_NAME)
6252 : 0 : new_temp = copy_ssa_name (ptr);
6253 : : else
6254 : 0 : new_temp = make_ssa_name (TREE_TYPE (ptr));
6255 : 0 : poly_uint64 align = DR_TARGET_ALIGNMENT (dr_info);
6256 : 0 : tree type = TREE_TYPE (ptr);
6257 : 0 : new_stmt = gimple_build_assign
6258 : 0 : (new_temp, BIT_AND_EXPR, ptr,
6259 : 0 : fold_build2 (MINUS_EXPR, type,
6260 : : build_int_cst (type, 0),
6261 : : build_int_cst (type, align)));
6262 : 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6263 : 0 : gcc_assert (!new_bb);
6264 : 0 : data_ref
6265 : 0 : = build2 (MEM_REF, TREE_TYPE (vec_dest), new_temp,
6266 : : build_int_cst (reference_alias_ptr_type (DR_REF (dr)), 0));
6267 : 0 : vect_copy_ref_info (data_ref, DR_REF (dr));
6268 : 0 : new_stmt = gimple_build_assign (vec_dest, data_ref);
6269 : 0 : new_temp = make_ssa_name (vec_dest, new_stmt);
6270 : 0 : gimple_assign_set_lhs (new_stmt, new_temp);
6271 : 0 : gimple_set_vuse (new_stmt, vuse);
6272 : 0 : if (pe)
6273 : : {
6274 : 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6275 : 0 : gcc_assert (!new_bb);
6276 : : }
6277 : : else
6278 : 0 : gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6279 : :
6280 : 0 : msq_init = gimple_assign_lhs (new_stmt);
6281 : : }
6282 : :
6283 : : /* 4. Create realignment token using a target builtin, if available.
6284 : : It is done either inside the containing loop, or before LOOP (as
6285 : : determined above). */
6286 : :
6287 : 0 : if (targetm.vectorize.builtin_mask_for_load)
6288 : : {
6289 : 0 : gcall *new_stmt;
6290 : 0 : tree builtin_decl;
6291 : :
6292 : : /* Compute INIT_ADDR - the initial addressed accessed by this memref. */
6293 : 0 : if (!init_addr)
6294 : : {
6295 : : /* Generate the INIT_ADDR computation outside LOOP. */
6296 : 0 : init_addr = vect_create_addr_base_for_vector_ref (vinfo,
6297 : : stmt_info, &stmts,
6298 : : NULL_TREE);
6299 : 0 : if (loop)
6300 : : {
6301 : 0 : pe = loop_preheader_edge (loop);
6302 : 0 : new_bb = gsi_insert_seq_on_edge_immediate (pe, stmts);
6303 : 0 : gcc_assert (!new_bb);
6304 : : }
6305 : : else
6306 : 0 : gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
6307 : : }
6308 : :
6309 : 0 : builtin_decl = targetm.vectorize.builtin_mask_for_load ();
6310 : 0 : new_stmt = gimple_build_call (builtin_decl, 1, init_addr);
6311 : 0 : vec_dest =
6312 : 0 : vect_create_destination_var (scalar_dest,
6313 : : gimple_call_return_type (new_stmt));
6314 : 0 : new_temp = make_ssa_name (vec_dest, new_stmt);
6315 : 0 : gimple_call_set_lhs (new_stmt, new_temp);
6316 : :
6317 : 0 : if (compute_in_loop)
6318 : 0 : gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
6319 : : else
6320 : : {
6321 : : /* Generate the misalignment computation outside LOOP. */
6322 : 0 : pe = loop_preheader_edge (loop);
6323 : 0 : new_bb = gsi_insert_on_edge_immediate (pe, new_stmt);
6324 : 0 : gcc_assert (!new_bb);
6325 : : }
6326 : :
6327 : 0 : *realignment_token = gimple_call_lhs (new_stmt);
6328 : :
6329 : : /* The result of the CALL_EXPR to this builtin is determined from
6330 : : the value of the parameter and no global variables are touched
6331 : : which makes the builtin a "const" function. Requiring the
6332 : : builtin to have the "const" attribute makes it unnecessary
6333 : : to call mark_call_clobbered. */
6334 : 0 : gcc_assert (TREE_READONLY (builtin_decl));
6335 : : }
6336 : :
6337 : 0 : if (alignment_support_scheme == dr_explicit_realign)
6338 : : return msq;
6339 : :
6340 : 0 : gcc_assert (!compute_in_loop);
6341 : 0 : gcc_assert (alignment_support_scheme == dr_explicit_realign_optimized);
6342 : :
6343 : :
6344 : : /* 5. Create msq = phi <msq_init, lsq> in loop */
6345 : :
6346 : 0 : pe = loop_preheader_edge (containing_loop);
6347 : 0 : vec_dest = vect_create_destination_var (scalar_dest, vectype);
6348 : 0 : msq = make_ssa_name (vec_dest);
6349 : 0 : phi_stmt = create_phi_node (msq, containing_loop->header);
6350 : 0 : add_phi_arg (phi_stmt, msq_init, pe, UNKNOWN_LOCATION);
6351 : :
6352 : 0 : return msq;
6353 : : }
6354 : :
6355 : :
6356 : : /* Function vect_grouped_load_supported.
6357 : :
6358 : : COUNT is the size of the load group (the number of statements plus the
6359 : : number of gaps). SINGLE_ELEMENT_P is true if there is actually
6360 : : only one statement, with a gap of COUNT - 1.
6361 : :
6362 : : Returns true if a suitable permute exists. */
6363 : :
6364 : : bool
6365 : 1855 : vect_grouped_load_supported (tree vectype, bool single_element_p,
6366 : : unsigned HOST_WIDE_INT count)
6367 : : {
6368 : 1855 : machine_mode mode = TYPE_MODE (vectype);
6369 : :
6370 : : /* If this is single-element interleaving with an element distance
6371 : : that leaves unused vector loads around punt - we at least create
6372 : : very sub-optimal code in that case (and blow up memory,
6373 : : see PR65518). */
6374 : 1855 : if (single_element_p && maybe_gt (count, TYPE_VECTOR_SUBPARTS (vectype)))
6375 : : {
6376 : 33 : if (dump_enabled_p ())
6377 : 13 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6378 : : "single-element interleaving not supported "
6379 : : "for not adjacent vector loads\n");
6380 : 33 : return false;
6381 : : }
6382 : :
6383 : : /* vect_permute_load_chain requires the group size to be equal to 3 or
6384 : : be a power of two. */
6385 : 1822 : if (count != 3 && exact_log2 (count) == -1)
6386 : : {
6387 : 342 : if (dump_enabled_p ())
6388 : 8 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6389 : : "the size of the group of accesses"
6390 : : " is not a power of 2 or not equal to 3\n");
6391 : 342 : return false;
6392 : : }
6393 : :
6394 : : /* Check that the permutation is supported. */
6395 : 1480 : if (VECTOR_MODE_P (mode))
6396 : : {
6397 : 1480 : unsigned int i, j;
6398 : 1480 : if (count == 3)
6399 : : {
6400 : 712 : unsigned int nelt;
6401 : 1424 : if (!GET_MODE_NUNITS (mode).is_constant (&nelt))
6402 : : {
6403 : : if (dump_enabled_p ())
6404 : : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6405 : : "cannot handle groups of 3 loads for"
6406 : : " variable-length vectors\n");
6407 : : return false;
6408 : : }
6409 : :
6410 : 712 : vec_perm_builder sel (nelt, nelt, 1);
6411 : 712 : sel.quick_grow (nelt);
6412 : 712 : vec_perm_indices indices;
6413 : 712 : unsigned int k;
6414 : 2812 : for (k = 0; k < 3; k++)
6415 : : {
6416 : 7508 : for (i = 0; i < nelt; i++)
6417 : 5396 : if (3 * i + k < 2 * nelt)
6418 : 3603 : sel[i] = 3 * i + k;
6419 : : else
6420 : 1793 : sel[i] = 0;
6421 : 2112 : indices.new_vector (sel, 2, nelt);
6422 : 2112 : if (!can_vec_perm_const_p (mode, mode, indices))
6423 : : {
6424 : 12 : if (dump_enabled_p ())
6425 : 4 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6426 : : "shuffle of 3 loads is not supported by"
6427 : : " target\n");
6428 : 12 : return false;
6429 : : }
6430 : 7344 : for (i = 0, j = 0; i < nelt; i++)
6431 : 5244 : if (3 * i + k < 2 * nelt)
6432 : 3496 : sel[i] = i;
6433 : : else
6434 : 1748 : sel[i] = nelt + ((nelt + k) % 3) + 3 * (j++);
6435 : 2100 : indices.new_vector (sel, 2, nelt);
6436 : 2100 : if (!can_vec_perm_const_p (mode, mode, indices))
6437 : : {
6438 : 0 : if (dump_enabled_p ())
6439 : 0 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6440 : : "shuffle of 3 loads is not supported by"
6441 : : " target\n");
6442 : 0 : return false;
6443 : : }
6444 : : }
6445 : : return true;
6446 : 712 : }
6447 : : else
6448 : : {
6449 : : /* If length is not equal to 3 then only power of 2 is supported. */
6450 : 768 : gcc_assert (pow2p_hwi (count));
6451 : 1536 : poly_uint64 nelt = GET_MODE_NUNITS (mode);
6452 : :
6453 : : /* The encoding has a single stepped pattern. */
6454 : 768 : vec_perm_builder sel (nelt, 1, 3);
6455 : 768 : sel.quick_grow (3);
6456 : 3840 : for (i = 0; i < 3; i++)
6457 : 2304 : sel[i] = i * 2;
6458 : 768 : vec_perm_indices indices (sel, 2, nelt);
6459 : 768 : if (can_vec_perm_const_p (mode, mode, indices))
6460 : : {
6461 : 3056 : for (i = 0; i < 3; i++)
6462 : 2292 : sel[i] = i * 2 + 1;
6463 : 764 : indices.new_vector (sel, 2, nelt);
6464 : 764 : if (can_vec_perm_const_p (mode, mode, indices))
6465 : 764 : return true;
6466 : : }
6467 : 768 : }
6468 : : }
6469 : :
6470 : 4 : if (dump_enabled_p ())
6471 : 2 : dump_printf_loc (MSG_MISSED_OPTIMIZATION, vect_location,
6472 : : "extract even/odd not supported by target\n");
6473 : : return false;
6474 : : }
6475 : :
6476 : : /* Return FN if vec_{masked_,mask_len_}load_lanes is available for COUNT vectors
6477 : : of type VECTYPE. MASKED_P says whether the masked form is needed.
6478 : : If it is available and ELSVALS is nonzero store the possible else values
6479 : : in the vector it points to. */
6480 : :
6481 : : internal_fn
6482 : 124326 : vect_load_lanes_supported (tree vectype, unsigned HOST_WIDE_INT count,
6483 : : bool masked_p, vec<int> *elsvals)
6484 : : {
6485 : 124326 : if (vect_lanes_optab_supported_p ("vec_mask_len_load_lanes",
6486 : : vec_mask_len_load_lanes_optab, vectype,
6487 : : count, elsvals))
6488 : : return IFN_MASK_LEN_LOAD_LANES;
6489 : 124326 : else if (masked_p)
6490 : : {
6491 : 0 : if (vect_lanes_optab_supported_p ("vec_mask_load_lanes",
6492 : : vec_mask_load_lanes_optab, vectype,
6493 : : count, elsvals))
6494 : : return IFN_MASK_LOAD_LANES;
6495 : : }
6496 : : else
6497 : : {
6498 : 124326 : if (vect_lanes_optab_supported_p ("vec_load_lanes", vec_load_lanes_optab,
6499 : : vectype, count, elsvals))
6500 : : return IFN_LOAD_LANES;
6501 : : }
6502 : : return IFN_LAST;
6503 : : }
6504 : :
6505 : : /* Function vect_force_dr_alignment_p.
6506 : :
6507 : : Returns whether the alignment of a DECL can be forced to be aligned
6508 : : on ALIGNMENT bit boundary. */
6509 : :
6510 : : bool
6511 : 639377 : vect_can_force_dr_alignment_p (const_tree decl, poly_uint64 alignment)
6512 : : {
6513 : 639377 : if (!VAR_P (decl))
6514 : : return false;
6515 : :
6516 : 209973 : if (decl_in_symtab_p (decl)
6517 : 209973 : && (!symtab_node::get (decl)
6518 : 20727 : || !symtab_node::get (decl)->can_increase_alignment_p ()))
6519 : 11802 : return false;
6520 : :
6521 : 198171 : if (TREE_STATIC (decl))
6522 : 8925 : return (known_le (alignment,
6523 : 8925 : (unsigned HOST_WIDE_INT) MAX_OFILE_ALIGNMENT));
6524 : : else
6525 : 189246 : return (known_le (alignment, (unsigned HOST_WIDE_INT) MAX_STACK_ALIGNMENT));
6526 : : }
6527 : :
6528 : : /* Return whether the data reference DR_INFO is supported with respect to its
6529 : : alignment.
6530 : : If CHECK_ALIGNED_ACCESSES is TRUE, check if the access is supported even
6531 : : it is aligned, i.e., check if it is possible to vectorize it with different
6532 : : alignment. If GS_INFO is passed we are dealing with a gather/scatter. */
6533 : :
6534 : : enum dr_alignment_support
6535 : 2377213 : vect_supportable_dr_alignment (vec_info *vinfo, dr_vec_info *dr_info,
6536 : : tree vectype, int misalignment,
6537 : : bool is_gather_scatter)
6538 : : {
6539 : 2377213 : data_reference *dr = dr_info->dr;
6540 : 2377213 : stmt_vec_info stmt_info = dr_info->stmt;
6541 : 2377213 : machine_mode mode = TYPE_MODE (vectype);
6542 : 2377213 : loop_vec_info loop_vinfo = dyn_cast <loop_vec_info> (vinfo);
6543 : 2377213 : class loop *vect_loop = NULL;
6544 : 2377213 : bool nested_in_vect_loop = false;
6545 : :
6546 : 2377213 : if (misalignment == 0)
6547 : : return dr_aligned;
6548 : 1442060 : else if (dr_safe_speculative_read_required (stmt_info))
6549 : : return dr_unaligned_unsupported;
6550 : :
6551 : 1093651 : if (loop_vinfo)
6552 : : {
6553 : 684246 : vect_loop = LOOP_VINFO_LOOP (loop_vinfo);
6554 : 684246 : nested_in_vect_loop = nested_in_vect_loop_p (vect_loop, stmt_info);
6555 : : }
6556 : :
6557 : : /* Possibly unaligned access. */
6558 : :
6559 : : /* We can choose between using the implicit realignment scheme (generating
6560 : : a misaligned_move stmt) and the explicit realignment scheme (generating
6561 : : aligned loads with a REALIGN_LOAD). There are two variants to the
6562 : : explicit realignment scheme: optimized, and unoptimized.
6563 : : We can optimize the realignment only if the step between consecutive
6564 : : vector loads is equal to the vector size. Since the vector memory
6565 : : accesses advance in steps of VS (Vector Size) in the vectorized loop, it
6566 : : is guaranteed that the misalignment amount remains the same throughout the
6567 : : execution of the vectorized loop. Therefore, we can create the
6568 : : "realignment token" (the permutation mask that is passed to REALIGN_LOAD)
6569 : : at the loop preheader.
6570 : :
6571 : : However, in the case of outer-loop vectorization, when vectorizing a
6572 : : memory access in the inner-loop nested within the LOOP that is now being
6573 : : vectorized, while it is guaranteed that the misalignment of the
6574 : : vectorized memory access will remain the same in different outer-loop
6575 : : iterations, it is *not* guaranteed that is will remain the same throughout
6576 : : the execution of the inner-loop. This is because the inner-loop advances
6577 : : with the original scalar step (and not in steps of VS). If the inner-loop
6578 : : step happens to be a multiple of VS, then the misalignment remains fixed
6579 : : and we can use the optimized realignment scheme. For example:
6580 : :
6581 : : for (i=0; i<N; i++)
6582 : : for (j=0; j<M; j++)
6583 : : s += a[i+j];
6584 : :
6585 : : When vectorizing the i-loop in the above example, the step between
6586 : : consecutive vector loads is 1, and so the misalignment does not remain
6587 : : fixed across the execution of the inner-loop, and the realignment cannot
6588 : : be optimized (as illustrated in the following pseudo vectorized loop):
6589 : :
6590 : : for (i=0; i<N; i+=4)
6591 : : for (j=0; j<M; j++){
6592 : : vs += vp[i+j]; // misalignment of &vp[i+j] is {0,1,2,3,0,1,2,3,...}
6593 : : // when j is {0,1,2,3,4,5,6,7,...} respectively.
6594 : : // (assuming that we start from an aligned address).
6595 : : }
6596 : :
6597 : : We therefore have to use the unoptimized realignment scheme:
6598 : :
6599 : : for (i=0; i<N; i+=4)
6600 : : for (j=k; j<M; j+=4)
6601 : : vs += vp[i+j]; // misalignment of &vp[i+j] is always k (assuming
6602 : : // that the misalignment of the initial address is
6603 : : // 0).
6604 : :
6605 : : The loop can then be vectorized as follows:
6606 : :
6607 : : for (k=0; k<4; k++){
6608 : : rt = get_realignment_token (&vp[k]);
6609 : : for (i=0; i<N; i+=4){
6610 : : v1 = vp[i+k];
6611 : : for (j=k; j<M; j+=4){
6612 : : v2 = vp[i+j+VS-1];
6613 : : va = REALIGN_LOAD <v1,v2,rt>;
6614 : : vs += va;
6615 : : v1 = v2;
6616 : : }
6617 : : }
6618 : : } */
6619 : :
6620 : 1093651 : if (DR_IS_READ (dr))
6621 : : {
6622 : 448586 : if (can_implement_p (vec_realign_load_optab, mode)
6623 : 448586 : && (!targetm.vectorize.builtin_mask_for_load
6624 : 0 : || targetm.vectorize.builtin_mask_for_load ()))
6625 : : {
6626 : : /* If we are doing SLP then the accesses need not have the
6627 : : same alignment, instead it depends on the SLP group size. */
6628 : 0 : if (loop_vinfo
6629 : 0 : && STMT_SLP_TYPE (stmt_info)
6630 : 0 : && STMT_VINFO_GROUPED_ACCESS (stmt_info)
6631 : 0 : && !multiple_p (LOOP_VINFO_VECT_FACTOR (loop_vinfo)
6632 : 0 : * (DR_GROUP_SIZE
6633 : 0 : (DR_GROUP_FIRST_ELEMENT (stmt_info))),
6634 : 0 : TYPE_VECTOR_SUBPARTS (vectype)))
6635 : : ;
6636 : 0 : else if (!loop_vinfo
6637 : 0 : || (nested_in_vect_loop
6638 : 0 : && maybe_ne (TREE_INT_CST_LOW (DR_STEP (dr)),
6639 : 0 : GET_MODE_SIZE (TYPE_MODE (vectype)))))
6640 : 0 : return dr_explicit_realign;
6641 : : else
6642 : 0 : return dr_explicit_realign_optimized;
6643 : : }
6644 : : }
6645 : :
6646 : 1093651 : bool is_packed = false;
6647 : 1093651 : tree type = TREE_TYPE (DR_REF (dr));
6648 : 1093651 : if (misalignment == DR_MISALIGNMENT_UNKNOWN)
6649 : 876068 : is_packed = not_size_aligned (DR_REF (dr));
6650 : 1093651 : if (targetm.vectorize.support_vector_misalignment (mode, type, misalignment,
6651 : : is_packed,
6652 : : is_gather_scatter))
6653 : : return dr_unaligned_supported;
6654 : :
6655 : : /* Unsupported. */
6656 : : return dr_unaligned_unsupported;
6657 : : }
|