Line data Source code
1 : /* Interprocedural constant propagation
2 : Copyright (C) 2005-2026 Free Software Foundation, Inc.
3 :
4 : Contributed by Razya Ladelsky <RAZYA@il.ibm.com> and Martin Jambor
5 : <mjambor@suse.cz>
6 :
7 : This file is part of GCC.
8 :
9 : GCC is free software; you can redistribute it and/or modify it under
10 : the terms of the GNU General Public License as published by the Free
11 : Software Foundation; either version 3, or (at your option) any later
12 : version.
13 :
14 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 : for more details.
18 :
19 : You should have received a copy of the GNU General Public License
20 : along with GCC; see the file COPYING3. If not see
21 : <http://www.gnu.org/licenses/>. */
22 :
23 : /* Interprocedural constant propagation (IPA-CP).
24 :
25 : The goal of this transformation is to
26 :
27 : 1) discover functions which are always invoked with some arguments with the
28 : same known constant values and modify the functions so that the
29 : subsequent optimizations can take advantage of the knowledge, and
30 :
31 : 2) partial specialization - create specialized versions of functions
32 : transformed in this way if some parameters are known constants only in
33 : certain contexts but the estimated tradeoff between speedup and cost size
34 : is deemed good.
35 :
36 : The algorithm also propagates types and attempts to perform type based
37 : devirtualization. Types are propagated much like constants.
38 :
39 : The algorithm basically consists of three stages. In the first, functions
40 : are analyzed one at a time and jump functions are constructed for all known
41 : call-sites. In the second phase, the pass propagates information from the
42 : jump functions across the call to reveal what values are available at what
43 : call sites, performs estimations of effects of known values on functions and
44 : their callees, and finally decides what specialized extra versions should be
45 : created. In the third, the special versions materialize and appropriate
46 : calls are redirected.
47 :
48 : The algorithm used is to a certain extent based on "Interprocedural Constant
49 : Propagation", by David Callahan, Keith D Cooper, Ken Kennedy, Linda Torczon,
50 : Comp86, pg 152-161 and "A Methodology for Procedure Cloning" by Keith D
51 : Cooper, Mary W. Hall, and Ken Kennedy.
52 :
53 :
54 : First stage - intraprocedural analysis
55 : =======================================
56 :
57 : This phase computes jump_function and modification flags.
58 :
59 : A jump function for a call-site represents the values passed as actual
60 : arguments of a given call-site. In principle, there are three types of
61 : values:
62 :
63 : Pass through - the caller's formal parameter is passed as an actual
64 : argument, plus an operation on it can be performed.
65 : Constant - a constant is passed as an actual argument.
66 : Unknown - neither of the above.
67 :
68 : All jump function types are described in detail in ipa-prop.h, together with
69 : the data structures that represent them and methods of accessing them.
70 :
71 : ipcp_generate_summary() is the main function of the first stage.
72 :
73 : Second stage - interprocedural analysis
74 : ========================================
75 :
76 : This stage is itself divided into two phases. In the first, we propagate
77 : known values over the call graph, in the second, we make cloning decisions.
78 : It uses a different algorithm than the original Callahan's paper.
79 :
80 : First, we traverse the functions topologically from callers to callees and,
81 : for each strongly connected component (SCC), we propagate constants
82 : according to previously computed jump functions. We also record what known
83 : values depend on other known values and estimate local effects. Finally, we
84 : propagate cumulative information about these effects from dependent values
85 : to those on which they depend.
86 :
87 : Second, we again traverse the call graph in the same topological order and
88 : make clones for functions which we know are called with the same values in
89 : all contexts and decide about extra specialized clones of functions just for
90 : some contexts - these decisions are based on both local estimates and
91 : cumulative estimates propagated from callees.
92 :
93 : ipcp_propagate_stage() and ipcp_decision_stage() together constitute the
94 : second stage.
95 :
96 : Third phase - materialization of clones, call statement updates.
97 : ============================================
98 :
99 : This stage is currently performed by call graph code (mainly in cgraphunit.cc
100 : and tree-inline.cc) according to instructions inserted to the call graph by
101 : the second stage. */
102 :
103 : #define INCLUDE_ALGORITHM
104 : #include "config.h"
105 : #include "system.h"
106 : #include "coretypes.h"
107 : #include "backend.h"
108 : #include "tree.h"
109 : #include "gimple-expr.h"
110 : #include "gimple.h"
111 : #include "predict.h"
112 : #include "sreal.h"
113 : #include "alloc-pool.h"
114 : #include "tree-pass.h"
115 : #include "cgraph.h"
116 : #include "diagnostic.h"
117 : #include "fold-const.h"
118 : #include "gimple-iterator.h"
119 : #include "gimple-fold.h"
120 : #include "symbol-summary.h"
121 : #include "tree-vrp.h"
122 : #include "ipa-cp.h"
123 : #include "ipa-prop.h"
124 : #include "tree-pretty-print.h"
125 : #include "tree-inline.h"
126 : #include "ipa-fnsummary.h"
127 : #include "ipa-utils.h"
128 : #include "tree-ssa-ccp.h"
129 : #include "stringpool.h"
130 : #include "attribs.h"
131 : #include "dbgcnt.h"
132 : #include "symtab-clones.h"
133 : #include "gimple-range.h"
134 : #include "attr-callback.h"
135 :
136 : /* Allocation pools for values and their sources in ipa-cp. */
137 :
138 : object_allocator<ipcp_value<tree> > ipcp_cst_values_pool
139 : ("IPA-CP constant values");
140 :
141 : object_allocator<ipcp_value<ipa_polymorphic_call_context> >
142 : ipcp_poly_ctx_values_pool ("IPA-CP polymorphic contexts");
143 :
144 : object_allocator<ipcp_value_source<tree> > ipcp_sources_pool
145 : ("IPA-CP value sources");
146 :
147 : object_allocator<ipcp_agg_lattice> ipcp_agg_lattice_pool
148 : ("IPA_CP aggregate lattices");
149 :
/* Original overall size of the program.  */

static long overall_size, orig_overall_size;

/* The maximum number of IPA-CP decision sweeps that any node requested in its
   param.  */
static int max_number_sweeps;

/* Node name to unique clone suffix number map.  NOTE(review): the map is
   keyed on const char * pointers, which presumably relies on the names being
   interned/stable -- confirm at the call sites that populate it.  */
static hash_map<const char *, unsigned> *clone_num_suffixes;
160 :
/* Return the param lattices structure corresponding to the Ith formal
   parameter of the function described by INFO.  */
static inline class ipcp_param_lattices *
ipa_get_parm_lattices (class ipa_node_params *info, int i)
{
  gcc_assert (i >= 0 && i < ipa_get_param_count (info));
  /* Lattices are only maintained for original nodes; IPA-CP clones (which
     have ipcp_orig_node set) must not be queried here.  */
  gcc_checking_assert (!info->ipcp_orig_node);
  return &(info->lattices[i]);
}
170 :
171 : /* Return the lattice corresponding to the scalar value of the Ith formal
172 : parameter of the function described by INFO. */
173 : static inline ipcp_lattice<tree> *
174 5858776 : ipa_get_scalar_lat (class ipa_node_params *info, int i)
175 : {
176 6040654 : class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
177 5858776 : return &plats->itself;
178 : }
179 :
/* Return the lattice tracking the known polymorphic call contexts of the Ith
   formal parameter of the function described by INFO.  (The previous comment
   wrongly said "scalar value" -- this accessor returns the context
   lattice.)  */
static inline ipcp_lattice<ipa_polymorphic_call_context> *
ipa_get_poly_ctx_lat (class ipa_node_params *info, int i)
{
  class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
  return &plats->ctxlat;
}
188 :
189 : /* Return whether LAT is a lattice with a single constant and without an
190 : undefined value. */
191 :
192 : template <typename valtype>
193 : inline bool
194 14891469 : ipcp_lattice<valtype>::is_single_const ()
195 : {
196 3206149 : if (bottom || contains_variable || values_count != 1)
197 : return false;
198 : else
199 : return true;
200 : }
201 :
/* Return true iff X and Y should be considered equal values by IPA-CP.  */

bool
values_equal_for_ipcp_p (tree x, tree y)
{
  gcc_checking_assert (x != NULL_TREE && y != NULL_TREE);

  if (x == y)
    return true;

  /* Addresses of CONST_DECLs and of constant-pool VAR_DECLs are compared by
     the decls themselves or, failing that, by their DECL_INITIALs, since
     distinct decls may hold identical constant data.  */
  if (TREE_CODE (x) == ADDR_EXPR
      && TREE_CODE (y) == ADDR_EXPR
      && (TREE_CODE (TREE_OPERAND (x, 0)) == CONST_DECL
	  || (TREE_CODE (TREE_OPERAND (x, 0)) == VAR_DECL
	      && DECL_IN_CONSTANT_POOL (TREE_OPERAND (x, 0))))
      && (TREE_CODE (TREE_OPERAND (y, 0)) == CONST_DECL
	  || (TREE_CODE (TREE_OPERAND (y, 0)) == VAR_DECL
	      && DECL_IN_CONSTANT_POOL (TREE_OPERAND (y, 0)))))
    return TREE_OPERAND (x, 0) == TREE_OPERAND (y, 0)
	   || operand_equal_p (DECL_INITIAL (TREE_OPERAND (x, 0)),
			       DECL_INITIAL (TREE_OPERAND (y, 0)), 0);
  else
    return operand_equal_p (x, y, 0);
}
226 :
/* Print V which is extracted from a value in a lattice to F.  This overloaded
   function is used to print tree constants.  Simply forwards to the shared
   ipa-prop helper.  */

static void
print_ipcp_constant_value (FILE * f, tree v)
{
  ipa_print_constant_value (f, v);
}
235 :
/* Print V which is extracted from a value in a lattice to F.  This overloaded
   function is used to print constant polymorphic call contexts.  */

static void
print_ipcp_constant_value (FILE * f, ipa_polymorphic_call_context v)
{
  v.dump(f, false);
}
244 :
/* Print a lattice LAT to F.  With DUMP_SOURCES, also print where each value
   came from; with DUMP_BENEFITS, print the local and propagated time/size
   estimates of each value.  */

template <typename valtype>
void
ipcp_lattice<valtype>::print (FILE * f, bool dump_sources, bool dump_benefits)
{
  ipcp_value<valtype> *val;
  bool prev = false;

  if (bottom)
    {
      fprintf (f, "BOTTOM\n");
      return;
    }

  if (!values_count && !contains_variable)
    {
      fprintf (f, "TOP\n");
      return;
    }

  if (contains_variable)
    {
      fprintf (f, "VARIABLE");
      prev = true;
      if (dump_benefits)
	fprintf (f, "\n");
    }

  for (val = values; val; val = val->next)
    {
      /* With benefits each value goes on its own line, otherwise values are
	 comma-separated.  */
      if (dump_benefits && prev)
	fprintf (f, " ");
      else if (!dump_benefits && prev)
	fprintf (f, ", ");
      else
	prev = true;

      print_ipcp_constant_value (f, val->value);

      if (dump_sources)
	{
	  ipcp_value_source<valtype> *s;

	  if (val->self_recursion_generated_p ())
	    fprintf (f, " [self_gen(%i), from:",
		     val->self_recursion_generated_level);
	  else
	    fprintf (f, " [scc: %i, from:", val->scc_no);
	  /* Each source is printed as caller-uid(frequency).  */
	  for (s = val->sources; s; s = s->next)
	    fprintf (f, " %i(%f)", s->cs->caller->get_uid (),
		     s->cs->sreal_frequency ().to_double ());
	  fprintf (f, "]");
	}

      if (dump_benefits)
	fprintf (f, " [loc_time: %g, loc_size: %i, "
		 "prop_time: %g, prop_size: %i]\n",
		 val->local_time_benefit.to_double (), val->local_size_cost,
		 val->prop_time_benefit.to_double (), val->prop_size_cost);
    }
  if (!dump_benefits)
    fprintf (f, "\n");
}
309 :
/* Print VALUE to F in a form which in usual cases does not take thousands of
   characters.  */

static void
ipcp_print_widest_int (FILE *f, const widest_int &value)
{
  if (value == -1)
    fprintf (f, "-1");
  else if (wi::arshift (value, 128) == -1)
    {
      /* The value is negative but all bits above the low 128 are ones:
	 abbreviate the leading run of 'f' digits as "0xf..f".  */
      char buf[35], *p = buf + 2;
      widest_int v = wi::zext (value, 128);
      size_t len;
      print_hex (v, buf);
      /* P skips the "0x" prefix that print_hex emits.  */
      len = strlen (p);
      if (len == 32)
	{
	  fprintf (f, "0xf..f");
	  while (*p == 'f')
	    ++p;
	}
      else
	fprintf (f, "0xf..f%0*d", (int) (32 - len), 0);
      fputs (p, f);
    }
  else
    print_hex (value, f);
}
338 :
/* Print the known-bits lattice to F.  */
void
ipcp_bits_lattice::print (FILE *f)
{
  if (bottom_p ())
    {
      fprintf (f, " Bits unusable (BOTTOM)\n");
      return;
    }

  if (top_p ())
    fprintf (f, " Bits unknown (TOP)");
  else
    {
      fprintf (f, " Bits: value = ");
      ipcp_print_widest_int (f, get_value ());
      fprintf (f, ", mask = ");
      ipcp_print_widest_int (f, get_mask ());
    }

  if (m_recipient_only)
    fprintf (f, " (recipient only)");
  fprintf (f, "\n");
}
362 :
/* Print value range lattice to F.  */

void
ipcp_vr_lattice::print (FILE * f)
{
  if (m_recipient_only)
    fprintf (f, "(recipient only) ");
  m_vr.dump (f);
}
372 :
/* Print all ipcp_lattices of all functions to F.  With DUMP_SOURCES and
   DUMP_BENEFITS, forward the corresponding verbosity to the per-lattice
   printers.  */

static void
print_all_lattices (FILE * f, bool dump_sources, bool dump_benefits)
{
  struct cgraph_node *node;
  int i, count;

  fprintf (f, "\nLattices:\n");
  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    {
      class ipa_node_params *info;

      info = ipa_node_params_sum->get (node);
      /* Skip unoptimized functions and constprop clones since we don't make
	 lattices for them.  */
      if (!info || info->ipcp_orig_node)
	continue;
      fprintf (f, " Node: %s:\n", node->dump_name ());
      count = ipa_get_param_count (info);
      for (i = 0; i < count; i++)
	{
	  struct ipcp_agg_lattice *aglat;
	  class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
	  fprintf (f, " param [%d]: ", i);
	  plats->itself.print (f, dump_sources, dump_benefits);
	  fprintf (f, " ctxs: ");
	  plats->ctxlat.print (f, dump_sources, dump_benefits);
	  plats->bits_lattice.print (f);
	  fprintf (f, " ");
	  plats->m_value_range.print (f);
	  fprintf (f, "\n");
	  if (plats->virt_call)
	    fprintf (f, " virt_call flag set\n");

	  /* Aggregate lattices: either everything is unusable, or we dump
	     the per-offset lattices (plus a VARIABLE marker).  */
	  if (plats->aggs_bottom)
	    {
	      fprintf (f, " AGGS BOTTOM\n");
	      continue;
	    }
	  if (plats->aggs_contain_variable)
	    fprintf (f, " AGGS VARIABLE\n");
	  for (aglat = plats->aggs; aglat; aglat = aglat->next)
	    {
	      fprintf (f, " %soffset " HOST_WIDE_INT_PRINT_DEC ": ",
		       plats->aggs_by_ref ? "ref " : "", aglat->offset);
	      aglat->print (f, dump_sources, dump_benefits);
	    }
	}
    }
}
424 :
/* Determine whether it is at all technically possible to create clones of NODE
   and store this information in the ipa_node_params structure associated
   with NODE.  */

static void
determine_versionability (struct cgraph_node *node,
			  class ipa_node_params *info)
{
  const char *reason = NULL;

  /* There are a number of generic reasons functions cannot be versioned.  We
     also cannot remove parameters if there are type attributes such as fnspec
     present.  */
  if (node->alias || node->thunk)
    reason = "alias or thunk";
  else if (!node->versionable)
    reason = "not a tree_versionable_function";
  else if (node->get_availability () <= AVAIL_INTERPOSABLE)
    reason = "insufficient body availability";
  else if (!opt_for_fn (node->decl, optimize)
	   || !opt_for_fn (node->decl, flag_ipa_cp))
    reason = "non-optimized function";
  else if (lookup_attribute ("omp declare simd", DECL_ATTRIBUTES (node->decl)))
    {
      /* Ideally we should clone the SIMD clones themselves and create
	 vector copies of them, so IPA-cp and SIMD clones can happily
	 coexist, but that may not be worth the effort.  */
      reason = "function has SIMD clones";
    }
  else if (lookup_attribute ("target_clones", DECL_ATTRIBUTES (node->decl)))
    {
      /* Ideally we should clone the target clones themselves and create
	 copies of them, so IPA-cp and target clones can happily
	 coexist, but that may not be worth the effort.  */
      reason = "function target_clones attribute";
    }
  /* Don't clone decls local to a comdat group; it breaks and for C++
     decloned constructors, inlining is always better anyway.  */
  else if (node->comdat_local_p ())
    reason = "comdat-local function";
  else if (node->calls_comdat_local)
    {
      /* TODO: call is versionable if we make sure that all
	 callers are inside of a comdat group.  */
      reason = "calls comdat-local function";
    }

  /* Functions calling BUILT_IN_VA_ARG_PACK and BUILT_IN_VA_ARG_PACK_LEN
     work only when inlined.  Cloning them may still lead to better code
     because ipa-cp will not give up on cloning further.  If the function is
     external this however leads to wrong code because we may end up producing
     offline copy of the function.  */
  if (DECL_EXTERNAL (node->decl))
    for (cgraph_edge *edge = node->callees; !reason && edge;
	 edge = edge->next_callee)
      if (fndecl_built_in_p (edge->callee->decl, BUILT_IN_NORMAL))
	{
	  if (DECL_FUNCTION_CODE (edge->callee->decl) == BUILT_IN_VA_ARG_PACK)
	    reason = "external function which calls va_arg_pack";
	  if (DECL_FUNCTION_CODE (edge->callee->decl)
	      == BUILT_IN_VA_ARG_PACK_LEN)
	    reason = "external function which calls va_arg_pack_len";
	}

  if (reason && dump_file && !node->alias && !node->thunk)
    fprintf (dump_file, "Function %s is not versionable, reason: %s.\n",
	     node->dump_name (), reason);

  /* NODE is versionable iff no blocking reason was found above.  */
  info->versionable = (reason == NULL);
}
495 :
496 : /* Return true if it is at all technically possible to create clones of a
497 : NODE. */
498 :
499 : static bool
500 6038381 : ipcp_versionable_function_p (struct cgraph_node *node)
501 : {
502 6038381 : ipa_node_params *info = ipa_node_params_sum->get (node);
503 6038381 : return info && info->versionable;
504 : }
505 :
/* Structure holding accumulated information about callers of a node.
   Zeroed by init_caller_stats and filled in by gather_caller_stats.  */

struct caller_statistics
{
  /* If requested (see below), self-recursive call counts are summed into this
     field.  */
  profile_count rec_count_sum;
  /* The sum of all ipa counts of all the other (non-recursive) calls.  */
  profile_count count_sum;
  /* Sum of all frequencies for all calls.  */
  sreal freq_sum;
  /* Number of calls and calls considered interesting respectively.  */
  int n_calls, n_interesting_calls;
  /* If itself is set up, also count the number of non-self-recursive
     calls.  */
  int n_nonrec_calls;
  /* If non-NULL, this is the node itself and calls from it should have their
     counts included in rec_count_sum and not count_sum.  */
  cgraph_node *itself;
  /* True if there is a caller that has no IPA profile.  */
  bool called_without_ipa_profile;
};
528 :
529 : /* Initialize fields of STAT to zeroes and optionally set it up so that edges
530 : from IGNORED_CALLER are not counted. */
531 :
532 : static inline void
533 2642514 : init_caller_stats (caller_statistics *stats, cgraph_node *itself = NULL)
534 : {
535 2642514 : stats->rec_count_sum = profile_count::zero ();
536 2642514 : stats->count_sum = profile_count::zero ();
537 2642514 : stats->n_calls = 0;
538 2642514 : stats->n_interesting_calls = 0;
539 2642514 : stats->n_nonrec_calls = 0;
540 2642514 : stats->freq_sum = 0;
541 2642514 : stats->itself = itself;
542 2642514 : stats->called_without_ipa_profile = false;
543 2642514 : }
544 :
/* We want to propagate across edges that may be executed, however
   we do not want to check maybe_hot, since call itself may be cold
   while callee contains some heavy loop which makes propagation still
   relevant.

   In particular, even edge called once may lead to significant
   improvement.  */

static bool
cs_interesting_for_ipcp_p (cgraph_edge *e)
{
  /* If profile says the edge is executed, we want to optimize.  */
  if (e->count.ipa ().nonzero_p ())
    return true;
  /* If local (possibly guessed or adjusted 0 profile) claims edge is
     not executed, do not propagate.
     Do not trust AFDO since branch needs to be executed multiple
     times to count while we want to propagate even call called
     once during the train run if callee is important.  */
  if (e->count.initialized_p () && !e->count.nonzero_p ()
      && e->count.quality () != AFDO)
    return false;
  /* If we have zero IPA profile, still consider edge for cloning
     in case we do partial training.  */
  if (e->count.ipa ().initialized_p ()
      && e->count.ipa ().quality () != AFDO
      && !opt_for_fn (e->callee->decl,flag_profile_partial_training))
    return false;
  return true;
}
575 :
/* Worker callback of cgraph_for_node_and_aliases accumulating statistics of
   non-thunk incoming edges to NODE.  DATA points to the caller_statistics
   structure being filled in.  Always returns false.  */

static bool
gather_caller_stats (struct cgraph_node *node, void *data)
{
  struct caller_statistics *stats = (struct caller_statistics *) data;
  struct cgraph_edge *cs;

  for (cs = node->callers; cs; cs = cs->next_caller)
    if (!cs->caller->thunk)
      {
	/* Skip edges from callers that have been determined dead.  */
	ipa_node_params *info = ipa_node_params_sum->get (cs->caller);
	if (info && info->node_dead)
	  continue;

	if (cs->count.ipa ().initialized_p ())
	  {
	    /* When stats->itself is set, self-recursive edge counts go into
	       rec_count_sum rather than count_sum.  */
	    if (stats->itself && stats->itself == cs->caller)
	      stats->rec_count_sum += cs->count.ipa ();
	    else
	      stats->count_sum += cs->count.ipa ();
	  }
	else
	  stats->called_without_ipa_profile = true;
	stats->freq_sum += cs->sreal_frequency ();
	stats->n_calls++;
	if (stats->itself && stats->itself != cs->caller)
	  stats->n_nonrec_calls++;

	/* If profile known to be zero, we do not want to clone for performance.
	   However if call is cold, the called function may still contain
	   important hot loops.  */
	if (cs_interesting_for_ipcp_p (cs))
	  stats->n_interesting_calls++;
      }
  return false;

}
615 :
/* Return true if this NODE is viable candidate for cloning.  */

static bool
ipcp_cloning_candidate_p (struct cgraph_node *node)
{
  struct caller_statistics stats;

  gcc_checking_assert (node->has_gimple_body_p ());

  if (!opt_for_fn (node->decl, flag_ipa_cp_clone))
    {
      if (dump_file)
	fprintf (dump_file, "Not considering %s for cloning; "
		 "-fipa-cp-clone disabled.\n",
		 node->dump_name ());
      return false;
    }

  /* Do not use profile here since cold wrapper wrap
     hot function.  */
  if (opt_for_fn (node->decl, optimize_size))
    {
      if (dump_file)
	fprintf (dump_file, "Not considering %s for cloning; "
		 "optimizing it for size.\n",
		 node->dump_name ());
      return false;
    }

  init_caller_stats (&stats);
  node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats, false);

  /* A body smaller than the number of calls to it means cloning might
     actually shrink overall code size.  */
  if (ipa_size_summaries->get (node)->self_size < stats.n_calls)
    {
      if (dump_file)
	fprintf (dump_file, "Considering %s for cloning; code might shrink.\n",
		 node->dump_name ());
      return true;
    }
  if (!stats.n_interesting_calls)
    {
      if (dump_file)
	fprintf (dump_file, "Not considering %s for cloning; "
		 "no calls considered interesting by profile.\n",
		 node->dump_name ());
      return false;
    }
  if (dump_file)
    fprintf (dump_file, "Considering %s for cloning.\n",
	     node->dump_name ());
  return true;
}
668 :
/* State for computing a topological ordering of ipcp values (of VALTYPE) and
   propagating effects along it -- see its use in ipa_topo_info below.  */
template <typename valtype>
class value_topo_info
{
public:
  /* Head of the linked list of topologically sorted values. */
  ipcp_value<valtype> *values_topo;
  /* Stack for creating SCCs, represented by a linked list too. */
  ipcp_value<valtype> *stack;
  /* Counter driving the algorithm in add_val_to_toposort. */
  int dfs_counter;

  value_topo_info () : values_topo (NULL), stack (NULL), dfs_counter (0)
  {}
  void add_val (ipcp_value<valtype> *cur_val);
  void propagate_effects ();
};
685 :
/* Arrays representing a topological ordering of call graph nodes and a stack
   of nodes used during constant propagation and also data required to perform
   topological sort of values and propagation of benefits in the determined
   order.  */

class ipa_topo_info
{
public:
  /* Array with obtained topological order of cgraph nodes.  */
  struct cgraph_node **order;
  /* Stack of cgraph nodes used during propagation within SCC until all values
     in the SCC stabilize.  */
  struct cgraph_node **stack;
  /* Number of nodes in ORDER and index of the first free slot in STACK.  */
  int nnodes, stack_top;

  /* Topological sort data for scalar constant values and for polymorphic
     call context values, respectively.  */
  value_topo_info<tree> constants;
  value_topo_info<ipa_polymorphic_call_context> contexts;

  ipa_topo_info () : order(NULL), stack(NULL), nnodes(0), stack_top(0),
    constants ()
  {}
};
708 :
709 : /* Skip edges from and to nodes without ipa_cp enabled.
710 : Ignore not available symbols. */
711 :
712 : static bool
713 5232687 : ignore_edge_p (cgraph_edge *e)
714 : {
715 5232687 : enum availability avail;
716 5232687 : cgraph_node *ultimate_target
717 5232687 : = e->callee->function_or_virtual_thunk_symbol (&avail, e->caller);
718 :
719 5232687 : return (avail <= AVAIL_INTERPOSABLE
720 1851807 : || !opt_for_fn (ultimate_target->decl, optimize)
721 7075794 : || !opt_for_fn (ultimate_target->decl, flag_ipa_cp));
722 : }
723 :
/* Allocate the arrays in TOPO and topologically sort the nodes into order.  */

static void
build_toporder_info (class ipa_topo_info *topo)
{
  /* Both arrays are sized for the worst case of every cgraph node being
     involved.  */
  topo->order = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);
  topo->stack = XCNEWVEC (struct cgraph_node *, symtab->cgraph_count);

  gcc_checking_assert (topo->stack_top == 0);
  topo->nnodes = ipa_reduced_postorder (topo->order, true,
					ignore_edge_p);
}
736 :
/* Free information about strongly connected components and the arrays in
   TOPO.  */

static void
free_toporder_info (class ipa_topo_info *topo)
{
  /* Release the per-node SCC information first, then our own arrays.  */
  ipa_free_postorder_info ();
  free (topo->order);
  free (topo->stack);
}
747 :
748 : /* Add NODE to the stack in TOPO, unless it is already there. */
749 :
750 : static inline void
751 1264474 : push_node_to_stack (class ipa_topo_info *topo, struct cgraph_node *node)
752 : {
753 1264474 : ipa_node_params *info = ipa_node_params_sum->get (node);
754 1264474 : if (info->node_enqueued)
755 : return;
756 1263505 : info->node_enqueued = 1;
757 1263505 : topo->stack[topo->stack_top++] = node;
758 : }
759 :
760 : /* Pop a node from the stack in TOPO and return it or return NULL if the stack
761 : is empty. */
762 :
763 : static struct cgraph_node *
764 2604704 : pop_node_from_stack (class ipa_topo_info *topo)
765 : {
766 2604704 : if (topo->stack_top)
767 : {
768 1263505 : struct cgraph_node *node;
769 1263505 : topo->stack_top--;
770 1263505 : node = topo->stack[topo->stack_top];
771 1263505 : ipa_node_params_sum->get (node)->node_enqueued = 0;
772 1263505 : return node;
773 : }
774 : else
775 : return NULL;
776 : }
777 :
778 : /* Set lattice LAT to bottom and return true if it previously was not set as
779 : such. */
780 :
781 : template <typename valtype>
782 : inline bool
783 2100074 : ipcp_lattice<valtype>::set_to_bottom ()
784 : {
785 2100074 : bool ret = !bottom;
786 2100074 : bottom = true;
787 : return ret;
788 : }
789 :
790 : /* Mark lattice as containing an unknown value and return true if it previously
791 : was not marked as such. */
792 :
793 : template <typename valtype>
794 : inline bool
795 1532971 : ipcp_lattice<valtype>::set_contains_variable ()
796 : {
797 1532971 : bool ret = !contains_variable;
798 1532971 : contains_variable = true;
799 : return ret;
800 : }
801 :
802 : /* Set all aggregate lattices in PLATS to bottom and return true if they were
803 : not previously set as such. */
804 :
805 : static inline bool
806 2099759 : set_agg_lats_to_bottom (class ipcp_param_lattices *plats)
807 : {
808 2099759 : bool ret = !plats->aggs_bottom;
809 2099759 : plats->aggs_bottom = true;
810 2099759 : return ret;
811 : }
812 :
813 : /* Mark all aggregate lattices in PLATS as containing an unknown value and
814 : return true if they were not previously marked as such. */
815 :
816 : static inline bool
817 1038893 : set_agg_lats_contain_variable (class ipcp_param_lattices *plats)
818 : {
819 1038893 : bool ret = !plats->aggs_contain_variable;
820 1038893 : plats->aggs_contain_variable = true;
821 1038893 : return ret;
822 : }
823 :
/* Meet the current value of the lattice with the lattice OTHER, returning
   true if anything changed.  */
bool
ipcp_vr_lattice::meet_with (const ipcp_vr_lattice &other)
{
  return meet_with_1 (other.m_vr);
}
829 :
/* Meet the current value of the lattice with the range described by
   P_VR.  Return true if anything changed.  */

bool
ipcp_vr_lattice::meet_with (const vrange &p_vr)
{
  return meet_with_1 (p_vr);
}
838 :
/* Meet the current value of the lattice with the range described by
   OTHER_VR.  Return TRUE if anything changed.  */

bool
ipcp_vr_lattice::meet_with_1 (const vrange &other_vr)
{
  if (bottom_p ())
    return false;

  if (other_vr.varying_p ())
    return set_to_bottom ();

  bool res;
  if (flag_checking)
    {
      /* With checking enabled, verify that union_'s return value agrees
	 with whether the stored range actually changed.  */
      value_range save (m_vr);
      res = m_vr.union_ (other_vr);
      gcc_assert (res == (m_vr != save));
    }
  else
    res = m_vr.union_ (other_vr);
  return res;
}
862 :
/* Return true if value range information in the lattice is yet unknown.  */

bool
ipcp_vr_lattice::top_p () const
{
  /* An undefined range represents TOP in this lattice.  */
  return m_vr.undefined_p ();
}
870 :
/* Return true if value range information in the lattice is known to be
   unusable.  */

bool
ipcp_vr_lattice::bottom_p () const
{
  /* A varying range represents BOTTOM in this lattice.  */
  return m_vr.varying_p ();
}
879 :
/* Set value range information in the lattice to bottom.  Return true if it
   previously was in a different state (i.e. was not already bottom).  */

bool
ipcp_vr_lattice::set_to_bottom ()
{
  if (m_vr.varying_p ())
    return false;

  /* Setting an unsupported type here forces the temporary to default
     to unsupported_range, which can handle VARYING/DEFINED ranges,
     but nothing else (union, intersect, etc).  This allows us to set
     bottoms on any ranges, and is safe as all users of the lattice
     check for bottom first.  */
  m_vr.set_type (void_type_node);
  m_vr.set_varying (void_type_node);

  return true;
}
899 :
900 : /* Set the flag that this lattice is a recipient only, return true if it was
901 : not set before. */
902 :
903 : bool
904 27997 : ipcp_vr_lattice::set_recipient_only ()
905 : {
906 27997 : if (m_recipient_only)
907 : return false;
908 27997 : m_recipient_only = true;
909 27997 : return true;
910 : }
911 :
912 : /* Set lattice value to bottom, if it already isn't the case. */
913 :
bool
ipcp_bits_lattice::set_to_bottom ()
{
  if (bottom_p ())
    return false;
  /* VARYING with an all-ones mask means no bit is known.  */
  m_lattice_val = IPA_BITS_VARYING;
  m_value = 0;
  m_mask = -1;
  return true;
}
924 :
925 : /* Set to constant if it isn't already. Only meant to be called
926 : when switching state from TOP. */
927 :
bool
ipcp_bits_lattice::set_to_constant (widest_int value, widest_int mask)
{
  gcc_assert (top_p ());
  m_lattice_val = IPA_BITS_CONSTANT;
  /* Canonicalize: bits that are unknown (set in MASK) are stored as zero
     in m_value.  */
  m_value = wi::bit_and (wi::bit_not (mask), value);
  m_mask = mask;
  return true;
}
937 :
938 : /* Return true if any of the known bits are non-zero. */
939 :
bool
ipcp_bits_lattice::known_nonzero_p () const
{
  if (!constant_p ())
    return false;
  /* True if any bit that is known (clear in m_mask) has value one.  */
  return wi::ne_p (wi::bit_and (wi::bit_not (m_mask), m_value), 0);
}
947 :
948 : /* Set the flag that this lattice is a recipient only, return true if it was not
949 : set before. */
950 :
951 : bool
952 27997 : ipcp_bits_lattice::set_recipient_only ()
953 : {
954 27997 : if (m_recipient_only)
955 : return false;
956 27997 : m_recipient_only = true;
957 27997 : return true;
958 : }
959 :
960 : /* Convert operand to value, mask form. */
961 :
962 : void
963 2037 : ipcp_bits_lattice::get_value_and_mask (tree operand, widest_int *valuep, widest_int *maskp)
964 : {
965 2037 : wide_int get_nonzero_bits (const_tree);
966 :
967 2037 : if (TREE_CODE (operand) == INTEGER_CST)
968 : {
969 2037 : *valuep = wi::to_widest (operand);
970 2037 : *maskp = 0;
971 : }
972 : else
973 : {
974 0 : *valuep = 0;
975 0 : *maskp = -1;
976 : }
977 2037 : }
978 :
979 : /* Meet operation, similar to ccp_lattice_meet, we xor values
980 : if this->value, value have different values at same bit positions, we want
981 : to drop that bit to varying. Return true if mask is changed.
982 : This function assumes that the lattice value is in CONSTANT state. If
983 : DROP_ALL_ONES, mask out any known bits with value one afterwards. */
984 :
bool
ipcp_bits_lattice::meet_with_1 (widest_int value, widest_int mask,
				unsigned precision, bool drop_all_ones)
{
  gcc_assert (constant_p ());

  widest_int old_mask = m_mask;
  /* A bit becomes unknown if it is unknown in either operand or if the two
     known values disagree on it.  */
  m_mask = (m_mask | mask) | (m_value ^ value);
  if (drop_all_ones)
    m_mask |= m_value;

  /* Bits at or above PRECISION are meaningless; force them to unknown.  */
  widest_int cap_mask = wi::shifted_mask <widest_int> (0, precision, true);
  m_mask |= cap_mask;
  /* If every bit within PRECISION is now unknown, degrade to BOTTOM.  */
  if (wi::sext (m_mask, precision) == -1)
    return set_to_bottom ();

  /* Keep the canonical form: unknown bits are zero in m_value.  */
  m_value &= ~m_mask;
  return m_mask != old_mask;
}
1004 :
/* Meet the bits lattice with the operand
   described by VALUE, MASK and PRECISION. */
1007 :
bool
ipcp_bits_lattice::meet_with (widest_int value, widest_int mask,
			      unsigned precision)
{
  if (bottom_p ())
    return false;

  if (top_p ())
    {
      /* All bits within PRECISION unknown -> nothing useful, go straight
	 to BOTTOM; otherwise this operand becomes the initial constant.  */
      if (wi::sext (mask, precision) == -1)
	return set_to_bottom ();
      return set_to_constant (value, mask);
    }

  return meet_with_1 (value, mask, precision, false);
}
1024 :
1025 : /* Meet bits lattice with the result of bit_value_binop (other, operand)
1026 : if code is binary operation or bit_value_unop (other) if code is unary op.
1027 : In the case when code is nop_expr, no adjustment is required. If
1028 : DROP_ALL_ONES, mask out any known bits with value one afterwards. */
1029 :
bool
ipcp_bits_lattice::meet_with (ipcp_bits_lattice& other, unsigned precision,
			      signop sgn, enum tree_code code, tree operand,
			      bool drop_all_ones)
{
  if (other.bottom_p ())
    return set_to_bottom ();

  /* Own BOTTOM cannot change; OTHER being TOP contributes nothing yet.  */
  if (bottom_p () || other.top_p ())
    return false;

  widest_int adjusted_value, adjusted_mask;

  if (TREE_CODE_CLASS (code) == tcc_binary)
    {
      tree type = TREE_TYPE (operand);
      widest_int o_value, o_mask;
      get_value_and_mask (operand, &o_value, &o_mask);

      /* Compute the bitwise effect of CODE applied to OTHER and OPERAND.  */
      bit_value_binop (code, sgn, precision, &adjusted_value, &adjusted_mask,
		       sgn, precision, other.get_value (), other.get_mask (),
		       TYPE_SIGN (type), TYPE_PRECISION (type), o_value, o_mask);

      /* Result with no known bits within PRECISION is useless.  */
      if (wi::sext (adjusted_mask, precision) == -1)
	return set_to_bottom ();
    }

  else if (TREE_CODE_CLASS (code) == tcc_unary)
    {
      bit_value_unop (code, sgn, precision, &adjusted_value,
		      &adjusted_mask, sgn, precision, other.get_value (),
		      other.get_mask ());

      if (wi::sext (adjusted_mask, precision) == -1)
	return set_to_bottom ();
    }

  else
    /* Any other operation cannot be modelled on the bit lattice.  */
    return set_to_bottom ();

  if (top_p ())
    {
      if (drop_all_ones)
	{
	  /* Discard bits known to be one, as requested by the caller.  */
	  adjusted_mask |= adjusted_value;
	  adjusted_value &= ~adjusted_mask;
	}
      /* Bits at or above PRECISION are meaningless; mark them unknown.  */
      widest_int cap_mask = wi::shifted_mask <widest_int> (0, precision, true);
      adjusted_mask |= cap_mask;
      if (wi::sext (adjusted_mask, precision) == -1)
	return set_to_bottom ();
      return set_to_constant (adjusted_value, adjusted_mask);
    }
  else
    return meet_with_1 (adjusted_value, adjusted_mask, precision,
			drop_all_ones);
}
1087 :
1088 : /* Dump the contents of the list to FILE. */
1089 :
1090 : void
1091 115 : ipa_argagg_value_list::dump (FILE *f)
1092 : {
1093 115 : bool comma = false;
1094 319 : for (const ipa_argagg_value &av : m_elts)
1095 : {
1096 204 : fprintf (f, "%s %i[%u]=", comma ? "," : "",
1097 204 : av.index, av.unit_offset);
1098 204 : print_generic_expr (f, av.value);
1099 204 : if (av.by_ref)
1100 178 : fprintf (f, "(by_ref)");
1101 204 : if (av.killed)
1102 1 : fprintf (f, "(killed)");
1103 204 : comma = true;
1104 : }
1105 115 : fprintf (f, "\n");
1106 115 : }
1107 :
1108 : /* Dump the contents of the list to stderr. */
1109 :
void
ipa_argagg_value_list::debug ()
{
  /* Convenience wrapper intended to be called from the debugger.  */
  dump (stderr);
}
1115 :
1116 : /* Return the item describing a constant stored for INDEX at UNIT_OFFSET or
1117 : NULL if there is no such constant. */
1118 :
const ipa_argagg_value *
ipa_argagg_value_list::get_elt (int index, unsigned unit_offset) const
{
  /* m_elts is kept sorted by (index, unit_offset), so a binary search
     suffices.  */
  ipa_argagg_value key;
  key.index = index;
  key.unit_offset = unit_offset;
  const ipa_argagg_value *res
    = std::lower_bound (m_elts.begin (), m_elts.end (), key,
			[] (const ipa_argagg_value &elt,
			    const ipa_argagg_value &val)
			{
			  if (elt.index < val.index)
			    return true;
			  if (elt.index > val.index)
			    return false;
			  if (elt.unit_offset < val.unit_offset)
			    return true;
			  return false;
			});

  /* lower_bound returns the first element not less than KEY; verify it is
     an exact match.  */
  if (res == m_elts.end ()
      || res->index != index
      || res->unit_offset != unit_offset)
    res = nullptr;

  /* TODO: perhaps remove the check (that the underlying array is indeed
     sorted) if it turns out it can be too slow? */
  if (!flag_checking)
    return res;

  /* With checking enabled, redo the lookup with a linear scan and verify
     both the sortedness invariant and that the fast path agrees.  */
  const ipa_argagg_value *slow_res = NULL;
  int prev_index = -1;
  unsigned prev_unit_offset = 0;
  for (const ipa_argagg_value &av : m_elts)
    {
      gcc_assert (prev_index < 0
		  || prev_index < av.index
		  || prev_unit_offset < av.unit_offset);
      prev_index = av.index;
      prev_unit_offset = av.unit_offset;
      if (av.index == index
	  && av.unit_offset == unit_offset)
	slow_res = &av;
    }
  gcc_assert (res == slow_res);

  return res;
}
1167 :
1168 : /* Return the first item describing a constant stored for parameter with INDEX,
1169 : regardless of offset or reference, or NULL if there is no such constant. */
1170 :
1171 : const ipa_argagg_value *
1172 219578 : ipa_argagg_value_list::get_elt_for_index (int index) const
1173 : {
1174 219578 : const ipa_argagg_value *res
1175 219578 : = std::lower_bound (m_elts.begin (), m_elts.end (), index,
1176 20812 : [] (const ipa_argagg_value &elt, unsigned idx)
1177 : {
1178 20812 : return elt.index < idx;
1179 : });
1180 219578 : if (res == m_elts.end ()
1181 219578 : || res->index != index)
1182 : res = nullptr;
1183 219578 : return res;
1184 : }
1185 :
1186 : /* Return the aggregate constant stored for INDEX at UNIT_OFFSET, not
1187 : performing any check of whether value is passed by reference, or NULL_TREE
1188 : if there is no such constant. */
1189 :
1190 : tree
1191 38978 : ipa_argagg_value_list::get_value (int index, unsigned unit_offset) const
1192 : {
1193 38978 : const ipa_argagg_value *av = get_elt (index, unit_offset);
1194 38978 : return av ? av->value : NULL_TREE;
1195 : }
1196 :
1197 : /* Return the aggregate constant stored for INDEX at UNIT_OFFSET, if it is
1198 : passed by reference or not according to BY_REF, or NULL_TREE if there is
1199 : no such constant. */
1200 :
1201 : tree
1202 28704307 : ipa_argagg_value_list::get_value (int index, unsigned unit_offset,
1203 : bool by_ref) const
1204 : {
1205 28704307 : const ipa_argagg_value *av = get_elt (index, unit_offset);
1206 28704307 : if (av && av->by_ref == by_ref)
1207 1670913 : return av->value;
1208 : return NULL_TREE;
1209 : }
1210 :
1211 : /* Return true if all elements present in OTHER are also present in this
1212 : list. */
1213 :
bool
ipa_argagg_value_list::superset_of_p (const ipa_argagg_value_list &other) const
{
  /* Both lists are sorted by (index, unit_offset), so a single merged walk
     with cursor J over our own elements is enough.  */
  unsigned j = 0;
  for (unsigned i = 0; i < other.m_elts.size (); i++)
    {
      unsigned other_index = other.m_elts[i].index;
      unsigned other_offset = other.m_elts[i].unit_offset;

      /* Advance J past all of our elements that sort before OTHER's
	 current one.  */
      while (j < m_elts.size ()
	     && (m_elts[j].index < other_index
		 || (m_elts[j].index == other_index
		     && m_elts[j].unit_offset < other_offset)))
	j++;

      /* OTHER's element must be matched exactly: same position, same
	 by_ref flag and an equal non-NULL value.  */
      if (j >= m_elts.size ()
	  || m_elts[j].index != other_index
	  || m_elts[j].unit_offset != other_offset
	  || m_elts[j].by_ref != other.m_elts[i].by_ref
	  || !m_elts[j].value
	  || !values_equal_for_ipcp_p (m_elts[j].value, other.m_elts[i].value))
	return false;
    }
  return true;
}
1239 :
1240 : /* Push all items in this list that describe parameter SRC_INDEX into RES as
1241 : ones describing DST_INDEX while subtracting UNIT_DELTA from their unit
1242 : offsets but skip those which would end up with a negative offset. */
1243 :
void
ipa_argagg_value_list::push_adjusted_values (unsigned src_index,
					     unsigned dest_index,
					     unsigned unit_delta,
					     vec<ipa_argagg_value> *res) const
{
  /* Find the first element for SRC_INDEX; the list is sorted, so all
     elements for it are contiguous from there.  */
  const ipa_argagg_value *av = get_elt_for_index (src_index);
  if (!av)
    return;
  unsigned prev_unit_offset = 0;
  bool first = true;
  for (; av < m_elts.end (); ++av)
    {
      /* Once past SRC_INDEX there can be no further matches.  */
      if (av->index > src_index)
	return;
      /* Skip entries whose adjusted offset would be negative.  */
      if (av->index == src_index
	  && (av->unit_offset >= unit_delta)
	  && av->value)
	{
	  ipa_argagg_value new_av;
	  gcc_checking_assert (av->value);
	  new_av.value = av->value;
	  new_av.unit_offset = av->unit_offset - unit_delta;
	  new_av.index = dest_index;
	  new_av.by_ref = av->by_ref;
	  gcc_assert (!av->killed);
	  new_av.killed = false;

	  /* Quick check that the offsets we push are indeed increasing.  */
	  gcc_assert (first
		      || new_av.unit_offset > prev_unit_offset);
	  prev_unit_offset = new_av.unit_offset;
	  first = false;

	  res->safe_push (new_av);
	}
    }
}
1282 :
1283 : /* Push to RES information about single lattices describing aggregate values in
1284 : PLATS as those describing parameter DEST_INDEX and the original offset minus
1285 : UNIT_DELTA. Return true if any item has been pushed to RES. */
1286 :
static bool
push_agg_values_from_plats (ipcp_param_lattices *plats, int dest_index,
			    unsigned unit_delta,
			    vec<ipa_argagg_value> *res)
{
  /* If the aggregate contents are not fully known, nothing can be
     pushed.  */
  if (plats->aggs_contain_variable)
    return false;

  bool pushed_sth = false;
  bool first = true;
  unsigned prev_unit_offset = 0;
  /* Walk the per-offset lattices; only those holding exactly one constant
     whose adjusted offset is non-negative are emitted.  */
  for (struct ipcp_agg_lattice *aglat = plats->aggs; aglat; aglat = aglat->next)
    if (aglat->is_single_const ()
	&& (aglat->offset / BITS_PER_UNIT - unit_delta) >= 0)
      {
	ipa_argagg_value iav;
	iav.value = aglat->values->value;
	iav.unit_offset = aglat->offset / BITS_PER_UNIT - unit_delta;
	iav.index = dest_index;
	iav.by_ref = plats->aggs_by_ref;
	iav.killed = false;

	/* Quick check that the offsets we push are indeed increasing.  */
	gcc_assert (first
		    || iav.unit_offset > prev_unit_offset);
	prev_unit_offset = iav.unit_offset;
	first = false;

	pushed_sth = true;
	res->safe_push (iav);
      }
  return pushed_sth;
}
1319 :
1320 : /* Turn all values in LIST that are not present in OTHER into NULL_TREEs.
1321 : Return the number of remaining valid entries. */
1322 :
static unsigned
intersect_argaggs_with (vec<ipa_argagg_value> &elts,
			const vec<ipa_argagg_value> &other)
{
  /* Both vectors are sorted by (index, unit_offset), so intersect them
     with a single merged walk; J is the cursor into OTHER.  */
  unsigned valid_entries = 0;
  unsigned j = 0;
  for (unsigned i = 0; i < elts.length (); i++)
    {
      /* Entries already invalidated stay invalidated.  */
      if (!elts[i].value)
	continue;

      unsigned this_index = elts[i].index;
      unsigned this_offset = elts[i].unit_offset;

      /* Advance J past all OTHER elements sorting before ours.  */
      while (j < other.length ()
	     && (other[j].index < this_index
		 || (other[j].index == this_index
		     && other[j].unit_offset < this_offset)))
	j++;

      /* OTHER exhausted: no counterpart can exist any more.  */
      if (j >= other.length ())
	{
	  elts[i].value = NULL_TREE;
	  continue;
	}

      /* Keep the entry only if OTHER has an exactly matching one.  */
      if (other[j].index == this_index
	  && other[j].unit_offset == this_offset
	  && other[j].by_ref == elts[i].by_ref
	  && other[j].value
	  && values_equal_for_ipcp_p (other[j].value, elts[i].value))
	valid_entries++;
      else
	elts[i].value = NULL_TREE;
    }
  return valid_entries;
}
1360 :
/* Mark both aggregate and scalar lattices as containing an unknown variable,
   return true if any of them has not been marked as such so far.  If
   MAKE_SIMPLE_RECIPIENTS is true, set the lattices that can only hold one
   value to being recipients only, otherwise also set them to bottom. */
1365 :
1366 : static inline bool
1367 166475 : set_all_contains_variable (class ipcp_param_lattices *plats,
1368 : bool make_simple_recipients = false)
1369 : {
1370 166475 : bool ret;
1371 166475 : ret = plats->itself.set_contains_variable ();
1372 166475 : ret |= plats->ctxlat.set_contains_variable ();
1373 166475 : ret |= set_agg_lats_contain_variable (plats);
1374 166475 : if (make_simple_recipients)
1375 : {
1376 27997 : ret |= plats->bits_lattice.set_recipient_only ();
1377 27997 : ret |= plats->m_value_range.set_recipient_only ();
1378 : }
1379 : else
1380 : {
1381 138478 : ret |= plats->bits_lattice.set_to_bottom ();
1382 138478 : ret |= plats->m_value_range.set_to_bottom ();
1383 : }
1384 166475 : return ret;
1385 : }
1386 :
1387 : /* Worker of call_for_symbol_thunks_and_aliases, increment the integer DATA
1388 : points to by the number of callers to NODE. */
1389 :
1390 : static bool
1391 97685 : count_callers (cgraph_node *node, void *data)
1392 : {
1393 97685 : int *caller_count = (int *) data;
1394 :
1395 401673 : for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
1396 : /* Local thunks can be handled transparently, but if the thunk cannot
1397 : be optimized out, count it as a real use. */
1398 303988 : if (!cs->caller->thunk || !cs->caller->local)
1399 303988 : ++*caller_count;
1400 97685 : return false;
1401 : }
1402 :
1403 : /* Worker of call_for_symbol_thunks_and_aliases, it is supposed to be called on
1404 : the one caller of some other node. Set the caller's corresponding flag. */
1405 :
1406 : static bool
1407 54569 : set_single_call_flag (cgraph_node *node, void *)
1408 : {
1409 54569 : cgraph_edge *cs = node->callers;
1410 : /* Local thunks can be handled transparently, skip them. */
1411 54569 : while (cs && cs->caller->thunk && cs->caller->local)
1412 0 : cs = cs->next_caller;
1413 54569 : if (cs)
1414 54027 : if (ipa_node_params* info = ipa_node_params_sum->get (cs->caller))
1415 : {
1416 54026 : info->node_calling_single_call = true;
1417 54026 : return true;
1418 : }
1419 : return false;
1420 : }
1421 :
1422 : /* Initialize ipcp_lattices. */
1423 :
1424 : static void
1425 1260412 : initialize_node_lattices (struct cgraph_node *node)
1426 : {
1427 1260412 : ipa_node_params *info = ipa_node_params_sum->get (node);
1428 1260412 : struct cgraph_edge *ie;
1429 1260412 : bool disable = false, variable = false;
1430 1260412 : int i;
1431 :
1432 1260412 : gcc_checking_assert (node->has_gimple_body_p ());
1433 :
1434 1260412 : if (!ipa_get_param_count (info))
1435 : disable = true;
1436 1033535 : else if (node->local)
1437 : {
1438 86528 : int caller_count = 0;
1439 86528 : node->call_for_symbol_thunks_and_aliases (count_callers, &caller_count,
1440 : true);
1441 86528 : if (caller_count == 1)
1442 54027 : node->call_for_symbol_thunks_and_aliases (set_single_call_flag,
1443 : NULL, true);
1444 32501 : else if (caller_count == 0)
1445 : {
1446 1 : gcc_checking_assert (!opt_for_fn (node->decl, flag_toplevel_reorder));
1447 : variable = true;
1448 : }
1449 : }
1450 : else
1451 : {
1452 : /* When cloning is allowed, we can assume that externally visible
1453 : functions are not called. We will compensate this by cloning
1454 : later. */
1455 947007 : if (ipcp_versionable_function_p (node)
1456 947007 : && ipcp_cloning_candidate_p (node))
1457 : variable = true;
1458 : else
1459 : disable = true;
1460 : }
1461 :
1462 725 : if (dump_file && (dump_flags & TDF_DETAILS)
1463 1260579 : && !node->alias && !node->thunk)
1464 : {
1465 167 : fprintf (dump_file, "Initializing lattices of %s\n",
1466 : node->dump_name ());
1467 167 : if (disable || variable)
1468 132 : fprintf (dump_file, " Marking all lattices as %s\n",
1469 : disable ? "BOTTOM" : "VARIABLE");
1470 : }
1471 :
1472 1260412 : auto_vec<bool, 16> surviving_params;
1473 1260412 : bool pre_modified = false;
1474 :
1475 1260412 : clone_info *cinfo = clone_info::get (node);
1476 :
1477 1260412 : if (!disable && cinfo && cinfo->param_adjustments)
1478 : {
1479 : /* At the moment all IPA optimizations should use the number of
1480 : parameters of the prevailing decl as the m_always_copy_start.
1481 : Handling any other value would complicate the code below, so for the
1482 : time bing let's only assert it is so. */
1483 0 : gcc_assert ((cinfo->param_adjustments->m_always_copy_start
1484 : == ipa_get_param_count (info))
1485 : || cinfo->param_adjustments->m_always_copy_start < 0);
1486 :
1487 0 : pre_modified = true;
1488 0 : cinfo->param_adjustments->get_surviving_params (&surviving_params);
1489 :
1490 0 : if (dump_file && (dump_flags & TDF_DETAILS)
1491 0 : && !node->alias && !node->thunk)
1492 : {
1493 : bool first = true;
1494 0 : for (int j = 0; j < ipa_get_param_count (info); j++)
1495 : {
1496 0 : if (j < (int) surviving_params.length ()
1497 0 : && surviving_params[j])
1498 0 : continue;
1499 0 : if (first)
1500 : {
1501 0 : fprintf (dump_file,
1502 : " The following parameters are dead on arrival:");
1503 0 : first = false;
1504 : }
1505 0 : fprintf (dump_file, " %u", j);
1506 : }
1507 0 : if (!first)
1508 0 : fprintf (dump_file, "\n");
1509 : }
1510 : }
1511 :
1512 6934035 : for (i = 0; i < ipa_get_param_count (info); i++)
1513 : {
1514 2320044 : ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
1515 2320044 : tree type = ipa_get_type (info, i);
1516 2320044 : if (disable
1517 221334 : || !ipa_get_type (info, i)
1518 2541378 : || (pre_modified && (surviving_params.length () <= (unsigned) i
1519 0 : || !surviving_params[i])))
1520 : {
1521 2098710 : plats->itself.set_to_bottom ();
1522 2098710 : plats->ctxlat.set_to_bottom ();
1523 2098710 : set_agg_lats_to_bottom (plats);
1524 2098710 : plats->bits_lattice.set_to_bottom ();
1525 2098710 : plats->m_value_range.init (type);
1526 2098710 : plats->m_value_range.set_to_bottom ();
1527 : }
1528 : else
1529 : {
1530 221334 : plats->m_value_range.init (type);
1531 221334 : if (variable)
1532 27997 : set_all_contains_variable (plats, true);
1533 : }
1534 : }
1535 :
1536 1395966 : for (ie = node->indirect_calls; ie; ie = ie->next_callee)
1537 135554 : if (ie->indirect_info->param_index >= 0
1538 144985 : && is_a <cgraph_polymorphic_indirect_info *> (ie->indirect_info))
1539 9431 : ipa_get_parm_lattices (info,
1540 9431 : ie->indirect_info->param_index)->virt_call = 1;
1541 1260412 : }
1542 :
1543 : /* Return VALUE if it is NULL_TREE or if it can be directly safely IPA-CP
1544 : propagated to a parameter of type PARAM_TYPE, or return a fold-converted
1545 : VALUE to PARAM_TYPE if that is possible. Return NULL_TREE otherwise. */
1546 :
1547 : static tree
1548 5155718 : ipacp_value_safe_for_type (tree param_type, tree value)
1549 : {
1550 5155718 : if (!value)
1551 : return NULL_TREE;
1552 5155384 : tree val_type = TREE_TYPE (value);
1553 5155384 : if (param_type == val_type
1554 5155384 : || useless_type_conversion_p (param_type, val_type))
1555 5152015 : return value;
1556 3369 : if (fold_convertible_p (param_type, value))
1557 3164 : return fold_convert (param_type, value);
1558 : else
1559 : return NULL_TREE;
1560 : }
1561 :
1562 : /* Return the result of a (possibly arithmetic) operation determined by OPCODE
1563 : on the constant value INPUT. OPERAND is 2nd operand for binary operation
1564 : and is required for binary operations. RES_TYPE, required when opcode is
1565 : not NOP_EXPR, is the type in which any operation is to be performed. Return
1566 : NULL_TREE if that cannot be determined or be considered an interprocedural
1567 : invariant. */
1568 :
static tree
ipa_get_jf_arith_result (enum tree_code opcode, tree input, tree operand,
			 tree res_type)
{
  tree res;

  /* A plain pass-through needs no computation at all.  */
  if (opcode == NOP_EXPR)
    return input;
  if (!is_gimple_ip_invariant (input))
    return NULL_TREE;

  /* ASSERT_EXPR encodes "the value is known to equal OPERAND".  */
  if (opcode == ASSERT_EXPR)
    {
      if (values_equal_for_ipcp_p (input, operand))
	return input;
      else
	return NULL_TREE;
    }

  if (TREE_CODE_CLASS (opcode) == tcc_unary)
    res = fold_unary (opcode, res_type, input);
  else
    res = fold_binary (opcode, res_type, input, operand);

  /* Folding may produce something that is not an IP invariant; reject
     that too.  */
  if (res && !is_gimple_ip_invariant (res))
    return NULL_TREE;

  return res;
}
1598 :
1599 : /* Return the result of an ancestor jump function JFUNC on the constant value
1600 : INPUT. Return NULL_TREE if that cannot be determined. */
1601 :
static tree
ipa_get_jf_ancestor_result (struct ipa_jump_func *jfunc, tree input)
{
  gcc_checking_assert (TREE_CODE (input) != TREE_BINFO);
  if (TREE_CODE (input) == ADDR_EXPR)
    {
      gcc_checking_assert (is_gimple_ip_invariant_address (input));
      poly_int64 off = ipa_get_jf_ancestor_offset (jfunc);
      if (known_eq (off, 0))
	return input;
      /* Build &MEM_REF[input + byte_offset], i.e. the address of the
	 ancestor sub-object at the jump function's offset.  */
      poly_int64 byte_offset = exact_div (off, BITS_PER_UNIT);
      return build1 (ADDR_EXPR, TREE_TYPE (input),
		     fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (input)), input,
				  build_int_cst (ptr_type_node, byte_offset)));
    }
  /* A NULL pointer stays NULL if the jump function guarantees that.  */
  else if (ipa_get_jf_ancestor_keep_null (jfunc)
	   && zerop (input))
    return input;
  else
    return NULL_TREE;
}
1623 :
1624 : /* Determine whether JFUNC evaluates to a single known constant value and if
1625 : so, return it. Otherwise return NULL. INFO describes the caller node or
1626 : the one it is inlined to, so that pass-through jump functions can be
1627 : evaluated. PARM_TYPE is the type of the parameter to which the result is
1628 : passed. */
1629 :
tree
ipa_value_from_jfunc (class ipa_node_params *info, struct ipa_jump_func *jfunc,
		      tree parm_type)
{
  /* Without a known parameter type the value cannot be type-checked.  */
  if (!parm_type)
    return NULL_TREE;
  if (jfunc->type == IPA_JF_CONST)
    return ipacp_value_safe_for_type (parm_type, ipa_get_jf_constant (jfunc));
  else if (jfunc->type == IPA_JF_PASS_THROUGH
	   || jfunc->type == IPA_JF_ANCESTOR)
    {
      tree input;
      int idx;

      if (jfunc->type == IPA_JF_PASS_THROUGH)
	idx = ipa_get_jf_pass_through_formal_id (jfunc);
      else
	idx = ipa_get_jf_ancestor_formal_id (jfunc);

      /* For clones the propagated constants are already recorded;
	 otherwise consult the lattice of the source parameter.  */
      if (info->ipcp_orig_node)
	input = info->known_csts[idx];
      else
	{
	  ipcp_lattice<tree> *lat;

	  if (info->lattices.is_empty ()
	      || idx >= ipa_get_param_count (info))
	    return NULL_TREE;
	  lat = ipa_get_scalar_lat (info, idx);
	  if (!lat->is_single_const ())
	    return NULL_TREE;
	  input = lat->values->value;
	}

      if (!input)
	return NULL_TREE;

      if (jfunc->type == IPA_JF_PASS_THROUGH)
	{
	  enum tree_code opcode = ipa_get_jf_pass_through_operation (jfunc);
	  tree op2 = ipa_get_jf_pass_through_operand (jfunc);
	  /* NOP_EXPR pass-throughs carry no operation type.  */
	  tree op_type
	    = (opcode == NOP_EXPR) ? NULL_TREE
	    : ipa_get_jf_pass_through_op_type (jfunc);
	  tree cstval = ipa_get_jf_arith_result (opcode, input, op2, op_type);
	  return ipacp_value_safe_for_type (parm_type, cstval);
	}
      else
	return ipacp_value_safe_for_type (parm_type,
					  ipa_get_jf_ancestor_result (jfunc,
								      input));
    }
  else
    return NULL_TREE;
}
1685 :
1686 : /* Determine whether JFUNC evaluates to single known polymorphic context, given
1687 : that INFO describes the caller node or the one it is inlined to, CS is the
1688 : call graph edge corresponding to JFUNC and CSIDX index of the described
1689 : parameter. */
1690 :
ipa_polymorphic_call_context
ipa_context_from_jfunc (ipa_node_params *info, cgraph_edge *cs, int csidx,
			ipa_jump_func *jfunc)
{
  ipa_edge_args *args = ipa_edge_args_sum->get (cs);
  ipa_polymorphic_call_context ctx;
  /* NOTE(review): the `cs ?` guard follows an unconditional get (cs)
     above — presumably CS is never NULL here; confirm against callers.  */
  ipa_polymorphic_call_context *edge_ctx
    = cs ? ipa_get_ith_polymorhic_call_context (args, csidx) : NULL;

  /* Start from the context recorded on the edge, if it says anything.  */
  if (edge_ctx && !edge_ctx->useless_p ())
    ctx = *edge_ctx;

  if (jfunc->type == IPA_JF_PASS_THROUGH
      || jfunc->type == IPA_JF_ANCESTOR)
    {
      ipa_polymorphic_call_context srcctx;
      int srcidx;
      bool type_preserved = true;
      if (jfunc->type == IPA_JF_PASS_THROUGH)
	{
	  /* Arithmetic pass-throughs destroy any context information.  */
	  if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
	    return ctx;
	  type_preserved = ipa_get_jf_pass_through_type_preserved (jfunc);
	  srcidx = ipa_get_jf_pass_through_formal_id (jfunc);
	}
      else
	{
	  type_preserved = ipa_get_jf_ancestor_type_preserved (jfunc);
	  srcidx = ipa_get_jf_ancestor_formal_id (jfunc);
	}
      /* For clones use the recorded known contexts; otherwise consult the
	 source parameter's lattice.  */
      if (info->ipcp_orig_node)
	{
	  if (info->known_contexts.exists ())
	    srcctx = info->known_contexts[srcidx];
	}
      else
	{
	  if (info->lattices.is_empty ()
	      || srcidx >= ipa_get_param_count (info))
	    return ctx;
	  ipcp_lattice<ipa_polymorphic_call_context> *lat;
	  lat = ipa_get_poly_ctx_lat (info, srcidx);
	  if (!lat->is_single_const ())
	    return ctx;
	  srcctx = lat->values->value;
	}
      if (srcctx.useless_p ())
	return ctx;
      /* Apply the jump function's adjustments to the source context.  */
      if (jfunc->type == IPA_JF_ANCESTOR)
	srcctx.offset_by (ipa_get_jf_ancestor_offset (jfunc));
      if (!type_preserved)
	srcctx.possible_dynamic_type_change (cs->in_polymorphic_cdtor);
      srcctx.combine_with (ctx);
      return srcctx;
    }

  return ctx;
}
1749 :
1750 : /* Emulate effects of unary OPERATION and/or conversion from SRC_TYPE to
1751 : DST_TYPE on value range in SRC_VR and store it to DST_VR. Return true if
1752 : the result is a range that is not VARYING nor UNDEFINED. */
1753 :
bool
ipa_vr_operation_and_type_effects (vrange &dst_vr,
				   const vrange &src_vr,
				   enum tree_code operation,
				   tree dst_type, tree src_type)
{
  /* Both types must be representable in the IPA VR machinery.  */
  if (!ipa_vr_supported_type_p (dst_type)
      || !ipa_vr_supported_type_p (src_type))
    return false;

  range_op_handler handler (operation);
  if (!handler)
    return false;

  /* Use a VARYING second operand so the fold models OPERATION applied to
     SRC_VR alone (effectively a unary use of the handler).  */
  value_range varying (dst_type);
  varying.set_varying (dst_type);

  return (handler.operand_check_p (dst_type, src_type, dst_type)
	  && handler.fold_range (dst_vr, dst_type, src_vr, varying)
	  && !dst_vr.varying_p ()
	  && !dst_vr.undefined_p ());
}
1776 :
/* Same as above, but the SRC_VR argument is an IPA_VR which must
   first be extracted into a vrange. */
1779 :
1780 : bool
1781 8934268 : ipa_vr_operation_and_type_effects (vrange &dst_vr,
1782 : const ipa_vr &src_vr,
1783 : enum tree_code operation,
1784 : tree dst_type, tree src_type)
1785 : {
1786 8934268 : value_range tmp;
1787 8934268 : src_vr.get_vrange (tmp);
1788 8934268 : return ipa_vr_operation_and_type_effects (dst_vr, tmp, operation,
1789 8934268 : dst_type, src_type);
1790 8934268 : }
1791 :
1792 : /* Given a PASS_THROUGH jump function JFUNC that takes as its source SRC_VR of
1793 : SRC_TYPE and the result needs to be DST_TYPE, if any value range information
1794 : can be deduced at all, intersect VR with it. CONTEXT_NODE is the call graph
1795 : node representing the function for which optimization flags should be
1796 : evaluated. */
1797 :
static void
ipa_vr_intersect_with_arith_jfunc (vrange &vr,
				   ipa_jump_func *jfunc,
				   cgraph_node *context_node,
				   const value_range &src_vr,
				   tree src_type,
				   tree dst_type)
{
  /* Nothing to deduce from an empty or varying source range.  */
  if (src_vr.undefined_p () || src_vr.varying_p ())
    return;

  enum tree_code operation = ipa_get_jf_pass_through_operation (jfunc);
  if (TREE_CODE_CLASS (operation) == tcc_unary)
    {
      value_range op_res;
      const value_range *inter_vr;
      if (operation != NOP_EXPR)
	{
	  /* First apply the unary operation in its own type...  */
	  tree operation_type = ipa_get_jf_pass_through_op_type (jfunc);
	  op_res.set_varying (operation_type);
	  if (!ipa_vr_operation_and_type_effects (op_res, src_vr, operation,
						  operation_type, src_type))
	    return;
	  if (src_type == dst_type)
	    {
	      vr.intersect (op_res);
	      return;
	    }
	  inter_vr = &op_res;
	  src_type = operation_type;
	}
      else
	inter_vr = &src_vr;

      /* ...then model the conversion to DST_TYPE.  */
      value_range tmp_res (dst_type);
      if (ipa_vr_operation_and_type_effects (tmp_res, *inter_vr, NOP_EXPR,
					     dst_type, src_type))
	vr.intersect (tmp_res);
      return;
    }

  /* Binary case: fold OPERATION over SRC_VR and the range of the constant
     second operand.  */
  tree operand = ipa_get_jf_pass_through_operand (jfunc);
  range_op_handler handler (operation);
  if (!handler)
    return;
  value_range op_vr (TREE_TYPE (operand));
  ipa_get_range_from_ip_invariant (op_vr, operand, context_node);

  tree operation_type = ipa_get_jf_pass_through_op_type (jfunc);
  value_range op_res (operation_type);
  if (!ipa_vr_supported_type_p (operation_type)
      || !handler.operand_check_p (operation_type, src_type, op_vr.type ())
      || !handler.fold_range (op_res, operation_type, src_vr, op_vr))
    return;

  /* Finally convert the folded result to DST_TYPE before intersecting.  */
  value_range tmp_res (dst_type);
  if (ipa_vr_operation_and_type_effects (tmp_res, op_res, NOP_EXPR, dst_type,
					 operation_type))
    vr.intersect (tmp_res);
}
1857 1340 : }
1858 :
/* Determine range of JFUNC given that INFO describes the caller node or
   the one it is inlined to, CS is the call graph edge corresponding to JFUNC
   and PARM_TYPE of the parameter.  */

void
ipa_value_range_from_jfunc (vrange &vr,
			    ipa_node_params *info, cgraph_edge *cs,
			    ipa_jump_func *jfunc, tree parm_type)
{
  vr.set_varying (parm_type);

  /* Start from the range recorded in the jump function itself, converted to
     the type of the parameter.  */
  if (jfunc->m_vr && jfunc->m_vr->known_p ())
    ipa_vr_operation_and_type_effects (vr,
				       *jfunc->m_vr,
				       NOP_EXPR, parm_type,
				       jfunc->m_vr->type ());
  /* A singleton cannot be narrowed any further.  */
  if (vr.singleton_p ())
    return;

  if (jfunc->type == IPA_JF_PASS_THROUGH)
    {
      /* For pass-through jump functions, try to refine the result with the
	 range the IPA transformation summary recorded for the source
	 parameter in the caller (or in the node it was inlined into).  */
      ipcp_transformation *sum
	= ipcp_get_transformation_summary (cs->caller->inlined_to
					   ? cs->caller->inlined_to
					   : cs->caller);
      if (!sum || !sum->m_vr)
	return;

      int idx = ipa_get_jf_pass_through_formal_id (jfunc);

      if (!(*sum->m_vr)[idx].known_p ())
	return;
      tree src_type = ipa_get_type (info, idx);
      value_range srcvr;
      (*sum->m_vr)[idx].get_vrange (srcvr);

      ipa_vr_intersect_with_arith_jfunc (vr, jfunc, cs->caller, srcvr, src_type,
					 parm_type);
    }
}
1899 :
/* Determine whether ITEM, jump function for an aggregate part, evaluates to a
   single known constant value and if so, return it.  Otherwise return NULL.
   NODE and INFO describes the caller node or the one it is inlined to, and
   its related info.  */

tree
ipa_agg_value_from_jfunc (ipa_node_params *info, cgraph_node *node,
			  const ipa_agg_jf_item *item)
{
  tree value = NULL_TREE;
  int src_idx;

  /* Reject items with unknown or nonsensical offsets; offsets are later
     stored as unsigned ints measured in bytes, hence the UINT_MAX bound.  */
  if (item->offset < 0
      || item->jftype == IPA_JF_UNKNOWN
      || item->offset >= (HOST_WIDE_INT) UINT_MAX * BITS_PER_UNIT)
    return NULL_TREE;

  if (item->jftype == IPA_JF_CONST)
    return item->value.constant;

  gcc_checking_assert (item->jftype == IPA_JF_PASS_THROUGH
		       || item->jftype == IPA_JF_LOAD_AGG);

  src_idx = item->value.pass_through.formal_id;

  if (info->ipcp_orig_node)
    {
      /* The caller is itself an IPA-CP clone; look the source value up in
	 the constants / aggregate values known for the clone.  */
      if (item->jftype == IPA_JF_PASS_THROUGH)
	value = info->known_csts[src_idx];
      else if (ipcp_transformation *ts = ipcp_get_transformation_summary (node))
	{
	  ipa_argagg_value_list avl (ts);
	  value = avl.get_value (src_idx,
				 item->value.load_agg.offset / BITS_PER_UNIT,
				 item->value.load_agg.by_ref);
	}
    }
  else if (!info->lattices.is_empty ())
    {
      /* During propagation, consult the caller's lattices instead.  */
      class ipcp_param_lattices *src_plats
	= ipa_get_parm_lattices (info, src_idx);

      if (item->jftype == IPA_JF_PASS_THROUGH)
	{
	  struct ipcp_lattice<tree> *lat = &src_plats->itself;

	  /* Only a lattice holding exactly one constant is usable.  */
	  if (!lat->is_single_const ())
	    return NULL_TREE;

	  value = lat->values->value;
	}
      else if (src_plats->aggs
	       && !src_plats->aggs_bottom
	       && !src_plats->aggs_contain_variable
	       && src_plats->aggs_by_ref == item->value.load_agg.by_ref)
	{
	  struct ipcp_agg_lattice *aglat;

	  /* Aggregate lattices are sorted by offset, so we can stop as soon
	     as we walk past the offset we are loading from.  */
	  for (aglat = src_plats->aggs; aglat; aglat = aglat->next)
	    {
	      if (aglat->offset > item->value.load_agg.offset)
		break;

	      if (aglat->offset == item->value.load_agg.offset)
		{
		  if (aglat->is_single_const ())
		    value = aglat->values->value;
		  break;
		}
	    }
	}
    }

  if (!value)
    return NULL_TREE;

  if (item->jftype == IPA_JF_LOAD_AGG)
    {
      tree load_type = item->value.load_agg.type;
      tree value_type = TREE_TYPE (value);

      /* Ensure value type is compatible with load type.  */
      if (!useless_type_conversion_p (load_type, value_type))
	return NULL_TREE;
    }

  /* Apply the (possibly NOP) arithmetic operation recorded in the item and
     make sure the result is safe for the destination type.  */
  tree cstval = ipa_get_jf_arith_result (item->value.pass_through.operation,
					 value,
					 item->value.pass_through.operand,
					 item->value.pass_through.op_type);
  return ipacp_value_safe_for_type (item->type, cstval);
}
1992 :
/* Process all items in AGG_JFUNC relative to caller (or the node the original
   caller is inlined to) NODE which described by INFO and push the results to
   RES as describing values passed in parameter DST_INDEX.  */

void
ipa_push_agg_values_from_jfunc (ipa_node_params *info, cgraph_node *node,
				ipa_agg_jump_function *agg_jfunc,
				unsigned dst_index,
				vec<ipa_argagg_value> *res)
{
  unsigned prev_unit_offset = 0;
  bool first = true;

  for (const ipa_agg_jf_item &item : agg_jfunc->items)
    {
      tree value = ipa_agg_value_from_jfunc (info, node, &item);
      if (!value)
	continue;

      ipa_argagg_value iav;
      iav.value = value;
      iav.unit_offset = item.offset / BITS_PER_UNIT;
      iav.index = dst_index;
      iav.by_ref = agg_jfunc->by_ref;
      iav.killed = 0;

      /* Jump function items are expected to be sorted by offset, so the
	 pushed values must have strictly increasing unit offsets.  */
      gcc_assert (first
		  || iav.unit_offset > prev_unit_offset);
      prev_unit_offset = iav.unit_offset;
      first = false;

      res->safe_push (iav);
    }
}
2027 :
/* If checking is enabled, verify that no lattice is in the TOP state, i.e. not
   bottom, not containing a variable component and without any known value at
   the same time.  */

DEBUG_FUNCTION void
ipcp_verify_propagated_values (void)
{
  struct cgraph_node *node;

  FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
    {
      ipa_node_params *info = ipa_node_params_sum->get (node);
      /* Skip functions for which IPA-CP lattices were never computed.  */
      if (!opt_for_fn (node->decl, flag_ipa_cp)
	  || !opt_for_fn (node->decl, optimize))
	continue;
      int i, count = ipa_get_param_count (info);

      for (i = 0; i < count; i++)
	{
	  ipcp_lattice<tree> *lat = ipa_get_scalar_lat (info, i);

	  /* A lattice that is neither bottom nor variable must hold at
	     least one value once propagation has finished.  */
	  if (!lat->bottom
	      && !lat->contains_variable
	      && lat->values_count == 0)
	    {
	      /* Dump the whole symbol table and all lattices before dying to
		 make the inconsistency debuggable.  */
	      if (dump_file)
		{
		  symtab->dump (dump_file);
		  fprintf (dump_file, "\nIPA lattices after constant "
			   "propagation, before gcc_unreachable:\n");
		  print_all_lattices (dump_file, true, false);
		}

	      gcc_unreachable ();
	    }
	}
    }
}
2066 :
/* Return true iff X and Y should be considered equal contexts by IPA-CP.
   This overload exists so the ipcp_lattice template can compare values of
   type ipa_polymorphic_call_context the same way it compares trees.  */

static bool
values_equal_for_ipcp_p (ipa_polymorphic_call_context x,
			 ipa_polymorphic_call_context y)
{
  return x.equal_to (y);
}
2075 :
2076 :
/* Add a new value source to the value represented by THIS, marking that a
   value comes from edge CS and (if the underlying jump function is a
   pass-through or an ancestor one) from a caller value SRC_VAL of a caller
   parameter described by SRC_INDEX.  OFFSET is negative if the source was the
   scalar value of the parameter itself or the offset within an aggregate.  */

template <typename valtype>
void
ipcp_value<valtype>::add_source (cgraph_edge *cs, ipcp_value *src_val,
				 int src_idx, HOST_WIDE_INT offset)
{
  ipcp_value_source<valtype> *src;

  /* Sources are pool-allocated; they are freed en masse, not individually.  */
  src = new (ipcp_sources_pool.allocate ()) ipcp_value_source<valtype>;
  src->offset = offset;
  src->cs = cs;
  src->val = src_val;
  src->index = src_idx;

  /* Prepend to the intrusive singly-linked list of sources.  */
  src->next = sources;
  sources = src;
}
2099 :
2100 : /* Allocate a new ipcp_value holding a tree constant, initialize its value to
2101 : SOURCE and clear all other fields. */
2102 :
2103 : static ipcp_value<tree> *
2104 139875 : allocate_and_init_ipcp_value (tree cst, unsigned same_lat_gen_level)
2105 : {
2106 139875 : ipcp_value<tree> *val;
2107 :
2108 279750 : val = new (ipcp_cst_values_pool.allocate ()) ipcp_value<tree>();
2109 139875 : val->value = cst;
2110 139875 : val->self_recursion_generated_level = same_lat_gen_level;
2111 139875 : return val;
2112 : }
2113 :
2114 : /* Allocate a new ipcp_value holding a polymorphic context, initialize its
2115 : value to SOURCE and clear all other fields. */
2116 :
2117 : static ipcp_value<ipa_polymorphic_call_context> *
2118 7670 : allocate_and_init_ipcp_value (ipa_polymorphic_call_context ctx,
2119 : unsigned same_lat_gen_level)
2120 : {
2121 7670 : ipcp_value<ipa_polymorphic_call_context> *val;
2122 :
2123 7670 : val = new (ipcp_poly_ctx_values_pool.allocate ())
2124 7670 : ipcp_value<ipa_polymorphic_call_context>();
2125 7670 : val->value = ctx;
2126 7670 : val->self_recursion_generated_level = same_lat_gen_level;
2127 7670 : return val;
2128 : }
2129 :
/* Try to add NEWVAL to LAT, potentially creating a new ipcp_value for it.  CS,
   SRC_VAL SRC_INDEX and OFFSET are meant for add_source and have the same
   meaning.  OFFSET -1 means the source is scalar and not a part of an
   aggregate.  If non-NULL, VAL_P records address of existing or newly added
   ipcp_value.

   If the value is generated for a self-recursive call as a result of an
   arithmetic pass-through jump-function acting on a value in the same lattice,
   SAME_LAT_GEN_LEVEL must be the length of such chain, otherwise it must be
   zero.  If it is non-zero, PARAM_IPA_CP_VALUE_LIST_SIZE limit is ignored.  */

template <typename valtype>
bool
ipcp_lattice<valtype>::add_value (valtype newval, cgraph_edge *cs,
				  ipcp_value<valtype> *src_val,
				  int src_idx, HOST_WIDE_INT offset,
				  ipcp_value<valtype> **val_p,
				  unsigned same_lat_gen_level)
{
  ipcp_value<valtype> *val, *last_val = NULL;

  if (val_p)
    *val_p = NULL;

  /* A bottom lattice absorbs everything and never changes.  */
  if (bottom)
    return false;

  /* First look whether NEWVAL is already present; if so, only record the
     new source (and possibly bump the self-recursion generation level).  */
  for (val = values; val; last_val = val, val = val->next)
    if (values_equal_for_ipcp_p (val->value, newval))
      {
	if (val_p)
	  *val_p = val;

	if (val->self_recursion_generated_level < same_lat_gen_level)
	  val->self_recursion_generated_level = same_lat_gen_level;

	/* Within an SCC the same edge may be processed repeatedly; avoid
	   accumulating duplicate sources for it.  */
	if (ipa_edge_within_scc (cs))
	  {
	    ipcp_value_source<valtype> *s;
	    for (s = val->sources; s; s = s->next)
	      if (s->cs == cs && s->val == src_val)
		break;
	    if (s)
	      return false;
	  }

	val->add_source (cs, src_val, src_idx, offset);
	return false;
      }

  /* Too many distinct values: give up on this lattice entirely, unless the
     value was generated for self-recursion, in which case the limit does not
     apply.  */
  if (!same_lat_gen_level && values_count >= opt_for_fn (cs->callee->decl,
						param_ipa_cp_value_list_size))
    {
      /* We can only free sources, not the values themselves, because sources
	 of other values in this SCC might point to them.  */
      for (val = values; val; val = val->next)
	{
	  while (val->sources)
	    {
	      ipcp_value_source<valtype> *src = val->sources;
	      val->sources = src->next;
	      ipcp_sources_pool.remove ((ipcp_value_source<tree>*)src);
	    }
	}
      values = NULL;
      return set_to_bottom ();
    }

  values_count++;
  val = allocate_and_init_ipcp_value (newval, same_lat_gen_level);
  val->add_source (cs, src_val, src_idx, offset);
  val->next = NULL;

  /* Add the new value to end of value list, which can reduce iterations
     of propagation stage for recursive function.  */
  if (last_val)
    last_val->next = val;
  else
    values = val;

  if (val_p)
    *val_p = val;

  return true;
}
2215 :
2216 : /* A helper function that returns result of operation specified by OPCODE on
2217 : the value of SRC_VAL. If non-NULL, OPND1_TYPE is expected type for the
2218 : value of SRC_VAL. If the operation is binary, OPND2 is a constant value
2219 : acting as its second operand. OP_TYPE is the type in which the operation is
2220 : performed. */
2221 :
2222 : static tree
2223 21345 : get_val_across_arith_op (enum tree_code opcode,
2224 : tree opnd1_type,
2225 : tree opnd2,
2226 : ipcp_value<tree> *src_val,
2227 : tree op_type)
2228 : {
2229 21345 : tree opnd1 = src_val->value;
2230 :
2231 : /* Skip source values that is incompatible with specified type. */
2232 21345 : if (opnd1_type
2233 21345 : && !useless_type_conversion_p (opnd1_type, TREE_TYPE (opnd1)))
2234 : return NULL_TREE;
2235 :
2236 21345 : return ipa_get_jf_arith_result (opcode, opnd1, opnd2, op_type);
2237 : }
2238 :
/* Propagate values through an arithmetic transformation described by a jump
   function associated with edge CS, taking values from SRC_LAT and putting
   them into DEST_LAT.  OPND1_TYPE, if non-NULL, is the expected type for the
   values in SRC_LAT.  OPND2 is a constant value if transformation is a binary
   operation.  SRC_OFFSET specifies offset in an aggregate if SRC_LAT describes
   lattice of a part of an aggregate, otherwise it should be -1.  SRC_IDX is
   the index of the source parameter.  OP_TYPE is the type in which the
   operation is performed and can be NULL when OPCODE is NOP_EXPR.  RES_TYPE is
   the value type of result being propagated into.  Return true if DEST_LAT
   changed.  */

static bool
propagate_vals_across_arith_jfunc (cgraph_edge *cs,
				   enum tree_code opcode,
				   tree opnd1_type,
				   tree opnd2,
				   ipcp_lattice<tree> *src_lat,
				   ipcp_lattice<tree> *dest_lat,
				   HOST_WIDE_INT src_offset,
				   int src_idx,
				   tree op_type,
				   tree res_type)
{
  ipcp_value<tree> *src_val;
  bool ret = false;

  /* Due to circular dependencies, propagating within an SCC through arithmetic
     transformation would create infinite number of values.  But for
     self-feeding recursive function, we could allow propagation in a limited
     count, and this can enable a simple kind of recursive function versioning.
     For other scenario, we would just make lattices bottom.  */
  if (opcode != NOP_EXPR && ipa_edge_within_scc (cs))
    {
      int i;

      int max_recursive_depth = opt_for_fn(cs->caller->decl,
					   param_ipa_cp_max_recursive_depth);
      /* Only the self-feeding case (source and destination lattice being the
	 same) is handled; anything else within an SCC degrades to variable.  */
      if (src_lat != dest_lat || max_recursive_depth < 1)
	return dest_lat->set_contains_variable ();

      /* No benefit if recursive execution is in low probability.  */
      if (cs->sreal_frequency () * 100
	  <= ((sreal) 1) * opt_for_fn (cs->caller->decl,
				       param_ipa_cp_min_recursive_probability))
	return dest_lat->set_contains_variable ();

      auto_vec<ipcp_value<tree> *, 8> val_seeds;

      for (src_val = src_lat->values; src_val; src_val = src_val->next)
	{
	  /* Now we do not use self-recursively generated value as propagation
	     source, this is absolutely conservative, but could avoid explosion
	     of lattice's value space, especially when one recursive function
	     calls another recursive.  */
	  if (src_val->self_recursion_generated_p ())
	    {
	      ipcp_value_source<tree> *s;

	      /* If the lattice has already been propagated for the call site,
		 no need to do that again.  */
	      for (s = src_val->sources; s; s = s->next)
		if (s->cs == cs)
		  return dest_lat->set_contains_variable ();
	    }
	  else
	    val_seeds.safe_push (src_val);
	}

      gcc_assert ((int) val_seeds.length () <= param_ipa_cp_value_list_size);

      /* Recursively generate lattice values with a limited count.  */
      FOR_EACH_VEC_ELT (val_seeds, i, src_val)
	{
	  for (int j = 1; j < max_recursive_depth; j++)
	    {
	      /* Each iteration applies the operation to the value produced by
		 the previous one (SRC_VAL is updated by add_value).  */
	      tree cstval = get_val_across_arith_op (opcode, opnd1_type, opnd2,
						     src_val, op_type);
	      cstval = ipacp_value_safe_for_type (res_type, cstval);
	      if (!cstval)
		break;

	      ret |= dest_lat->add_value (cstval, cs, src_val, src_idx,
					  src_offset, &src_val, j);
	      gcc_checking_assert (src_val);
	    }
	}
      ret |= dest_lat->set_contains_variable ();
    }
  else
    for (src_val = src_lat->values; src_val; src_val = src_val->next)
      {
	/* Now we do not use self-recursively generated value as propagation
	   source, otherwise it is easy to make value space of normal lattice
	   overflow.  */
	if (src_val->self_recursion_generated_p ())
	  {
	    ret |= dest_lat->set_contains_variable ();
	    continue;
	  }

	tree cstval = get_val_across_arith_op (opcode, opnd1_type, opnd2,
					       src_val, op_type);
	cstval = ipacp_value_safe_for_type (res_type, cstval);
	if (cstval)
	  ret |= dest_lat->add_value (cstval, cs, src_val, src_idx,
				      src_offset);
	else
	  ret |= dest_lat->set_contains_variable ();
      }

  return ret;
}
2351 :
2352 : /* Propagate values through a pass-through jump function JFUNC associated with
2353 : edge CS, taking values from SRC_LAT and putting them into DEST_LAT. SRC_IDX
2354 : is the index of the source parameter. PARM_TYPE is the type of the
2355 : parameter to which the result is passed. */
2356 :
2357 : static bool
2358 71733 : propagate_vals_across_pass_through (cgraph_edge *cs, ipa_jump_func *jfunc,
2359 : ipcp_lattice<tree> *src_lat,
2360 : ipcp_lattice<tree> *dest_lat, int src_idx,
2361 : tree parm_type)
2362 : {
2363 71733 : gcc_checking_assert (parm_type);
2364 71733 : enum tree_code opcode = ipa_get_jf_pass_through_operation (jfunc);
2365 71733 : tree op_type = (opcode == NOP_EXPR) ? NULL_TREE
2366 2415 : : ipa_get_jf_pass_through_op_type (jfunc);
2367 71733 : return propagate_vals_across_arith_jfunc (cs, opcode, NULL_TREE,
2368 : ipa_get_jf_pass_through_operand (jfunc),
2369 : src_lat, dest_lat, -1, src_idx, op_type,
2370 71733 : parm_type);
2371 : }
2372 :
2373 : /* Propagate values through an ancestor jump function JFUNC associated with
2374 : edge CS, taking values from SRC_LAT and putting them into DEST_LAT. SRC_IDX
2375 : is the index of the source parameter. */
2376 :
2377 : static bool
2378 2140 : propagate_vals_across_ancestor (struct cgraph_edge *cs,
2379 : struct ipa_jump_func *jfunc,
2380 : ipcp_lattice<tree> *src_lat,
2381 : ipcp_lattice<tree> *dest_lat, int src_idx,
2382 : tree param_type)
2383 : {
2384 2140 : ipcp_value<tree> *src_val;
2385 2140 : bool ret = false;
2386 :
2387 2140 : if (ipa_edge_within_scc (cs))
2388 14 : return dest_lat->set_contains_variable ();
2389 :
2390 2437 : for (src_val = src_lat->values; src_val; src_val = src_val->next)
2391 : {
2392 311 : tree t = ipa_get_jf_ancestor_result (jfunc, src_val->value);
2393 311 : t = ipacp_value_safe_for_type (param_type, t);
2394 311 : if (t)
2395 253 : ret |= dest_lat->add_value (t, cs, src_val, src_idx);
2396 : else
2397 58 : ret |= dest_lat->set_contains_variable ();
2398 : }
2399 :
2400 : return ret;
2401 : }
2402 :
/* Propagate scalar values across jump function JFUNC that is associated with
   edge CS and put the values into DEST_LAT.  PARM_TYPE is the type of the
   parameter to which the result is passed.  */

static bool
propagate_scalar_across_jump_function (struct cgraph_edge *cs,
				       struct ipa_jump_func *jfunc,
				       ipcp_lattice<tree> *dest_lat,
				       tree param_type)
{
  /* Nothing can change a lattice that is already bottom.  */
  if (dest_lat->bottom)
    return false;

  if (jfunc->type == IPA_JF_CONST)
    {
      tree val = ipa_get_jf_constant (jfunc);
      /* The constant may still be unusable for the parameter's type.  */
      val = ipacp_value_safe_for_type (param_type, val);
      if (val)
	return dest_lat->add_value (val, cs, NULL, 0);
      else
	return dest_lat->set_contains_variable ();
    }
  else if (jfunc->type == IPA_JF_PASS_THROUGH
	   || jfunc->type == IPA_JF_ANCESTOR)
    {
      ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
      ipcp_lattice<tree> *src_lat;
      int src_idx;
      bool ret;

      if (jfunc->type == IPA_JF_PASS_THROUGH)
	src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
      else
	src_idx = ipa_get_jf_ancestor_formal_id (jfunc);

      src_lat = ipa_get_scalar_lat (caller_info, src_idx);
      if (src_lat->bottom)
	return dest_lat->set_contains_variable ();

      /* If we would need to clone the caller and cannot, do not propagate.  */
      if (!ipcp_versionable_function_p (cs->caller)
	  && (src_lat->contains_variable
	      || (src_lat->values_count > 1)))
	return dest_lat->set_contains_variable ();

      if (jfunc->type == IPA_JF_PASS_THROUGH)
	ret = propagate_vals_across_pass_through (cs, jfunc, src_lat,
						  dest_lat, src_idx,
						  param_type);
      else
	ret = propagate_vals_across_ancestor (cs, jfunc, src_lat, dest_lat,
					      src_idx, param_type);

      /* Variability of the source always carries over to the destination.  */
      if (src_lat->contains_variable)
	ret |= dest_lat->set_contains_variable ();

      return ret;
    }

  /* TODO: We currently do not handle member method pointers in IPA-CP (we only
     use it for indirect inlining), we should propagate them too.  */
  return dest_lat->set_contains_variable ();
}
2466 :
/* Propagate scalar values across jump function JFUNC that is associated with
   edge CS and describes argument IDX and put the values into DEST_LAT.  */

static bool
propagate_context_across_jump_function (cgraph_edge *cs,
			  ipa_jump_func *jfunc, int idx,
			  ipcp_lattice<ipa_polymorphic_call_context> *dest_lat)
{
  if (dest_lat->bottom)
    return false;
  ipa_edge_args *args = ipa_edge_args_sum->get (cs);
  bool ret = false;
  bool added_sth = false;
  bool type_preserved = true;

  /* EDGE_CTX is the context determined directly at the call site (if any);
     it is combined with whatever flows in through the jump function.  */
  ipa_polymorphic_call_context edge_ctx, *edge_ctx_ptr
    = ipa_get_ith_polymorhic_call_context (args, idx);

  if (edge_ctx_ptr)
    edge_ctx = *edge_ctx_ptr;

  if (jfunc->type == IPA_JF_PASS_THROUGH
      || jfunc->type == IPA_JF_ANCESTOR)
    {
      ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
      int src_idx;
      ipcp_lattice<ipa_polymorphic_call_context> *src_lat;

      /* TODO: Once we figure out how to propagate speculations, it will
	 probably be a good idea to switch to speculation if type_preserved is
	 not set instead of punting.  */
      if (jfunc->type == IPA_JF_PASS_THROUGH)
	{
	  /* Contexts only survive a pass-through that performs no
	     arithmetic on the pointer.  */
	  if (ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR)
	    goto prop_fail;
	  type_preserved = ipa_get_jf_pass_through_type_preserved (jfunc);
	  src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
	}
      else
	{
	  type_preserved = ipa_get_jf_ancestor_type_preserved (jfunc);
	  src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
	}

      src_lat = ipa_get_poly_ctx_lat (caller_info, src_idx);
      /* If we would need to clone the caller and cannot, do not propagate.  */
      if (!ipcp_versionable_function_p (cs->caller)
	  && (src_lat->contains_variable
	      || (src_lat->values_count > 1)))
	goto prop_fail;

      ipcp_value<ipa_polymorphic_call_context> *src_val;
      for (src_val = src_lat->values; src_val; src_val = src_val->next)
	{
	  ipa_polymorphic_call_context cur = src_val->value;

	  if (!type_preserved)
	    cur.possible_dynamic_type_change (cs->in_polymorphic_cdtor);
	  if (jfunc->type == IPA_JF_ANCESTOR)
	    cur.offset_by (ipa_get_jf_ancestor_offset (jfunc));
	  /* TODO: In cases we know how the context is going to be used,
	     we can improve the result by passing proper OTR_TYPE.  */
	  cur.combine_with (edge_ctx);
	  if (!cur.useless_p ())
	    {
	      /* If the source was variable and the combination brought in
		 information beyond what the edge itself provides, the
		 destination must also be marked variable.  */
	      if (src_lat->contains_variable
		  && !edge_ctx.equal_to (cur))
		ret |= dest_lat->set_contains_variable ();
	      ret |= dest_lat->add_value (cur, cs, src_val, src_idx);
	      added_sth = true;
	    }
	}
    }

 prop_fail:
  /* If propagation through the jump function yielded nothing, fall back to
     the context known at the edge, or mark the lattice variable.  */
  if (!added_sth)
    {
      if (!edge_ctx.useless_p ())
	ret |= dest_lat->add_value (edge_ctx, cs);
      else
	ret |= dest_lat->set_contains_variable ();
    }

  return ret;
}
2552 :
/* Propagate bits across jfunc that is associated with
   edge cs and update dest_lattice accordingly.  */

bool
propagate_bits_across_jump_function (cgraph_edge *cs, int idx,
				     ipa_jump_func *jfunc,
				     ipcp_bits_lattice *dest_lattice)
{
  if (dest_lattice->bottom_p ())
    return false;

  enum availability availability;
  cgraph_node *callee = cs->callee->function_symbol (&availability);
  ipa_node_params *callee_info = ipa_node_params_sum->get (callee);
  tree parm_type = ipa_get_type (callee_info, idx);

  /* For K&R C programs, ipa_get_type() could return NULL_TREE.  Avoid the
     transform for these cases.  Similarly, we can have bad type mismatches
     with LTO, avoid doing anything with those too.  */
  if (!parm_type
      || (!INTEGRAL_TYPE_P (parm_type) && !POINTER_TYPE_P (parm_type)))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Setting dest_lattice to bottom, because type of "
		 "param %i of %s is NULL or unsuitable for bits propagation\n",
		 idx, cs->callee->dump_name ());

      return dest_lattice->set_to_bottom ();
    }

  if (jfunc->type == IPA_JF_PASS_THROUGH
      || jfunc->type == IPA_JF_ANCESTOR)
    {
      ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
      tree operand = NULL_TREE;
      tree op_type = NULL_TREE;
      enum tree_code code;
      unsigned src_idx;
      bool keep_null = false;

      if (jfunc->type == IPA_JF_PASS_THROUGH)
	{
	  code = ipa_get_jf_pass_through_operation (jfunc);
	  src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
	  if (code != NOP_EXPR)
	    {
	      operand = ipa_get_jf_pass_through_operand (jfunc);
	      op_type = ipa_get_jf_pass_through_op_type (jfunc);
	    }
	}
      else
	{
	  /* An ancestor jump function is modelled as adding its byte offset
	     to the pointer.  */
	  code = POINTER_PLUS_EXPR;
	  src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
	  unsigned HOST_WIDE_INT offset
	    = ipa_get_jf_ancestor_offset (jfunc) / BITS_PER_UNIT;
	  keep_null = (ipa_get_jf_ancestor_keep_null (jfunc) || !offset);
	  operand = build_int_cstu (size_type_node, offset);
	}

      class ipcp_param_lattices *src_lats
	= ipa_get_parm_lattices (caller_info, src_idx);

      /* Try to propagate bits if src_lattice is bottom, but jfunc is known.
	 for eg consider:
	 int f(int x)
	 {
	   g (x & 0xff);
	 }
	 Assume lattice for x is bottom, however we can still propagate
	 result of x & 0xff == 0xff, which gets computed during ccp1 pass
	 and we store it in jump function during analysis stage.  */

      if (!src_lats->bits_lattice.bottom_p ()
	  && !src_lats->bits_lattice.recipient_only_p ())
	{
	  if (!op_type)
	    op_type = ipa_get_type (caller_info, src_idx);

	  unsigned precision = TYPE_PRECISION (op_type);
	  signop sgn = TYPE_SIGN (op_type);
	  /* A keep_null ancestor may pass NULL through unmodified, so the
	     known one-bits must be dropped unless the source is known to be
	     non-zero.  */
	  bool drop_all_ones
	    = keep_null && !src_lats->bits_lattice.known_nonzero_p ();

	  return dest_lattice->meet_with (src_lats->bits_lattice, precision,
					  sgn, code, operand, drop_all_ones);
	}
    }

  /* Fall back to the bitmask carried by the value range recorded in the
     jump function itself.  */
  value_range vr (parm_type);
  if (jfunc->m_vr)
    {
      jfunc->m_vr->get_vrange (vr);
      if (!vr.undefined_p () && !vr.varying_p ())
	{
	  irange_bitmask bm = vr.get_bitmask ();
	  widest_int mask
	    = widest_int::from (bm.mask (), TYPE_SIGN (parm_type));
	  widest_int value
	    = widest_int::from (bm.value (), TYPE_SIGN (parm_type));
	  return dest_lattice->meet_with (value, mask,
					  TYPE_PRECISION (parm_type));
	}
    }
  return dest_lattice->set_to_bottom ();
}
2659 :
2660 : /* Propagate value range across jump function JFUNC that is associated with
2661 : edge CS with param of callee of PARAM_TYPE and update DEST_PLATS
2662 : accordingly. */
2663 :
2664 : static bool
2665 3839990 : propagate_vr_across_jump_function (cgraph_edge *cs, ipa_jump_func *jfunc,
2666 : class ipcp_param_lattices *dest_plats,
2667 : tree param_type)
2668 : {
2669 3839990 : ipcp_vr_lattice *dest_lat = &dest_plats->m_value_range;
2670 :
2671 3839990 : if (dest_lat->bottom_p ())
2672 : return false;
2673 :
2674 625055 : if (!param_type
2675 625055 : || !ipa_vr_supported_type_p (param_type))
2676 29107 : return dest_lat->set_to_bottom ();
2677 :
2678 595948 : value_range vr (param_type);
2679 595948 : vr.set_varying (param_type);
2680 595948 : if (jfunc->m_vr)
2681 514342 : ipa_vr_operation_and_type_effects (vr, *jfunc->m_vr, NOP_EXPR,
2682 : param_type,
2683 514342 : jfunc->m_vr->type ());
2684 :
2685 595948 : if (jfunc->type == IPA_JF_PASS_THROUGH)
2686 : {
2687 89970 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
2688 89970 : int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
2689 89970 : class ipcp_param_lattices *src_lats
2690 89970 : = ipa_get_parm_lattices (caller_info, src_idx);
2691 89970 : tree operand_type = ipa_get_type (caller_info, src_idx);
2692 :
2693 89970 : if (src_lats->m_value_range.bottom_p ()
2694 89970 : || src_lats->m_value_range.recipient_only_p ())
2695 74108 : return dest_lat->set_to_bottom ();
2696 :
2697 15862 : if (ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR
2698 15862 : || !ipa_edge_within_scc (cs))
2699 15367 : ipa_vr_intersect_with_arith_jfunc (vr, jfunc, cs->caller,
2700 15367 : src_lats->m_value_range.m_vr,
2701 : operand_type, param_type);
2702 : }
2703 :
2704 521840 : if (!vr.undefined_p () && !vr.varying_p ())
2705 494340 : return dest_lat->meet_with (vr);
2706 : else
2707 27500 : return dest_lat->set_to_bottom ();
2708 595948 : }
2709 :
2710 : /* If DEST_PLATS already has aggregate items, check that aggs_by_ref matches
2711 : NEW_AGGS_BY_REF and if not, mark all aggs as bottoms and return true (in all
2712 : other cases, return false). If there are no aggregate items, set
2713 : aggs_by_ref to NEW_AGGS_BY_REF. */
2714 :
2715 : static bool
2716 41803 : set_check_aggs_by_ref (class ipcp_param_lattices *dest_plats,
2717 : bool new_aggs_by_ref)
2718 : {
2719 0 : if (dest_plats->aggs)
2720 : {
2721 22130 : if (dest_plats->aggs_by_ref != new_aggs_by_ref)
2722 : {
2723 0 : set_agg_lats_to_bottom (dest_plats);
2724 0 : return true;
2725 : }
2726 : }
2727 : else
2728 19673 : dest_plats->aggs_by_ref = new_aggs_by_ref;
2729 : return false;
2730 : }
2731 :
2732 : /* Walk aggregate lattices in DEST_PLATS from ***AGLAT on, until ***aglat is an
2733 : already existing lattice for the given OFFSET and SIZE, marking all skipped
2734 : lattices as containing variable and checking for overlaps. If there is no
2735 : already existing lattice for the OFFSET and VAL_SIZE, create one, initialize
2736 : it with offset, size and contains_variable to PRE_EXISTING, and return true,
2737 : unless there are too many already. If there are two many, return false. If
2738 : there are overlaps turn whole DEST_PLATS to bottom and return false. If any
2739 : skipped lattices were newly marked as containing variable, set *CHANGE to
2740 : true. MAX_AGG_ITEMS is the maximum number of lattices. */
2741 :
2742 : static bool
2743 112795 : merge_agg_lats_step (class ipcp_param_lattices *dest_plats,
2744 : HOST_WIDE_INT offset, HOST_WIDE_INT val_size,
2745 : struct ipcp_agg_lattice ***aglat,
2746 : bool pre_existing, bool *change, int max_agg_items)
2747 : {
2748 112795 : gcc_checking_assert (offset >= 0);
2749 :
2750 116863 : while (**aglat && (**aglat)->offset < offset)
2751 : {
2752 4068 : if ((**aglat)->offset + (**aglat)->size > offset)
2753 : {
2754 0 : set_agg_lats_to_bottom (dest_plats);
2755 0 : return false;
2756 : }
2757 4068 : *change |= (**aglat)->set_contains_variable ();
2758 4068 : *aglat = &(**aglat)->next;
2759 : }
2760 :
2761 112795 : if (**aglat && (**aglat)->offset == offset)
2762 : {
2763 55474 : if ((**aglat)->size != val_size)
2764 : {
2765 13 : set_agg_lats_to_bottom (dest_plats);
2766 13 : return false;
2767 : }
2768 55461 : gcc_assert (!(**aglat)->next
2769 : || (**aglat)->next->offset >= offset + val_size);
2770 : return true;
2771 : }
2772 : else
2773 : {
2774 57321 : struct ipcp_agg_lattice *new_al;
2775 :
2776 57321 : if (**aglat && (**aglat)->offset < offset + val_size)
2777 : {
2778 3 : set_agg_lats_to_bottom (dest_plats);
2779 3 : return false;
2780 : }
2781 57318 : if (dest_plats->aggs_count == max_agg_items)
2782 : return false;
2783 57279 : dest_plats->aggs_count++;
2784 57279 : new_al = ipcp_agg_lattice_pool.allocate ();
2785 :
2786 57279 : new_al->offset = offset;
2787 57279 : new_al->size = val_size;
2788 57279 : new_al->contains_variable = pre_existing;
2789 :
2790 57279 : new_al->next = **aglat;
2791 57279 : **aglat = new_al;
2792 57279 : return true;
2793 : }
2794 : }
2795 :
2796 : /* Set all AGLAT and all other aggregate lattices reachable by next pointers as
2797 : containing an unknown value. */
2798 :
2799 : static bool
2800 41785 : set_chain_of_aglats_contains_variable (struct ipcp_agg_lattice *aglat)
2801 : {
2802 41785 : bool ret = false;
2803 44231 : while (aglat)
2804 : {
2805 2446 : ret |= aglat->set_contains_variable ();
2806 2446 : aglat = aglat->next;
2807 : }
2808 41785 : return ret;
2809 : }
2810 :
2811 : /* Merge existing aggregate lattices in SRC_PLATS to DEST_PLATS, subtracting
2812 : DELTA_OFFSET. CS is the call graph edge and SRC_IDX the index of the source
2813 : parameter used for lattice value sources. Return true if DEST_PLATS changed
2814 : in any way. */
2815 :
2816 : static bool
2817 3958 : merge_aggregate_lattices (struct cgraph_edge *cs,
2818 : class ipcp_param_lattices *dest_plats,
2819 : class ipcp_param_lattices *src_plats,
2820 : int src_idx, HOST_WIDE_INT offset_delta)
2821 : {
2822 3958 : bool pre_existing = dest_plats->aggs != NULL;
2823 3958 : struct ipcp_agg_lattice **dst_aglat;
2824 3958 : bool ret = false;
2825 :
2826 3958 : if (set_check_aggs_by_ref (dest_plats, src_plats->aggs_by_ref))
2827 0 : return true;
2828 3958 : if (src_plats->aggs_bottom)
2829 2 : return set_agg_lats_contain_variable (dest_plats);
2830 3956 : if (src_plats->aggs_contain_variable)
2831 2294 : ret |= set_agg_lats_contain_variable (dest_plats);
2832 3956 : dst_aglat = &dest_plats->aggs;
2833 :
2834 3956 : int max_agg_items = opt_for_fn (cs->callee->function_symbol ()->decl,
2835 : param_ipa_max_agg_items);
2836 3956 : for (struct ipcp_agg_lattice *src_aglat = src_plats->aggs;
2837 11585 : src_aglat;
2838 7629 : src_aglat = src_aglat->next)
2839 : {
2840 7629 : HOST_WIDE_INT new_offset = src_aglat->offset - offset_delta;
2841 :
2842 7629 : if (new_offset < 0)
2843 49 : continue;
2844 7580 : if (merge_agg_lats_step (dest_plats, new_offset, src_aglat->size,
2845 : &dst_aglat, pre_existing, &ret, max_agg_items))
2846 : {
2847 7576 : struct ipcp_agg_lattice *new_al = *dst_aglat;
2848 :
2849 7576 : dst_aglat = &(*dst_aglat)->next;
2850 7576 : if (src_aglat->bottom)
2851 : {
2852 0 : ret |= new_al->set_contains_variable ();
2853 0 : continue;
2854 : }
2855 7576 : if (src_aglat->contains_variable)
2856 4417 : ret |= new_al->set_contains_variable ();
2857 7576 : for (ipcp_value<tree> *val = src_aglat->values;
2858 11729 : val;
2859 4153 : val = val->next)
2860 4153 : ret |= new_al->add_value (val->value, cs, val, src_idx,
2861 : src_aglat->offset);
2862 : }
2863 4 : else if (dest_plats->aggs_bottom)
2864 : return true;
2865 : }
2866 3956 : ret |= set_chain_of_aglats_contains_variable (*dst_aglat);
2867 3956 : return ret;
2868 : }
2869 :
2870 : /* Determine whether there is anything to propagate FROM SRC_PLATS through a
2871 : pass-through JFUNC and if so, whether it has conform and conforms to the
2872 : rules about propagating values passed by reference. */
2873 :
2874 : static bool
2875 170830 : agg_pass_through_permissible_p (class ipcp_param_lattices *src_plats,
2876 : struct ipa_jump_func *jfunc)
2877 : {
2878 170830 : return src_plats->aggs
2879 170830 : && (!src_plats->aggs_by_ref
2880 4971 : || ipa_get_jf_pass_through_agg_preserved (jfunc));
2881 : }
2882 :
2883 : /* Propagate values through ITEM, jump function for a part of an aggregate,
2884 : into corresponding aggregate lattice AGLAT. CS is the call graph edge
2885 : associated with the jump function. Return true if AGLAT changed in any
2886 : way. */
2887 :
2888 : static bool
2889 105164 : propagate_aggregate_lattice (struct cgraph_edge *cs,
2890 : struct ipa_agg_jf_item *item,
2891 : struct ipcp_agg_lattice *aglat)
2892 : {
2893 105164 : class ipa_node_params *caller_info;
2894 105164 : class ipcp_param_lattices *src_plats;
2895 105164 : struct ipcp_lattice<tree> *src_lat;
2896 105164 : HOST_WIDE_INT src_offset;
2897 105164 : int src_idx;
2898 105164 : tree load_type;
2899 105164 : bool ret;
2900 :
2901 105164 : if (item->jftype == IPA_JF_CONST)
2902 : {
2903 93802 : tree value = item->value.constant;
2904 :
2905 93802 : gcc_checking_assert (is_gimple_ip_invariant (value));
2906 93802 : return aglat->add_value (value, cs, NULL, 0);
2907 : }
2908 :
2909 11362 : gcc_checking_assert (item->jftype == IPA_JF_PASS_THROUGH
2910 : || item->jftype == IPA_JF_LOAD_AGG);
2911 :
2912 11362 : caller_info = ipa_node_params_sum->get (cs->caller);
2913 11362 : src_idx = item->value.pass_through.formal_id;
2914 11362 : src_plats = ipa_get_parm_lattices (caller_info, src_idx);
2915 :
2916 11362 : if (item->jftype == IPA_JF_PASS_THROUGH)
2917 : {
2918 3253 : load_type = NULL_TREE;
2919 3253 : src_lat = &src_plats->itself;
2920 3253 : src_offset = -1;
2921 : }
2922 : else
2923 : {
2924 8109 : HOST_WIDE_INT load_offset = item->value.load_agg.offset;
2925 8109 : struct ipcp_agg_lattice *src_aglat;
2926 :
2927 12490 : for (src_aglat = src_plats->aggs; src_aglat; src_aglat = src_aglat->next)
2928 8158 : if (src_aglat->offset >= load_offset)
2929 : break;
2930 :
2931 8109 : load_type = item->value.load_agg.type;
2932 8109 : if (!src_aglat
2933 3777 : || src_aglat->offset > load_offset
2934 3441 : || src_aglat->size != tree_to_shwi (TYPE_SIZE (load_type))
2935 11550 : || src_plats->aggs_by_ref != item->value.load_agg.by_ref)
2936 4668 : return aglat->set_contains_variable ();
2937 :
2938 : src_lat = src_aglat;
2939 : src_offset = load_offset;
2940 : }
2941 :
2942 6694 : if (src_lat->bottom
2943 6694 : || (!ipcp_versionable_function_p (cs->caller)
2944 6694 : && !src_lat->is_single_const ()))
2945 2070 : return aglat->set_contains_variable ();
2946 :
2947 4624 : ret = propagate_vals_across_arith_jfunc (cs,
2948 : item->value.pass_through.operation,
2949 : load_type,
2950 : item->value.pass_through.operand,
2951 : src_lat, aglat,
2952 : src_offset,
2953 : src_idx,
2954 : item->value.pass_through.op_type,
2955 : item->type);
2956 :
2957 4624 : if (src_lat->contains_variable)
2958 2635 : ret |= aglat->set_contains_variable ();
2959 :
2960 : return ret;
2961 : }
2962 :
2963 : /* Propagate scalar values across jump function JFUNC that is associated with
2964 : edge CS and put the values into DEST_LAT. */
2965 :
2966 : static bool
2967 3840835 : propagate_aggs_across_jump_function (struct cgraph_edge *cs,
2968 : struct ipa_jump_func *jfunc,
2969 : class ipcp_param_lattices *dest_plats)
2970 : {
2971 3840835 : bool ret = false;
2972 :
2973 3840835 : if (dest_plats->aggs_bottom)
2974 : return false;
2975 :
2976 912958 : if (jfunc->type == IPA_JF_PASS_THROUGH
2977 912958 : && ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
2978 : {
2979 170830 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
2980 170830 : int src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
2981 170830 : class ipcp_param_lattices *src_plats;
2982 :
2983 170830 : src_plats = ipa_get_parm_lattices (caller_info, src_idx);
2984 170830 : if (agg_pass_through_permissible_p (src_plats, jfunc))
2985 : {
2986 : /* Currently we do not produce clobber aggregate jump
2987 : functions, replace with merging when we do. */
2988 3834 : gcc_assert (!jfunc->agg.items);
2989 3834 : ret |= merge_aggregate_lattices (cs, dest_plats, src_plats,
2990 : src_idx, 0);
2991 3834 : return ret;
2992 : }
2993 : }
2994 742128 : else if (jfunc->type == IPA_JF_ANCESTOR
2995 742128 : && ipa_get_jf_ancestor_agg_preserved (jfunc))
2996 : {
2997 1161 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
2998 1161 : int src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
2999 1161 : class ipcp_param_lattices *src_plats;
3000 :
3001 1161 : src_plats = ipa_get_parm_lattices (caller_info, src_idx);
3002 1161 : if (src_plats->aggs && src_plats->aggs_by_ref)
3003 : {
3004 : /* Currently we do not produce clobber aggregate jump
3005 : functions, replace with merging when we do. */
3006 124 : gcc_assert (!jfunc->agg.items);
3007 124 : ret |= merge_aggregate_lattices (cs, dest_plats, src_plats, src_idx,
3008 : ipa_get_jf_ancestor_offset (jfunc));
3009 : }
3010 1037 : else if (!src_plats->aggs_by_ref)
3011 1033 : ret |= set_agg_lats_to_bottom (dest_plats);
3012 : else
3013 4 : ret |= set_agg_lats_contain_variable (dest_plats);
3014 1161 : return ret;
3015 : }
3016 :
3017 907963 : if (jfunc->agg.items)
3018 : {
3019 37845 : bool pre_existing = dest_plats->aggs != NULL;
3020 37845 : struct ipcp_agg_lattice **aglat = &dest_plats->aggs;
3021 37845 : struct ipa_agg_jf_item *item;
3022 37845 : int i;
3023 :
3024 37845 : if (set_check_aggs_by_ref (dest_plats, jfunc->agg.by_ref))
3025 16 : return true;
3026 :
3027 37845 : int max_agg_items = opt_for_fn (cs->callee->function_symbol ()->decl,
3028 : param_ipa_max_agg_items);
3029 143364 : FOR_EACH_VEC_ELT (*jfunc->agg.items, i, item)
3030 : {
3031 105535 : HOST_WIDE_INT val_size;
3032 :
3033 105535 : if (item->offset < 0 || item->jftype == IPA_JF_UNKNOWN)
3034 320 : continue;
3035 105215 : val_size = tree_to_shwi (TYPE_SIZE (item->type));
3036 :
3037 105215 : if (merge_agg_lats_step (dest_plats, item->offset, val_size,
3038 : &aglat, pre_existing, &ret, max_agg_items))
3039 : {
3040 105164 : ret |= propagate_aggregate_lattice (cs, item, *aglat);
3041 105164 : aglat = &(*aglat)->next;
3042 : }
3043 51 : else if (dest_plats->aggs_bottom)
3044 : return true;
3045 : }
3046 :
3047 75658 : ret |= set_chain_of_aglats_contains_variable (*aglat);
3048 : }
3049 : else
3050 870118 : ret |= set_agg_lats_contain_variable (dest_plats);
3051 :
3052 907947 : return ret;
3053 : }
3054 :
3055 : /* Return true if on the way cfrom CS->caller to the final (non-alias and
3056 : non-thunk) destination, the call passes through a thunk. */
3057 :
3058 : static bool
3059 1902866 : call_passes_through_thunk (cgraph_edge *cs)
3060 : {
3061 1902866 : cgraph_node *alias_or_thunk = cs->callee;
3062 2038373 : while (alias_or_thunk->alias)
3063 135507 : alias_or_thunk = alias_or_thunk->get_alias_target ();
3064 1902866 : return alias_or_thunk->thunk;
3065 : }
3066 :
3067 : /* Propagate constants from the caller to the callee of CS. INFO describes the
3068 : caller. */
3069 :
3070 : static bool
3071 5238817 : propagate_constants_across_call (struct cgraph_edge *cs)
3072 : {
3073 5238817 : class ipa_node_params *callee_info;
3074 5238817 : enum availability availability;
3075 5238817 : cgraph_node *callee;
3076 5238817 : class ipa_edge_args *args;
3077 5238817 : bool ret = false;
3078 5238817 : int i, args_count, parms_count;
3079 :
3080 5238817 : callee = cs->callee->function_symbol (&availability);
3081 5238817 : if (!callee->definition)
3082 : return false;
3083 1931631 : gcc_checking_assert (callee->has_gimple_body_p ());
3084 1931631 : callee_info = ipa_node_params_sum->get (callee);
3085 1931631 : if (!callee_info)
3086 : return false;
3087 :
3088 1923115 : args = ipa_edge_args_sum->get (cs);
3089 1923115 : parms_count = ipa_get_param_count (callee_info);
3090 1730714 : if (parms_count == 0)
3091 : return false;
3092 1730714 : if (!args
3093 1730470 : || !opt_for_fn (cs->caller->decl, flag_ipa_cp)
3094 3461184 : || !opt_for_fn (cs->caller->decl, optimize))
3095 : {
3096 752 : for (i = 0; i < parms_count; i++)
3097 508 : ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
3098 : i));
3099 : return ret;
3100 : }
3101 1730470 : args_count = ipa_get_cs_argument_count (args);
3102 :
3103 : /* If this call goes through a thunk we must not propagate to the first (0th)
3104 : parameter. However, we might need to uncover a thunk from below a series
3105 : of aliases first. */
3106 1730470 : if (call_passes_through_thunk (cs))
3107 : {
3108 227 : ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info,
3109 : 0));
3110 227 : i = 1;
3111 : }
3112 : else
3113 : i = 0;
3114 :
3115 5708866 : for (; (i < args_count) && (i < parms_count); i++)
3116 : {
3117 3978396 : struct ipa_jump_func *jump_func = ipa_get_ith_jump_func (args, i);
3118 3978396 : class ipcp_param_lattices *dest_plats;
3119 3978396 : tree param_type = ipa_get_type (callee_info, i);
3120 :
3121 3978396 : dest_plats = ipa_get_parm_lattices (callee_info, i);
3122 3978396 : if (availability == AVAIL_INTERPOSABLE)
3123 137561 : ret |= set_all_contains_variable (dest_plats);
3124 : else
3125 : {
3126 3840835 : ret |= propagate_scalar_across_jump_function (cs, jump_func,
3127 : &dest_plats->itself,
3128 : param_type);
3129 3840835 : ret |= propagate_context_across_jump_function (cs, jump_func, i,
3130 : &dest_plats->ctxlat);
3131 3840835 : ret
3132 3840835 : |= propagate_bits_across_jump_function (cs, i, jump_func,
3133 : &dest_plats->bits_lattice);
3134 3840835 : ret |= propagate_aggs_across_jump_function (cs, jump_func,
3135 : dest_plats);
3136 3840835 : if (opt_for_fn (callee->decl, flag_ipa_vrp))
3137 3839990 : ret |= propagate_vr_across_jump_function (cs, jump_func,
3138 : dest_plats, param_type);
3139 : else
3140 845 : ret |= dest_plats->m_value_range.set_to_bottom ();
3141 : }
3142 : }
3143 1730652 : for (; i < parms_count; i++)
3144 182 : ret |= set_all_contains_variable (ipa_get_parm_lattices (callee_info, i));
3145 :
3146 : return ret;
3147 : }
3148 :
3149 : /* If an indirect edge IE can be turned into a direct one based on KNOWN_VALS
3150 : KNOWN_CONTEXTS, and known aggregates either in AVS or KNOWN_AGGS return
3151 : the destination. The latter three can be NULL. If AGG_REPS is not NULL,
3152 : KNOWN_AGGS is ignored. */
3153 :
3154 : static tree
3155 1588530 : ipa_get_indirect_edge_target_1 (struct cgraph_edge *ie,
3156 : const vec<tree> &known_csts,
3157 : const vec<ipa_polymorphic_call_context> &known_contexts,
3158 : const ipa_argagg_value_list &avs,
3159 : bool *speculative)
3160 : {
3161 1588530 : int param_index = ie->indirect_info->param_index;
3162 1588530 : *speculative = false;
3163 :
3164 1588530 : if (param_index == -1)
3165 : return NULL_TREE;
3166 :
3167 548713 : if (cgraph_simple_indirect_info *sii
3168 548713 : = dyn_cast <cgraph_simple_indirect_info *> (ie->indirect_info))
3169 : {
3170 309378 : tree t = NULL;
3171 :
3172 309378 : if (sii->agg_contents)
3173 : {
3174 73622 : t = NULL;
3175 73622 : if ((unsigned) param_index < known_csts.length ()
3176 73622 : && known_csts[param_index])
3177 63121 : t = ipa_find_agg_cst_from_init (known_csts[param_index],
3178 : sii->offset,
3179 : sii->by_ref);
3180 :
3181 73622 : if (!t && sii->guaranteed_unmodified)
3182 67494 : t = avs.get_value (param_index, sii->offset / BITS_PER_UNIT,
3183 : sii->by_ref);
3184 : }
3185 235756 : else if ((unsigned) param_index < known_csts.length ())
3186 235756 : t = known_csts[param_index];
3187 :
3188 309308 : if (t
3189 204380 : && TREE_CODE (t) == ADDR_EXPR
3190 513473 : && TREE_CODE (TREE_OPERAND (t, 0)) == FUNCTION_DECL)
3191 204165 : return TREE_OPERAND (t, 0);
3192 : else
3193 105213 : return NULL_TREE;
3194 : }
3195 :
3196 239335 : if (!opt_for_fn (ie->caller->decl, flag_devirtualize))
3197 : return NULL_TREE;
3198 :
3199 239335 : cgraph_polymorphic_indirect_info *pii
3200 239335 : = as_a <cgraph_polymorphic_indirect_info *> (ie->indirect_info);
3201 239335 : if (!pii->usable_p ())
3202 : return NULL_TREE;
3203 :
3204 239335 : HOST_WIDE_INT anc_offset = pii->offset;
3205 239335 : tree t = NULL;
3206 239335 : tree target = NULL;
3207 239335 : if ((unsigned) param_index < known_csts.length ()
3208 239335 : && known_csts[param_index])
3209 28356 : t = ipa_find_agg_cst_from_init (known_csts[param_index], anc_offset, true);
3210 :
3211 : /* Try to work out value of virtual table pointer value in replacements. */
3212 : /* or known aggregate values. */
3213 28356 : if (!t)
3214 239326 : t = avs.get_value (param_index, anc_offset / BITS_PER_UNIT, true);
3215 :
3216 : /* If we found the virtual table pointer, lookup the target. */
3217 239326 : if (t)
3218 : {
3219 18471 : tree vtable;
3220 18471 : unsigned HOST_WIDE_INT offset;
3221 18471 : if (vtable_pointer_value_to_vtable (t, &vtable, &offset))
3222 : {
3223 18471 : bool can_refer;
3224 18471 : target = gimple_get_virt_method_for_vtable (pii->otr_token, vtable,
3225 : offset, &can_refer);
3226 18471 : if (can_refer)
3227 : {
3228 18408 : if (!target
3229 18408 : || fndecl_built_in_p (target, BUILT_IN_UNREACHABLE)
3230 36696 : || !possible_polymorphic_call_target_p
3231 18288 : (ie, cgraph_node::get (target)))
3232 : {
3233 : /* Do not speculate builtin_unreachable, it is stupid! */
3234 237 : if (pii->vptr_changed)
3235 15908 : return NULL;
3236 237 : target = ipa_impossible_devirt_target (ie, target);
3237 : }
3238 18408 : *speculative = pii->vptr_changed;
3239 18408 : if (!*speculative)
3240 : return target;
3241 : }
3242 : }
3243 : }
3244 :
3245 : /* Do we know the constant value of pointer? */
3246 223427 : if (!t && (unsigned) param_index < known_csts.length ())
3247 46632 : t = known_csts[param_index];
3248 :
3249 223427 : ipa_polymorphic_call_context context;
3250 223427 : if (known_contexts.length () > (unsigned int) param_index)
3251 : {
3252 223057 : context = known_contexts[param_index];
3253 223057 : context.offset_by (anc_offset);
3254 223057 : if (pii->vptr_changed)
3255 46552 : context.possible_dynamic_type_change (ie->in_polymorphic_cdtor,
3256 : pii->otr_type);
3257 223057 : if (t)
3258 : {
3259 13140 : ipa_polymorphic_call_context ctx2
3260 13140 : = ipa_polymorphic_call_context (t, pii->otr_type, anc_offset);
3261 26280 : if (!ctx2.useless_p ())
3262 11621 : context.combine_with (ctx2, pii->otr_type);
3263 : }
3264 : }
3265 370 : else if (t)
3266 : {
3267 21 : context = ipa_polymorphic_call_context (t, pii->otr_type, anc_offset);
3268 21 : if (pii->vptr_changed)
3269 6 : context.possible_dynamic_type_change (ie->in_polymorphic_cdtor,
3270 : pii->otr_type);
3271 : }
3272 : else
3273 : return NULL_TREE;
3274 :
3275 223078 : vec <cgraph_node *>targets;
3276 223078 : bool final;
3277 :
3278 223078 : targets = possible_polymorphic_call_targets (pii->otr_type, pii->otr_token,
3279 : context, &final);
3280 235688 : if (!final || targets.length () > 1)
3281 : {
3282 211102 : struct cgraph_node *node;
3283 211102 : if (*speculative)
3284 : return target;
3285 211074 : if (!opt_for_fn (ie->caller->decl, flag_devirtualize_speculatively)
3286 211074 : || ie->speculative || !ie->maybe_hot_p ())
3287 68463 : return NULL;
3288 142611 : node = try_speculative_devirtualization (pii->otr_type, pii->otr_token,
3289 : context);
3290 142611 : if (node)
3291 : {
3292 991 : *speculative = true;
3293 991 : target = node->decl;
3294 : }
3295 : else
3296 : return NULL;
3297 : }
3298 : else
3299 : {
3300 11976 : *speculative = false;
3301 11976 : if (targets.length () == 1)
3302 11937 : target = targets[0]->decl;
3303 : else
3304 39 : target = ipa_impossible_devirt_target (ie, NULL_TREE);
3305 : }
3306 :
3307 12967 : if (target && !possible_polymorphic_call_target_p (ie,
3308 : cgraph_node::get (target)))
3309 : {
3310 55 : if (*speculative)
3311 : return NULL;
3312 40 : target = ipa_impossible_devirt_target (ie, target);
3313 : }
3314 :
3315 : return target;
3316 : }
3317 :
3318 : /* If an indirect edge IE can be turned into a direct one based on data in
3319 : AVALS, return the destination. Store into *SPECULATIVE a boolean determinig
3320 : whether the discovered target is only speculative guess. */
3321 :
3322 : tree
3323 1413873 : ipa_get_indirect_edge_target (struct cgraph_edge *ie,
3324 : ipa_call_arg_values *avals,
3325 : bool *speculative)
3326 : {
3327 1413873 : ipa_argagg_value_list avl (avals);
3328 1413873 : return ipa_get_indirect_edge_target_1 (ie, avals->m_known_vals,
3329 1413873 : avals->m_known_contexts,
3330 1413873 : avl, speculative);
3331 : }
3332 :
3333 : /* Calculate devirtualization time bonus for NODE, assuming we know information
3334 : about arguments stored in AVALS.
3335 :
3336 : FIXME: This function will also consider devirtualization of calls that are
3337 : known to be dead in the clone. */
3338 :
3339 : static sreal
3340 1505688 : devirtualization_time_bonus (struct cgraph_node *node,
3341 : ipa_auto_call_arg_values *avals)
3342 : {
3343 1505688 : struct cgraph_edge *ie;
3344 1505688 : sreal res = 0;
3345 :
3346 1678443 : for (ie = node->indirect_calls; ie; ie = ie->next_callee)
3347 : {
3348 172755 : struct cgraph_node *callee;
3349 172755 : class ipa_fn_summary *isummary;
3350 172755 : enum availability avail;
3351 172755 : tree target;
3352 172755 : bool speculative;
3353 :
3354 172755 : ipa_argagg_value_list avl (avals);
3355 172755 : target = ipa_get_indirect_edge_target_1 (ie, avals->m_known_vals,
3356 : avals->m_known_contexts,
3357 : avl, &speculative);
3358 172755 : if (!target)
3359 171872 : continue;
3360 :
3361 : /* Only bare minimum benefit for clearly un-inlineable targets. */
3362 3110 : res = res + ie->combined_sreal_frequency ();
3363 3110 : callee = cgraph_node::get (target);
3364 3110 : if (!callee || !callee->definition)
3365 600 : continue;
3366 2510 : callee = callee->function_symbol (&avail);
3367 2510 : if (avail < AVAIL_AVAILABLE)
3368 0 : continue;
3369 2510 : isummary = ipa_fn_summaries->get (callee);
3370 2510 : if (!isummary || !isummary->inlinable)
3371 66 : continue;
3372 :
3373 2444 : int savings = 0;
3374 2444 : int size = ipa_size_summaries->get (callee)->size;
3375 : /* FIXME: The values below need re-considering and perhaps also
3376 : integrating into the cost metrics, at lest in some very basic way. */
3377 2444 : int max_inline_insns_auto
3378 2444 : = opt_for_fn (callee->decl, param_max_inline_insns_auto);
3379 2444 : if (size <= max_inline_insns_auto / 4)
3380 327 : savings = 31 / ((int)speculative + 1);
3381 2117 : else if (size <= max_inline_insns_auto / 2)
3382 373 : savings = 15 / ((int)speculative + 1);
3383 3305 : else if (size <= max_inline_insns_auto
3384 1744 : || DECL_DECLARED_INLINE_P (callee->decl))
3385 183 : savings = 7 / ((int)speculative + 1);
3386 : else
3387 1561 : continue;
3388 883 : res = res + ie->combined_sreal_frequency () * (sreal) savings;
3389 : }
3390 :
3391 1505688 : return res;
3392 : }
3393 :
3394 : /* Return time bonus incurred because of hints stored in ESTIMATES. */
3395 :
3396 : static sreal
3397 280530 : hint_time_bonus (cgraph_node *node, const ipa_call_estimates &estimates)
3398 : {
3399 280530 : sreal result = 0;
3400 280530 : ipa_hints hints = estimates.hints;
3401 280530 : if (hints & (INLINE_HINT_loop_iterations | INLINE_HINT_loop_stride))
3402 26822 : result += opt_for_fn (node->decl, param_ipa_cp_loop_hint_bonus);
3403 :
3404 280530 : sreal bonus_for_one = opt_for_fn (node->decl, param_ipa_cp_loop_hint_bonus);
3405 :
3406 280530 : if (hints & INLINE_HINT_loop_iterations)
3407 18805 : result += estimates.loops_with_known_iterations * bonus_for_one;
3408 :
3409 280530 : if (hints & INLINE_HINT_loop_stride)
3410 10604 : result += estimates.loops_with_known_strides * bonus_for_one;
3411 :
3412 280530 : return result;
3413 : }
3414 :
3415 : /* If there is a reason to penalize the function described by INFO in the
3416 : cloning goodness evaluation, do so. */
3417 :
3418 : static inline sreal
3419 94049 : incorporate_penalties (cgraph_node *node, ipa_node_params *info,
3420 : sreal evaluation)
3421 : {
3422 94049 : if (info->node_within_scc && !info->node_is_self_scc)
3423 1988 : evaluation = (evaluation
3424 1988 : * (100 - opt_for_fn (node->decl,
3425 3976 : param_ipa_cp_recursion_penalty))) / 100;
3426 :
3427 94049 : if (info->node_calling_single_call)
3428 5771 : evaluation = (evaluation
3429 5771 : * (100 - opt_for_fn (node->decl,
3430 5771 : param_ipa_cp_single_call_penalty)))
3431 11542 : / 100;
3432 :
3433 94049 : return evaluation;
3434 : }
3435 :
3436 : /* Return true if cloning NODE is a good idea, given the estimated TIME_BENEFIT
3437 : and SIZE_COST and with the sum of frequencies of incoming edges to the
3438 : potential new clone in FREQUENCIES. CUR_SWEEP is the number of the current
3439 : sweep of IPA-CP over the call-graph in the decision stage. */
3440 :
3441 : static bool
3442 331537 : good_cloning_opportunity_p (struct cgraph_node *node, sreal time_benefit,
3443 : sreal freq_sum, profile_count count_sum,
3444 : int size_cost, bool called_without_ipa_profile,
3445 : int cur_sweep)
3446 : {
3447 331537 : gcc_assert (count_sum.ipa () == count_sum);
3448 331537 : if (count_sum.quality () == AFDO)
3449 0 : count_sum = count_sum.force_nonzero ();
3450 331537 : if (time_benefit == 0
3451 281736 : || !opt_for_fn (node->decl, flag_ipa_cp_clone)
3452 : /* If there is no call which was executed in profiling or where
3453 : profile is missing, we do not want to clone. */
3454 94140 : || (!called_without_ipa_profile && !count_sum.nonzero_p ()))
3455 : {
3456 237488 : if (dump_file && (dump_flags & TDF_DETAILS))
3457 24 : fprintf (dump_file, " good_cloning_opportunity_p (time: %g, "
3458 : "size: %i): Definitely not good or prohibited.\n",
3459 : time_benefit.to_double (), size_cost);
3460 237488 : return false;
3461 : }
3462 :
3463 94049 : gcc_assert (size_cost > 0);
3464 :
3465 94049 : ipa_node_params *info = ipa_node_params_sum->get (node);
3466 94049 : int num_sweeps = opt_for_fn (node->decl, param_ipa_cp_sweeps);
3467 94049 : int eval_threshold = opt_for_fn (node->decl, param_ipa_cp_eval_threshold);
3468 94049 : eval_threshold = (eval_threshold * num_sweeps) / cur_sweep;
3469 : /* If we know the execution IPA execution counts, we can estimate overall
3470 : speedup of the program. */
3471 94049 : if (count_sum.nonzero_p ())
3472 : {
3473 365 : profile_count saved_time = count_sum * time_benefit;
3474 365 : sreal evaluation = saved_time.to_sreal_scale (profile_count::one ())
3475 730 : / size_cost;
3476 365 : evaluation = incorporate_penalties (node, info, evaluation);
3477 :
3478 365 : if (dump_file && (dump_flags & TDF_DETAILS))
3479 : {
3480 0 : fprintf (dump_file, " good_cloning_opportunity_p (time: %g, "
3481 : "size: %i, count_sum: ", time_benefit.to_double (),
3482 : size_cost);
3483 0 : count_sum.dump (dump_file);
3484 0 : fprintf (dump_file, ", overall time saved: ");
3485 0 : saved_time.dump (dump_file);
3486 0 : fprintf (dump_file, "%s%s) -> evaluation: %.2f, threshold: %i\n",
3487 0 : info->node_within_scc
3488 0 : ? (info->node_is_self_scc ? ", self_scc" : ", scc") : "",
3489 0 : info->node_calling_single_call ? ", single_call" : "",
3490 : evaluation.to_double (), eval_threshold);
3491 : }
3492 365 : gcc_checking_assert (saved_time == saved_time.ipa ());
3493 365 : if (!maybe_hot_count_p (NULL, saved_time))
3494 : {
3495 27 : if (dump_file && (dump_flags & TDF_DETAILS))
3496 0 : fprintf (dump_file, " not cloning: time saved is not hot\n");
3497 : }
3498 : /* Evaulation approximately corresponds to time saved per instruction
3499 : introduced. This is likely almost always going to be true, since we
3500 : already checked that time saved is large enough to be considered
3501 : hot. */
3502 338 : else if (evaluation >= (sreal)eval_threshold)
3503 365 : return true;
      /* If all call sites have known profile, we know we do not want to
	 clone.  If there are calls with unknown profile, try local
	 heuristics.  */
3506 335 : if (!called_without_ipa_profile)
3507 : return false;
3508 : }
3509 93684 : sreal evaluation = (time_benefit * freq_sum) / size_cost;
3510 93684 : evaluation = incorporate_penalties (node, info, evaluation);
3511 93684 : evaluation *= 1000;
3512 :
3513 93684 : if (dump_file && (dump_flags & TDF_DETAILS))
3514 302 : fprintf (dump_file, " good_cloning_opportunity_p (time: %g, "
3515 : "size: %i, freq_sum: %g%s%s) -> evaluation: %.2f, "
3516 : "threshold: %i\n",
3517 : time_benefit.to_double (), size_cost, freq_sum.to_double (),
3518 151 : info->node_within_scc
3519 26 : ? (info->node_is_self_scc ? ", self_scc" : ", scc") : "",
3520 151 : info->node_calling_single_call ? ", single_call" : "",
3521 : evaluation.to_double (), eval_threshold);
3522 :
3523 93684 : return evaluation >= eval_threshold;
3524 : }
3525 :
3526 : /* Grow vectors in AVALS and fill them with information about values of
3527 : parameters that are known to be independent of the context. INFO describes
3528 : the function. If REMOVABLE_PARAMS_COST is non-NULL, the movement cost of
3529 : all removable parameters will be stored in it.
3530 :
3531 : TODO: Also grow context independent value range vectors. */
3532 :
static bool
gather_context_independent_values (class ipa_node_params *info,
				   ipa_auto_call_arg_values *avals,
				   int *removable_params_cost)
{
  int i, count = ipa_get_param_count (info);
  bool ret = false;

  /* One slot per formal parameter; cleared slots mean "unknown".  */
  avals->m_known_vals.safe_grow_cleared (count, true);
  avals->m_known_contexts.safe_grow_cleared (count, true);

  if (removable_params_cost)
    *removable_params_cost = 0;

  for (i = 0; i < count; i++)
    {
      class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
      ipcp_lattice<tree> *lat = &plats->itself;

      /* A single-constant lattice means the parameter has the same known
	 value in every context, so record it and count the move cost we
	 would save by removing the parameter.  */
      if (lat->is_single_const ())
	{
	  ipcp_value<tree> *val = lat->values;
	  gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
	  avals->m_known_vals[i] = val->value;
	  if (removable_params_cost)
	    *removable_params_cost
	      += estimate_move_cost (TREE_TYPE (val->value), false);
	  ret = true;
	}
      /* An unused parameter is removable even without a known value.  */
      else if (removable_params_cost
	       && !ipa_is_param_used (info, i))
	*removable_params_cost
	  += ipa_get_param_move_cost (info, i);

      /* Contexts and aggregate values of unused parameters cannot matter.  */
      if (!ipa_is_param_used (info, i))
	continue;

      ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
      /* Do not account known context as reason for cloning.  We can see
	 if it permits devirtualization.  */
      if (ctxlat->is_single_const ())
	avals->m_known_contexts[i] = ctxlat->values->value;

      /* Also collect context-independent values passed in aggregates.  */
      ret |= push_agg_values_from_plats (plats, i, 0, &avals->m_known_aggs);
    }

  return ret;
}
3581 :
3582 : /* Perform time and size measurement of NODE with the context given in AVALS,
3583 : calculate the benefit compared to the node without specialization and store
3584 : it into VAL. Take into account REMOVABLE_PARAMS_COST of all
3585 : context-independent or unused removable parameters and EST_MOVE_COST, the
3586 : estimated movement of the considered parameter. */
3587 :
3588 : static void
3589 76063 : perform_estimation_of_a_value (cgraph_node *node,
3590 : ipa_auto_call_arg_values *avals,
3591 : int removable_params_cost, int est_move_cost,
3592 : ipcp_value_base *val)
3593 : {
3594 76063 : sreal time_benefit;
3595 76063 : ipa_call_estimates estimates;
3596 :
3597 76063 : estimate_ipcp_clone_size_and_time (node, avals, &estimates);
3598 :
3599 : /* Extern inline functions have no cloning local time benefits because they
3600 : will be inlined anyway. The only reason to clone them is if it enables
3601 : optimization in any of the functions they call. */
3602 76063 : if (DECL_EXTERNAL (node->decl) && DECL_DECLARED_INLINE_P (node->decl))
3603 52 : time_benefit = 0;
3604 : else
3605 76011 : time_benefit = (estimates.nonspecialized_time - estimates.time)
3606 152022 : + hint_time_bonus (node, estimates)
3607 152022 : + (devirtualization_time_bonus (node, avals)
3608 152022 : + removable_params_cost + est_move_cost);
3609 :
3610 76063 : int size = estimates.size;
3611 76063 : gcc_checking_assert (size >=0);
3612 : /* The inliner-heuristics based estimates may think that in certain
3613 : contexts some functions do not have any size at all but we want
3614 : all specializations to have at least a tiny cost, not least not to
3615 : divide by zero. */
3616 76063 : if (size == 0)
3617 0 : size = 1;
3618 :
3619 76063 : val->local_time_benefit = time_benefit;
3620 76063 : val->local_size_cost = size;
3621 76063 : }
3622 :
3623 : /* Get the overall limit of growth based on parameters extracted from growth,
3624 : and CUR_SWEEP, which is the number of the current sweep of IPA-CP over the
3625 : call-graph in the decision stage. It does not really make sense to mix
3626 : functions with different overall growth limits or even number of sweeps but
3627 : it is possible and if it happens, we do not want to select one limit at
3628 : random, so get the limits from NODE. */
3629 :
3630 : static long
3631 206366 : get_max_overall_size (cgraph_node *node, int cur_sweep)
3632 : {
3633 206366 : long max_new_size = orig_overall_size;
3634 206366 : long large_unit = opt_for_fn (node->decl, param_ipa_cp_large_unit_insns);
3635 206366 : if (max_new_size < large_unit)
3636 : max_new_size = large_unit;
3637 206366 : int num_sweeps = opt_for_fn (node->decl, param_ipa_cp_sweeps);
3638 206366 : gcc_assert (cur_sweep <= num_sweeps);
3639 206366 : int unit_growth = opt_for_fn (node->decl, param_ipa_cp_unit_growth);
3640 206366 : max_new_size += ((max_new_size * unit_growth * cur_sweep)
3641 206366 : / num_sweeps) / 100 + 1;
3642 206366 : return max_new_size;
3643 : }
3644 :
3645 : /* Return true if NODE should be cloned just for a parameter removal, possibly
3646 : dumping a reason if not. */
3647 :
3648 : static bool
3649 188328 : clone_for_param_removal_p (cgraph_node *node)
3650 : {
3651 188328 : if (!node->can_change_signature)
3652 : {
3653 4945 : if (dump_file && (dump_flags & TDF_DETAILS))
3654 0 : fprintf (dump_file, " Not considering cloning to remove parameters, "
3655 : "function cannot change signature.\n");
3656 4945 : return false;
3657 : }
3658 183383 : if (node->can_be_local_p ())
3659 : {
3660 133493 : if (dump_file && (dump_flags & TDF_DETAILS))
3661 0 : fprintf (dump_file, " Not considering cloning to remove parameters, "
3662 : "IPA-SRA can do it potentially better.\n");
3663 133493 : return false;
3664 : }
3665 : return true;
3666 : }
3667 :
3668 : /* Iterate over known values of parameters of NODE and estimate the local
3669 : effects in terms of time and size they have. */
3670 :
static void
estimate_local_effects (struct cgraph_node *node)
{
  ipa_node_params *info = ipa_node_params_sum->get (node);
  int count = ipa_get_param_count (info);
  int removable_params_cost;

  /* Nothing to estimate for functions without parameters or which cannot be
     specialized at all.  */
  if (!count || !ipcp_versionable_function_p (node))
    return;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "\nEstimating effects for %s.\n", node->dump_name ());

  ipa_auto_call_arg_values avals;
  gather_context_independent_values (info, &avals, &removable_params_cost);

  /* Phase 1: estimate each candidate scalar constant.  Each value is
     temporarily planted into AVALS (on top of the context-independent ones)
     so the estimate sees it, then the slot is cleared again.  */
  for (int i = 0; i < count; i++)
    {
      class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
      ipcp_lattice<tree> *lat = &plats->itself;
      ipcp_value<tree> *val;

      /* Skip parameters with no candidate values or whose value is already
	 known in all contexts (handled by the gathered baseline).  */
      if (lat->bottom
	  || !lat->values
	  || avals.m_known_vals[i])
	continue;

      for (val = lat->values; val; val = val->next)
	{
	  gcc_checking_assert (TREE_CODE (val->value) != TREE_BINFO);
	  avals.m_known_vals[i] = val->value;

	  int emc = estimate_move_cost (TREE_TYPE (val->value), true);
	  perform_estimation_of_a_value (node, &avals, removable_params_cost,
					 emc, val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " - estimates for value ");
	      print_ipcp_constant_value (dump_file, val->value);
	      fprintf (dump_file, " for ");
	      ipa_dump_param (dump_file, info, i);
	      fprintf (dump_file, ": time_benefit: %g, size: %i\n",
		       val->local_time_benefit.to_double (),
		       val->local_size_cost);
	    }
	}
      /* Restore the slot so the next parameter is estimated in isolation.  */
      avals.m_known_vals[i] = NULL_TREE;
    }

  /* Phase 2: estimate each candidate polymorphic call context, but only for
     parameters actually used in virtual calls.  */
  for (int i = 0; i < count; i++)
    {
      class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);

      if (!plats->virt_call)
	continue;

      ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
      ipcp_value<ipa_polymorphic_call_context> *val;

      /* Skip if there are no candidates or a context is already part of the
	 context-independent baseline.  */
      if (ctxlat->bottom
	  || !ctxlat->values
	  || !avals.m_known_contexts[i].useless_p ())
	continue;

      for (val = ctxlat->values; val; val = val->next)
	{
	  avals.m_known_contexts[i] = val->value;
	  perform_estimation_of_a_value (node, &avals, removable_params_cost,
					 0, val);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (dump_file, " - estimates for polymorphic context ");
	      print_ipcp_constant_value (dump_file, val->value);
	      fprintf (dump_file, " for ");
	      ipa_dump_param (dump_file, info, i);
	      fprintf (dump_file, ": time_benefit: %g, size: %i\n",
		       val->local_time_benefit.to_double (),
		       val->local_size_cost);
	    }
	}
      /* Reset to an empty (useless) context.  */
      avals.m_known_contexts[i] = ipa_polymorphic_call_context ();
    }

  /* Phase 3: estimate candidate aggregate values.  Save the gathered
     context-independent aggregate values aside and make room for one extra
     slot in which each candidate is tried while keeping the vector sorted by
     (index, unit_offset).  */
  unsigned all_ctx_len = avals.m_known_aggs.length ();
  auto_vec<ipa_argagg_value, 32> all_ctx;
  all_ctx.reserve_exact (all_ctx_len);
  all_ctx.splice (avals.m_known_aggs);
  avals.m_known_aggs.safe_grow_cleared (all_ctx_len + 1);

  /* J is the position in AVALS where the next candidate will be placed;
     entries of ALL_CTX that sort before it have been copied in front.  */
  unsigned j = 0;
  for (int index = 0; index < count; index++)
    {
      class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, index);

      if (plats->aggs_bottom || !plats->aggs)
	continue;

      for (ipcp_agg_lattice *aglat = plats->aggs; aglat; aglat = aglat->next)
	{
	  ipcp_value<tree> *val;
	  if (aglat->bottom || !aglat->values
	      /* If the following is true, the one value is already part of all
		 context estimations.  */
	      || (!plats->aggs_contain_variable
		  && aglat->is_single_const ()))
	    continue;

	  unsigned unit_offset = aglat->offset / BITS_PER_UNIT;
	  /* Copy baseline entries that precede the candidate position.  */
	  while (j < all_ctx_len
		 && (all_ctx[j].index < index
		     || (all_ctx[j].index == index
			 && all_ctx[j].unit_offset < unit_offset)))
	    {
	      avals.m_known_aggs[j] = all_ctx[j];
	      j++;
	    }

	  /* Shift the remaining baseline entries one slot up, leaving slot J
	     free for the candidate value.  */
	  for (unsigned k = j; k < all_ctx_len; k++)
	    avals.m_known_aggs[k+1] = all_ctx[k];

	  for (val = aglat->values; val; val = val->next)
	    {
	      avals.m_known_aggs[j].value = val->value;
	      avals.m_known_aggs[j].unit_offset = unit_offset;
	      avals.m_known_aggs[j].index = index;
	      avals.m_known_aggs[j].by_ref = plats->aggs_by_ref;
	      avals.m_known_aggs[j].killed = false;

	      perform_estimation_of_a_value (node, &avals,
					     removable_params_cost, 0, val);

	      if (dump_file && (dump_flags & TDF_DETAILS))
		{
		  fprintf (dump_file, " - estimates for value ");
		  print_ipcp_constant_value (dump_file, val->value);
		  fprintf (dump_file, " for ");
		  ipa_dump_param (dump_file, info, index);
		  fprintf (dump_file, "[%soffset: " HOST_WIDE_INT_PRINT_DEC
			   "]: time_benefit: %g, size: %i\n",
			   plats->aggs_by_ref ? "ref " : "",
			   aglat->offset,
			   val->local_time_benefit.to_double (),
			   val->local_size_cost);
		}
	    }
	}
    }
}
3821 :
3822 :
3823 : /* Add value CUR_VAL and all yet-unsorted values it is dependent on to the
3824 : topological sort of values. */
3825 :
template <typename valtype>
void
value_topo_info<valtype>::add_val (ipcp_value<valtype> *cur_val)
{
  ipcp_value_source<valtype> *src;

  /* Non-zero dfs number means the value has already been visited.  */
  if (cur_val->dfs)
    return;

  /* Assign depth-first numbers; low_link starts equal to the dfs number and
     is lowered below when a back edge to an earlier on-stack value is
     found (Tarjan-style strongly connected component detection).  */
  dfs_counter++;
  cur_val->dfs = dfs_counter;
  cur_val->low_link = dfs_counter;

  /* Push CUR_VAL on the work stack.  */
  cur_val->topo_next = stack;
  stack = cur_val;
  cur_val->on_stack = true;

  /* Recurse into the values this one depends on (sources with a VAL).  */
  for (src = cur_val->sources; src; src = src->next)
    if (src->val)
      {
	if (src->val->dfs == 0)
	  {
	    /* Unvisited dependency: visit it and inherit its low_link if
	       smaller.  */
	    add_val (src->val);
	    if (src->val->low_link < cur_val->low_link)
	      cur_val->low_link = src->val->low_link;
	  }
	else if (src->val->on_stack
		 && src->val->dfs < cur_val->low_link)
	  /* Back edge to a value still on the stack.  */
	  cur_val->low_link = src->val->dfs;
      }

  /* CUR_VAL is the root of an SCC: pop its members off the stack, link them
     into an scc_next chain and prepend the SCC to the topological order.  */
  if (cur_val->dfs == cur_val->low_link)
    {
      ipcp_value<valtype> *v, *scc_list = NULL;

      do
	{
	  v = stack;
	  stack = v->topo_next;
	  v->on_stack = false;
	  v->scc_no = cur_val->dfs;

	  v->scc_next = scc_list;
	  scc_list = v;
	}
      while (v != cur_val);

      cur_val->topo_next = values_topo;
      values_topo = cur_val;
    }
}
3877 :
3878 : /* Add all values in lattices associated with NODE to the topological sort if
3879 : they are not there yet. */
3880 :
3881 : static void
3882 1260412 : add_all_node_vals_to_toposort (cgraph_node *node, ipa_topo_info *topo)
3883 : {
3884 1260412 : ipa_node_params *info = ipa_node_params_sum->get (node);
3885 1260412 : int i, count = ipa_get_param_count (info);
3886 :
3887 3580456 : for (i = 0; i < count; i++)
3888 : {
3889 2320044 : class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
3890 2320044 : ipcp_lattice<tree> *lat = &plats->itself;
3891 2320044 : struct ipcp_agg_lattice *aglat;
3892 :
3893 2320044 : if (!lat->bottom)
3894 : {
3895 220330 : ipcp_value<tree> *val;
3896 292153 : for (val = lat->values; val; val = val->next)
3897 71823 : topo->constants.add_val (val);
3898 : }
3899 :
3900 2320044 : if (!plats->aggs_bottom)
3901 277529 : for (aglat = plats->aggs; aglat; aglat = aglat->next)
3902 57244 : if (!aglat->bottom)
3903 : {
3904 56888 : ipcp_value<tree> *val;
3905 114045 : for (val = aglat->values; val; val = val->next)
3906 57157 : topo->constants.add_val (val);
3907 : }
3908 :
3909 2320044 : ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
3910 2320044 : if (!ctxlat->bottom)
3911 : {
3912 221330 : ipcp_value<ipa_polymorphic_call_context> *ctxval;
3913 228968 : for (ctxval = ctxlat->values; ctxval; ctxval = ctxval->next)
3914 7638 : topo->contexts.add_val (ctxval);
3915 : }
3916 : }
3917 1260412 : }
3918 :
3919 : /* One pass of constants propagation along the call graph edges, from callers
3920 : to callees (requires topological ordering in TOPO), iterate over strongly
3921 : connected components. */
3922 :
static void
propagate_constants_topo (class ipa_topo_info *topo)
{
  int i;

  /* Walk the call graph SCCs in reverse topological order so that all
     callers of an SCC are processed before the SCC itself.  */
  for (i = topo->nnodes - 1; i >= 0; i--)
    {
      unsigned j;
      struct cgraph_node *v, *node = topo->order[i];
      vec<cgraph_node *> cycle_nodes = ipa_get_nodes_in_cycle (node);

      /* First, iteratively propagate within the strongly connected component
	 until all lattices stabilize.  */
      FOR_EACH_VEC_ELT (cycle_nodes, j, v)
	if (v->has_gimple_body_p ())
	  {
	    if (opt_for_fn (v->decl, flag_ipa_cp)
		&& opt_for_fn (v->decl, optimize))
	      push_node_to_stack (topo, v);
	    /* When V is not optimized, we can not push it to stack, but
	       still we need to set all its callees lattices to bottom.  */
	    else
	      {
		for (cgraph_edge *cs = v->callees; cs; cs = cs->next_callee)
		  propagate_constants_across_call (cs);
	      }
	  }

      /* Fixpoint loop: pop nodes and propagate over their intra-SCC edges,
	 re-pushing callees whose lattices changed.  */
      v = pop_node_from_stack (topo);
      while (v)
	{
	  struct cgraph_edge *cs;
	  class ipa_node_params *info = NULL;
	  /* Remains true only if every intra-SCC edge of V calls V itself.  */
	  bool self_scc = true;

	  for (cs = v->callees; cs; cs = cs->next_callee)
	    if (ipa_edge_within_scc (cs))
	      {
		cgraph_node *callee = cs->callee->function_symbol ();

		if (v != callee)
		  self_scc = false;

		/* Lazily fetch V's info and mark it as part of an SCC the
		   first time we see an intra-SCC edge.  */
		if (!info)
		  {
		    info = ipa_node_params_sum->get (v);
		    info->node_within_scc = true;
		  }

		if (propagate_constants_across_call (cs))
		  push_node_to_stack (topo, callee);
	      }

	  if (info)
	    info->node_is_self_scc = self_scc;

	  v = pop_node_from_stack (topo);
	}

      /* Afterwards, propagate along edges leading out of the SCC, calculates
	 the local effects of the discovered constants and all valid values to
	 their topological sort.  */
      FOR_EACH_VEC_ELT (cycle_nodes, j, v)
	if (v->has_gimple_body_p ()
	    && opt_for_fn (v->decl, flag_ipa_cp)
	    && opt_for_fn (v->decl, optimize))
	  {
	    struct cgraph_edge *cs;

	    estimate_local_effects (v);
	    add_all_node_vals_to_toposort (v, topo);
	    for (cs = v->callees; cs; cs = cs->next_callee)
	      if (!ipa_edge_within_scc (cs))
		propagate_constants_across_call (cs);
	  }
      cycle_nodes.release ();
    }
}
4001 :
4002 : /* Propagate the estimated effects of individual values along the topological
4003 : from the dependent values to those they depend on. */
4004 :
template <typename valtype>
void
value_topo_info<valtype>::propagate_effects ()
{
  ipcp_value<valtype> *base;
  hash_set<ipcp_value<valtype> *> processed_srcvals;

  /* VALUES_TOPO lists one representative per SCC, linked by topo_next,
     with the members of each SCC chained through scc_next.  */
  for (base = values_topo; base; base = base->topo_next)
    {
      ipcp_value_source<valtype> *src;
      ipcp_value<valtype> *val;
      sreal time = 0;
      HOST_WIDE_INT size = 0;

      /* First accumulate the local and already-propagated benefits and
	 costs of the whole SCC.  */
      for (val = base; val; val = val->scc_next)
	{
	  time = time + val->local_time_benefit + val->prop_time_benefit;
	  size = size + val->local_size_cost + val->prop_size_cost;
	}

      /* Then push the sums to the source values the SCC depends on.  */
      for (val = base; val; val = val->scc_next)
	{
	  processed_srcvals.empty ();
	  for (src = val->sources; src; src = src->next)
	    if (src->val
		&& cs_interesting_for_ipcp_p (src->cs))
	      {
		/* Add the size only once per source value (add returns true
		   if the value was already in the set), guarding against
		   overflow of the int-sized prop_size_cost.  */
		if (!processed_srcvals.add (src->val))
		  {
		    HOST_WIDE_INT prop_size = size + src->val->prop_size_cost;
		    if (prop_size < INT_MAX)
		      src->val->prop_size_cost = prop_size;
		    else
		      continue;
		  }

		/* Scale the time benefit for edges within the same SCC and
		   for values generated by self-recursion, where deeper
		   generated levels get a smaller factor.  */
		int special_factor = 1;
		if (val->same_scc (src->val))
		  special_factor
		    = opt_for_fn(src->cs->caller->decl,
				 param_ipa_cp_recursive_freq_factor);
		else if (val->self_recursion_generated_p ()
			 && (src->cs->callee->function_symbol ()
			     == src->cs->caller))
		  {
		    int max_recur_gen_depth
		      = opt_for_fn(src->cs->caller->decl,
				   param_ipa_cp_max_recursive_depth);
		    special_factor = max_recur_gen_depth
		      - val->self_recursion_generated_level + 1;
		  }

		src->val->prop_time_benefit
		  += time * special_factor * src->cs->sreal_frequency ();
	      }

	  /* Record the SCC totals on the value itself, zeroing them on size
	     overflow.  */
	  if (size < INT_MAX)
	    {
	      val->prop_time_benefit = time;
	      val->prop_size_cost = size;
	    }
	  else
	    {
	      val->prop_time_benefit = 0;
	      val->prop_size_cost = 0;
	    }
	}
    }
}
4074 :
4075 :
4076 : /* Propagate constants, polymorphic contexts and their effects from the
4077 : summaries interprocedurally. */
4078 :
static void
ipcp_propagate_stage (class ipa_topo_info *topo)
{
  struct cgraph_node *node;

  if (dump_file)
    fprintf (dump_file, "\n Propagating constants:\n\n");

  /* Initialize per-node lattices and accumulate the total unit size.  */
  FOR_EACH_DEFINED_FUNCTION (node)
    {
      if (node->has_gimple_body_p ()
	  && opt_for_fn (node->decl, flag_ipa_cp)
	  && opt_for_fn (node->decl, optimize))
	{
	  ipa_node_params *info = ipa_node_params_sum->get (node);
	  determine_versionability (node, info);

	  unsigned nlattices = ipa_get_param_count (info);
	  info->lattices.safe_grow_cleared (nlattices, true);
	  initialize_node_lattices (node);

	  /* Track the largest per-function sweep count seen in the unit.  */
	  int num_sweeps = opt_for_fn (node->decl, param_ipa_cp_sweeps);
	  if (max_number_sweeps < num_sweeps)
	    max_number_sweeps = num_sweeps;
	}
      /* Count real (non-alias) definitions towards the unit size.  */
      ipa_size_summary *s = ipa_size_summaries->get (node);
      if (node->definition && !node->alias && s != NULL)
	overall_size += s->self_size;
    }

  /* Remember the pre-cloning size; growth limits are computed from it.  */
  orig_overall_size = overall_size;

  if (dump_file)
    fprintf (dump_file, "\noverall_size: %li\n", overall_size);

  /* Propagate constants and contexts over the call graph, then propagate
     the estimated effects of values along their dependencies.  */
  propagate_constants_topo (topo);
  if (flag_checking)
    ipcp_verify_propagated_values ();
  topo->constants.propagate_effects ();
  topo->contexts.propagate_effects ();

  if (dump_file)
    {
      fprintf (dump_file, "\nIPA lattices after all propagation:\n");
      print_all_lattices (dump_file, (dump_flags & TDF_DETAILS), true);
    }
}
4126 :
4127 : /* Discover newly direct outgoing edges from NODE which is a new clone with
4128 : known KNOWN_CSTS and make them direct. */
4129 :
static void
ipcp_discover_new_direct_edges (struct cgraph_node *node,
				vec<tree> known_csts,
				vec<ipa_polymorphic_call_context>
				known_contexts,
				vec<ipa_argagg_value, va_gc> *aggvals)
{
  struct cgraph_edge *ie, *next_ie;
  bool found = false;

  /* Walk all indirect edges; NEXT_IE is cached because making an edge direct
     modifies the list.  */
  for (ie = node->indirect_calls; ie; ie = next_ie)
    {
      tree target;
      bool speculative;

      next_ie = ie->next_callee;
      ipa_argagg_value_list avs (aggvals);
      /* Try to resolve the call target from the known constants, contexts
	 and aggregate values.  */
      target = ipa_get_indirect_edge_target_1 (ie, known_csts, known_contexts,
					       avs, &speculative);
      if (target)
	{
	  /* Note what kind of indirect call this was before converting it,
	     since the conversion replaces IE's indirect info.  */
	  cgraph_polymorphic_indirect_info *pii
	    = dyn_cast <cgraph_polymorphic_indirect_info *> (ie->indirect_info);
	  cgraph_simple_indirect_info *sii
	    = dyn_cast <cgraph_simple_indirect_info *> (ie->indirect_info);
	  bool agg_contents = sii && sii->agg_contents;
	  bool polymorphic = !!pii;
	  int param_index = ie->indirect_info->param_index;
	  struct cgraph_edge *cs = ipa_make_edge_direct_to_target (ie, target,
								   speculative);
	  found = true;

	  /* For simple calls through a parameter value, the new direct call
	     accounts for one fewer controlled use of that parameter.  */
	  if (cs && !agg_contents && !polymorphic)
	    {
	      ipa_node_params *info = ipa_node_params_sum->get (node);
	      int c = ipa_get_controlled_uses (info, param_index);
	      if (c != IPA_UNDESCRIBED_USE
		  && !ipa_get_param_load_dereferenced (info, param_index))
		{
		  struct ipa_ref *to_del;

		  c--;
		  ipa_set_controlled_uses (info, param_index, c);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (dump_file, " controlled uses count of param "
			     "%i bumped down to %i\n", param_index, c);
		  /* With no controlled uses left, the address reference
		     created for the clone can be dropped too.  */
		  if (c == 0
		      && (to_del = node->find_reference (cs->callee, NULL, 0,
							 IPA_REF_ADDR)))
		    {
		      if (dump_file && (dump_flags & TDF_DETAILS))
			fprintf (dump_file, " and even removing its "
				 "cloning-created reference\n");
		      to_del->remove_reference ();
		    }
		}
	    }
	}
    }
  /* Turning calls to direct calls will improve overall summary.  */
  if (found)
    ipa_update_overall_fn_summary (node);
}
4193 :
class edge_clone_summary;
/* Summary attached to call graph edges recording their clone chains.  */
static call_summary <edge_clone_summary *> *edge_clone_summaries = NULL;

/* Edge clone summary.  Links an edge into a doubly-linked chain of clones of
   the same original edge.  */

class edge_clone_summary
{
public:
  /* Default constructor. */
  edge_clone_summary (): prev_clone (NULL), next_clone (NULL) {}

  /* Default destructor.  Unlinks this edge from its clone chain by making
     its neighbors point past it.  */
  ~edge_clone_summary ()
  {
    if (prev_clone)
      edge_clone_summaries->get (prev_clone)->next_clone = next_clone;
    if (next_clone)
      edge_clone_summaries->get (next_clone)->prev_clone = prev_clone;
  }

  /* Neighboring edges in the chain of clones, NULL at either end.  */
  cgraph_edge *prev_clone;
  cgraph_edge *next_clone;
};
4217 :
/* Call summary class managing edge_clone_summary objects; its duplicate hook
   maintains the clone chains when edges are cloned.  */
class edge_clone_summary_t:
  public call_summary <edge_clone_summary *>
{
public:
  edge_clone_summary_t (symbol_table *symtab):
    call_summary <edge_clone_summary *> (symtab)
  {
    /* Create summaries for new edges as part of cloning so that duplicate
       below is invoked.  */
    m_initialize_when_cloning = true;
  }

  void duplicate (cgraph_edge *src_edge, cgraph_edge *dst_edge,
		  edge_clone_summary *src_data,
		  edge_clone_summary *dst_data) final override;
};
4232 :
4233 : /* Edge duplication hook. */
4234 :
/* Edge duplication hook.  Inserts DST_EDGE into the clone chain immediately
   after SRC_EDGE.  */
void
edge_clone_summary_t::duplicate (cgraph_edge *src_edge, cgraph_edge *dst_edge,
				 edge_clone_summary *src_data,
				 edge_clone_summary *dst_data)
{
  /* Fix the back pointer of the old successor before splicing DST_EDGE in
     between SRC_EDGE and that successor.  */
  if (src_data->next_clone)
    edge_clone_summaries->get (src_data->next_clone)->prev_clone = dst_edge;
  dst_data->prev_clone = src_edge;
  dst_data->next_clone = src_data->next_clone;
  src_data->next_clone = dst_edge;
}
4246 :
4247 : /* Return true is CS calls DEST or its clone for all contexts. When
4248 : ALLOW_RECURSION_TO_CLONE is false, also return false for self-recursive
4249 : edges from/to an all-context clone. */
4250 :
4251 : static bool
4252 1801504 : calls_same_node_or_its_all_contexts_clone_p (cgraph_edge *cs, cgraph_node *dest,
4253 : bool allow_recursion_to_clone)
4254 : {
4255 1801504 : enum availability availability;
4256 1801504 : cgraph_node *callee = cs->callee->function_symbol (&availability);
4257 :
4258 1801504 : if (availability <= AVAIL_INTERPOSABLE)
4259 : return false;
4260 1795564 : if (callee == dest)
4261 : return true;
4262 624353 : if (!allow_recursion_to_clone && cs->caller == callee)
4263 : return false;
4264 :
4265 624169 : ipa_node_params *info = ipa_node_params_sum->get (callee);
4266 624169 : return info->is_all_contexts_clone && info->ipcp_orig_node == dest;
4267 : }
4268 :
4269 : /* Return true if edge CS does bring about the value described by SRC to
4270 : DEST_VAL of node DEST or its clone for all contexts. */
4271 :
static bool
cgraph_edge_brings_value_p (cgraph_edge *cs, ipcp_value_source<tree> *src,
			    cgraph_node *dest, ipcp_value<tree> *dest_val)
{
  ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);

  /* The edge must reach DEST (or its all-context clone) and come from a
     live caller.  Self-recursion to a clone is only acceptable for sources
     that do not depend on another propagated value.  */
  if (!calls_same_node_or_its_all_contexts_clone_p (cs, dest, !src->val)
      || caller_info->node_dead)
    return false;

  /* A source without a value represents a constant passed directly at the
     call, which the edge trivially still brings.  */
  if (!src->val)
    return true;

  if (caller_info->ipcp_orig_node)
    {
      /* The caller is itself a clone: look the value up in what the clone
	 actually has - known scalar constants or, for aggregate sources
	 (offset != -1), its transformation summary.  */
      tree t = NULL_TREE;
      if (src->offset == -1)
	t = caller_info->known_csts[src->index];
      else if (ipcp_transformation *ts
	       = ipcp_get_transformation_summary (cs->caller))
	{
	  ipa_argagg_value_list avl (ts);
	  t = avl.get_value (src->index, src->offset / BITS_PER_UNIT);
	}
      return (t != NULL_TREE
	      && values_equal_for_ipcp_p (src->val->value, t));
    }
  else
    {
      /* Otherwise consult the caller's lattices.  */
      if (src->val == dest_val)
	return true;

      struct ipcp_agg_lattice *aglat;
      class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
								src->index);
      if (src->offset == -1)
	/* Scalar source: the caller's lattice must have settled on exactly
	   this one constant.  */
	return (plats->itself.is_single_const ()
		&& values_equal_for_ipcp_p (src->val->value,
					    plats->itself.values->value));
      else
	{
	  /* Aggregate source: find the lattice at the matching offset.  */
	  if (plats->aggs_bottom || plats->aggs_contain_variable)
	    return false;
	  for (aglat = plats->aggs; aglat; aglat = aglat->next)
	    if (aglat->offset == src->offset)
	      return (aglat->is_single_const ()
		       && values_equal_for_ipcp_p (src->val->value,
						   aglat->values->value));
	}
      return false;
    }
}
4324 :
4325 : /* Return true if edge CS does bring about the value described by SRC to
4326 : DST_VAL of node DEST or its clone for all contexts. */
4327 :
static bool
cgraph_edge_brings_value_p (cgraph_edge *cs,
			    ipcp_value_source<ipa_polymorphic_call_context> *src,
			    cgraph_node *dest,
			    ipcp_value<ipa_polymorphic_call_context> *)
{
  ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);

  /* The edge must reach DEST (or its all-context clone) and come from a
     live caller.  */
  if (!calls_same_node_or_its_all_contexts_clone_p (cs, dest, true)
      || caller_info->node_dead)
    return false;
  /* Sources without a value trivially hold.  */
  if (!src->val)
    return true;

  /* If the caller is a clone, check the context it was specialized for;
     otherwise require the caller's context lattice to contain exactly this
     one context.  */
  if (caller_info->ipcp_orig_node)
    return (caller_info->known_contexts.length () > (unsigned) src->index)
      && values_equal_for_ipcp_p (src->val->value,
				  caller_info->known_contexts[src->index]);

  class ipcp_param_lattices *plats = ipa_get_parm_lattices (caller_info,
							    src->index);
  return plats->ctxlat.is_single_const ()
    && values_equal_for_ipcp_p (src->val->value,
				plats->ctxlat.values->value);
}
4353 :
4354 : /* Get the next clone in the linked list of clones of an edge. */
4355 :
4356 : static inline struct cgraph_edge *
4357 1801813 : get_next_cgraph_edge_clone (struct cgraph_edge *cs)
4358 : {
4359 1801813 : edge_clone_summary *s = edge_clone_summaries->get (cs);
4360 1801813 : return s != NULL ? s->next_clone : NULL;
4361 : }
4362 :
4363 : /* Given VAL that is intended for DEST, iterate over all its sources and if any
4364 : of them is viable and hot, return true. In that case, for those that still
4365 : hold, add their edge frequency and their number and cumulative profile
4366 : counts of self-ecursive and other edges into *FREQUENCY, *CALLER_COUNT,
4367 : REC_COUNT_SUM and NONREC_COUNT_SUM respectively. */
4368 :
4369 : template <typename valtype>
4370 : static bool
4371 205428 : get_info_about_necessary_edges (ipcp_value<valtype> *val, cgraph_node *dest,
4372 : sreal *freq_sum, int *caller_count,
4373 : profile_count *rec_count_sum,
4374 : profile_count *nonrec_count_sum,
4375 : bool *called_without_ipa_profile)
4376 : {
4377 : ipcp_value_source<valtype> *src;
4378 205428 : sreal freq = 0;
4379 205428 : int count = 0;
4380 205428 : profile_count rec_cnt = profile_count::zero ();
4381 205428 : profile_count nonrec_cnt = profile_count::zero ();
4382 205428 : bool interesting = false;
4383 205428 : bool non_self_recursive = false;
4384 205428 : *called_without_ipa_profile = false;
4385 :
4386 952558 : for (src = val->sources; src; src = src->next)
4387 : {
4388 747130 : struct cgraph_edge *cs = src->cs;
4389 1860479 : while (cs)
4390 : {
4391 1113349 : if (cgraph_edge_brings_value_p (cs, src, dest, val))
4392 : {
4393 341739 : count++;
4394 341739 : freq += cs->sreal_frequency ();
4395 341739 : interesting |= cs_interesting_for_ipcp_p (cs);
4396 341739 : if (cs->caller != dest)
4397 : {
4398 334983 : non_self_recursive = true;
4399 334983 : if (cs->count.ipa ().initialized_p ())
4400 923 : rec_cnt += cs->count.ipa ();
4401 : else
4402 334060 : *called_without_ipa_profile = true;
4403 : }
4404 6756 : else if (cs->count.ipa ().initialized_p ())
4405 0 : nonrec_cnt += cs->count.ipa ();
4406 : else
4407 6756 : *called_without_ipa_profile = true;
4408 : }
4409 1113349 : cs = get_next_cgraph_edge_clone (cs);
4410 : }
4411 : }
4412 :
4413 : /* If the only edges bringing a value are self-recursive ones, do not bother
4414 : evaluating it. */
4415 205428 : if (!non_self_recursive)
4416 : return false;
4417 :
4418 144484 : *freq_sum = freq;
4419 144484 : *caller_count = count;
4420 144484 : *rec_count_sum = rec_cnt;
4421 144484 : *nonrec_count_sum = nonrec_cnt;
4422 :
4423 144484 : return interesting;
4424 : }
4425 :
4426 : /* Given a NODE, and a set of its CALLERS, try to adjust order of the callers
4427 : to let a non-self-recursive caller be the first element. Thus, we can
4428 : simplify intersecting operations on values that arrive from all of these
4429 : callers, especially when there exists self-recursive call. Return true if
4430 : this kind of adjustment is possible. */
4431 :
4432 : static bool
4433 56656 : adjust_callers_for_value_intersection (vec<cgraph_edge *> &callers,
4434 : cgraph_node *node)
4435 : {
4436 60879 : for (unsigned i = 0; i < callers.length (); i++)
4437 : {
4438 60783 : cgraph_edge *cs = callers[i];
4439 :
4440 60783 : if (cs->caller != node)
4441 : {
4442 56560 : if (i > 0)
4443 : {
4444 1977 : callers[i] = callers[0];
4445 1977 : callers[0] = cs;
4446 : }
4447 56560 : return true;
4448 : }
4449 : }
4450 : return false;
4451 : }
4452 :
4453 : /* Return a vector of incoming edges that do bring value VAL to node DEST. It
4454 : is assumed their number is known and equal to CALLER_COUNT. */
4455 :
4456 : template <typename valtype>
4457 : static vec<cgraph_edge *>
4458 144128 : gather_edges_for_value (ipcp_value<valtype> *val, cgraph_node *dest,
4459 : int caller_count)
4460 : {
4461 : ipcp_value_source<valtype> *src;
4462 : vec<cgraph_edge *> ret;
4463 :
4464 144128 : ret.create (caller_count);
4465 496099 : for (src = val->sources; src; src = src->next)
4466 : {
4467 351971 : struct cgraph_edge *cs = src->cs;
4468 792494 : while (cs)
4469 : {
4470 440523 : if (cgraph_edge_brings_value_p (cs, src, dest, val))
4471 338707 : ret.quick_push (cs);
4472 440523 : cs = get_next_cgraph_edge_clone (cs);
4473 : }
4474 : }
4475 :
4476 144128 : if (caller_count > 1)
4477 39200 : adjust_callers_for_value_intersection (ret, dest);
4478 :
4479 144128 : return ret;
4480 : }
4481 :
4482 : /* Construct a replacement map for a know VALUE for a formal parameter PARAM.
4483 : Return it or NULL if for some reason it cannot be created. FORCE_LOAD_REF
4484 : should be set to true when the reference created for the constant should be
4485 : a load one and not an address one because the corresponding parameter p is
4486 : only used as *p. */
4487 :
4488 : static struct ipa_replace_map *
4489 25634 : get_replacement_map (class ipa_node_params *info, tree value, int parm_num,
4490 : bool force_load_ref)
4491 : {
4492 25634 : struct ipa_replace_map *replace_map;
4493 :
4494 25634 : replace_map = ggc_alloc<ipa_replace_map> ();
4495 25634 : if (dump_file)
4496 : {
4497 181 : fprintf (dump_file, " replacing ");
4498 181 : ipa_dump_param (dump_file, info, parm_num);
4499 :
4500 181 : fprintf (dump_file, " with const ");
4501 181 : print_generic_expr (dump_file, value);
4502 :
4503 181 : if (force_load_ref)
4504 11 : fprintf (dump_file, " - forcing load reference\n");
4505 : else
4506 170 : fprintf (dump_file, "\n");
4507 : }
4508 25634 : replace_map->parm_num = parm_num;
4509 25634 : replace_map->new_tree = value;
4510 25634 : replace_map->force_load_ref = force_load_ref;
4511 25634 : return replace_map;
4512 : }
4513 :
4514 : /* Dump new profiling counts of NODE. SPEC is true when NODE is a specialzied
4515 : one, otherwise it will be referred to as the original node. */
4516 :
4517 : static void
4518 4 : dump_profile_updates (cgraph_node *node, bool spec)
4519 : {
4520 4 : if (spec)
4521 2 : fprintf (dump_file, " setting count of the specialized node %s to ",
4522 : node->dump_name ());
4523 : else
4524 2 : fprintf (dump_file, " setting count of the original node %s to ",
4525 : node->dump_name ());
4526 :
4527 4 : node->count.dump (dump_file);
4528 4 : fprintf (dump_file, "\n");
4529 6 : for (cgraph_edge *cs = node->callees; cs; cs = cs->next_callee)
4530 : {
4531 2 : fprintf (dump_file, " edge to %s has count ",
4532 2 : cs->callee->dump_name ());
4533 2 : cs->count.dump (dump_file);
4534 2 : fprintf (dump_file, "\n");
4535 : }
4536 4 : }
4537 :
4538 : /* With partial train run we do not want to assume that original's count is
4539 : zero whenever we redurect all executed edges to clone. Simply drop profile
4540 : to local one in this case. In eany case, return the new value. ORIG_NODE
4541 : is the original node and its count has not been updaed yet. */
4542 :
4543 : profile_count
4544 20 : lenient_count_portion_handling (profile_count remainder, cgraph_node *orig_node)
4545 : {
4546 40 : if (remainder.ipa_p () && !remainder.ipa ().nonzero_p ()
4547 30 : && orig_node->count.ipa_p () && orig_node->count.ipa ().nonzero_p ()
4548 5 : && opt_for_fn (orig_node->decl, flag_profile_partial_training))
4549 0 : remainder = orig_node->count.guessed_local ();
4550 :
4551 20 : return remainder;
4552 : }
4553 :
/* Structure to sum counts coming from nodes other than the original node and
   its clones.  */

struct gather_other_count_struct
{
  /* The original node; edges whose caller is this node or a clone of it are
     not summed.  */
  cgraph_node *orig;
  /* Accumulated IPA counts of all other incoming edges.  */
  profile_count other_count;
};
4562 :
4563 : /* Worker callback of call_for_symbol_thunks_and_aliases summing the number of
4564 : counts that come from non-self-recursive calls.. */
4565 :
4566 : static bool
4567 10 : gather_count_of_non_rec_edges (cgraph_node *node, void *data)
4568 : {
4569 10 : gather_other_count_struct *desc = (gather_other_count_struct *) data;
4570 24 : for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
4571 14 : if (cs->caller != desc->orig && cs->caller->clone_of != desc->orig)
4572 0 : if (cs->count.ipa ().initialized_p ())
4573 0 : desc->other_count += cs->count.ipa ();
4574 10 : return false;
4575 : }
4576 :
/* Structure to help analyze if we need to boost counts of some clones of some
   non-recursive edges to match the new callee count.  */

struct desc_incoming_count_struct
{
  /* The original (pre-specialization) node.  */
  cgraph_node *orig;
  /* Edges already accounted for during count redistribution.  */
  hash_set <cgraph_edge *> *processed_edges;
  /* Sum of incoming counts; later re-used as the portion to distribute.  */
  profile_count count;
  /* Number of unprocessed incoming edges whose caller is a clone of ORIG.  */
  unsigned unproc_orig_rec_edges;
};
4587 :
4588 : /* Go over edges calling NODE and its thunks and gather information about
4589 : incoming counts so that we know if we need to make any adjustments. */
4590 :
4591 : static void
4592 10 : analyze_clone_icoming_counts (cgraph_node *node,
4593 : desc_incoming_count_struct *desc)
4594 : {
4595 24 : for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
4596 14 : if (cs->caller->thunk)
4597 : {
4598 0 : analyze_clone_icoming_counts (cs->caller, desc);
4599 0 : continue;
4600 : }
4601 : else
4602 : {
4603 14 : if (cs->count.initialized_p ())
4604 14 : desc->count += cs->count.ipa ();
4605 14 : if (!desc->processed_edges->contains (cs)
4606 14 : && cs->caller->clone_of == desc->orig)
4607 4 : desc->unproc_orig_rec_edges++;
4608 : }
4609 10 : }
4610 :
4611 : /* If caller edge counts of a clone created for a self-recursive arithmetic
4612 : jump function must be adjusted because it is coming from a the "seed" clone
4613 : for the first value and so has been excessively scaled back as if it was not
4614 : a recursive call, adjust it so that the incoming counts of NODE match its
4615 : count. NODE is the node or its thunk. */
4616 :
4617 : static void
4618 0 : adjust_clone_incoming_counts (cgraph_node *node,
4619 : desc_incoming_count_struct *desc)
4620 : {
4621 0 : for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
4622 0 : if (cs->caller->thunk)
4623 : {
4624 0 : adjust_clone_incoming_counts (cs->caller, desc);
4625 0 : profile_count sum = profile_count::zero ();
4626 0 : for (cgraph_edge *e = cs->caller->callers; e; e = e->next_caller)
4627 0 : if (e->count.initialized_p ())
4628 0 : sum += e->count.ipa ();
4629 0 : cs->count = cs->count.combine_with_ipa_count (sum);
4630 : }
4631 0 : else if (!desc->processed_edges->contains (cs)
4632 0 : && cs->caller->clone_of == desc->orig
4633 0 : && cs->count.compatible_p (desc->count))
4634 : {
4635 0 : cs->count += desc->count;
4636 0 : if (dump_file)
4637 : {
4638 0 : fprintf (dump_file, " Adjusted count of an incoming edge of "
4639 0 : "a clone %s -> %s to ", cs->caller->dump_name (),
4640 0 : cs->callee->dump_name ());
4641 0 : cs->count.dump (dump_file);
4642 0 : fprintf (dump_file, "\n");
4643 : }
4644 : }
4645 0 : }
4646 :
/* When ORIG_NODE has been cloned for values which have been generated for a
   self-recursive call as a result of an arithmetic pass-through
   jump-functions, adjust its count together with counts of all such clones in
   SELF_GEN_CLONES which also at this point contains ORIG_NODE itself.

   The function sums the counts of the original node and all its clones that
   cannot be attributed to a specific clone because it comes from a
   non-recursive edge.  This sum is then evenly divided between the clones and
   on top of that each one gets all the counts which can be attributed directly
   to it.  */

static void
update_counts_for_self_gen_clones (cgraph_node *orig_node,
				   const vec<cgraph_node *> &self_gen_clones)
{
  profile_count redist_sum = orig_node->count.ipa ();
  /* Nothing to distribute without a nonzero IPA count.  */
  if (!redist_sum.nonzero_p ())
    return;

  if (dump_file)
    fprintf (dump_file, " Updating profile of self recursive clone "
	     "series\n");

  gather_other_count_struct gocs;
  gocs.orig = orig_node;
  gocs.other_count = profile_count::zero ();

  /* For each clone, remember the counts attributable directly to it (coming
     from outside the clone series) and remove them from the sum that will be
     redistributed evenly.  */
  auto_vec <profile_count, 8> other_edges_count;
  for (cgraph_node *n : self_gen_clones)
    {
      gocs.other_count = profile_count::zero ();
      n->call_for_symbol_thunks_and_aliases (gather_count_of_non_rec_edges,
					     &gocs, false);
      other_edges_count.safe_push (gocs.other_count);
      redist_sum -= gocs.other_count;
    }

  hash_set<cgraph_edge *> processed_edges;
  unsigned i = 0;
  for (cgraph_node *n : self_gen_clones)
    {
      /* Even share of the redistributable sum plus the directly
	 attributable counts.  */
      profile_count new_count
	= (redist_sum / self_gen_clones.length () + other_edges_count[i]);
      new_count = lenient_count_portion_handling (new_count, orig_node);
      n->scale_profile_to (new_count);
      for (cgraph_edge *cs = n->callees; cs; cs = cs->next_callee)
	processed_edges.add (cs);

      i++;
    }

  /* There are still going to be edges to ORIG_NODE that have one or more
     clones coming from another node clone in SELF_GEN_CLONES and which we
     scaled by the same amount, which means that the total incoming sum of
     counts to ORIG_NODE will be too high, scale such edges back.  */
  for (cgraph_edge *cs = orig_node->callees; cs; cs = cs->next_callee)
    {
      if (cs->callee->ultimate_alias_target () == orig_node)
	{
	  /* DEN is the number of already-processed edge clones that also
	     target ORIG_NODE; their counts are divided evenly.  */
	  unsigned den = 0;
	  for (cgraph_edge *e = cs; e; e = get_next_cgraph_edge_clone (e))
	    if (e->callee->ultimate_alias_target () == orig_node
		&& processed_edges.contains (e))
	      den++;
	  if (den > 0)
	    for (cgraph_edge *e = cs; e; e = get_next_cgraph_edge_clone (e))
	      if (e->callee->ultimate_alias_target () == orig_node
		  && processed_edges.contains (e)
		  /* If count is not IPA, this adjustment makes verifier
		     unhappy, since we expect bb->count to match e->count.
		     We may add a flag to mark edge counts that have been
		     modified by IPA code, but so far it does not seem
		     to be worth the effort.  With local counts the profile
		     will not propagate at IPA level.  */
		  && e->count.ipa_p ())
		e->count /= den;
	}
    }

  /* Edges from the seeds of the values generated for arithmetic jump-functions
     along self-recursive edges are likely to have fairly low count and so
     edges from them to nodes in the self_gen_clones do not correspond to the
     artificially distributed count of the nodes, the total sum of incoming
     edges to some clones might be too low.  Detect this situation and correct
     it.  */
  for (cgraph_node *n : self_gen_clones)
    {
      if (!n->count.ipa ().nonzero_p ())
	continue;

      desc_incoming_count_struct desc;
      desc.orig = orig_node;
      desc.processed_edges = &processed_edges;
      desc.count = profile_count::zero ();
      desc.unproc_orig_rec_edges = 0;
      analyze_clone_icoming_counts (n, &desc);

      if (n->count.differs_from_p (desc.count))
	{
	  if (n->count > desc.count
	      && desc.unproc_orig_rec_edges > 0)
	    {
	      /* Spread the shortfall evenly among the unprocessed edges.
		 NOTE(review): "desc.count = desc.count /= ..." mixes
		 assignment with compound assignment; the value stored is the
		 same either way, but the redundant form may deserve
		 cleanup.  */
	      desc.count = n->count - desc.count;
	      desc.count = desc.count /= desc.unproc_orig_rec_edges;
	      adjust_clone_incoming_counts (n, &desc);
	    }
	  else if (dump_file)
	    fprintf (dump_file,
		     " Unable to fix up incoming counts for %s.\n",
		     n->dump_name ());
	}
    }

  if (dump_file)
    for (cgraph_node *n : self_gen_clones)
      dump_profile_updates (n, n != orig_node);
  return;
}
4765 :
/* After a specialized NEW_NODE version of ORIG_NODE has been created, update
   their profile information to reflect this.  This function should not be used
   for clones generated for arithmetic pass-through jump functions on a
   self-recursive call graph edge, that situation is handled by
   update_counts_for_self_gen_clones.  */

static void
update_profiling_info (struct cgraph_node *orig_node,
		       struct cgraph_node *new_node)
{
  struct caller_statistics stats;
  profile_count new_sum;
  profile_count remainder, orig_node_count = orig_node->count.ipa ();

  /* Nothing to split when the original has no IPA profile.  */
  if (!orig_node_count.nonzero_p ())
    return;

  if (dump_file)
    {
      fprintf (dump_file, " Updating profile from original count: ");
      orig_node_count.dump (dump_file);
      fprintf (dump_file, "\n");
    }

  /* NEW_SUM is the sum of counts of edges now calling the clone.  */
  init_caller_stats (&stats, new_node);
  new_node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
						false);
  new_sum = stats.count_sum;

  bool orig_edges_processed = false;
  if (new_sum > orig_node_count)
    {
      /* Profile has already gone astray, keep what we have but lower it
	 to global0 adjusted or to local if we have partial training.  */
      if (opt_for_fn (orig_node->decl, flag_profile_partial_training))
	orig_node->make_profile_local ();
      if (new_sum.quality () == AFDO)
	orig_node->make_profile_global0 (GUESSED_GLOBAL0_AFDO);
      else
	orig_node->make_profile_global0 (GUESSED_GLOBAL0_ADJUSTED);
      orig_edges_processed = true;
    }
  else if (stats.rec_count_sum.nonzero_p ())
    {
      int new_nonrec_calls = stats.n_nonrec_calls;
      /* There are self-recursive edges which are likely to bring in the
	 majority of calls but which we must divide in between the original and
	 new node.  */
      init_caller_stats (&stats, orig_node);
      orig_node->call_for_symbol_thunks_and_aliases (gather_caller_stats,
						     &stats, false);
      int orig_nonrec_calls = stats.n_nonrec_calls;
      profile_count orig_nonrec_call_count = stats.count_sum;

      if (orig_node->local)
	{
	  if (!orig_nonrec_call_count.nonzero_p ())
	    {
	      if (dump_file)
		fprintf (dump_file, " The original is local and the only "
			 "incoming edges from non-dead callers with nonzero "
			 "counts are self-recursive, assuming it is cold.\n");
	      /* The NEW_NODE count and counts of all its outgoing edges
		 are still unmodified copies of ORIG_NODE's.  Just clear
		 the latter and bail out.  */
	      if (opt_for_fn (orig_node->decl, flag_profile_partial_training))
		orig_node->make_profile_local ();
	      else if (orig_nonrec_call_count.quality () == AFDO)
		orig_node->make_profile_global0 (GUESSED_GLOBAL0_AFDO);
	      else
		orig_node->make_profile_global0 (GUESSED_GLOBAL0_ADJUSTED);
	      return;
	    }
	}
      else
	{
	  /* Let's behave as if there was another caller that accounts for all
	     the calls that were either indirect or from other compilation
	     units.  */
	  orig_nonrec_calls++;
	  profile_count pretend_caller_count
	    = (orig_node_count - new_sum - orig_nonrec_call_count
	       - stats.rec_count_sum);
	  orig_nonrec_call_count += pretend_caller_count;
	}

      /* Divide all "unexplained" counts roughly proportionally to sums of
	 counts of non-recursive calls.

	 We put rather arbitrary limits on how many counts we claim because the
	 number of non-self-recursive incoming count is only a rough guideline
	 and there are cases (such as mcf) where using it blindly just takes
	 too many.  And if lattices are considered in the opposite order we
	 could also take too few.  */
      profile_count unexp = orig_node_count - new_sum - orig_nonrec_call_count;

      int limit_den = 2 * (orig_nonrec_calls + new_nonrec_calls);
      profile_count new_part = unexp.apply_scale (limit_den - 1, limit_den);
      profile_count den = new_sum + orig_nonrec_call_count;
      if (den.nonzero_p ())
	new_part = MIN (unexp.apply_scale (new_sum, den), new_part);
      new_part = MAX (new_part,
		      unexp.apply_scale (new_nonrec_calls, limit_den));
      if (dump_file)
	{
	  fprintf (dump_file, " Claiming ");
	  new_part.dump (dump_file);
	  fprintf (dump_file, " of unexplained ");
	  unexp.dump (dump_file);
	  fprintf (dump_file, " counts because of self-recursive "
		   "calls\n");
	}
      new_sum += new_part;
      remainder = lenient_count_portion_handling (orig_node_count - new_sum,
						  orig_node);
    }
  else
    remainder = lenient_count_portion_handling (orig_node_count - new_sum,
						orig_node);

  new_node->scale_profile_to (new_sum);

  if (!orig_edges_processed)
    orig_node->scale_profile_to (remainder);

  if (dump_file)
    {
      dump_profile_updates (new_node, true);
      dump_profile_updates (orig_node, false);
    }
}
4897 :
4898 : /* Update the respective profile of specialized NEW_NODE and the original
4899 : ORIG_NODE after additional edges with cumulative count sum REDIRECTED_SUM
4900 : have been redirected to the specialized version. */
4901 :
4902 : static void
4903 0 : update_specialized_profile (struct cgraph_node *new_node,
4904 : struct cgraph_node *orig_node,
4905 : profile_count redirected_sum)
4906 : {
4907 0 : if (dump_file)
4908 : {
4909 0 : fprintf (dump_file, " the sum of counts of redirected edges is ");
4910 0 : redirected_sum.dump (dump_file);
4911 0 : fprintf (dump_file, "\n old ipa count of the original node is ");
4912 0 : orig_node->count.dump (dump_file);
4913 0 : fprintf (dump_file, "\n");
4914 : }
4915 0 : if (!orig_node->count.ipa ().nonzero_p ()
4916 0 : || !redirected_sum.nonzero_p ())
4917 0 : return;
4918 :
4919 0 : orig_node->scale_profile_to
4920 0 : (lenient_count_portion_handling (orig_node->count.ipa () - redirected_sum,
4921 : orig_node));
4922 :
4923 0 : new_node->scale_profile_to (new_node->count.ipa () + redirected_sum);
4924 :
4925 0 : if (dump_file)
4926 : {
4927 0 : dump_profile_updates (new_node, true);
4928 0 : dump_profile_updates (orig_node, false);
4929 : }
4930 : }
4931 :
static void adjust_references_in_caller (cgraph_edge *cs,
					 symtab_node *symbol, int index);

/* Simple structure to pass a symbol and index (with same meaning as parameters
   of adjust_references_in_caller) through a void* parameter of a
   call_for_symbol_thunks_and_aliases callback.  */
struct symbol_and_index_together
{
  /* Symbol of the variable whose address reference may be removable.  */
  symtab_node *symbol;
  /* Index of the formal parameter through which it is passed.  */
  int index;
};
4943 :
4944 : /* Worker callback of call_for_symbol_thunks_and_aliases to recursively call
4945 : adjust_references_in_caller on edges up in the call-graph, if necessary. */
4946 : static bool
4947 8 : adjust_refs_in_act_callers (struct cgraph_node *node, void *data)
4948 : {
4949 8 : symbol_and_index_together *pack = (symbol_and_index_together *) data;
4950 38 : for (cgraph_edge *cs = node->callers; cs; cs = cs->next_caller)
4951 30 : if (!cs->caller->thunk)
4952 30 : adjust_references_in_caller (cs, pack->symbol, pack->index);
4953 8 : return false;
4954 : }
4955 :
/* At INDEX of a function being called by CS there is an ADDR_EXPR of a
   variable which is only dereferenced and which is represented by SYMBOL.  See
   if we can remove ADDR reference in callers associated with the call.  */

static void
adjust_references_in_caller (cgraph_edge *cs, symtab_node *symbol, int index)
{
  ipa_edge_args *args = ipa_edge_args_sum->get (cs);
  ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, index);
  if (jfunc->type == IPA_JF_CONST)
    {
      /* The address is passed directly as a constant; the matching ADDR
	 reference in the caller can simply be removed.  */
      ipa_ref *to_del = cs->caller->find_reference (symbol, cs->call_stmt,
						    cs->lto_stmt_uid,
						    IPA_REF_ADDR);
      if (!to_del)
	return;
      to_del->remove_reference ();
      ipa_zap_jf_refdesc (jfunc);
      if (dump_file)
	fprintf (dump_file, " Removed a reference from %s to %s.\n",
		 cs->caller->dump_name (), symbol->dump_name ());
      return;
    }

  /* Otherwise only a simple pass-through whose refdesc has not been
     decremented yet can be handled.  */
  if (jfunc->type != IPA_JF_PASS_THROUGH
      || ipa_get_jf_pass_through_operation (jfunc) != NOP_EXPR
      || ipa_get_jf_pass_through_refdesc_decremented (jfunc))
    return;

  int fidx = ipa_get_jf_pass_through_formal_id (jfunc);
  cgraph_node *caller = cs->caller;
  ipa_node_params *caller_info = ipa_node_params_sum->get (caller);
  /* TODO: This consistency check may be too big and not really
     that useful.  Consider removing it.  */
  tree cst;
  if (caller_info->ipcp_orig_node)
    cst = caller_info->known_csts[fidx];
  else
    {
      ipcp_lattice<tree> *lat = ipa_get_scalar_lat (caller_info, fidx);
      gcc_assert (lat->is_single_const ());
      cst = lat->values->value;
    }
  gcc_assert (TREE_CODE (cst) == ADDR_EXPR
	      && (symtab_node::get (get_base_address (TREE_OPERAND (cst, 0)))
		  == symbol));

  /* Decrement the number of controlled uses in the caller; only when it
     drops to zero can the caller's own reference be removed as well.  */
  int cuses = ipa_get_controlled_uses (caller_info, fidx);
  if (cuses == IPA_UNDESCRIBED_USE)
    return;
  gcc_assert (cuses > 0);
  cuses--;
  ipa_set_controlled_uses (caller_info, fidx, cuses);
  ipa_set_jf_pass_through_refdesc_decremented (jfunc, true);
  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, " Controlled uses of parameter %i of %s dropped "
	     "to %i.\n", fidx, caller->dump_name (), cuses);
  if (cuses)
    return;

  if (caller_info->ipcp_orig_node)
    {
      /* Cloning machinery has created a reference here, we need to either
	 remove it or change it to a read one.  */
      ipa_ref *to_del = caller->find_reference (symbol, NULL, 0, IPA_REF_ADDR);
      if (to_del)
	{
	  to_del->remove_reference ();
	  if (dump_file)
	    fprintf (dump_file, " Removed a reference from %s to %s.\n",
		     cs->caller->dump_name (), symbol->dump_name ());
	  if (ipa_get_param_load_dereferenced (caller_info, fidx))
	    {
	      caller->create_reference (symbol, IPA_REF_LOAD, NULL);
	      if (dump_file)
		fprintf (dump_file,
			 " ...and replaced it with LOAD one.\n");
	    }
	}
    }

  /* Recurse into the callers of CALLER to propagate the removal further up
     the call graph.  */
  symbol_and_index_together pack;
  pack.symbol = symbol;
  pack.index = fidx;
  if (caller->can_change_signature)
    caller->call_for_symbol_thunks_and_aliases (adjust_refs_in_act_callers,
						&pack, true);
}
5044 :

/* Return true if we would like to remove a parameter from NODE when cloning it
   with KNOWN_CSTS scalar constants.  */

static bool
want_remove_some_param_p (cgraph_node *node, vec<tree> known_csts)
{
  auto_vec<bool, 16> surviving;
  bool filled_vec = false;
  ipa_node_params *info = ipa_node_params_sum->get (node);
  int i, count = ipa_get_param_count (info);

  for (i = 0; i < count; i++)
    {
      /* A parameter is a removal candidate when it is either unused or
	 replaced by a known constant.  */
      if (!known_csts[i] && ipa_is_param_used (info, i))
	continue;

      if (!filled_vec)
	{
	  clone_info *info = clone_info::get (node);
	  /* With no previous adjustments on record any removable parameter
	     still survives, so removal is worthwhile.  */
	  if (!info || !info->param_adjustments)
	    return true;
	  info->param_adjustments->get_surviving_params (&surviving);
	  filled_vec = true;
	}
      /* NOTE(review): the comparison below looks suspicious -- if
	 length () < i were ever true, surviving[i] would read out of
	 bounds; presumably length () > (unsigned) i was intended.  Verify
	 against get_surviving_params before changing.  */
      if (surviving.length() < (unsigned) i && surviving[i])
	return true;
    }
  return false;
}
5075 :
/* Create a specialized version of NODE with known constants in KNOWN_CSTS,
   known contexts in KNOWN_CONTEXTS and known aggregate values in AGGVALS and
   redirect all edges in CALLERS to it.  */

static struct cgraph_node *
create_specialized_node (struct cgraph_node *node,
			 vec<tree> known_csts,
			 vec<ipa_polymorphic_call_context> known_contexts,
			 vec<ipa_argagg_value, va_gc> *aggvals,
			 vec<cgraph_edge *> &callers)
{
  ipa_node_params *new_info, *info = ipa_node_params_sum->get (node);
  vec<ipa_replace_map *, va_gc> *replace_trees = NULL;
  vec<ipa_adjusted_param, va_gc> *new_params = NULL;
  struct cgraph_node *new_node;
  int i, count = ipa_get_param_count (info);
  clone_info *cinfo = clone_info::get (node);
  ipa_param_adjustments *old_adjustments = cinfo
					   ? cinfo->param_adjustments : NULL;
  ipa_param_adjustments *new_adjustments;
  gcc_assert (!info->ipcp_orig_node);
  gcc_assert (node->can_change_signature
	      || !old_adjustments);

  if (old_adjustments)
    {
      /* At the moment all IPA optimizations should use the number of
	 parameters of the prevailing decl as the m_always_copy_start.
	 Handling any other value would complicate the code below, so for the
	 time being let's only assert it is so.  */
      gcc_assert (old_adjustments->m_always_copy_start == count
		  || old_adjustments->m_always_copy_start < 0);
      int old_adj_count = vec_safe_length (old_adjustments->m_adj_params);
      for (i = 0; i < old_adj_count; i++)
	{
	  ipa_adjusted_param *old_adj = &(*old_adjustments->m_adj_params)[i];
	  /* Keep a previously surviving parameter only if it cannot be
	     removed now (signature fixed, not a plain copy, or still used
	     without a known constant).  */
	  if (!node->can_change_signature
	      || old_adj->op != IPA_PARAM_OP_COPY
	      || (!known_csts[old_adj->base_index]
		  && ipa_is_param_used (info, old_adj->base_index)))
	    {
	      ipa_adjusted_param new_adj = *old_adj;

	      new_adj.prev_clone_adjustment = true;
	      new_adj.prev_clone_index = i;
	      vec_safe_push (new_params, new_adj);
	    }
	}
      bool skip_return = old_adjustments->m_skip_return;
      new_adjustments = (new (ggc_alloc <ipa_param_adjustments> ())
			 ipa_param_adjustments (new_params, count,
						skip_return));
    }
  else if (node->can_change_signature
	   && want_remove_some_param_p (node, known_csts))
    {
      /* First-time cloning: keep only parameters that are still used and
	 have no known constant.  */
      ipa_adjusted_param adj;
      memset (&adj, 0, sizeof (adj));
      adj.op = IPA_PARAM_OP_COPY;
      for (i = 0; i < count; i++)
	if (!known_csts[i] && ipa_is_param_used (info, i))
	  {
	    adj.base_index = i;
	    adj.prev_clone_index = i;
	    vec_safe_push (new_params, adj);
	  }
      new_adjustments = (new (ggc_alloc <ipa_param_adjustments> ())
			 ipa_param_adjustments (new_params, count, false));
    }
  else
    new_adjustments = NULL;

  /* Self-recursive calls are handled separately after cloning; pull them out
     of CALLERS.  */
  auto_vec<cgraph_edge *, 2> self_recursive_calls;
  for (i = callers.length () - 1; i >= 0; i--)
    {
      cgraph_edge *cs = callers[i];
      if (cs->caller == node)
	{
	  self_recursive_calls.safe_push (cs);
	  callers.unordered_remove (i);
	}
    }
  replace_trees = cinfo ? vec_safe_copy (cinfo->tree_map) : NULL;
  for (i = 0; i < count; i++)
    {
      tree t = known_csts[i];
      if (!t)
	continue;

      gcc_checking_assert (TREE_CODE (t) != TREE_BINFO);

      /* For an ADDR_EXPR of a variable that is only dereferenced, create a
	 load reference instead of an address one and try to drop address
	 references in the callers.  */
      bool load_ref = false;
      symtab_node *ref_symbol;
      if (TREE_CODE (t) == ADDR_EXPR)
	{
	  tree base = get_base_address (TREE_OPERAND (t, 0));
	  if (TREE_CODE (base) == VAR_DECL
	      && ipa_get_controlled_uses (info, i) == 0
	      && ipa_get_param_load_dereferenced (info, i)
	      && (ref_symbol = symtab_node::get (base)))
	    {
	      load_ref = true;
	      if (node->can_change_signature)
		for (cgraph_edge *caller : callers)
		  adjust_references_in_caller (caller, ref_symbol, i);
	    }
	}

      ipa_replace_map *replace_map = get_replacement_map (info, t, i, load_ref);
      if (replace_map)
	vec_safe_push (replace_trees, replace_map);
    }

  unsigned &suffix_counter = clone_num_suffixes->get_or_insert (
			       IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (
				 node->decl)));
  new_node = node->create_virtual_clone (callers, replace_trees,
					 new_adjustments, "constprop",
					 suffix_counter);
  suffix_counter++;

  bool have_self_recursive_calls = !self_recursive_calls.is_empty ();
  for (unsigned j = 0; j < self_recursive_calls.length (); j++)
    {
      cgraph_edge *cs = get_next_cgraph_edge_clone (self_recursive_calls[j]);
      /* Cloned edges can disappear during cloning as speculation can be
	 resolved, check that we have one and that it comes from the last
	 cloning.  */
      if (cs && cs->caller == new_node)
	cs->redirect_callee_duplicating_thunks (new_node);
      /* Any future code that would make more than one clone of an outgoing
	 edge would confuse this mechanism, so let's check that does not
	 happen.  */
      gcc_checking_assert (!cs
			   || !get_next_cgraph_edge_clone (cs)
			   || get_next_cgraph_edge_clone (cs)->caller != new_node);
    }
  if (have_self_recursive_calls)
    new_node->expand_all_artificial_thunks ();

  ipa_set_node_agg_value_chain (new_node, aggvals);
  for (const ipa_argagg_value &av : aggvals)
    new_node->maybe_create_reference (av.value, NULL);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, " the new node is %s.\n", new_node->dump_name ());
      if (known_contexts.exists ())
	{
	  for (i = 0; i < count; i++)
	    if (!known_contexts[i].useless_p ())
	      {
		fprintf (dump_file, " known ctx %i is ", i);
		known_contexts[i].dump (dump_file);
	      }
	}
      if (aggvals)
	{
	  fprintf (dump_file, " Aggregate replacements:");
	  ipa_argagg_value_list avs (aggvals);
	  avs.dump (dump_file);
	}
    }

  /* Record the specialization in the new node's summaries so that later
     phases know which constants it was built for.  */
  new_info = ipa_node_params_sum->get (new_node);
  new_info->ipcp_orig_node = node;
  new_node->ipcp_clone = true;
  new_info->known_csts = known_csts;
  new_info->known_contexts = known_contexts;

  ipcp_discover_new_direct_edges (new_node, known_csts, known_contexts,
				  aggvals);

  return new_node;
}
5251 :
5252 : /* Return true if JFUNC, which describes the i-th parameter of call CS, is a
5253 : pass-through function to itself when the cgraph_node involved is not an
5254 : IPA-CP clone. When SIMPLE is true, further check if JFUNC is a simple
5255 : no-operation pass-through. */
5256 :
5257 : static bool
5258 733054 : self_recursive_pass_through_p (cgraph_edge *cs, ipa_jump_func *jfunc, int i,
5259 : bool simple = true)
5260 : {
5261 733054 : enum availability availability;
5262 733054 : if (jfunc->type == IPA_JF_PASS_THROUGH
5263 80721 : && cs->caller == cs->callee->function_symbol (&availability)
5264 19334 : && availability > AVAIL_INTERPOSABLE
5265 19334 : && (!simple || ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
5266 19334 : && ipa_get_jf_pass_through_formal_id (jfunc) == i
5267 19334 : && ipa_node_params_sum->get (cs->caller)
5268 752388 : && !ipa_node_params_sum->get (cs->caller)->ipcp_orig_node)
5269 : return true;
5270 : return false;
5271 : }
5272 :
5273 : /* Return true if JFUNC, which describes the i-th parameter of call CS, is an
5274 : ancestor function with zero offset to itself when the cgraph_node involved
5275 : is not an IPA-CP clone. */
5276 :
5277 : static bool
5278 713732 : self_recursive_ancestor_p (cgraph_edge *cs, ipa_jump_func *jfunc, int i)
5279 : {
5280 713732 : enum availability availability;
5281 713732 : if (jfunc->type == IPA_JF_ANCESTOR
5282 3249 : && cs->caller == cs->callee->function_symbol (&availability)
5283 1 : && availability > AVAIL_INTERPOSABLE
5284 1 : && ipa_get_jf_ancestor_offset (jfunc) == 0
5285 1 : && ipa_get_jf_ancestor_formal_id (jfunc) == i
5286 1 : && ipa_node_params_sum->get (cs->caller)
5287 713733 : && !ipa_node_params_sum->get (cs->caller)->ipcp_orig_node)
5288 : return true;
5289 : return false;
5290 : }
5291 :
5292 : /* Return true if JFUNC, which describes a part of an aggregate represented or
5293 : pointed to by the i-th parameter of call CS, is a pass-through function to
5294 : itself when the cgraph_node involved is not an IPA-CP clone. When
5295 : SIMPLE is true, further check if JFUNC is a simple no-operation
5296 : pass-through. */
5297 :
5298 : static bool
5299 315996 : self_recursive_agg_pass_through_p (const cgraph_edge *cs,
5300 : const ipa_agg_jf_item *jfunc,
5301 : int i, bool simple = true)
5302 : {
5303 315996 : enum availability availability;
5304 315996 : if (cs->caller == cs->callee->function_symbol (&availability)
5305 3758 : && availability > AVAIL_INTERPOSABLE
5306 3758 : && jfunc->jftype == IPA_JF_LOAD_AGG
5307 599 : && jfunc->offset == jfunc->value.load_agg.offset
5308 599 : && (!simple || jfunc->value.pass_through.operation == NOP_EXPR)
5309 599 : && jfunc->value.pass_through.formal_id == i
5310 593 : && useless_type_conversion_p (jfunc->value.load_agg.type, jfunc->type)
5311 593 : && ipa_node_params_sum->get (cs->caller)
5312 316589 : && !ipa_node_params_sum->get (cs->caller)->ipcp_orig_node)
5313 : return true;
5314 : return false;
5315 : }
5316 :
5317 : /* Given a NODE, and a subset of its CALLERS, try to populate blank slots in
5318 : KNOWN_CSTS with constants that are also known for all of the CALLERS. */
5319 :
5320 : static void
5321 161488 : find_scalar_values_for_callers_subset (vec<tree> &known_csts,
5322 : ipa_node_params *info,
5323 : const vec<cgraph_edge *> &callers)
5324 : {
5325 161488 : int i, count = ipa_get_param_count (info);
5326 :
5327 688654 : for (i = 0; i < count; i++)
5328 : {
 : /* A bottom lattice means no constant can be known for this parameter.  */
5329 527166 : ipcp_lattice<tree> *lat = ipa_get_scalar_lat (info, i);
5330 527166 : if (lat->bottom)
5331 527166 : continue;
 : /* A single-constant lattice holds regardless of which subset of callers
 : we look at, so take it directly.  */
5332 513476 : if (lat->is_single_const ())
5333 : {
5334 29309 : known_csts[i] = lat->values->value;
5335 29309 : continue;
5336 : }
5337 :
5338 484167 : struct cgraph_edge *cs;
5339 484167 : tree newval = NULL_TREE;
5340 484167 : int j;
5341 484167 : bool first = true;
5342 484167 : tree type = ipa_get_type (info, i);
5343 :
 : /* Otherwise intersect the values brought by all edges in CALLERS;
 : NEWVAL survives only if every caller agrees on the same constant.  */
5344 919254 : FOR_EACH_VEC_ELT (callers, j, cs)
5345 : {
5346 732502 : struct ipa_jump_func *jump_func;
5347 732502 : tree t;
5348 :
 : /* Bail out for this parameter when an edge carries no usable jump
 : function (missing summary, too few arguments, or argument zero
 : passing through a thunk).  */
5349 732502 : ipa_edge_args *args = ipa_edge_args_sum->get (cs);
5350 732502 : if (!args
5351 732502 : || i >= ipa_get_cs_argument_count (args)
5352 1464977 : || (i == 0
5353 172396 : && call_passes_through_thunk (cs)))
5354 : {
5355 : newval = NULL_TREE;
5356 : break;
5357 : }
5358 732407 : jump_func = ipa_get_ith_jump_func (args, i);
5359 :
5360 : /* Besides simple pass-through jump function, arithmetic jump
5361 : function could also introduce argument-direct-pass-through for
5362 : self-feeding recursive call. For example,
5363 :
5364 : fn (int i)
5365 : {
5366 : fn (i & 1);
5367 : }
5368 :
5369 : Given that i is 0, recursive propagation via (i & 1) also gets
5370 : 0. */
5371 732407 : if (self_recursive_pass_through_p (cs, jump_func, i, false))
5372 : {
5373 18682 : gcc_assert (newval);
5374 18682 : enum tree_code opcode
5375 18682 : = ipa_get_jf_pass_through_operation (jump_func);
5376 18682 : tree op_type = (opcode == NOP_EXPR) ? NULL_TREE
5377 50 : : ipa_get_jf_pass_through_op_type (jump_func);
5378 18682 : t = ipa_get_jf_arith_result (opcode, newval,
5379 : ipa_get_jf_pass_through_operand (jump_func),
5380 : op_type);
5381 18682 : t = ipacp_value_safe_for_type (type, t);
5382 : }
 : /* A zero-offset self-recursive ancestor brings no new information,
 : skip the edge entirely.  */
5383 713725 : else if (self_recursive_ancestor_p (cs, jump_func, i))
5384 0 : continue;
5385 : else
5386 713725 : t = ipa_value_from_jfunc (ipa_node_params_sum->get (cs->caller),
5387 : jump_func, type);
 : /* Give up on a missing value or on any disagreement between edges.  */
5388 732407 : if (!t
5389 455387 : || (newval
5390 243998 : && !values_equal_for_ipcp_p (t, newval))
5391 1167494 : || (!first && !newval))
5392 : {
5393 : newval = NULL_TREE;
5394 : break;
5395 : }
5396 : else
5397 : newval = t;
5398 : first = false;
5399 : }
5400 :
5401 484167 : if (newval)
5402 186752 : known_csts[i] = newval;
5403 : }
5404 161488 : }
5405 :
5406 : /* Given a NODE and a subset of its CALLERS, try to populate blank slots in
5407 : KNOWN_CONTEXTS with polymorphic contexts that are also known for all of the
5408 : CALLERS. */
5409 :
5410 : static void
5411 161488 : find_contexts_for_caller_subset (vec<ipa_polymorphic_call_context>
5412 : &known_contexts,
5413 : ipa_node_params *info,
5414 : const vec<cgraph_edge *> &callers)
5415 : {
5416 161488 : int i, count = ipa_get_param_count (info);
5417 :
5418 688638 : for (i = 0; i < count; i++)
5419 : {
 : /* Unused parameters cannot be the basis of devirtualization.  */
5420 527162 : if (!ipa_is_param_used (info, i))
5421 28837 : continue;
5422 :
5423 500402 : ipcp_lattice<ipa_polymorphic_call_context> *ctxlat
5424 500402 : = ipa_get_poly_ctx_lat (info, i);
5425 500402 : if (ctxlat->bottom)
5426 959 : continue;
 : /* A single context in the lattice holds for any caller subset; record
 : it, lazily growing KNOWN_CONTEXTS on first use.  */
5427 499443 : if (ctxlat->is_single_const ())
5428 : {
5429 1118 : if (!ctxlat->values->value.useless_p ())
5430 : {
5431 1118 : if (known_contexts.is_empty ())
5432 1057 : known_contexts.safe_grow_cleared (count, true);
5433 1118 : known_contexts[i] = ctxlat->values->value;
5434 : }
5435 1118 : continue;
5436 : }
5437 :
5438 498325 : cgraph_edge *cs;
5439 498325 : ipa_polymorphic_call_context newval;
5440 498325 : bool first = true;
5441 498325 : int j;
5442 :
 : /* Otherwise meet the contexts from all edges in CALLERS.  */
5443 503318 : FOR_EACH_VEC_ELT (callers, j, cs)
5444 : {
5445 499652 : ipa_edge_args *args = ipa_edge_args_sum->get (cs);
 : /* Note: a missing summary or too few arguments abandons the whole
 : function, not just the current parameter.  */
5446 499652 : if (!args
5447 999304 : || i >= ipa_get_cs_argument_count (args))
5448 12 : return;
5449 499640 : ipa_jump_func *jfunc = ipa_get_ith_jump_func (args, i);
5450 499640 : ipa_polymorphic_call_context ctx;
5451 499640 : ctx = ipa_context_from_jfunc (ipa_node_params_sum->get (cs->caller),
5452 : cs, i, jfunc);
5453 499640 : if (first)
5454 : {
5455 498313 : newval = ctx;
5456 498313 : first = false;
5457 : }
5458 : else
5459 1327 : newval.meet_with (ctx);
 : /* Once the meet degenerates, further edges cannot improve it.  */
5460 996668 : if (newval.useless_p ())
5461 : break;
5462 : }
5463 :
5464 996626 : if (!newval.useless_p ())
5465 : {
5466 3666 : if (known_contexts.is_empty ())
5467 3449 : known_contexts.safe_grow_cleared (count, true);
5468 3666 : known_contexts[i] = newval;
5469 : }
5470 :
5471 : }
5472 : }
5473 :
5474 : /* Push all aggregate values coming along edge CS for parameter number INDEX to
5475 : RES. If INTERIM is non-NULL, it contains the current interim state of
5476 : collected aggregate values which can be used to compute values passed over
5477 : self-recursive edges.
5478 :
5479 : This is basically one iteration of push_agg_values_from_edge over one
5480 : parameter, which allows for simpler early returns. */
5481 :
5482 : static void
5483 581442 : push_agg_values_for_index_from_edge (struct cgraph_edge *cs, int index,
5484 : vec<ipa_argagg_value> *res,
5485 : const ipa_argagg_value_list *interim)
5486 : {
 : /* First determine whether the aggregate contents come unchanged (or with a
 : known offset adjustment) from a parameter SRC_IDX of the caller.  */
5487 581442 : bool agg_values_from_caller = false;
5488 581442 : bool agg_jf_preserved = false;
5489 581442 : unsigned unit_delta = UINT_MAX;
5490 581442 : int src_idx = -1;
5491 581442 : ipa_jump_func *jfunc = ipa_get_ith_jump_func (ipa_edge_args_sum->get (cs),
5492 : index);
5493 :
5494 581442 : if (jfunc->type == IPA_JF_PASS_THROUGH
5495 581442 : && ipa_get_jf_pass_through_operation (jfunc) == NOP_EXPR)
5496 : {
5497 57339 : agg_values_from_caller = true;
5498 57339 : agg_jf_preserved = ipa_get_jf_pass_through_agg_preserved (jfunc);
5499 57339 : src_idx = ipa_get_jf_pass_through_formal_id (jfunc);
5500 57339 : unit_delta = 0;
5501 : }
5502 524103 : else if (jfunc->type == IPA_JF_ANCESTOR
5503 524103 : && ipa_get_jf_ancestor_agg_preserved (jfunc))
5504 : {
5505 405 : agg_values_from_caller = true;
5506 405 : agg_jf_preserved = true;
5507 405 : src_idx = ipa_get_jf_ancestor_formal_id (jfunc);
5508 405 : unit_delta = ipa_get_jf_ancestor_offset (jfunc) / BITS_PER_UNIT;
5509 : }
5510 :
5511 581442 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
5512 581442 : if (agg_values_from_caller)
5513 : {
 : /* If the caller is an IPA-CP clone, take the values from its
 : transformation summary; otherwise consult the caller's lattices.  */
5514 57744 : if (caller_info->ipcp_orig_node)
5515 : {
5516 11895 : struct cgraph_node *orig_node = caller_info->ipcp_orig_node;
5517 11895 : ipcp_transformation *ts
5518 11895 : = ipcp_get_transformation_summary (cs->caller);
5519 11895 : ipa_node_params *orig_info = ipa_node_params_sum->get (orig_node);
5520 11895 : ipcp_param_lattices *orig_plats
5521 11895 : = ipa_get_parm_lattices (orig_info, src_idx);
5522 11895 : if (ts
5523 11895 : && orig_plats->aggs
5524 3137 : && (agg_jf_preserved || !orig_plats->aggs_by_ref))
5525 : {
5526 2656 : ipa_argagg_value_list src (ts);
5527 2656 : src.push_adjusted_values (src_idx, index, unit_delta, res);
5528 2656 : return;
5529 : }
5530 : }
5531 : else
5532 : {
5533 45849 : ipcp_param_lattices *src_plats
5534 45849 : = ipa_get_parm_lattices (caller_info, src_idx);
5535 45849 : if (src_plats->aggs
5536 2450 : && !src_plats->aggs_bottom
5537 2450 : && (agg_jf_preserved || !src_plats->aggs_by_ref))
5538 : {
 : /* On a self-recursive edge, the values already gathered in
 : INTERIM are exactly what is passed around the cycle.  */
5539 1460 : if (interim && (self_recursive_pass_through_p (cs, jfunc, index)
5540 7 : || self_recursive_ancestor_p (cs, jfunc, index)))
5541 : {
5542 641 : interim->push_adjusted_values (src_idx, index, unit_delta,
5543 : res);
5544 641 : return;
5545 : }
5546 819 : if (!src_plats->aggs_contain_variable)
5547 : {
5548 86 : push_agg_values_from_plats (src_plats, index, unit_delta,
5549 : res);
5550 86 : return;
5551 : }
5552 : }
5553 : }
5554 : }
5555 :
 : /* Fall back to the individual items of the aggregate jump function.  */
5556 578059 : if (!jfunc->agg.items)
5557 : return;
5558 197970 : bool first = true;
5559 197970 : unsigned prev_unit_offset = 0;
5560 1048012 : for (const ipa_agg_jf_item &agg_jf : *jfunc->agg.items)
5561 : {
5562 850042 : tree value, srcvalue;
5563 : /* Besides simple pass-through aggregate jump function, arithmetic
5564 : aggregate jump function could also bring same aggregate value as
5565 : parameter passed-in for self-feeding recursive call. For example,
5566 :
5567 : fn (int *i)
5568 : {
5569 : int j = *i & 1;
5570 : fn (&j);
5571 : }
5572 :
5573 : Given that *i is 0, recursive propagation via (*i & 1) also gets 0. */
5574 850042 : if (interim
5575 315996 : && self_recursive_agg_pass_through_p (cs, &agg_jf, index, false)
5576 850635 : && (srcvalue = interim->get_value(index,
5577 593 : agg_jf.offset / BITS_PER_UNIT)))
5578 : {
5579 1174 : value = ipa_get_jf_arith_result (agg_jf.value.pass_through.operation,
5580 : srcvalue,
5581 587 : agg_jf.value.pass_through.operand,
5582 587 : agg_jf.value.pass_through.op_type)
5583 587 : value = ipacp_value_safe_for_type (agg_jf.type, value);
5584 : }
5585 : else
5586 849455 : value = ipa_agg_value_from_jfunc (caller_info, cs->caller,
5587 : &agg_jf);
5588 850042 : if (value)
5589 : {
5590 828429 : struct ipa_argagg_value iav;
5591 828429 : iav.value = value;
5592 828429 : iav.unit_offset = agg_jf.offset / BITS_PER_UNIT;
5593 828429 : iav.index = index;
5594 828429 : iav.by_ref = jfunc->agg.by_ref;
5595 828429 : iav.killed = false;
5596 :
 : /* Items in the jump function are sorted by offset; the assert
 : guards the invariant RES relies on.  */
5597 828429 : gcc_assert (first
5598 : || iav.unit_offset > prev_unit_offset);
5599 828429 : prev_unit_offset = iav.unit_offset;
5600 828429 : first = false;
5601 :
5602 828429 : res->safe_push (iav);
5603 : }
5604 : }
5605 : return;
5606 : }
5607 :
5608 : /* Push all aggregate values coming along edge CS to RES. DEST_INFO is the
5609 : description of ultimate callee of CS or the one it was cloned from (the
5610 : summary where lattices are). If INTERIM is non-NULL, it contains the
5611 : current interim state of collected aggregate values which can be used to
5612 : compute values passed over self-recursive edges (if OPTIMIZE_SELF_RECURSION
5613 : is true) and to skip values which clearly will not be part of intersection
5614 : with INTERIM. */
5615 :
5616 : static void
5617 214682 : push_agg_values_from_edge (struct cgraph_edge *cs,
5618 : ipa_node_params *dest_info,
5619 : vec<ipa_argagg_value> *res,
5620 : const ipa_argagg_value_list *interim,
5621 : bool optimize_self_recursion)
5622 : {
5623 214682 : ipa_edge_args *args = ipa_edge_args_sum->get (cs);
5624 214682 : if (!args)
5625 : return;
5626 :
 : /* Only parameters which actually receive an argument can carry values.  */
5627 429364 : int count = MIN (ipa_get_param_count (dest_info),
5628 : ipa_get_cs_argument_count (args));
5629 :
5630 214682 : unsigned interim_index = 0;
5631 878747 : for (int index = 0; index < count; index++)
5632 : {
 : /* When INTERIM is given, skip parameters for which it holds no live
 : entry -- they cannot survive the later intersection anyway.
 : INTERIM_INDEX only moves forward because INTERIM is sorted.  */
5633 664065 : if (interim)
5634 : {
5635 254787 : while (interim_index < interim->m_elts.size ()
5636 232942 : && interim->m_elts[interim_index].value
5637 446903 : && interim->m_elts[interim_index].index < index)
5638 117861 : interim_index++;
5639 189358 : if (interim_index >= interim->m_elts.size ()
5640 136926 : || interim->m_elts[interim_index].index > index)
5641 52432 : continue;
5642 : }
5643 :
5644 611633 : ipcp_param_lattices *plats = ipa_get_parm_lattices (dest_info, index);
5645 611633 : if (!ipa_is_param_used (dest_info, index)
5646 611633 : || plats->aggs_bottom)
5647 30191 : continue;
 : /* INTERIM is only passed on when self-recursion optimization is on.  */
5648 581490 : push_agg_values_for_index_from_edge (cs, index, res,
5649 : optimize_self_recursion ? interim
5650 : : NULL);
5651 : }
5652 : }
5653 :
5654 :
5655 : /* Look at edges in CALLERS and collect all known aggregate values that arrive
5656 : from all of them into INTERIM. Return how many there are. */
5657 :
5658 : static unsigned int
5659 161488 : find_aggregate_values_for_callers_subset_1 (vec<ipa_argagg_value> &interim,
5660 : struct cgraph_node *node,
5661 : const vec<cgraph_edge *> &callers)
5662 : {
 : /* Lattices always live on the original node of an IPA-CP clone.  */
5663 161488 : ipa_node_params *dest_info = ipa_node_params_sum->get (node);
5664 161488 : if (dest_info->ipcp_orig_node)
5665 0 : dest_info = ipa_node_params_sum->get (dest_info->ipcp_orig_node);
5666 :
5667 : /* gather_edges_for_value puts a non-recursive call into the first element of
5668 : callers if it can. */
5669 161488 : push_agg_values_from_edge (callers[0], dest_info, &interim, NULL, true);
5670 :
5671 245292 : unsigned valid_entries = interim.length ();
5672 161488 : if (!valid_entries)
5673 : return 0;
5674 :
 : /* Intersect INTERIM with the values brought by every remaining caller,
 : stopping as soon as nothing survives.  */
5675 79355 : unsigned caller_count = callers.length();
5676 130830 : for (unsigned i = 1; i < caller_count; i++)
5677 : {
5678 53146 : auto_vec<ipa_argagg_value, 32> last;
5679 53146 : ipa_argagg_value_list avs (&interim);
5680 53146 : push_agg_values_from_edge (callers[i], dest_info, &last, &avs, true);
5681 :
5682 53146 : valid_entries = intersect_argaggs_with (interim, last);
5683 53146 : if (!valid_entries)
5684 1671 : return 0;
5685 53146 : }
5686 :
5687 : return valid_entries;
5688 : }
5689 :
5690 : /* Look at edges in CALLERS and collect all known aggregate values that arrive
5691 : from all of them and push those valid in all of them into RES. Do
5692 : nothing if there are none. */
5693 :
5694 : static void
5695 144128 : find_aggregate_values_for_callers_subset (vec<ipa_argagg_value> &res,
5696 : struct cgraph_node *node,
5697 : const vec<cgraph_edge *> &callers)
5698 : {
5699 144128 : auto_vec<ipa_argagg_value, 32> interim;
5700 144128 : unsigned valid_entries
5701 144128 : = find_aggregate_values_for_callers_subset_1 (interim, node, callers);
5702 144128 : if (!valid_entries)
5703 : return;
5704 :
 : /* Entries invalidated by the intersection have a NULL value; copy only
 : the surviving ones into RES.  */
5705 711678 : for (const ipa_argagg_value &av : interim)
5706 494583 : if (av.value)
5707 467024 : res.safe_push(av);
5708 : return;
5709 144128 : }
5710 :
5711 : /* Look at edges in CALLERS and collect all known aggregate values that arrive
5712 : from all of them and return them in a garbage-collected vector. Return
5713 : nullptr if there are none. */
5714 :
5715 : static struct vec<ipa_argagg_value, va_gc> *
5716 17360 : find_aggregate_values_for_callers_subset_gc (struct cgraph_node *node,
5717 : const vec<cgraph_edge *> &callers)
5718 : {
5719 17360 : auto_vec<ipa_argagg_value, 32> interim;
5720 17360 : unsigned valid_entries
5721 17360 : = find_aggregate_values_for_callers_subset_1 (interim, node, callers);
5722 17360 : if (!valid_entries)
5723 : return nullptr;
5724 :
 : /* Reserve the exact final size and copy the surviving (non-NULL-value)
 : entries into a garbage-collected vector.  */
5725 5319 : vec<ipa_argagg_value, va_gc> *res = NULL;
5726 5319 : vec_safe_reserve_exact (res, valid_entries);
5727 39409 : for (const ipa_argagg_value &av : interim)
5728 23452 : if (av.value)
5729 22053 : res->quick_push(av);
5730 5319 : gcc_checking_assert (res->length () == valid_entries);
5731 : return res;
5732 17360 : }
5733 :
5734 : /* Determine whether CS also brings all scalar values that the NODE is
5735 : specialized for. */
5736 :
5737 : static bool
5738 85 : cgraph_edge_brings_all_scalars_for_node (struct cgraph_edge *cs,
5739 : struct cgraph_node *node)
5740 : {
5741 85 : ipa_node_params *dest_info = ipa_node_params_sum->get (node);
5742 85 : int count = ipa_get_param_count (dest_info);
5743 85 : class ipa_node_params *caller_info;
5744 85 : class ipa_edge_args *args;
5745 85 : int i;
5746 :
5747 85 : caller_info = ipa_node_params_sum->get (cs->caller);
5748 85 : args = ipa_edge_args_sum->get (cs);
 : /* Check, for every parameter NODE was specialized for, that the edge
 : passes the very same constant.  */
5749 209 : for (i = 0; i < count; i++)
5750 : {
5751 151 : struct ipa_jump_func *jump_func;
5752 151 : tree val, t;
5753 :
 : /* Parameters without a known constant impose no requirement.  */
5754 151 : val = dest_info->known_csts[i];
5755 151 : if (!val)
5756 94 : continue;
5757 :
5758 114 : if (i >= ipa_get_cs_argument_count (args))
5759 : return false;
5760 57 : jump_func = ipa_get_ith_jump_func (args, i);
5761 57 : t = ipa_value_from_jfunc (caller_info, jump_func,
5762 : ipa_get_type (dest_info, i));
5763 57 : if (!t || !values_equal_for_ipcp_p (val, t))
5764 27 : return false;
5765 : }
5766 : return true;
5767 : }
5768 :
5769 : /* Determine whether CS also brings all aggregate values that NODE is
5770 : specialized for. */
5771 :
5772 : static bool
5773 58 : cgraph_edge_brings_all_agg_vals_for_node (struct cgraph_edge *cs,
5774 : struct cgraph_node *node)
5775 : {
 : /* Without recorded aggregate replacements there is nothing to match.  */
5776 58 : ipcp_transformation *ts = ipcp_get_transformation_summary (node);
5777 58 : if (!ts || vec_safe_is_empty (ts->m_agg_values))
5778 : return true;
5779 :
 : /* Gather what CS brings (lattices live on the original node of the
 : clone) and verify it covers all existing aggregate replacements.  */
5780 48 : const ipa_argagg_value_list existing (ts->m_agg_values);
5781 48 : auto_vec<ipa_argagg_value, 32> edge_values;
5782 48 : ipa_node_params *dest_info = ipa_node_params_sum->get (node);
5783 48 : gcc_checking_assert (dest_info->ipcp_orig_node);
5784 48 : dest_info = ipa_node_params_sum->get (dest_info->ipcp_orig_node);
5785 48 : push_agg_values_from_edge (cs, dest_info, &edge_values, &existing, false);
5786 48 : const ipa_argagg_value_list avl (&edge_values);
5787 48 : return avl.superset_of_p (existing);
5788 48 : }
5789 :
5790 : /* Given an original NODE and a VAL for which we have already created a
5791 : specialized clone, look whether there are incoming edges that still lead
5792 : into the old node but now also bring the requested value and also conform to
5793 : all other criteria such that they can be redirected the special node.
5794 : This function can therefore redirect the final edge in a SCC. */
5795 :
5796 : template <typename valtype>
5797 : static void
5798 10361 : perhaps_add_new_callers (cgraph_node *node, ipcp_value<valtype> *val)
5799 : {
5800 : ipcp_value_source<valtype> *src;
5801 10361 : profile_count redirected_sum = profile_count::zero ();
5802 :
 : /* Walk all source edges of VAL and all clones of each such edge.  */
5803 129938 : for (src = val->sources; src; src = src->next)
5804 : {
5805 119577 : struct cgraph_edge *cs = src->cs;
5806 367209 : while (cs)
5807 : {
 : /* The edge may be redirected only when it brings VAL itself plus
 : every other scalar and aggregate value the clone relies on.  */
5808 247632 : if (cgraph_edge_brings_value_p (cs, src, node, val)
5809 85 : && cgraph_edge_brings_all_scalars_for_node (cs, val->spec_node)
5810 247690 : && cgraph_edge_brings_all_agg_vals_for_node (cs, val->spec_node))
5811 : {
5812 35 : if (dump_file)
5813 3 : fprintf (dump_file, " - adding an extra caller %s of %s\n",
5814 3 : cs->caller->dump_name (),
5815 3 : val->spec_node->dump_name ());
5816 :
5817 35 : cs->redirect_callee_duplicating_thunks (val->spec_node);
5818 35 : val->spec_node->expand_all_artificial_thunks ();
5819 35 : if (cs->count.ipa ().initialized_p ())
5820 0 : redirected_sum = redirected_sum + cs->count.ipa ();
5821 : }
5822 247632 : cs = get_next_cgraph_edge_clone (cs);
5823 : }
5824 : }
5825 :
 : /* Move the redirected profile counts over to the specialized node.  */
5826 10361 : if (redirected_sum.nonzero_p ())
5827 0 : update_specialized_profile (val->spec_node, node, redirected_sum);
5828 10361 : }
5829 :
5830 : /* Return true if KNOWN_CONTEXTS contain at least one useful context. */
5831 :
5832 : static bool
5833 5125 : known_contexts_useful_p (vec<ipa_polymorphic_call_context> known_contexts)
5834 : {
5835 5125 : ipa_polymorphic_call_context *ctx;
5836 5125 : int i;
5837 :
5838 5125 : FOR_EACH_VEC_ELT (known_contexts, i, ctx)
5839 115 : if (!ctx->useless_p ())
5840 : return true;
5841 : return false;
5842 : }
5843 :
5844 : /* Return a copy of KNOWN_CONTEXTS if it contains at least one useful context,
     otherwise return vNULL. */
5845 :
5846 : static vec<ipa_polymorphic_call_context>
5847 5125 : copy_useful_known_contexts (const vec<ipa_polymorphic_call_context> &known_contexts)
5848 : {
5849 5125 : if (known_contexts_useful_p (known_contexts))
5850 115 : return known_contexts.copy ();
5851 : else
5852 5010 : return vNULL;
5853 : }
5854 :
5855 : /* Return true if the VALUE is represented in KNOWN_CSTS at INDEX if OFFSET is
5856 : minus one or in AGGVALS for INDEX and OFFSET otherwise. */
5857 :
5858 : DEBUG_FUNCTION bool
5859 5074 : ipcp_val_replacement_ok_p (vec<tree> &known_csts,
5860 : vec<ipa_polymorphic_call_context> &,
5861 : vec<ipa_argagg_value, va_gc> *aggvals,
5862 : int index, HOST_WIDE_INT offset, tree value)
5863 : {
5864 5074 : tree v;
5865 5074 : if (offset == -1)
5866 3618 : v = known_csts[index];
5867 : else
5868 : {
5869 1456 : const ipa_argagg_value_list avl (aggvals);
5870 1456 : v = avl.get_value (index, offset / BITS_PER_UNIT);
5871 : }
5872 :
5873 5074 : return v && values_equal_for_ipcp_p (v, value);
5874 : }
5875 :
5876 : /* Dump to F all the values in AVALS for which we are re-evaluating the effects
5877 : on the function represented by INFO. */
5878 :
5879 : DEBUG_FUNCTION void
5880 53 : dump_reestimation_message (FILE *f, ipa_node_params *info,
5881 : const ipa_auto_call_arg_values &avals)
5882 : {
 : /* Scalar constants, one "index:value" pair per known parameter.  */
5883 53 : fprintf (f, " Re-estimating effects with\n"
5884 : " Scalar constants:");
5885 53 : int param_count = ipa_get_param_count (info);
5886 125 : for (int i = 0; i < param_count; i++)
5887 72 : if (avals.m_known_vals[i])
5888 : {
5889 36 : fprintf (f, " %i:", i);
5890 36 : print_ipcp_constant_value (f, avals.m_known_vals[i]);
5891 : }
5892 53 : fprintf (f, "\n");
 : /* Polymorphic call contexts, if any are known.  */
5893 53 : if (!avals.m_known_contexts.is_empty ())
5894 : {
5895 0 : fprintf (f, " Pol. contexts:");
5896 0 : for (int i = 0; i < param_count; i++)
5897 0 : if (!avals.m_known_contexts[i].useless_p ())
5898 : {
5899 0 : fprintf (f, " %i:", i);
5900 0 : avals.m_known_contexts[i].dump (f);
5901 : }
5902 0 : fprintf (f, "\n");
5903 : }
 : /* Known aggregate replacements, if any.  */
5904 53 : if (!avals.m_known_aggs.is_empty ())
5905 : {
5906 17 : fprintf (f, " Aggregate replacements:");
5907 17 : ipa_argagg_value_list avs (&avals);
5908 17 : avs.dump (f);
5909 : }
5910 53 : }
5911 :
5912 : /* Return true if the VALUE is represented in KNOWN_CONTEXTS at INDEX and
5913 : OFFSET is equal to minus one (because the source of a polymorphic context
5914 : cannot be an aggregate value). */
5915 :
5916 : DEBUG_FUNCTION bool
5917 51 : ipcp_val_replacement_ok_p (vec<tree> &,
5918 : vec<ipa_polymorphic_call_context> &known_contexts,
5919 : vec<ipa_argagg_value, va_gc> *,
5920 : int index, HOST_WIDE_INT offset,
5921 : ipa_polymorphic_call_context value)
5922 : {
5923 51 : if (offset != -1)
5924 : return false;
5925 51 : return (known_contexts.length () > (unsigned) index
5926 51 : && !known_contexts[index].useless_p ()
5927 102 : && known_contexts[index].equal_to (value));
5928 : }
5929 :
5930 : /* Decide whether to create a special version of NODE for value VAL of
5931 : parameter at the given INDEX. If OFFSET is -1, the value is for the
5932 : parameter itself, otherwise it is stored at the given OFFSET of the
5933 : parameter. AVALS describes the other already known values. SELF_GEN_CLONES
5934 : is a vector which contains clones created for self-recursive calls with an
5935 : arithmetic pass-through jump function. CUR_SWEEP is the number of the
5936 : current sweep of the call-graph during the decision stage. */
5937 :
5938 : template <typename valtype>
5939 : static bool
5940 216381 : decide_about_value (struct cgraph_node *node, int index, HOST_WIDE_INT offset,
5941 : ipcp_value<valtype> *val,
5942 : vec<cgraph_node *> *self_gen_clones, int cur_sweep)
5943 : {
5944 : int caller_count;
5945 216381 : sreal freq_sum;
5946 : profile_count count_sum, rec_count_sum;
5947 : bool called_without_ipa_profile;
5948 :
 : /* If a clone for VAL already exists, only try to redirect further
 : callers to it; never create a second one.  */
5949 216381 : if (val->spec_node)
5950 : {
5951 10361 : perhaps_add_new_callers (node, val);
5952 10361 : return false;
5953 : }
5954 206020 : else if (val->local_size_cost + overall_size
5955 206020 : > get_max_overall_size (node, cur_sweep))
5956 : {
5957 592 : if (dump_file && (dump_flags & TDF_DETAILS))
5958 0 : fprintf (dump_file, " Ignoring candidate value because "
5959 : "maximum unit size would be reached with %li.\n",
5960 : val->local_size_cost + overall_size);
5961 592 : return false;
5962 : }
5963 205428 : else if (!get_info_about_necessary_edges (val, node, &freq_sum, &caller_count,
5964 : &rec_count_sum, &count_sum,
5965 : &called_without_ipa_profile))
5966 : return false;
5967 :
5968 144128 : if (!dbg_cnt (ipa_cp_values))
5969 : return false;
5970 :
5971 144128 : if (val->self_recursion_generated_p ())
5972 : {
5973 : /* The edge counts in this case might not have been adjusted yet.
5974 : Nevertheless, even if they were it would be only a guesswork which we
5975 : can do now. The recursive part of the counts can be derived from the
5976 : count of the original node anyway. */
5977 323 : if (node->count.ipa ().nonzero_p ())
5978 : {
5979 16 : unsigned dem = self_gen_clones->length () + 1;
5980 16 : rec_count_sum = node->count.ipa () / dem;
5981 : }
5982 : else
5983 291 : rec_count_sum = profile_count::zero ();
5984 : }
5985 :
5986 : /* get_info_about_necessary_edges only sums up ipa counts. */
5987 144128 : count_sum += rec_count_sum;
5988 :
5989 144128 : if (dump_file && (dump_flags & TDF_DETAILS))
5990 : {
5991 119 : fprintf (dump_file, " - considering value ");
5992 119 : print_ipcp_constant_value (dump_file, val->value);
5993 119 : fprintf (dump_file, " for ");
5994 119 : ipa_dump_param (dump_file, ipa_node_params_sum->get (node), index);
5995 119 : if (offset != -1)
5996 55 : fprintf (dump_file, ", offset: " HOST_WIDE_INT_PRINT_DEC, offset);
5997 119 : fprintf (dump_file, " (caller_count: %i)\n", caller_count);
5998 : }
5999 :
 : /* Collect all scalar, context and aggregate values known for the subset
 : of callers that bring VAL.  */
6000 : vec<cgraph_edge *> callers;
6001 144128 : callers = gather_edges_for_value (val, node, caller_count);
6002 144128 : ipa_node_params *info = ipa_node_params_sum->get (node);
6003 144128 : ipa_auto_call_arg_values avals;
6004 144128 : avals.m_known_vals.safe_grow_cleared (ipa_get_param_count (info), true);
6005 144128 : find_scalar_values_for_callers_subset (avals.m_known_vals, info, callers);
6006 144128 : find_contexts_for_caller_subset (avals.m_known_contexts, info, callers);
6007 144128 : find_aggregate_values_for_callers_subset (avals.m_known_aggs, node, callers);
6008 :
6009 :
 : /* First try the cheap estimate computed during propagation; only when it
 : does not justify cloning, re-estimate size and time precisely.  */
6010 144128 : if (good_cloning_opportunity_p (node, val->prop_time_benefit,
6011 : freq_sum, count_sum, val->prop_size_cost,
6012 : called_without_ipa_profile, cur_sweep))
6013 : ;
6014 : else
6015 : {
6016 : /* Extern inline functions are only meaningful to clone to propagate
6017 : values to their callees. */
6018 141904 : if (DECL_EXTERNAL (node->decl) && DECL_DECLARED_INLINE_P (node->decl))
6019 : {
6020 204 : if (dump_file && (dump_flags & TDF_DETAILS))
6021 0 : fprintf (dump_file, " Skipping extern inline.\n");
6022 139003 : return false;
6023 : }
6024 141700 : if (dump_file && (dump_flags & TDF_DETAILS))
6025 53 : dump_reestimation_message (dump_file, info, avals);
6026 :
6027 141700 : ipa_call_estimates estimates;
6028 141700 : estimate_ipcp_clone_size_and_time (node, &avals, &estimates);
 : /* Parameters which become constant can be removed, discount the cost
 : of passing them at every call.  */
6029 141700 : int removable_params_cost = 0;
6030 898160 : for (tree t : avals.m_known_vals)
6031 473060 : if (t)
6032 196243 : removable_params_cost += estimate_move_cost (TREE_TYPE (t), true);
6033 :
6034 141700 : int size = estimates.size - caller_count * removable_params_cost;
6035 :
6036 141700 : if (size <= 0)
6037 : {
6038 1782 : if (dump_file)
6039 0 : fprintf (dump_file, " Code not going to grow.\n");
6040 : }
6041 : else
6042 : {
6043 : sreal time_benefit
6044 139918 : = ((estimates.nonspecialized_time - estimates.time)
6045 279836 : + hint_time_bonus (node, estimates)
6046 139918 : + (devirtualization_time_bonus (node, &avals)
6047 139918 : + removable_params_cost));
6048 :
6049 139918 : if (!good_cloning_opportunity_p (node, time_benefit, freq_sum,
6050 : count_sum, size,
6051 : called_without_ipa_profile,
6052 : cur_sweep))
6053 138799 : return false;
6054 : }
6055 : }
6056 :
 : /* Cloning pays off; create the specialized node.  */
6057 5125 : if (dump_file)
6058 148 : fprintf (dump_file, " Creating a specialized node of %s.\n",
6059 : node->dump_name ());
6060 :
6061 5125 : vec<tree> known_csts = avals.m_known_vals.copy ();
6062 : vec<ipa_polymorphic_call_context> known_contexts
6063 5125 : = copy_useful_known_contexts (avals.m_known_contexts);
6064 :
6065 5125 : vec<ipa_argagg_value, va_gc> *aggvals = NULL;
6066 5125 : vec_safe_reserve_exact (aggvals, avals.m_known_aggs.length ());
6067 26311 : for (const ipa_argagg_value &av : avals.m_known_aggs)
6068 10936 : aggvals->quick_push (av);
6069 5125 : gcc_checking_assert (ipcp_val_replacement_ok_p (known_csts, known_contexts,
6070 : aggvals, index,
6071 : offset, val->value));
6072 5125 : val->spec_node = create_specialized_node (node, known_csts, known_contexts,
6073 : aggvals, callers);
6074 :
 : /* Clones made for self-recursive arithmetic pass-throughs get their
 : profile adjusted later, all at once.  */
6075 5125 : if (val->self_recursion_generated_p ())
6076 163 : self_gen_clones->safe_push (val->spec_node);
6077 : else
6078 4962 : update_profiling_info (node, val->spec_node);
6079 :
6080 5125 : callers.release ();
6081 5125 : overall_size += val->local_size_cost;
6082 5125 : if (dump_file && (dump_flags & TDF_DETAILS))
6083 67 : fprintf (dump_file, " overall size reached %li\n",
6084 : overall_size);
6085 :
6086 : /* TODO: If for some lattice there is only one other known value
6087 : left, make a special node for it too. */
6088 :
6089 : return true;
6090 144128 : }
6091 :
6092 : /* Like irange::contains_p(), but convert VAL to the range of R if
6093 : necessary. */
6094 :
6095 : static inline bool
6096 47765 : ipa_range_contains_p (const vrange &r, tree val)
6097 : {
6098 47765 : if (r.undefined_p ())
6099 : return false;
6100 :
6101 47765 : tree type = r.type ();
6102 47765 : if (!wi::fits_to_tree_p (wi::to_wide (val), type))
6103 : return false;
6104 :
6105 47765 : val = fold_convert (type, val);
6106 47765 : return r.contains_p (val);
6107 : }
6108 :
6109 : /* Structure holding opportunities so that they can be pre-sorted. */
6110 :
6111 216381 : struct cloning_opportunity_ranking
6112 : {
6113 : /* A very rough evaluation of likely benefit. */
6114 : sreal eval;
6115 : /* In the case of aggregate constants, a non-negative offset within their
6116 : aggregates. -1 for scalar constants, -2 for polymorphic contexts. */
6117 : HOST_WIDE_INT offset;
6118 : /* The value being considered for evaluation for cloning. */
6119 : ipcp_value_base *val;
6120 : /* Index of the formal parameter the value is coming in. */
6121 : int index;
6122 : };
6123 :
6124 : /* Helper function to qsort a vector of cloning opportunities. */
6125 :
6126 : static int
6127 2024837 : compare_cloning_opportunities (const void *a, const void *b)
6128 : {
6129 2024837 : const cloning_opportunity_ranking *o1
6130 : = (const cloning_opportunity_ranking *) a;
6131 2024837 : const cloning_opportunity_ranking *o2
6132 : = (const cloning_opportunity_ranking *) b;
6133 2024837 : if (o1->eval < o2->eval)
6134 : return 1;
6135 1586490 : if (o1->eval > o2->eval)
6136 512170 : return -1;
6137 : return 0;
6138 : }
6139 :
6140 : /* Use the estimations in VAL to determine how good a candidate it represents
6141 : for the purposes of ordering real evaluation of opportunities (which
6142 : includes information about incoming edges, among other things). */
6143 :
6144 : static sreal
6145 216381 : cloning_opportunity_ranking_evaluation (const ipcp_value_base *val)
6146 : {
6147 216381 : sreal e1 = (val->local_time_benefit * 1000) / MAX (val->local_size_cost, 1);
6148 216381 : sreal e2 = (val->prop_time_benefit * 1000) / MAX (val->prop_size_cost, 1);
6149 216381 : if (e2 > e1)
6150 15119 : return e2;
6151 : else
6152 201262 : return e1;
6153 : }
6154 :
6155 : /* Decide whether and what specialized clones of NODE should be created.
6156 : CUR_SWEEP is the number of the current sweep of the call-graph during the
6157 : decision stage. */
6158 :
6159 : static bool
6160 3203385 : decide_whether_version_node (struct cgraph_node *node, int cur_sweep)
6161 : {
6162 3203385 : ipa_node_params *info = ipa_node_params_sum->get (node);
6163 3203385 : int count = ipa_get_param_count (info);
6164 3203385 : bool ret = false;
6165 :
6166 3203385 : if (info->node_dead || count == 0)
6167 : return false;
6168 :
6169 2591917 : if (dump_file && (dump_flags & TDF_DETAILS))
6170 345 : fprintf (dump_file, "\nEvaluating opportunities for %s.\n",
6171 : node->dump_name ());
6172 :
6173 2591917 : auto_vec <cloning_opportunity_ranking, 32> opp_ranking;
6174 8654044 : for (int i = 0; i < count;i++)
6175 : {
6176 6062127 : if (!ipa_is_param_used (info, i))
6177 680842 : continue;
6178 :
6179 5381285 : class ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
6180 5381285 : ipcp_lattice<tree> *lat = &plats->itself;
6181 5381285 : ipcp_lattice<ipa_polymorphic_call_context> *ctxlat = &plats->ctxlat;
6182 :
6183 5381285 : if (!lat->bottom
6184 5381285 : && !lat->is_single_const ())
6185 : {
6186 528640 : ipcp_value<tree> *val;
6187 644681 : for (val = lat->values; val; val = val->next)
6188 : {
6189 : /* If some values generated for self-recursive calls with
6190 : arithmetic jump functions fall outside of the known
6191 : range for the parameter, we can skip them. */
6192 116103 : if (TREE_CODE (val->value) == INTEGER_CST
6193 70230 : && !plats->m_value_range.bottom_p ()
6194 163806 : && !ipa_range_contains_p (plats->m_value_range.m_vr,
6195 : val->value))
6196 : {
6197 : /* This can happen also if a constant present in the source
6198 : code falls outside of the range of parameter's type, so we
6199 : cannot assert. */
6200 62 : if (dump_file && (dump_flags & TDF_DETAILS))
6201 : {
6202 0 : fprintf (dump_file, " - skipping%s value ",
6203 0 : val->self_recursion_generated_p ()
6204 : ? " self_recursion_generated" : "");
6205 0 : print_ipcp_constant_value (dump_file, val->value);
6206 0 : fprintf (dump_file, " because it is outside known "
6207 : "value range.\n");
6208 : }
6209 62 : continue;
6210 : }
6211 115979 : cloning_opportunity_ranking opp;
6212 115979 : opp.eval = cloning_opportunity_ranking_evaluation (val);
6213 115979 : opp.offset = -1;
6214 115979 : opp.val = val;
6215 115979 : opp.index = i;
6216 115979 : opp_ranking.safe_push (opp);
6217 : }
6218 : }
6219 :
6220 5381285 : if (!plats->aggs_bottom)
6221 : {
6222 558251 : struct ipcp_agg_lattice *aglat;
6223 558251 : ipcp_value<tree> *val;
6224 699592 : for (aglat = plats->aggs; aglat; aglat = aglat->next)
6225 140199 : if (!aglat->bottom && aglat->values
6226 : /* If the following is false, the one value has been considered
6227 : for cloning for all contexts. */
6228 260378 : && (plats->aggs_contain_variable
6229 198527 : || !aglat->is_single_const ()))
6230 169451 : for (val = aglat->values; val; val = val->next)
6231 : {
6232 96702 : cloning_opportunity_ranking opp;
6233 96702 : opp.eval = cloning_opportunity_ranking_evaluation (val);
6234 96702 : opp.offset = aglat->offset;
6235 96702 : opp.val = val;
6236 96702 : opp.index = i;
6237 96702 : opp_ranking.safe_push (opp);
6238 : }
6239 : }
6240 :
6241 5381285 : if (!ctxlat->bottom
6242 6623252 : && !ctxlat->is_single_const ())
6243 : {
6244 543335 : ipcp_value<ipa_polymorphic_call_context> *val;
6245 547035 : for (val = ctxlat->values; val; val = val->next)
6246 7400 : if (!val->value.useless_p ())
6247 : {
6248 3700 : cloning_opportunity_ranking opp;
6249 3700 : opp.eval = cloning_opportunity_ranking_evaluation (val);
6250 3700 : opp.offset = -2;
6251 3700 : opp.val = val;
6252 3700 : opp.index = i;
6253 3700 : opp_ranking.safe_push (opp);
6254 : }
6255 : }
6256 : }
6257 :
6258 2591917 : if (!opp_ranking.is_empty ())
6259 : {
6260 51265 : opp_ranking.qsort (compare_cloning_opportunities);
6261 51265 : auto_vec <cgraph_node *, 9> self_gen_clones;
6262 370176 : for (const cloning_opportunity_ranking &opp : opp_ranking)
6263 216381 : if (opp.offset == -2)
6264 : {
6265 3700 : ipcp_value<ipa_polymorphic_call_context> *val
6266 : = static_cast <ipcp_value<ipa_polymorphic_call_context> *>
6267 : (opp.val);
6268 3700 : ret |= decide_about_value (node, opp.index, -1, val,
6269 : &self_gen_clones, cur_sweep);
6270 : }
6271 : else
6272 : {
6273 212681 : ipcp_value<tree> *val = static_cast<ipcp_value<tree> *> (opp.val);
6274 212681 : ret |= decide_about_value (node, opp.index, opp.offset, val,
6275 : &self_gen_clones, cur_sweep);
6276 : }
6277 :
6278 102530 : if (!self_gen_clones.is_empty ())
6279 : {
6280 41 : self_gen_clones.safe_push (node);
6281 41 : update_counts_for_self_gen_clones (node, self_gen_clones);
6282 : }
6283 51265 : }
6284 :
6285 2591917 : struct caller_statistics stats;
6286 2591917 : init_caller_stats (&stats);
6287 2591917 : node->call_for_symbol_thunks_and_aliases (gather_caller_stats, &stats,
6288 : false);
6289 2591917 : if (!stats.n_calls)
6290 : {
6291 1302158 : if (dump_file)
6292 986 : fprintf (dump_file, " Not cloning for all contexts because "
6293 : "there are no callers of the original node (any more).\n");
6294 1302158 : return ret;
6295 : }
6296 :
6297 1289759 : bool do_clone_for_all_contexts = false;
6298 1289759 : ipa_auto_call_arg_values avals;
6299 1289759 : int removable_params_cost;
6300 1289759 : bool ctx_independent_const
6301 1289759 : = gather_context_independent_values (info, &avals, &removable_params_cost);
6302 1289759 : sreal devirt_bonus = devirtualization_time_bonus (node, &avals);
6303 1275054 : if (ctx_independent_const || devirt_bonus > 0
6304 2564807 : || (removable_params_cost && clone_for_param_removal_p (node)))
6305 : {
6306 64601 : ipa_call_estimates estimates;
6307 :
6308 64601 : estimate_ipcp_clone_size_and_time (node, &avals, &estimates);
6309 64601 : sreal time = estimates.nonspecialized_time - estimates.time;
6310 64601 : time += devirt_bonus;
6311 64601 : time += hint_time_bonus (node, estimates);
6312 64601 : time += removable_params_cost;
6313 64601 : int size = estimates.size - stats.n_calls * removable_params_cost;
6314 :
6315 64601 : if (dump_file && (dump_flags & TDF_DETAILS))
6316 26 : fprintf (dump_file, " - context independent values, size: %i, "
6317 : "time_benefit: %f\n", size, (time).to_double ());
6318 :
6319 64601 : if (size <= 0 || node->local)
6320 : {
6321 17110 : if (!dbg_cnt (ipa_cp_values))
6322 0 : return ret;
6323 :
6324 17110 : do_clone_for_all_contexts = true;
6325 17110 : if (dump_file)
6326 106 : fprintf (dump_file, " Decided to specialize for all "
6327 : "known contexts, code not going to grow.\n");
6328 : }
6329 47491 : else if (good_cloning_opportunity_p (node, time, stats.freq_sum,
6330 : stats.count_sum, size,
6331 47491 : stats.called_without_ipa_profile,
6332 : cur_sweep))
6333 : {
6334 346 : if (size + overall_size <= get_max_overall_size (node, cur_sweep))
6335 : {
6336 346 : if (!dbg_cnt (ipa_cp_values))
6337 : return ret;
6338 :
6339 346 : do_clone_for_all_contexts = true;
6340 346 : overall_size += size;
6341 346 : if (dump_file)
6342 14 : fprintf (dump_file, " Decided to specialize for all "
6343 : "known contexts, growth (to %li) deemed "
6344 : "beneficial.\n", overall_size);
6345 : }
6346 0 : else if (dump_file && (dump_flags & TDF_DETAILS))
6347 0 : fprintf (dump_file, " Not cloning for all contexts because "
6348 : "maximum unit size would be reached with %li.\n",
6349 : size + overall_size);
6350 : }
6351 47145 : else if (dump_file && (dump_flags & TDF_DETAILS))
6352 2 : fprintf (dump_file, " Not cloning for all contexts because "
6353 : "!good_cloning_opportunity_p.\n");
6354 : }
6355 :
6356 1289759 : if (do_clone_for_all_contexts)
6357 : {
6358 17456 : auto_vec<cgraph_edge *> callers = node->collect_callers ();
6359 :
6360 86814 : for (int i = callers.length () - 1; i >= 0; i--)
6361 : {
6362 51902 : cgraph_edge *cs = callers[i];
6363 51902 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
6364 :
6365 51902 : if (caller_info && caller_info->node_dead)
6366 2660 : callers.unordered_remove (i);
6367 : }
6368 :
6369 17456 : if (!adjust_callers_for_value_intersection (callers, node))
6370 : /* If node is not called by anyone, or all its caller edges are
6371 : self-recursive, the node is not really in use, no need to do
6372 : cloning. */
6373 96 : return ret;
6374 :
6375 17360 : if (dump_file)
6376 118 : fprintf (dump_file, " Creating a specialized node of %s "
6377 : "for all known contexts.\n", node->dump_name ());
6378 :
6379 17360 : vec<tree> known_csts = vNULL;
6380 17360 : known_csts.safe_grow_cleared (count, true);
6381 17360 : find_scalar_values_for_callers_subset (known_csts, info, callers);
6382 17360 : vec<ipa_polymorphic_call_context> known_contexts = vNULL;
6383 17360 : find_contexts_for_caller_subset (known_contexts, info, callers);
6384 17360 : vec<ipa_argagg_value, va_gc> *aggvals
6385 17360 : = find_aggregate_values_for_callers_subset_gc (node, callers);
6386 :
6387 17360 : struct cgraph_node *clone = create_specialized_node (node, known_csts,
6388 : known_contexts,
6389 : aggvals, callers);
6390 17360 : ipa_node_params_sum->get (clone)->is_all_contexts_clone = true;
6391 17360 : ret = true;
6392 17456 : }
6393 :
6394 : return ret;
6395 3881676 : }
6396 :
6397 : /* Transitively mark all callees of NODE within the same SCC as not dead. */
6398 :
6399 : static void
6400 5468 : spread_undeadness (struct cgraph_node *node)
6401 : {
6402 5468 : struct cgraph_edge *cs;
6403 :
6404 18942 : for (cs = node->callees; cs; cs = cs->next_callee)
6405 13474 : if (ipa_edge_within_scc (cs))
6406 : {
6407 966 : struct cgraph_node *callee;
6408 966 : class ipa_node_params *info;
6409 :
6410 966 : callee = cs->callee->function_symbol (NULL);
6411 966 : info = ipa_node_params_sum->get (callee);
6412 :
6413 966 : if (info && info->node_dead)
6414 : {
6415 68 : info->node_dead = 0;
6416 68 : spread_undeadness (callee);
6417 : }
6418 : }
6419 5468 : }
6420 :
6421 : /* Return true if NODE has a caller from outside of its SCC that is not
6422 : dead. Worker callback for cgraph_for_node_and_aliases. */
6423 :
6424 : static bool
6425 16232 : has_undead_caller_from_outside_scc_p (struct cgraph_node *node,
6426 : void *data ATTRIBUTE_UNUSED)
6427 : {
6428 16232 : struct cgraph_edge *cs;
6429 :
6430 81547 : for (cs = node->callers; cs; cs = cs->next_caller)
6431 65957 : if (cs->caller->thunk
6432 65957 : && cs->caller->call_for_symbol_thunks_and_aliases
6433 0 : (has_undead_caller_from_outside_scc_p, NULL, true))
6434 : return true;
6435 65957 : else if (!ipa_edge_within_scc (cs))
6436 : {
6437 65702 : ipa_node_params *caller_info = ipa_node_params_sum->get (cs->caller);
6438 65702 : if (!caller_info /* Unoptimized caller are like dead ones. */
6439 65700 : || !caller_info->node_dead)
6440 : return true;
6441 : }
6442 : return false;
6443 : }
6444 :
6445 :
6446 : /* Identify nodes within the same SCC as NODE which are no longer needed
6447 : because of new clones and will be removed as unreachable. */
6448 :
6449 : static void
6450 20355 : identify_dead_nodes (struct cgraph_node *node)
6451 : {
6452 20355 : struct cgraph_node *v;
6453 41050 : for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
6454 20695 : if (v->local)
6455 : {
6456 15996 : ipa_node_params *info = ipa_node_params_sum->get (v);
6457 15996 : if (info
6458 31992 : && !v->call_for_symbol_thunks_and_aliases
6459 15996 : (has_undead_caller_from_outside_scc_p, NULL, true))
6460 15354 : info->node_dead = 1;
6461 : }
6462 :
6463 41050 : for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
6464 : {
6465 20695 : ipa_node_params *info = ipa_node_params_sum->get (v);
6466 20695 : if (info && !info->node_dead)
6467 5400 : spread_undeadness (v);
6468 : }
6469 :
6470 20355 : if (dump_file && (dump_flags & TDF_DETAILS))
6471 : {
6472 107 : for (v = node; v; v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
6473 55 : if (ipa_node_params_sum->get (v)
6474 55 : && ipa_node_params_sum->get (v)->node_dead)
6475 32 : fprintf (dump_file, " Marking node as dead: %s.\n",
6476 : v->dump_name ());
6477 : }
6478 20355 : }
6479 :
6480 : /* Removes all useless callback edges from the callgraph. Useless callback
6481 : edges might mess up the callgraph, because they might be impossible to
6482 : redirect and so on, leading to crashes. Their usefulness is evaluated
6483 : through callback_edge_useful_p. */
6484 :
6485 : static void
6486 127990 : purge_useless_callback_edges ()
6487 : {
6488 127990 : if (dump_file)
6489 161 : fprintf (dump_file, "\nPurging useless callback edges:\n");
6490 :
6491 127990 : cgraph_edge *e;
6492 127990 : cgraph_node *node;
6493 1419730 : FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
6494 : {
6495 6721433 : for (e = node->callees; e; e = e->next_callee)
6496 : {
6497 5429693 : if (e->has_callback)
6498 : {
6499 13676 : if (dump_file)
6500 3 : fprintf (dump_file, "\tExamining callbacks of edge %s -> %s:\n",
6501 3 : e->caller->dump_name (), e->callee->dump_name ());
6502 13676 : if (!lookup_attribute (CALLBACK_ATTR_IDENT,
6503 13676 : DECL_ATTRIBUTES (e->callee->decl))
6504 13676 : && !callback_is_special_cased (e->callee->decl, e->call_stmt))
6505 : {
6506 1 : if (dump_file)
6507 0 : fprintf (
6508 : dump_file,
6509 : "\t\tPurging callbacks, because the callback-dispatching"
6510 : "function no longer has any callback attributes.\n");
6511 1 : e->purge_callback_edges ();
6512 1 : continue;
6513 : }
6514 13675 : cgraph_edge *cbe, *next;
6515 27350 : for (cbe = e->first_callback_edge (); cbe; cbe = next)
6516 : {
6517 13675 : next = cbe->next_callback_edge ();
6518 13675 : if (!callback_edge_useful_p (cbe))
6519 : {
6520 13303 : if (dump_file)
6521 1 : fprintf (dump_file,
6522 : "\t\tCallback edge %s -> %s not deemed "
6523 : "useful, removing.\n",
6524 1 : cbe->caller->dump_name (),
6525 1 : cbe->callee->dump_name ());
6526 13303 : cgraph_edge::remove (cbe);
6527 : }
6528 : else
6529 : {
6530 372 : if (dump_file)
6531 2 : fprintf (dump_file,
6532 : "\t\tKept callback edge %s -> %s "
6533 : "because it looks useful.\n",
6534 2 : cbe->caller->dump_name (),
6535 2 : cbe->callee->dump_name ());
6536 : }
6537 : }
6538 : }
6539 : }
6540 : }
6541 :
6542 127990 : if (dump_file)
6543 161 : fprintf (dump_file, "\n");
6544 127990 : }
6545 :
6546 : /* The decision stage. Iterate over the topological order of call graph nodes
6547 : TOPO and make specialized clones if deemed beneficial. */
6548 :
6549 : static void
6550 127990 : ipcp_decision_stage (class ipa_topo_info *topo)
6551 : {
6552 127990 : int i;
6553 :
6554 127990 : if (dump_file)
6555 161 : fprintf (dump_file, "\nIPA decision stage (%i sweeps):\n",
6556 : max_number_sweeps);
6557 :
6558 491085 : for (int cur_sweep = 1; cur_sweep <= max_number_sweeps; cur_sweep++)
6559 : {
6560 363095 : if (dump_file && (dump_flags & TDF_DETAILS))
6561 144 : fprintf (dump_file, "\nIPA decision sweep number %i (out of %i):\n",
6562 : cur_sweep, max_number_sweeps);
6563 :
6564 4358586 : for (i = topo->nnodes - 1; i >= 0; i--)
6565 : {
6566 3995491 : struct cgraph_node *node = topo->order[i];
6567 3995491 : bool change = false, iterate = true;
6568 :
6569 8011343 : while (iterate)
6570 : {
6571 : struct cgraph_node *v;
6572 : iterate = false;
6573 4029432 : for (v = node;
6574 8045284 : v;
6575 4029432 : v = ((struct ipa_dfs_info *) v->aux)->next_cycle)
6576 4029432 : if (v->has_gimple_body_p ()
6577 3803625 : && ipcp_versionable_function_p (v)
6578 4029432 : && (cur_sweep
6579 3203385 : <= opt_for_fn (node->decl, param_ipa_cp_sweeps)))
6580 3203385 : iterate |= decide_whether_version_node (v, cur_sweep);
6581 :
6582 4015852 : change |= iterate;
6583 : }
6584 3995491 : if (change)
6585 20355 : identify_dead_nodes (node);
6586 : }
6587 : }
6588 :
6589 : /* Currently, the primary use of callback edges is constant propagation.
6590 : Constant propagation is now over, so we have to remove unused callback
6591 : edges. */
6592 127990 : purge_useless_callback_edges ();
6593 127990 : }
6594 :
6595 : /* Look up all VR and bits information that we have discovered and copy it
6596 : over to the transformation summary. */
6597 :
6598 : static void
6599 127990 : ipcp_store_vr_results (void)
6600 : {
6601 127990 : cgraph_node *node;
6602 :
6603 1419730 : FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
6604 : {
6605 1291740 : ipa_node_params *info = ipa_node_params_sum->get (node);
6606 1291740 : bool dumped_sth = false;
6607 1291740 : bool found_useful_result = false;
6608 1291740 : bool do_vr = true;
6609 1291740 : bool do_bits = true;
6610 :
6611 : /* If the function is not local, the gathered information is only useful
6612 : for clones. */
6613 1291740 : if (!node->local)
6614 1127785 : continue;
6615 :
6616 163955 : if (!info || !opt_for_fn (node->decl, flag_ipa_vrp))
6617 : {
6618 4828 : if (dump_file)
6619 9 : fprintf (dump_file, "Not considering %s for VR discovery "
6620 : "and propagate; -fipa-ipa-vrp: disabled.\n",
6621 : node->dump_name ());
6622 : do_vr = false;
6623 : }
6624 163955 : if (!info || !opt_for_fn (node->decl, flag_ipa_bit_cp))
6625 : {
6626 4796 : if (dump_file)
6627 2 : fprintf (dump_file, "Not considering %s for ipa bitwise "
6628 : "propagation ; -fipa-bit-cp: disabled.\n",
6629 : node->dump_name ());
6630 4796 : do_bits = false;
6631 : }
6632 4796 : if (!do_bits && !do_vr)
6633 4790 : continue;
6634 :
6635 159165 : if (info->ipcp_orig_node)
6636 22249 : info = ipa_node_params_sum->get (info->ipcp_orig_node);
6637 159165 : if (info->lattices.is_empty ())
6638 : /* Newly expanded artificial thunks do not have lattices. */
6639 50416 : continue;
6640 :
6641 108749 : unsigned count = ipa_get_param_count (info);
6642 224437 : for (unsigned i = 0; i < count; i++)
6643 : {
6644 176862 : ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
6645 176862 : if (do_vr
6646 176830 : && !plats->m_value_range.bottom_p ()
6647 235792 : && !plats->m_value_range.top_p ())
6648 : {
6649 : found_useful_result = true;
6650 : break;
6651 : }
6652 117933 : if (do_bits && plats->bits_lattice.constant_p ())
6653 : {
6654 : found_useful_result = true;
6655 : break;
6656 : }
6657 : }
6658 108749 : if (!found_useful_result)
6659 47575 : continue;
6660 :
6661 61174 : ipcp_transformation_initialize ();
6662 61174 : ipcp_transformation *ts = ipcp_transformation_sum->get_create (node);
6663 61174 : vec_safe_reserve_exact (ts->m_vr, count);
6664 :
6665 224469 : for (unsigned i = 0; i < count; i++)
6666 : {
6667 163295 : ipcp_param_lattices *plats = ipa_get_parm_lattices (info, i);
6668 163295 : ipcp_bits_lattice *bits = NULL;
6669 :
6670 163295 : if (do_bits
6671 163291 : && plats->bits_lattice.constant_p ()
6672 258385 : && dbg_cnt (ipa_cp_bits))
6673 95090 : bits = &plats->bits_lattice;
6674 :
6675 163295 : if (do_vr
6676 163271 : && !plats->m_value_range.bottom_p ()
6677 113848 : && !plats->m_value_range.top_p ()
6678 277143 : && dbg_cnt (ipa_cp_vr))
6679 : {
6680 113848 : if (bits)
6681 : {
6682 89682 : value_range tmp = plats->m_value_range.m_vr;
6683 89682 : tree type = ipa_get_type (info, i);
6684 179364 : irange_bitmask bm (wide_int::from (bits->get_value (),
6685 89682 : TYPE_PRECISION (type),
6686 89682 : TYPE_SIGN (type)),
6687 179364 : wide_int::from (bits->get_mask (),
6688 89682 : TYPE_PRECISION (type),
6689 179364 : TYPE_SIGN (type)));
6690 89682 : tmp.update_bitmask (bm);
6691 : // Reflecting the bitmask on the ranges can sometimes
6692 : // produce an UNDEFINED value if the bitmask update
6693 : // was previously deferred. See PR 120048.
6694 89682 : if (tmp.undefined_p ())
6695 0 : tmp.set_varying (type);
6696 89682 : ipa_vr vr (tmp);
6697 89682 : ts->m_vr->quick_push (vr);
6698 89682 : }
6699 : else
6700 : {
6701 24166 : ipa_vr vr (plats->m_value_range.m_vr);
6702 24166 : ts->m_vr->quick_push (vr);
6703 : }
6704 : }
6705 49447 : else if (bits)
6706 : {
6707 5408 : tree type = ipa_get_type (info, i);
6708 5408 : value_range tmp;
6709 5408 : tmp.set_varying (type);
6710 10816 : irange_bitmask bm (wide_int::from (bits->get_value (),
6711 5408 : TYPE_PRECISION (type),
6712 5408 : TYPE_SIGN (type)),
6713 10816 : wide_int::from (bits->get_mask (),
6714 5408 : TYPE_PRECISION (type),
6715 10816 : TYPE_SIGN (type)));
6716 5408 : tmp.update_bitmask (bm);
6717 : // Reflecting the bitmask on the ranges can sometimes
6718 : // produce an UNDEFINED value if the bitmask update
6719 : // was previously deferred. See PR 120048.
6720 5408 : if (tmp.undefined_p ())
6721 0 : tmp.set_varying (type);
6722 5408 : ipa_vr vr (tmp);
6723 5408 : ts->m_vr->quick_push (vr);
6724 5408 : }
6725 : else
6726 : {
6727 44039 : ipa_vr vr;
6728 44039 : ts->m_vr->quick_push (vr);
6729 : }
6730 :
6731 163295 : if (!dump_file || !bits)
6732 162867 : continue;
6733 :
6734 428 : if (!dumped_sth)
6735 : {
6736 306 : fprintf (dump_file, "Propagated bits info for function %s:\n",
6737 : node->dump_name ());
6738 306 : dumped_sth = true;
6739 : }
6740 428 : fprintf (dump_file, " param %i: value = ", i);
6741 428 : ipcp_print_widest_int (dump_file, bits->get_value ());
6742 428 : fprintf (dump_file, ", mask = ");
6743 428 : ipcp_print_widest_int (dump_file, bits->get_mask ());
6744 428 : fprintf (dump_file, "\n");
6745 : }
6746 : }
6747 127990 : }
6748 :
6749 : /* The IPCP driver. */
6750 :
6751 : static unsigned int
6752 127990 : ipcp_driver (void)
6753 : {
6754 127990 : class ipa_topo_info topo;
6755 :
6756 127990 : if (edge_clone_summaries == NULL)
6757 127990 : edge_clone_summaries = new edge_clone_summary_t (symtab);
6758 :
6759 127990 : ipa_check_create_node_params ();
6760 127990 : ipa_check_create_edge_args ();
6761 127990 : clone_num_suffixes = new hash_map<const char *, unsigned>;
6762 :
6763 127990 : if (dump_file)
6764 : {
6765 161 : fprintf (dump_file, "\nIPA structures before propagation:\n");
6766 161 : if (dump_flags & TDF_DETAILS)
6767 48 : ipa_print_all_params (dump_file);
6768 161 : ipa_print_all_jump_functions (dump_file);
6769 : }
6770 :
6771 : /* Topological sort. */
6772 127990 : build_toporder_info (&topo);
6773 : /* Do the interprocedural propagation. */
6774 127990 : ipcp_propagate_stage (&topo);
6775 : /* Decide what constant propagation and cloning should be performed. */
6776 127990 : ipcp_decision_stage (&topo);
6777 : /* Store results of value range and bits propagation. */
6778 127990 : ipcp_store_vr_results ();
6779 :
6780 : /* Free all IPCP structures. */
6781 255980 : delete clone_num_suffixes;
6782 127990 : free_toporder_info (&topo);
6783 127990 : delete edge_clone_summaries;
6784 127990 : edge_clone_summaries = NULL;
6785 127990 : ipa_free_all_structures_after_ipa_cp ();
6786 127990 : if (dump_file)
6787 161 : fprintf (dump_file, "\nIPA constant propagation end\n");
6788 127990 : return 0;
6789 : }
6790 :
6791 : /* Initialization and computation of IPCP data structures. This is the initial
6792 : intraprocedural analysis of functions, which gathers information to be
6793 : propagated later on. */
6794 :
6795 : static void
6796 124821 : ipcp_generate_summary (void)
6797 : {
6798 124821 : struct cgraph_node *node;
6799 :
6800 124821 : if (dump_file)
6801 163 : fprintf (dump_file, "\nIPA constant propagation start:\n");
6802 124821 : ipa_register_cgraph_hooks ();
6803 :
6804 1370740 : FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node)
6805 1245919 : ipa_analyze_node (node);
6806 :
6807 124821 : varpool_node *vnode;
6808 1790494 : FOR_EACH_STATIC_INITIALIZER (vnode)
6809 1665673 : ipa_analyze_var_static_initializer (vnode);
6810 124821 : }
6811 :
6812 : namespace {
6813 :
6814 : const pass_data pass_data_ipa_cp =
6815 : {
6816 : IPA_PASS, /* type */
6817 : "cp", /* name */
6818 : OPTGROUP_NONE, /* optinfo_flags */
6819 : TV_IPA_CONSTANT_PROP, /* tv_id */
6820 : 0, /* properties_required */
6821 : 0, /* properties_provided */
6822 : 0, /* properties_destroyed */
6823 : 0, /* todo_flags_start */
6824 : ( TODO_dump_symtab | TODO_remove_functions ), /* todo_flags_finish */
6825 : };
6826 :
6827 : class pass_ipa_cp : public ipa_opt_pass_d
6828 : {
6829 : public:
6830 285722 : pass_ipa_cp (gcc::context *ctxt)
6831 : : ipa_opt_pass_d (pass_data_ipa_cp, ctxt,
6832 : ipcp_generate_summary, /* generate_summary */
6833 : NULL, /* write_summary */
6834 : NULL, /* read_summary */
6835 : ipcp_write_transformation_summaries, /*
6836 : write_optimization_summary */
6837 : ipcp_read_transformation_summaries, /*
6838 : read_optimization_summary */
6839 : NULL, /* stmt_fixup */
6840 : 0, /* function_transform_todo_flags_start */
6841 : ipcp_transform_function, /* function_transform */
6842 285722 : NULL) /* variable_transform */
6843 285722 : {}
6844 :
6845 : /* opt_pass methods: */
6846 571909 : bool gate (function *) final override
6847 : {
6848 : /* FIXME: We should remove the optimize check after we ensure we never run
6849 : IPA passes when not optimizing. */
6850 571909 : return (flag_ipa_cp && optimize) || in_lto_p;
6851 : }
6852 :
6853 127990 : unsigned int execute (function *) final override { return ipcp_driver (); }
6854 :
6855 : }; // class pass_ipa_cp
6856 :
6857 : } // anon namespace
6858 :
6859 : ipa_opt_pass_d *
6860 285722 : make_pass_ipa_cp (gcc::context *ctxt)
6861 : {
6862 285722 : return new pass_ipa_cp (ctxt);
6863 : }
6864 :
6865 : /* Reset all state within ipa-cp.cc so that we can rerun the compiler
6866 : within the same process. For use by toplev::finalize. */
6867 :
6868 : void
6869 256621 : ipa_cp_cc_finalize (void)
6870 : {
6871 256621 : overall_size = 0;
6872 256621 : orig_overall_size = 0;
6873 256621 : ipcp_free_transformation_sum ();
6874 256621 : }
6875 :
6876 : /* Given PARAM which must be a parameter of function FNDECL described by THIS,
6877 : return its index in the DECL_ARGUMENTS chain, using a pre-computed
6878 : DECL_UID-sorted vector if available (which is pre-computed only if there are
6879 : many parameters). Can return -1 if param is static chain not represented
6880 : among DECL_ARGUMENTS. */
6881 :
6882 : int
6883 126242 : ipcp_transformation::get_param_index (const_tree fndecl, const_tree param) const
6884 : {
6885 126242 : gcc_assert (TREE_CODE (param) == PARM_DECL);
6886 126242 : if (m_uid_to_idx)
6887 : {
6888 0 : unsigned puid = DECL_UID (param);
6889 0 : const ipa_uid_to_idx_map_elt *res
6890 0 : = std::lower_bound (m_uid_to_idx->begin(), m_uid_to_idx->end (), puid,
6891 0 : [] (const ipa_uid_to_idx_map_elt &elt, unsigned uid)
6892 : {
6893 0 : return elt.uid < uid;
6894 : });
6895 0 : if (res == m_uid_to_idx->end ()
6896 0 : || res->uid != puid)
6897 : {
6898 0 : gcc_assert (DECL_STATIC_CHAIN (fndecl));
6899 : return -1;
6900 : }
6901 0 : return res->index;
6902 : }
6903 :
6904 126242 : unsigned index = 0;
6905 287233 : for (tree p = DECL_ARGUMENTS (fndecl); p; p = DECL_CHAIN (p), index++)
6906 285785 : if (p == param)
6907 124794 : return (int) index;
6908 :
6909 1448 : gcc_assert (DECL_STATIC_CHAIN (fndecl));
6910 : return -1;
6911 : }
6912 :
6913 : /* Helper function to qsort a vector of ipa_uid_to_idx_map_elt elements
6914 : according to the uid. */
6915 :
6916 : static int
6917 0 : compare_uids (const void *a, const void *b)
6918 : {
6919 0 : const ipa_uid_to_idx_map_elt *e1 = (const ipa_uid_to_idx_map_elt *) a;
6920 0 : const ipa_uid_to_idx_map_elt *e2 = (const ipa_uid_to_idx_map_elt *) b;
6921 0 : if (e1->uid < e2->uid)
6922 : return -1;
6923 0 : if (e1->uid > e2->uid)
6924 : return 1;
6925 0 : gcc_unreachable ();
6926 : }
6927 :
6928 : /* Assuming THIS describes FNDECL and it has sufficiently many parameters to
6929 : justify the overhead, create a DECL_UID-sorted vector to speed up mapping
6930 : from parameters to their indices in DECL_ARGUMENTS chain. */
6931 :
6932 : void
6933 23188 : ipcp_transformation::maybe_create_parm_idx_map (tree fndecl)
6934 : {
6935 23188 : int c = count_formal_params (fndecl);
6936 23188 : if (c < 32)
6937 : return;
6938 :
6939 0 : m_uid_to_idx = NULL;
6940 0 : vec_safe_reserve (m_uid_to_idx, c, true);
6941 0 : unsigned index = 0;
6942 0 : for (tree p = DECL_ARGUMENTS (fndecl); p; p = DECL_CHAIN (p), index++)
6943 : {
6944 0 : ipa_uid_to_idx_map_elt elt;
6945 0 : elt.uid = DECL_UID (p);
6946 0 : elt.index = index;
6947 0 : m_uid_to_idx->quick_push (elt);
6948 : }
6949 0 : m_uid_to_idx->qsort (compare_uids);
6950 : }
|