Line data Source code
1 : /* Analysis used by inlining decision heuristics.
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Jan Hubicka
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "tree.h"
26 : #include "gimple.h"
27 : #include "alloc-pool.h"
28 : #include "tree-pass.h"
29 : #include "ssa.h"
30 : #include "tree-streamer.h"
31 : #include "cgraph.h"
32 : #include "diagnostic.h"
33 : #include "fold-const.h"
34 : #include "print-tree.h"
35 : #include "tree-inline.h"
36 : #include "gimple-pretty-print.h"
37 : #include "cfganal.h"
38 : #include "gimple-iterator.h"
39 : #include "tree-cfg.h"
40 : #include "tree-ssa-loop-niter.h"
41 : #include "tree-ssa-loop.h"
42 : #include "symbol-summary.h"
43 : #include "sreal.h"
44 : #include "ipa-cp.h"
45 : #include "ipa-prop.h"
46 : #include "ipa-fnsummary.h"
47 : #include "ipa-inline.h"
48 : #include "cfgloop.h"
49 : #include "tree-scalar-evolution.h"
50 : #include "ipa-utils.h"
51 : #include "cfgexpand.h"
52 : #include "gimplify.h"
53 : #include "attribs.h"
54 :
/* Cached node/edge growths.  Allocated by initialize_growth_caches and
   destroyed by free_growth_caches; NULL when caching is disabled (the
   early inliner runs without it).  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;
57 :
/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  /* The cached call context; owns its payload and must be explicitly
     released (done in the destructor).  */
  ipa_cached_call_context ctx;
  /* Specialized and nonspecialized time estimates for CTX.  */
  sreal time, nonspec_time;
  /* Size estimate for CTX.  */
  int size;
  /* Inline hints computed together with the size/time estimates.  */
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    /* ipa_cached_call_context does not free itself; release explicitly.  */
    ctx.release ();
  }
};
77 :
/* At the moment we implement primitive single entry LRU cache.  */
class node_context_summary
{
public:
  /* The single cached entry; a lookup with a different context simply
     overwrites it in place (see do_estimate_edge_time).  */
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};
92 :
/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectivity.  "clear" counts
   first-time initializations of an entry (dumped as "initializations"
   by free_growth_caches), "miss" counts overwrites of an existing one.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;
99 :
100 : /* Give initial reasons why inlining would fail on EDGE. This gets either
101 : nullified or usually overwritten by more precise reasons later. */
102 :
103 : void
104 38186662 : initialize_inline_failed (struct cgraph_edge *e)
105 : {
106 38186662 : struct cgraph_node *callee = e->callee;
107 :
108 38186662 : if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
109 76204997 : && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
110 : ;
111 38173513 : else if (e->indirect_unknown_callee)
112 667169 : e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
113 37506344 : else if (!callee->definition)
114 19637864 : e->inline_failed = CIF_BODY_NOT_AVAILABLE;
115 17868480 : else if (callee->redefined_extern_inline)
116 190 : e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
117 : else
118 17868290 : e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
119 38186662 : gcc_checking_assert (!e->call_stmt_cannot_inline_p
120 : || cgraph_inline_failed_type (e->inline_failed)
121 : == CIF_FINAL_ERROR);
122 38186662 : }
123 :
124 : /* Allocate edge growth caches. */
125 :
126 : void
127 232517 : initialize_growth_caches ()
128 : {
129 232517 : edge_growth_cache
130 232517 : = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
131 232517 : node_context_cache
132 232517 : = new fast_function_summary<node_context_summary *, va_heap> (symtab);
133 232517 : edge_growth_cache->disable_duplication_hook ();
134 232517 : node_context_cache->disable_insertion_hook ();
135 232517 : node_context_cache->disable_duplication_hook ();
136 232517 : }
137 :
138 : /* Free growth caches. */
139 :
140 : void
141 232517 : free_growth_caches (void)
142 : {
143 232517 : delete edge_growth_cache;
144 232517 : delete node_context_cache;
145 232517 : edge_growth_cache = NULL;
146 232517 : node_context_cache = NULL;
147 232517 : if (dump_file)
148 178 : fprintf (dump_file, "node context cache: %li hits, %li misses,"
149 : " %li initializations\n",
150 : node_context_cache_hit, node_context_cache_miss,
151 : node_context_cache_clear);
152 232517 : node_context_cache_hit = 0;
153 232517 : node_context_cache_miss = 0;
154 232517 : node_context_cache_clear = 0;
155 232517 : }
156 :
157 : /* Return hints derived from EDGE. */
158 :
159 : int
160 6983445 : simple_edge_hints (struct cgraph_edge *edge)
161 : {
162 6983445 : int hints = 0;
163 5375097 : struct cgraph_node *to = (edge->caller->inlined_to
164 6983445 : ? edge->caller->inlined_to : edge->caller);
165 6983445 : struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
166 6983445 : int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
167 6983445 : int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
168 :
169 6983445 : if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
170 : hints |= INLINE_HINT_same_scc;
171 :
172 6983445 : if (cross_module_call_p (edge))
173 4103 : hints |= INLINE_HINT_cross_module;
174 :
175 6983445 : return hints;
176 : }
177 :
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.
   If RET_NONSPEC_TIME is non-NULL, also store the nonspecialized time
   estimate there.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  /* NOTE(review): min_size is never assigned after this initialization in
     this version, so the "min_size >= 0" store below appears dead — confirm
     against the variant where estimate_size_and_time computes min_size.  */
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Determine the known clauses and argument values for this call.  */
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  /* Cache hit: reuse the previously computed estimates.  */
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  /* With checking, recompute and verify the cached values.  This is
	     skipped when IPA profile is present since estimates may then
	     legitimately change between queries.  */
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  /* Cache miss (or first use of the entry): compute the estimates
	     and overwrite the single cached slot.  */
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  /* Take a private copy of the context so the cached one stays
	     valid after AVALS goes out of scope.  */
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      /* No caching (early inliner): compute directly.  */
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or function attribute, we can quite safely
     identify hot edges and for those we disable size limits.  Don't do that
     when probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
       && (edge->count.ipa () * 2
	   > (edge->caller->inlined_to
	      ? edge->caller->inlined_to->count.ipa ()
	      : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
	  != NULL
	  && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
	  != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	  = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size and hints are stored biased so that zero means "no entry";
	 readers undo the bias (see do_estimate_edge_size/_hints).  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}
292 :
293 : /* Reset cache for NODE.
294 : This must be done each time NODE body is modified. */
295 : void
296 3464423 : reset_node_cache (struct cgraph_node *node)
297 : {
298 3464423 : if (node_context_cache)
299 3462134 : node_context_cache->remove (node);
300 3464423 : }
301 :
302 : /* Remove EDGE from caches once it was inlined. */
303 : void
304 3952779 : ipa_remove_from_growth_caches (struct cgraph_edge *edge)
305 : {
306 3952779 : if (node_context_cache)
307 901816 : node_context_cache->remove (edge->callee);
308 3952779 : if (edge_growth_cache)
309 901816 : edge_growth_cache->remove (edge);
310 3952779 : }
311 :
312 : /* Return estimated callee growth after inlining EDGE.
313 : Only to be called via estimate_edge_size. */
314 :
315 : int
316 18737711 : do_estimate_edge_size (struct cgraph_edge *edge)
317 : {
318 18737711 : int size;
319 18737711 : struct cgraph_node *callee;
320 18737711 : clause_t clause, nonspec_clause;
321 :
322 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
323 :
324 18737711 : if (edge_growth_cache != NULL)
325 : {
326 6775735 : do_estimate_edge_time (edge);
327 6775735 : size = edge_growth_cache->get (edge)->size;
328 6775735 : gcc_checking_assert (size);
329 6775735 : return size - (size > 0);
330 : }
331 :
332 11961976 : callee = edge->callee->ultimate_alias_target ();
333 :
334 : /* Early inliner runs without caching, go ahead and do the dirty work. */
335 11961976 : gcc_checking_assert (edge->inline_failed);
336 11961976 : ipa_auto_call_arg_values avals;
337 11961976 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
338 : &avals, true);
339 11961976 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
340 11961976 : ipa_call_estimates estimates;
341 11961976 : ctx.estimate_size_and_time (&estimates, false, false);
342 11961976 : return estimates.size;
343 11961976 : }
344 :
345 :
346 : /* Estimate the growth of the caller when inlining EDGE.
347 : Only to be called via estimate_edge_size. */
348 :
349 : ipa_hints
350 0 : do_estimate_edge_hints (struct cgraph_edge *edge)
351 : {
352 0 : struct cgraph_node *callee;
353 0 : clause_t clause, nonspec_clause;
354 :
355 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
356 :
357 0 : if (edge_growth_cache != NULL)
358 : {
359 0 : do_estimate_edge_time (edge);
360 0 : ipa_hints hints = edge_growth_cache->get (edge)->hints;
361 0 : gcc_checking_assert (hints);
362 0 : return hints - 1;
363 : }
364 :
365 0 : callee = edge->callee->ultimate_alias_target ();
366 :
367 : /* Early inliner runs without caching, go ahead and do the dirty work. */
368 0 : gcc_checking_assert (edge->inline_failed);
369 0 : ipa_auto_call_arg_values avals;
370 0 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
371 : &avals, true);
372 0 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
373 0 : ipa_call_estimates estimates;
374 0 : ctx.estimate_size_and_time (&estimates, false, true);
375 0 : ipa_hints hints = estimates.hints | simple_edge_hints (edge);
376 0 : return hints;
377 0 : }
378 :
379 : /* Estimate the size of NODE after inlining EDGE which should be an
380 : edge to either NODE or a call inlined into NODE. */
381 :
382 : int
383 6588267 : estimate_size_after_inlining (struct cgraph_node *node,
384 : struct cgraph_edge *edge)
385 : {
386 6588267 : class ipa_call_summary *es = ipa_call_summaries->get (edge);
387 6588267 : ipa_size_summary *s = ipa_size_summaries->get (node);
388 6588267 : if (!es->predicate || *es->predicate != false)
389 : {
390 6588267 : int size = s->size + estimate_edge_growth (edge);
391 6588267 : gcc_assert (size >= 0);
392 : return size;
393 : }
394 0 : return s->size;
395 : }
396 :
397 :
/* Data accumulated by do_estimate_growth_1 while walking the callers of a
   node (and its aliases).  */
struct growth_data
{
  /* The node whose overall growth is being estimated.  */
  struct cgraph_node *node;
  /* Set when a self-recursive caller was seen.  */
  bool self_recursive;
  /* Set when a caller that can not inline the call was seen.  */
  bool uninlinable;
  /* Accumulated estimated growth across all callers.  */
  int growth;
  /* The walk is aborted once growth exceeds this cap; INT_MAX means
     no cap (full estimation).  */
  int cap;
};
406 :
407 :
/* Worker for do_estimate_growth.  Collect growth for all callers.
   NODE is the node (or one of its aliases) being walked; DATA points to a
   growth_data accumulator.  Return true to abort the walk early — either
   because growth already exceeded the cap, or, with a finite cap, because
   an uninlinable or recursive caller settles the question.  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      /* Don't count callback edges into growth, since they are never inlined
	 anyway.  */
      if (e->callback)
	continue;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  /* A finite cap means the caller only asks whether growth stays
	     within bounds; an uninlinable caller decides that already.  */
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}
447 :
448 : /* Return estimated savings for eliminating offline copy of NODE by inlining
449 : it everywhere. */
450 :
451 : static int
452 3243010 : offline_size (struct cgraph_node *node, ipa_size_summary *info)
453 : {
454 3243010 : if (!DECL_EXTERNAL (node->decl))
455 : {
456 3109345 : if (node->will_be_removed_from_program_if_no_direct_calls_p ())
457 860757 : return info->size;
458 : /* COMDAT functions are very often not shared across multiple units
459 : since they come from various template instantiations.
460 : Take this into account. */
461 2248588 : else if (DECL_COMDAT (node->decl)
462 2248588 : && node->can_remove_if_no_direct_calls_p ())
463 : {
464 566309 : int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
465 566309 : return (info->size * (100 - prob) + 50) / 100;
466 : }
467 : }
468 : return 0;
469 : }
470 :
471 : /* Estimate the growth caused by inlining NODE into all callers. */
472 :
473 : int
474 1361542 : estimate_growth (struct cgraph_node *node)
475 : {
476 1361542 : struct growth_data d = { node, false, false, 0, INT_MAX };
477 1361542 : ipa_size_summary *info = ipa_size_summaries->get (node);
478 :
479 1361542 : if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
480 : return 1;
481 :
482 : /* For self recursive functions the growth estimation really should be
483 : infinity. We don't want to return very large values because the growth
484 : plays various roles in badness computation fractions. Be sure to not
485 : return zero or negative growths. */
486 1361542 : if (d.self_recursive)
487 5138 : d.growth = d.growth < info->size ? info->size : d.growth;
488 1356404 : else if (!d.uninlinable)
489 1356148 : d.growth -= offline_size (node, info);
490 :
491 1361542 : return d.growth;
492 : }
493 :
/* Helper for growth_positive_p.  Cheaply accumulate into *GROWTH a lower
   bound of the growth caused by inlining NODE into its callers (and, via
   recursion, into callers of its aliases), skipping KNOWN_EDGE whose growth
   the caller accounted for already.  MIN_SIZE is NODE's minimal size used
   when no cached edge estimate exists; *N is a budget of uncached edges we
   are willing to look at.  Return true when growth is known to exceed
   OFFLINE or cannot be determined cheaply (non-removable node, final
   inlining error, missing summary); return false when the bound stayed
   within OFFLINE or the budget ran out.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  /* If the offline copy cannot go away, growth is trivially positive.  */
  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	/* Cached sizes are biased by one; undo the bias.  */
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Optimistic estimate: the body shrinks to MIN_SIZE and the call
	     statement itself disappears.  */
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  /* Aliases can be called too; recurse into their callers while budget
     remains.  */
  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}
538 :
539 :
/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  /* No offline savings plus a known positive edge growth settles it.  */
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Budget of uncached edges we are willing to examine cheaply before
     falling back to the full walk below.  */
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	/* Cached sizes are biased by one; undo the bias.  */
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Optimistic lower bound: body shrinks to MIN_SIZE, call
	     statement disappears.  */
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      /* Even the optimistic bound exceeds the offline savings.  */
      if (min_growth > offline)
	return true;
    }

  /* With budget left, extend the cheap bound across alias callers.  */
  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* Cheap bounds were inconclusive; do the capped full estimation.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
|