Line data Source code
1 : /* Analysis used by inlining decision heuristics.
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Jan Hubicka
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "tree.h"
26 : #include "gimple.h"
27 : #include "alloc-pool.h"
28 : #include "tree-pass.h"
29 : #include "ssa.h"
30 : #include "tree-streamer.h"
31 : #include "cgraph.h"
32 : #include "diagnostic.h"
33 : #include "fold-const.h"
34 : #include "print-tree.h"
35 : #include "tree-inline.h"
36 : #include "gimple-pretty-print.h"
37 : #include "cfganal.h"
38 : #include "gimple-iterator.h"
39 : #include "tree-cfg.h"
40 : #include "tree-ssa-loop-niter.h"
41 : #include "tree-ssa-loop.h"
42 : #include "symbol-summary.h"
43 : #include "sreal.h"
44 : #include "ipa-cp.h"
45 : #include "ipa-prop.h"
46 : #include "ipa-fnsummary.h"
47 : #include "ipa-inline.h"
48 : #include "cfgloop.h"
49 : #include "tree-scalar-evolution.h"
50 : #include "ipa-utils.h"
51 : #include "cfgexpand.h"
52 : #include "gimplify.h"
53 : #include "attribs.h"
54 :
/* Cached node/edge growths.  Allocated by initialize_growth_caches and
   freed by free_growth_caches; NULL when no caching is active (e.g. the
   early inliner runs without caching).  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;
57 :
58 : /* The context cache remembers estimated time/size and hints for given
59 : ipa_call_context of a call. */
60 : class node_context_cache_entry
61 : {
62 : public:
63 : ipa_cached_call_context ctx;
64 : sreal time, nonspec_time;
65 : int size;
66 : ipa_hints hints;
67 :
68 1215686 : node_context_cache_entry ()
69 1215686 : : ctx ()
70 : {
71 : }
72 1215686 : ~node_context_cache_entry ()
73 : {
74 1215686 : ctx.release ();
75 : }
76 : };
77 :
78 : /* At the moment we implement primitive single entry LRU cache. */
79 : class node_context_summary
80 : {
81 : public:
82 : node_context_cache_entry entry;
83 :
84 1215686 : node_context_summary ()
85 0 : : entry ()
86 : {
87 : }
88 1215686 : ~node_context_summary ()
89 : {
90 1215686 : }
91 : };
92 :
/* Summary holding the context cache.  Allocated together with
   edge_growth_cache in initialize_growth_caches and freed in
   free_growth_caches.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectivity; dumped and reset
   by free_growth_caches.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;
99 :
100 : /* Give initial reasons why inlining would fail on EDGE. This gets either
101 : nullified or usually overwritten by more precise reasons later. */
102 :
103 : void
104 38473552 : initialize_inline_failed (struct cgraph_edge *e)
105 : {
106 38473552 : struct cgraph_node *callee = e->callee;
107 :
108 38473552 : if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
109 76778791 : && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
110 : ;
111 38460423 : else if (e->indirect_unknown_callee)
112 671276 : e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
113 37789147 : else if (!callee->definition)
114 19663908 : e->inline_failed = CIF_BODY_NOT_AVAILABLE;
115 18125239 : else if (callee->redefined_extern_inline)
116 190 : e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
117 : else
118 18125049 : e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
119 38473552 : gcc_checking_assert (!e->call_stmt_cannot_inline_p
120 : || cgraph_inline_failed_type (e->inline_failed)
121 : == CIF_FINAL_ERROR);
122 38473552 : }
123 :
124 : /* Allocate edge growth caches. */
125 :
126 : void
127 231549 : initialize_growth_caches ()
128 : {
129 231549 : edge_growth_cache
130 231549 : = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
131 231549 : node_context_cache
132 231549 : = new fast_function_summary<node_context_summary *, va_heap> (symtab);
133 231549 : edge_growth_cache->disable_duplication_hook ();
134 231549 : node_context_cache->disable_insertion_hook ();
135 231549 : node_context_cache->disable_duplication_hook ();
136 231549 : }
137 :
138 : /* Free growth caches. */
139 :
140 : void
141 231549 : free_growth_caches (void)
142 : {
143 231549 : delete edge_growth_cache;
144 231549 : delete node_context_cache;
145 231549 : edge_growth_cache = NULL;
146 231549 : node_context_cache = NULL;
147 231549 : if (dump_file)
148 178 : fprintf (dump_file, "node context cache: %li hits, %li misses,"
149 : " %li initializations\n",
150 : node_context_cache_hit, node_context_cache_miss,
151 : node_context_cache_clear);
152 231549 : node_context_cache_hit = 0;
153 231549 : node_context_cache_miss = 0;
154 231549 : node_context_cache_clear = 0;
155 231549 : }
156 :
157 : /* Return hints derived from EDGE. */
158 :
159 : int
160 7218373 : simple_edge_hints (struct cgraph_edge *edge)
161 : {
162 7218373 : int hints = 0;
163 5525560 : struct cgraph_node *to = (edge->caller->inlined_to
164 7218373 : ? edge->caller->inlined_to : edge->caller);
165 7218373 : struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
166 7218373 : int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
167 7218373 : int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
168 :
169 7218373 : if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
170 : hints |= INLINE_HINT_same_scc;
171 :
172 7218373 : if (cross_module_call_p (edge))
173 4103 : hints |= INLINE_HINT_cross_module;
174 :
175 7218373 : return hints;
176 : }
177 :
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  /* NOTE(review): min_size is never assigned after this initialization,
     so the "min_size >= 0" store below appears to be dead code —
     confirm against revision history before removing.  */
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Work out which predicates/argument values are known on this edge.  */
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  /* Cache hit: reuse the previously computed estimates.  */
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      /* With checking enabled, recompute and verify that the
		 cached values are still accurate.  */
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  /* Cache miss: distinguish a replaced entry from a fresh one
	     for statistics, then compute and remember new estimates.  */
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      /* No caching active (early inliner): compute directly.  */
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or function attribute, we can quite safely
     identify hot edges and for those we disable size limits.  Don't do that
     when probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
       && (edge->count.ipa () * 2
	   > (edge->caller->inlined_to
	      ? edge->caller->inlined_to->count.ipa ()
	      : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
	  != NULL
	  && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
	  != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	  = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size is stored biased by +1 so that 0 can mean "unset"
	 (size >= 0 holds by the assert above, so this always adds 1).  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      /* Hints are biased by +1 for the same "0 means unset" reason.  */
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}
292 :
293 : /* Reset cache for NODE.
294 : This must be done each time NODE body is modified. */
295 : void
296 3567753 : reset_node_cache (struct cgraph_node *node)
297 : {
298 3567753 : if (node_context_cache)
299 3565253 : node_context_cache->remove (node);
300 3567753 : }
301 :
302 : /* Remove EDGE from caches once it was inlined. */
303 : void
304 4022824 : ipa_remove_from_growth_caches (struct cgraph_edge *edge)
305 : {
306 4022824 : if (node_context_cache)
307 923164 : node_context_cache->remove (edge->callee);
308 4022824 : if (edge_growth_cache)
309 923164 : edge_growth_cache->remove (edge);
310 4022824 : }
311 :
312 : /* Return estimated callee growth after inlining EDGE.
313 : Only to be called via estimate_edge_size. */
314 :
315 : int
316 19182825 : do_estimate_edge_size (struct cgraph_edge *edge)
317 : {
318 19182825 : int size;
319 19182825 : struct cgraph_node *callee;
320 19182825 : clause_t clause, nonspec_clause;
321 :
322 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
323 :
324 19182825 : if (edge_growth_cache != NULL)
325 : {
326 7009813 : do_estimate_edge_time (edge);
327 7009813 : size = edge_growth_cache->get (edge)->size;
328 7009813 : gcc_checking_assert (size);
329 7009813 : return size - (size > 0);
330 : }
331 :
332 12173012 : callee = edge->callee->ultimate_alias_target ();
333 :
334 : /* Early inliner runs without caching, go ahead and do the dirty work. */
335 12173012 : gcc_checking_assert (edge->inline_failed);
336 12173012 : ipa_auto_call_arg_values avals;
337 12173012 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
338 : &avals, true);
339 12173012 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
340 12173012 : ipa_call_estimates estimates;
341 12173012 : ctx.estimate_size_and_time (&estimates, false, false);
342 12173012 : return estimates.size;
343 12173012 : }
344 :
345 :
346 : /* Estimate the growth of the caller when inlining EDGE.
347 : Only to be called via estimate_edge_size. */
348 :
349 : ipa_hints
350 0 : do_estimate_edge_hints (struct cgraph_edge *edge)
351 : {
352 0 : struct cgraph_node *callee;
353 0 : clause_t clause, nonspec_clause;
354 :
355 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
356 :
357 0 : if (edge_growth_cache != NULL)
358 : {
359 0 : do_estimate_edge_time (edge);
360 0 : ipa_hints hints = edge_growth_cache->get (edge)->hints;
361 0 : gcc_checking_assert (hints);
362 0 : return hints - 1;
363 : }
364 :
365 0 : callee = edge->callee->ultimate_alias_target ();
366 :
367 : /* Early inliner runs without caching, go ahead and do the dirty work. */
368 0 : gcc_checking_assert (edge->inline_failed);
369 0 : ipa_auto_call_arg_values avals;
370 0 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
371 : &avals, true);
372 0 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
373 0 : ipa_call_estimates estimates;
374 0 : ctx.estimate_size_and_time (&estimates, false, true);
375 0 : ipa_hints hints = estimates.hints | simple_edge_hints (edge);
376 0 : return hints;
377 0 : }
378 :
379 : /* Estimate the size of NODE after inlining EDGE which should be an
380 : edge to either NODE or a call inlined into NODE. */
381 :
382 : int
383 6751256 : estimate_size_after_inlining (struct cgraph_node *node,
384 : struct cgraph_edge *edge)
385 : {
386 6751256 : class ipa_call_summary *es = ipa_call_summaries->get (edge);
387 6751256 : ipa_size_summary *s = ipa_size_summaries->get (node);
388 6751256 : if (!es->predicate || *es->predicate != false)
389 : {
390 6751256 : int size = s->size + estimate_edge_growth (edge);
391 6751256 : gcc_assert (size >= 0);
392 : return size;
393 : }
394 0 : return s->size;
395 : }
396 :
397 :
/* Data gathered by do_estimate_growth_1 while walking callers of a node
   (and of its aliases).  */
struct growth_data
{
  struct cgraph_node *node;	/* Node whose growth is estimated.  */
  bool self_recursive;		/* Set when a self-recursive edge is seen.  */
  bool uninlinable;		/* Set when some caller can not inline.  */
  int growth;			/* Accumulated growth estimate.  */
  int cap;			/* Walk stops once growth exceeds this;
				   INT_MAX means no capping.  */
};
406 :
407 :
/* Worker for do_estimate_growth.  Collect growth for all callers.
   Returns true to stop the walk early: either the cap was exceeded or,
   when capping is active, a caller was found that already decides the
   answer (uninlinable or self-recursive).  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      /* Don't count callback edges into growth, since they are never inlined
	 anyway.  */
      if (e->callback)
	continue;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  /* With a finite cap (growth_positive_p), an uninlinable caller
	     already settles the result; stop walking.  */
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      /* Stop once accumulated growth exceeds the cap.  */
      if (d->growth > d->cap)
	return true;
    }
  return false;
}
447 :
448 : /* Return estimated savings for eliminating offline copy of NODE by inlining
449 : it everywhere. */
450 :
451 : static int
452 3270666 : offline_size (struct cgraph_node *node, ipa_size_summary *info)
453 : {
454 3270666 : if (!DECL_EXTERNAL (node->decl))
455 : {
456 3134081 : if (node->will_be_removed_from_program_if_no_direct_calls_p ())
457 862786 : return info->size;
458 : /* COMDAT functions are very often not shared across multiple units
459 : since they come from various template instantiations.
460 : Take this into account. */
461 2271295 : else if (DECL_COMDAT (node->decl)
462 2271295 : && node->can_remove_if_no_direct_calls_p ())
463 : {
464 588842 : int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
465 588842 : return (info->size * (100 - prob) + 50) / 100;
466 : }
467 : }
468 : return 0;
469 : }
470 :
471 : /* Estimate the growth caused by inlining NODE into all callers. */
472 :
473 : int
474 1371565 : estimate_growth (struct cgraph_node *node)
475 : {
476 1371565 : struct growth_data d = { node, false, false, 0, INT_MAX };
477 1371565 : ipa_size_summary *info = ipa_size_summaries->get (node);
478 :
479 1371565 : if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
480 : return 1;
481 :
482 : /* For self recursive functions the growth estimation really should be
483 : infinity. We don't want to return very large values because the growth
484 : plays various roles in badness computation fractions. Be sure to not
485 : return zero or negative growths. */
486 1371565 : if (d.self_recursive)
487 5147 : d.growth = d.growth < info->size ? info->size : d.growth;
488 1366418 : else if (!d.uninlinable)
489 1366226 : d.growth -= offline_size (node, info);
490 :
491 1371565 : return d.growth;
492 : }
493 :
/* Worker for the quick pass of growth_positive_p.  Accumulate into *GROWTH
   an optimistic lower bound of growth over callers of NODE, recursing into
   aliases.  *N is a budget of uncached edges that may be examined; MIN_SIZE
   is the optimistic size bound used for edges without a cached estimate;
   KNOWN_EDGE is skipped (its growth was accounted by the caller).
   Return true when growth is known to exceed OFFLINE savings or the
   question cannot be settled cheaply; false when either all callers were
   accounted or the budget ran out.
   (Historical comment: "Verify if there are fewer than MAX_CALLERS.")  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  /* If the offline copy can not go away, inlining only adds size.  */
  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	/* Cached sizes are biased by +1 so that 0 means "unset".  */
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* No cached estimate: use the optimistic bound of the callee
	     shrinking to min_size after inlining.  */
	  *growth += min_size - es->call_stmt_size;
	  /* Budget exhausted; give up the quick pass.  */
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  /* Callers of aliases of NODE effectively call NODE as well.  */
  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}
538 :
539 :
/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  /* Nothing is saved by removing the offline copy and the known edge
     already grows: overall growth must be positive.  */
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Budget of uncached edges to examine in the quick pass.  */
  int n = 10;

  /* Quick pass: accumulate an optimistic lower bound of growth over the
     callers; if even that exceeds the offline savings, growth is surely
     positive.  */
  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      /* An uninlinable caller keeps the offline copy alive.  */
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	/* Cached sizes are biased by +1 so that 0 means "unset".  */
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Optimistic bound: callee can not shrink below min_size.  */
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  /* Account callers of aliases of NODE while budget remains.  */
  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* Quick pass was inconclusive: compute actual growth, capped at OFFLINE
     so the walk can stop as soon as the answer is known.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
|