Line data Source code
1 : /* Analysis used by inlining decision heuristics.
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Jan Hubicka
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "tree.h"
26 : #include "gimple.h"
27 : #include "alloc-pool.h"
28 : #include "tree-pass.h"
29 : #include "ssa.h"
30 : #include "tree-streamer.h"
31 : #include "cgraph.h"
32 : #include "diagnostic.h"
33 : #include "fold-const.h"
34 : #include "print-tree.h"
35 : #include "tree-inline.h"
36 : #include "gimple-pretty-print.h"
37 : #include "cfganal.h"
38 : #include "gimple-iterator.h"
39 : #include "tree-cfg.h"
40 : #include "tree-ssa-loop-niter.h"
41 : #include "tree-ssa-loop.h"
42 : #include "symbol-summary.h"
43 : #include "sreal.h"
44 : #include "ipa-cp.h"
45 : #include "ipa-prop.h"
46 : #include "ipa-fnsummary.h"
47 : #include "ipa-inline.h"
48 : #include "cfgloop.h"
49 : #include "tree-scalar-evolution.h"
50 : #include "ipa-utils.h"
51 : #include "cfgexpand.h"
52 : #include "gimplify.h"
53 : #include "attribs.h"
54 :
/* Cached node/edge growths.  Maps call edges to their cached size/time
   growth estimates; populated lazily by do_estimate_edge_time and NULL
   when caching is disabled (e.g. during early inlining).  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;
57 :
/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  /* The cached call context; a lookup is a hit when equal_to matches.  */
  ipa_cached_call_context ctx;
  /* Estimated specialized and non-specialized execution times.  */
  sreal time, nonspec_time;
  /* Estimated size for the context.  */
  int size;
  /* Inline hints computed for the context.  */
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    /* Free memory owned by the cached context.  */
    ctx.release ();
  }
};
77 :
/* At the moment we implement primitive single entry LRU cache.  */
class node_context_summary
{
public:
  /* The single cached context entry kept for this function.  */
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};
92 :
/* Summary holding the single-entry context cache for every function.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectivity; dumped and reset by
   free_growth_caches.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;
99 :
100 : /* Give initial reasons why inlining would fail on EDGE. This gets either
101 : nullified or usually overwritten by more precise reasons later. */
102 :
103 : void
104 38058210 : initialize_inline_failed (struct cgraph_edge *e)
105 : {
106 38058210 : struct cgraph_node *callee = e->callee;
107 :
108 38058210 : if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
109 75948151 : && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
110 : ;
111 38045078 : else if (e->indirect_unknown_callee)
112 668414 : e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
113 37376664 : else if (!callee->definition)
114 19635983 : e->inline_failed = CIF_BODY_NOT_AVAILABLE;
115 17740681 : else if (callee->redefined_extern_inline)
116 190 : e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
117 : else
118 17740491 : e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
119 38058210 : gcc_checking_assert (!e->call_stmt_cannot_inline_p
120 : || cgraph_inline_failed_type (e->inline_failed)
121 : == CIF_FINAL_ERROR);
122 38058210 : }
123 :
124 : /* Allocate edge growth caches. */
125 :
126 : void
127 232276 : initialize_growth_caches ()
128 : {
129 232276 : edge_growth_cache
130 232276 : = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
131 232276 : node_context_cache
132 232276 : = new fast_function_summary<node_context_summary *, va_heap> (symtab);
133 232276 : edge_growth_cache->disable_duplication_hook ();
134 232276 : node_context_cache->disable_insertion_hook ();
135 232276 : node_context_cache->disable_duplication_hook ();
136 232276 : }
137 :
138 : /* Free growth caches. */
139 :
140 : void
141 232276 : free_growth_caches (void)
142 : {
143 232276 : delete edge_growth_cache;
144 232276 : delete node_context_cache;
145 232276 : edge_growth_cache = NULL;
146 232276 : node_context_cache = NULL;
147 232276 : if (dump_file)
148 178 : fprintf (dump_file, "node context cache: %li hits, %li misses,"
149 : " %li initializations\n",
150 : node_context_cache_hit, node_context_cache_miss,
151 : node_context_cache_clear);
152 232276 : node_context_cache_hit = 0;
153 232276 : node_context_cache_miss = 0;
154 232276 : node_context_cache_clear = 0;
155 232276 : }
156 :
157 : /* Return hints derived from EDGE. */
158 :
159 : int
160 6579563 : simple_edge_hints (struct cgraph_edge *edge)
161 : {
162 6579563 : int hints = 0;
163 5053240 : struct cgraph_node *to = (edge->caller->inlined_to
164 6579563 : ? edge->caller->inlined_to : edge->caller);
165 6579563 : struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
166 6579563 : int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
167 6579563 : int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
168 :
169 6579563 : if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
170 : hints |= INLINE_HINT_same_scc;
171 :
172 6579563 : if (cross_module_call_p (edge))
173 4103 : hints |= INLINE_HINT_cross_module;
174 :
175 6579563 : return hints;
176 : }
177 :
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  /* NOTE(review): min_size is never updated below, so the min_size >= 0
     store near the end is currently dead code — confirm whether the update
     was lost or is intentionally disabled.  */
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Compute the clauses and known argument values holding at this
     particular call site.  */
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  /* Cache hit: reuse the previously computed estimates.  */
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  /* With checking enabled, recompute and verify the cached values
	     (skipped when profile training/IPA counts are in play).  */
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  /* Cache miss: distinguish a conflict with an existing entry from
	     a first-time initialization for the statistics.  */
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  /* Remember the context so the next query for CALLEE can hit.  */
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      /* Caching disabled: compute the estimates from scratch.  */
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or function attribute, we can quite safely
     identify hot edges and for those we disable size limits.  Don't do that
     when probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
       && (edge->count.ipa () * 2
	   > (edge->caller->inlined_to
	      ? edge->caller->inlined_to->count.ipa ()
	      : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
	  != NULL
	  && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
	  != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	   = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size and hints are stored biased by one so that a zero entry means
	 "no cached value"; the readers undo the bias.  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}
292 :
293 : /* Reset cache for NODE.
294 : This must be done each time NODE body is modified. */
295 : void
296 3240796 : reset_node_cache (struct cgraph_node *node)
297 : {
298 3240796 : if (node_context_cache)
299 3238281 : node_context_cache->remove (node);
300 3240796 : }
301 :
302 : /* Remove EDGE from caches once it was inlined. */
303 : void
304 3886859 : ipa_remove_from_growth_caches (struct cgraph_edge *edge)
305 : {
306 3886859 : if (node_context_cache)
307 867189 : node_context_cache->remove (edge->callee);
308 3886859 : if (edge_growth_cache)
309 867189 : edge_growth_cache->remove (edge);
310 3886859 : }
311 :
312 : /* Return estimated callee growth after inlining EDGE.
313 : Only to be called via estimate_edge_size. */
314 :
315 : int
316 18288925 : do_estimate_edge_size (struct cgraph_edge *edge)
317 : {
318 18288925 : int size;
319 18288925 : struct cgraph_node *callee;
320 18288925 : clause_t clause, nonspec_clause;
321 :
322 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
323 :
324 18288925 : if (edge_growth_cache != NULL)
325 : {
326 6385697 : do_estimate_edge_time (edge);
327 6385697 : size = edge_growth_cache->get (edge)->size;
328 6385697 : gcc_checking_assert (size);
329 6385697 : return size - (size > 0);
330 : }
331 :
332 11903228 : callee = edge->callee->ultimate_alias_target ();
333 :
334 : /* Early inliner runs without caching, go ahead and do the dirty work. */
335 11903228 : gcc_checking_assert (edge->inline_failed);
336 11903228 : ipa_auto_call_arg_values avals;
337 11903228 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
338 : &avals, true);
339 11903228 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
340 11903228 : ipa_call_estimates estimates;
341 11903228 : ctx.estimate_size_and_time (&estimates, false, false);
342 11903228 : return estimates.size;
343 11903228 : }
344 :
345 :
346 : /* Estimate the growth of the caller when inlining EDGE.
347 : Only to be called via estimate_edge_size. */
348 :
349 : ipa_hints
350 0 : do_estimate_edge_hints (struct cgraph_edge *edge)
351 : {
352 0 : struct cgraph_node *callee;
353 0 : clause_t clause, nonspec_clause;
354 :
355 : /* When we do caching, use do_estimate_edge_time to populate the entry. */
356 :
357 0 : if (edge_growth_cache != NULL)
358 : {
359 0 : do_estimate_edge_time (edge);
360 0 : ipa_hints hints = edge_growth_cache->get (edge)->hints;
361 0 : gcc_checking_assert (hints);
362 0 : return hints - 1;
363 : }
364 :
365 0 : callee = edge->callee->ultimate_alias_target ();
366 :
367 : /* Early inliner runs without caching, go ahead and do the dirty work. */
368 0 : gcc_checking_assert (edge->inline_failed);
369 0 : ipa_auto_call_arg_values avals;
370 0 : evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
371 : &avals, true);
372 0 : ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
373 0 : ipa_call_estimates estimates;
374 0 : ctx.estimate_size_and_time (&estimates, false, true);
375 0 : ipa_hints hints = estimates.hints | simple_edge_hints (edge);
376 0 : return hints;
377 0 : }
378 :
379 : /* Estimate the size of NODE after inlining EDGE which should be an
380 : edge to either NODE or a call inlined into NODE. */
381 :
382 : int
383 6478132 : estimate_size_after_inlining (struct cgraph_node *node,
384 : struct cgraph_edge *edge)
385 : {
386 6478132 : class ipa_call_summary *es = ipa_call_summaries->get (edge);
387 6478132 : ipa_size_summary *s = ipa_size_summaries->get (node);
388 6478132 : if (!es->predicate || *es->predicate != false)
389 : {
390 6478132 : int size = s->size + estimate_edge_growth (edge);
391 6478132 : gcc_assert (size >= 0);
392 : return size;
393 : }
394 0 : return s->size;
395 : }
396 :

/* Data accumulated by do_estimate_growth_1 while walking callers of a
   node and its aliases.  */
struct growth_data
{
  /* Node whose overall growth is being estimated.  */
  struct cgraph_node *node;
  /* Set when a recursive caller edge was encountered.  */
  bool self_recursive;
  /* Set when some caller can not have the call inlined.  */
  bool uninlinable;
  /* Accumulated growth over all caller edges.  */
  int growth;
  /* Walk stops early once growth exceeds this cap; INT_MAX means no cap.  */
  int cap;
};
406 :

/* Worker for do_estimate_growth.  Collect growth for all callers.
   DATA points to a growth_data; returning true stops the walk early
   (used when a cap is in effect).  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      /* Don't count callback edges into growth, since they are never inlined
	 anyway.  */
      if (e->callback)
	continue;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  /* When a cap is in effect, an uninlinable caller already decides
	     the outcome; cut the walk short.  */
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      /* Stop once accumulated growth exceeds the cap.  */
      if (d->growth > d->cap)
	return true;
    }
  return false;
}
447 :
448 : /* Return estimated savings for eliminating offline copy of NODE by inlining
449 : it everywhere. */
450 :
451 : static int
452 3178343 : offline_size (struct cgraph_node *node, ipa_size_summary *info)
453 : {
454 3178343 : if (!DECL_EXTERNAL (node->decl))
455 : {
456 3143168 : if (node->will_be_removed_from_program_if_no_direct_calls_p ())
457 847006 : return info->size;
458 : /* COMDAT functions are very often not shared across multiple units
459 : since they come from various template instantiations.
460 : Take this into account. */
461 2296162 : else if (DECL_COMDAT (node->decl)
462 2296162 : && node->can_remove_if_no_direct_calls_p ())
463 : {
464 620848 : int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
465 620848 : return (info->size * (100 - prob) + 50) / 100;
466 : }
467 : }
468 : return 0;
469 : }
470 :
471 : /* Estimate the growth caused by inlining NODE into all callers. */
472 :
473 : int
474 1351909 : estimate_growth (struct cgraph_node *node)
475 : {
476 1351909 : struct growth_data d = { node, false, false, 0, INT_MAX };
477 1351909 : ipa_size_summary *info = ipa_size_summaries->get (node);
478 :
479 1351909 : if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
480 : return 1;
481 :
482 : /* For self recursive functions the growth estimation really should be
483 : infinity. We don't want to return very large values because the growth
484 : plays various roles in badness computation fractions. Be sure to not
485 : return zero or negative growths. */
486 1351909 : if (d.self_recursive)
487 5040 : d.growth = d.growth < info->size ? info->size : d.growth;
488 1346869 : else if (!d.uninlinable)
489 1346677 : d.growth -= offline_size (node, info);
490 :
491 1351909 : return d.growth;
492 : }
493 :
/* Helper for growth_positive_p.  Walk callers of NODE (and, recursively,
   of its aliases), accumulating a growth lower bound in *GROWTH.  *N is a
   shared budget of edges that may be estimated by the cheap MIN_SIZE path;
   KNOWN_EDGE is skipped since its growth is accounted for by the caller.
   Return true as soon as growth is known to exceed OFFLINE (or the node
   can not be removed anyway); return false when the bound stayed within
   OFFLINE or the budget ran out.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  /* If the offline copy can not go away, growth is positive regardless.  */
  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      /* Prefer the precise cached per-edge growth when available
	 (stored biased by one; undo the bias)...  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  /* ...otherwise use MIN_SIZE as a cheap lower bound, spending one
	     unit of the shared budget.  */
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  /* With budget left, also walk callers reaching NODE via aliases.  */
  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}
538 :


/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Budget of caller edges we are willing to bound cheaply before
     falling back to the precise computation below.  */
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      /* Use the precise cached per-edge growth when available (stored
	 biased by one), else a cheap min_size based lower bound.  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  /* Also bound callers that reach NODE through its aliases.  */
  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* The quick bound was inconclusive; compute growth precisely with the
     walk capped at OFFLINE.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
|