Line data Source code
1 : /* Analysis used by inlining decision heuristics.
2 : Copyright (C) 2003-2026 Free Software Foundation, Inc.
3 : Contributed by Jan Hubicka
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "tree.h"
26 : #include "gimple.h"
27 : #include "alloc-pool.h"
28 : #include "tree-pass.h"
29 : #include "ssa.h"
30 : #include "tree-streamer.h"
31 : #include "cgraph.h"
32 : #include "diagnostic.h"
33 : #include "fold-const.h"
34 : #include "print-tree.h"
35 : #include "tree-inline.h"
36 : #include "gimple-pretty-print.h"
37 : #include "cfganal.h"
38 : #include "gimple-iterator.h"
39 : #include "tree-cfg.h"
40 : #include "tree-ssa-loop-niter.h"
41 : #include "tree-ssa-loop.h"
42 : #include "symbol-summary.h"
43 : #include "sreal.h"
44 : #include "ipa-cp.h"
45 : #include "ipa-prop.h"
46 : #include "ipa-fnsummary.h"
47 : #include "ipa-inline.h"
48 : #include "cfgloop.h"
49 : #include "tree-scalar-evolution.h"
50 : #include "ipa-utils.h"
51 : #include "cfgexpand.h"
52 : #include "gimplify.h"
53 : #include "attribs.h"
54 :
/* Cached node/edge growths.  NULL when growth caching is not active
   (e.g. during the early inliner, which runs without caches).  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;
57 :
/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  /* The call context the cached values below were computed for.  */
  ipa_cached_call_context ctx;
  /* Cached specialized and non-specialized time estimates.  */
  sreal time, nonspec_time;
  /* Cached size estimate.  */
  int size;
  /* Cached inline hints.  */
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    /* Release memory owned by the cached context.  */
    ctx.release ();
  }
};
77 :
/* At the moment we implement primitive single entry LRU cache.  */
class node_context_summary
{
public:
  /* The single cached entry kept per function (see node_context_cache).  */
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};
92 :
/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectiveness; dumped and reset by
   free_growth_caches.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;
99 :
/* Give initial reasons why inlining would fail on EDGE.  This gets either
   nullified or usually overwritten by more precise reasons later.  */

void
initialize_inline_failed (struct cgraph_edge *e)
{
  struct cgraph_node *callee = e->callee;

  /* A previously recorded final error (other than body-not-available,
     which may become stale) is definitive: keep it untouched.  */
  if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
      && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
    ;
  /* Indirect calls have no known callee at all.  */
  else if (e->indirect_unknown_callee)
    e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
  else if (!callee->definition)
    e->inline_failed = CIF_BODY_NOT_AVAILABLE;
  else if (callee->redefined_extern_inline)
    e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
  else
    e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
  /* A statement known to be uninlinable must carry a final error.  */
  gcc_checking_assert (!e->call_stmt_cannot_inline_p
		       || cgraph_inline_failed_type (e->inline_failed)
			  == CIF_FINAL_ERROR);
}
123 :
124 : /* Allocate edge growth caches. */
125 :
126 : void
127 232036 : initialize_growth_caches ()
128 : {
129 232036 : edge_growth_cache
130 232036 : = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
131 232036 : node_context_cache
132 232036 : = new fast_function_summary<node_context_summary *, va_heap> (symtab);
133 232036 : edge_growth_cache->disable_duplication_hook ();
134 232036 : node_context_cache->disable_insertion_hook ();
135 232036 : node_context_cache->disable_duplication_hook ();
136 232036 : }
137 :
138 : /* Free growth caches. */
139 :
140 : void
141 232036 : free_growth_caches (void)
142 : {
143 232036 : delete edge_growth_cache;
144 232036 : delete node_context_cache;
145 232036 : edge_growth_cache = NULL;
146 232036 : node_context_cache = NULL;
147 232036 : if (dump_file)
148 178 : fprintf (dump_file, "node context cache: %li hits, %li misses,"
149 : " %li initializations\n",
150 : node_context_cache_hit, node_context_cache_miss,
151 : node_context_cache_clear);
152 232036 : node_context_cache_hit = 0;
153 232036 : node_context_cache_miss = 0;
154 232036 : node_context_cache_clear = 0;
155 232036 : }
156 :
157 : /* Return hints derived from EDGE. */
158 :
159 : int
160 6801986 : simple_edge_hints (struct cgraph_edge *edge)
161 : {
162 6801986 : int hints = 0;
163 5216712 : struct cgraph_node *to = (edge->caller->inlined_to
164 6801986 : ? edge->caller->inlined_to : edge->caller);
165 6801986 : struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
166 6801986 : int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
167 6801986 : int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
168 :
169 6801986 : if (to_scc_no && to_scc_no == callee_scc_no && !edge->recursive_p ())
170 : hints |= INLINE_HINT_same_scc;
171 :
172 6801986 : if (cross_module_call_p (edge))
173 4103 : hints |= INLINE_HINT_cross_module;
174 :
175 6801986 : return hints;
176 : }
177 :
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  If RET_NONSPEC_TIME
   is non-NULL, also store the non-specialized time estimate there.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Determine the predicate clauses and known argument values that hold
     for this particular call site.  */
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  /* Cache hit: reuse the previously computed estimates.  */
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  /* With checking enabled, recompute and verify the cached values
	     (skipped when profile training/IPA counts may perturb them).  */
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  /* Cache miss (existing entry with different context) or a fresh,
	     never-initialized entry.  */
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  /* Remember the context so the next query for the same context
	     is a hit.  */
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      /* No caching active: compute estimates directly.  */
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or function attribute, we can quite safely
     identify hot edges and for those we disable size limits.  Don't do that
     when probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
       && (edge->count.ipa () * 2
	   > (edge->caller->inlined_to
	      ? edge->caller->inlined_to->count.ipa ()
	      : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
	  != NULL
	  && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
	  != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      /* NOTE(review): min_size is initialized to -1 above and never updated
	 in this function, so this store appears unreachable — presumably a
	 leftover from an older min_size computation; confirm upstream.  */
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	  = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size and hints are stored biased by one so that a value of zero
	 means "not cached" (see do_estimate_edge_size/_hints, which assert
	 non-zero and undo the bias).  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}
292 :
293 : /* Reset cache for NODE.
294 : This must be done each time NODE body is modified. */
295 : void
296 3337998 : reset_node_cache (struct cgraph_node *node)
297 : {
298 3337998 : if (node_context_cache)
299 3335719 : node_context_cache->remove (node);
300 3337998 : }
301 :
302 : /* Remove EDGE from caches once it was inlined. */
303 : void
304 3896214 : ipa_remove_from_growth_caches (struct cgraph_edge *edge)
305 : {
306 3896214 : if (node_context_cache)
307 888510 : node_context_cache->remove (edge->callee);
308 3896214 : if (edge_growth_cache)
309 888510 : edge_growth_cache->remove (edge);
310 3896214 : }
311 :
/* Return estimated callee growth after inlining EDGE.
   Only to be called via estimate_edge_size.  */

int
do_estimate_edge_size (struct cgraph_edge *edge)
{
  int size;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      size = edge_growth_cache->get (edge)->size;
      /* The cached size is biased by one so that zero means "absent";
	 assert presence and undo the bias.  */
      gcc_checking_assert (size);
      return size - (size > 0);
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  /* Size-only query: neither time nor hints are needed here.  */
  ctx.estimate_size_and_time (&estimates, false, false);
  return estimates.size;
}
344 :
345 :
/* Return estimated inline hints for EDGE.
   Only to be called via estimate_edge_hints, that handles the caching
   mechanism.  */

ipa_hints
do_estimate_edge_hints (struct cgraph_edge *edge)
{
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;

  /* When we do caching, use do_estimate_edge_time to populate the entry.  */

  if (edge_growth_cache != NULL)
    {
      do_estimate_edge_time (edge);
      ipa_hints hints = edge_growth_cache->get (edge)->hints;
      /* Cached hints are biased by one so that zero means "absent";
	 assert presence and undo the bias.  */
      gcc_checking_assert (hints);
      return hints - 1;
    }

  callee = edge->callee->ultimate_alias_target ();

  /* Early inliner runs without caching, go ahead and do the dirty work.  */
  gcc_checking_assert (edge->inline_failed);
  ipa_auto_call_arg_values avals;
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
  ipa_call_estimates estimates;
  /* Hints-only query; time is not needed.  */
  ctx.estimate_size_and_time (&estimates, false, true);
  ipa_hints hints = estimates.hints | simple_edge_hints (edge);
  return hints;
}
378 :
379 : /* Estimate the size of NODE after inlining EDGE which should be an
380 : edge to either NODE or a call inlined into NODE. */
381 :
382 : int
383 6500069 : estimate_size_after_inlining (struct cgraph_node *node,
384 : struct cgraph_edge *edge)
385 : {
386 6500069 : class ipa_call_summary *es = ipa_call_summaries->get (edge);
387 6500069 : ipa_size_summary *s = ipa_size_summaries->get (node);
388 6500069 : if (!es->predicate || *es->predicate != false)
389 : {
390 6500069 : int size = s->size + estimate_edge_growth (edge);
391 6500069 : gcc_assert (size >= 0);
392 : return size;
393 : }
394 0 : return s->size;
395 : }
396 :
397 :
/* Data gathered by do_estimate_growth_1 while walking callers of a node.  */
struct growth_data
{
  /* The node whose growth is being estimated.  */
  struct cgraph_node *node;
  /* Set when a self-recursive caller edge was seen.  */
  bool self_recursive;
  /* Set when a caller edge that can never be inlined was seen.  */
  bool uninlinable;
  /* Accumulated growth estimate.  */
  int growth;
  /* Abort the walk once growth is known to exceed this cap
     (INT_MAX means compute the full growth).  */
  int cap;
};
406 :
407 :
408 : /* Worker for do_estimate_growth. Collect growth for all callers. */
409 :
410 : static bool
411 1760179 : do_estimate_growth_1 (struct cgraph_node *node, void *data)
412 : {
413 1760179 : struct cgraph_edge *e;
414 1760179 : struct growth_data *d = (struct growth_data *) data;
415 :
416 4017879 : for (e = node->callers; e; e = e->next_caller)
417 : {
418 2341399 : gcc_checking_assert (e->inline_failed);
419 :
420 : /* Don't count callback edges into growth, since they are never inlined
421 : anyway. */
422 2341399 : if (e->callback)
423 1958 : continue;
424 :
425 2339441 : if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
426 2339441 : || !opt_for_fn (e->caller->decl, optimize))
427 : {
428 354 : d->uninlinable = true;
429 354 : if (d->cap < INT_MAX)
430 : return true;
431 354 : continue;
432 : }
433 :
434 2339087 : if (e->recursive_p ())
435 : {
436 10919 : d->self_recursive = true;
437 10919 : if (d->cap < INT_MAX)
438 : return true;
439 9554 : continue;
440 : }
441 2328168 : d->growth += estimate_edge_growth (e);
442 2328168 : if (d->growth > d->cap)
443 : return true;
444 : }
445 : return false;
446 : }
447 :
448 : /* Return estimated savings for eliminating offline copy of NODE by inlining
449 : it everywhere. */
450 :
451 : static int
452 3218311 : offline_size (struct cgraph_node *node, ipa_size_summary *info)
453 : {
454 3218311 : if (!DECL_EXTERNAL (node->decl))
455 : {
456 3089566 : if (node->will_be_removed_from_program_if_no_direct_calls_p ())
457 857458 : return info->size;
458 : /* COMDAT functions are very often not shared across multiple units
459 : since they come from various template instantiations.
460 : Take this into account. */
461 2232108 : else if (DECL_COMDAT (node->decl)
462 2232108 : && node->can_remove_if_no_direct_calls_p ())
463 : {
464 552400 : int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
465 552400 : return (info->size * (100 - prob) + 50) / 100;
466 : }
467 : }
468 : return 0;
469 : }
470 :
/* Estimate the growth caused by inlining NODE into all callers.  */

int
estimate_growth (struct cgraph_node *node)
{
  /* Cap of INT_MAX requests the full growth computation; the walk then
     never aborts early on uninlinable/recursive edges.  */
  struct growth_data d = { node, false, false, 0, INT_MAX };
  ipa_size_summary *info = ipa_size_summaries->get (node);

  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return 1;

  /* For self recursive functions the growth estimation really should be
     infinity.  We don't want to return very large values because the growth
     plays various roles in badness computation fractions.  Be sure to not
     return zero or negative growths.  */
  if (d.self_recursive)
    d.growth = d.growth < info->size ? info->size : d.growth;
  else if (!d.uninlinable)
    /* Inlining everywhere may let the offline copy be removed; credit
       the expected savings against the growth.  */
    d.growth -= offline_size (node, info);

  return d.growth;
}
493 :
/* Verify if there are fewer than MAX_CALLERS.
   Helper for growth_positive_p: walk callers of NODE (an alias of the node
   under consideration), accumulating a lower bound of growth into *GROWTH.
   Return true when growth is already known to exceed OFFLINE (or must be
   assumed so: non-removable node, final inline error, missing summary);
   return false when the *N caller budget runs out or all callers were
   scanned without exceeding OFFLINE.  KNOWN_EDGE (whose growth the caller
   already accounted) is skipped; MIN_SIZE is a per-call growth lower
   bound used when no cached estimate exists.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      /* Prefer the cached per-edge size (stored biased by one so zero
	 means "absent").  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Pessimistic lower bound: inlined body of at least MIN_SIZE
	     replaces the call statement.  */
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  /* Recurse into aliases while budget remains.  */
  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}
538 :
539 :
/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.

   The strategy is to first accumulate a cheap lower bound of growth over
   a bounded number of callers; only when that is inconclusive fall back
   to the full (capped) growth computation.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Budget: examine at most this many callers without cached estimates.  */
  int n = 10;

  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      /* Use the cached per-edge size if present (biased by one so zero
	 means "absent").  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Pessimistic lower bound when no cached estimate exists.  */
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  /* Remaining budget permitting, also scan callers of aliases.  */
  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* Inconclusive: do the full growth computation, capped at OFFLINE so
     it can abort as soon as positivity is established.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
|