LCOV - code coverage report
Current view: top level - gcc - ipa-inline-analysis.cc (source / functions) Coverage Total Hit
Test: gcc.info Lines: 92.3 % 261 241
Test Date: 2026-02-28 14:20:25 Functions: 93.3 % 15 14
Legend: Lines:     hit not hit

            Line data    Source code
       1              : /* Analysis used by inlining decision heuristics.
       2              :    Copyright (C) 2003-2026 Free Software Foundation, Inc.
       3              :    Contributed by Jan Hubicka
       4              : 
       5              : This file is part of GCC.
       6              : 
       7              : GCC is free software; you can redistribute it and/or modify it under
       8              : the terms of the GNU General Public License as published by the Free
       9              : Software Foundation; either version 3, or (at your option) any later
      10              : version.
      11              : 
      12              : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
      13              : WARRANTY; without even the implied warranty of MERCHANTABILITY or
      14              : FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      15              : for more details.
      16              : 
      17              : You should have received a copy of the GNU General Public License
      18              : along with GCC; see the file COPYING3.  If not see
      19              : <http://www.gnu.org/licenses/>.  */
      20              : 
      21              : #include "config.h"
      22              : #include "system.h"
      23              : #include "coretypes.h"
      24              : #include "backend.h"
      25              : #include "tree.h"
      26              : #include "gimple.h"
      27              : #include "alloc-pool.h"
      28              : #include "tree-pass.h"
      29              : #include "ssa.h"
      30              : #include "tree-streamer.h"
      31              : #include "cgraph.h"
      32              : #include "diagnostic.h"
      33              : #include "fold-const.h"
      34              : #include "print-tree.h"
      35              : #include "tree-inline.h"
      36              : #include "gimple-pretty-print.h"
      37              : #include "cfganal.h"
      38              : #include "gimple-iterator.h"
      39              : #include "tree-cfg.h"
      40              : #include "tree-ssa-loop-niter.h"
      41              : #include "tree-ssa-loop.h"
      42              : #include "symbol-summary.h"
      43              : #include "sreal.h"
      44              : #include "ipa-cp.h"
      45              : #include "ipa-prop.h"
      46              : #include "ipa-fnsummary.h"
      47              : #include "ipa-inline.h"
      48              : #include "cfgloop.h"
      49              : #include "tree-scalar-evolution.h"
      50              : #include "ipa-utils.h"
      51              : #include "cfgexpand.h"
      52              : #include "gimplify.h"
      53              : #include "attribs.h"
      54              : 
/* Cached node/edge growths.  Allocated by initialize_growth_caches and
   freed by free_growth_caches; NULL while caching is disabled (e.g. during
   early inlining).  */
fast_call_summary<edge_growth_cache_entry *, va_heap> *edge_growth_cache = NULL;
      57              : 
/* The context cache remembers estimated time/size and hints for given
   ipa_call_context of a call.  */
class node_context_cache_entry
{
public:
  /* The call context the estimates below were computed for; only
     meaningful when ctx.exists_p ().  */
  ipa_cached_call_context ctx;
  /* Estimated specialized and non-specialized execution times.  */
  sreal time, nonspec_time;
  /* Estimated size after inlining in this context.  */
  int size;
  /* Inline hints computed together with the size/time estimate.  */
  ipa_hints hints;

  node_context_cache_entry ()
  : ctx ()
  {
  }
  ~node_context_cache_entry ()
  {
    /* The cached context owns duplicated data; release it explicitly.  */
    ctx.release ();
  }
};
      77              : 
/* At the moment we implement primitive single entry LRU cache.  */
class node_context_summary
{
public:
  /* The single cached context entry kept per function.  */
  node_context_cache_entry entry;

  node_context_summary ()
  : entry ()
  {
  }
  ~node_context_summary ()
  {
  }
};
      92              : 
/* Summary holding the context cache.  */
static fast_function_summary <node_context_summary *, va_heap>
	*node_context_cache = NULL;
/* Statistics about the context cache effectiveness; reported to the dump
   file by free_growth_caches and reset afterwards.  */
static long node_context_cache_hit, node_context_cache_miss,
	    node_context_cache_clear;
      99              : 
     100              : /* Give initial reasons why inlining would fail on EDGE.  This gets either
     101              :    nullified or usually overwritten by more precise reasons later.  */
     102              : 
     103              : void
     104     38006773 : initialize_inline_failed (struct cgraph_edge *e)
     105              : {
     106     38006773 :   struct cgraph_node *callee = e->callee;
     107              : 
     108     38006773 :   if (e->inline_failed && e->inline_failed != CIF_BODY_NOT_AVAILABLE
     109     75845508 :       && cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
     110              :     ;
     111     37993650 :   else if (e->indirect_unknown_callee)
     112       669416 :     e->inline_failed = CIF_INDIRECT_UNKNOWN_CALL;
     113     37324234 :   else if (!callee->definition)
     114     19541949 :     e->inline_failed = CIF_BODY_NOT_AVAILABLE;
     115     17782285 :   else if (callee->redefined_extern_inline)
     116          190 :     e->inline_failed = CIF_REDEFINED_EXTERN_INLINE;
     117              :   else
     118     17782095 :     e->inline_failed = CIF_FUNCTION_NOT_CONSIDERED;
     119     38006773 :   gcc_checking_assert (!e->call_stmt_cannot_inline_p
     120              :                        || cgraph_inline_failed_type (e->inline_failed)
     121              :                             == CIF_FINAL_ERROR);
     122     38006773 : }
     123              : 
     124              : /* Allocate edge growth caches.  */
     125              : 
     126              : void
     127       230059 : initialize_growth_caches ()
     128              : {
     129       230059 :   edge_growth_cache
     130       230059 :     = new fast_call_summary<edge_growth_cache_entry *, va_heap> (symtab);
     131       230059 :   node_context_cache
     132       230059 :     = new fast_function_summary<node_context_summary *, va_heap> (symtab);
     133       230059 :   edge_growth_cache->disable_duplication_hook ();
     134       230059 :   node_context_cache->disable_insertion_hook ();
     135       230059 :   node_context_cache->disable_duplication_hook ();
     136       230059 : }
     137              : 
     138              : /* Free growth caches.  */
     139              : 
     140              : void
     141       230059 : free_growth_caches (void)
     142              : {
     143       230059 :   delete edge_growth_cache;
     144       230059 :   delete node_context_cache;
     145       230059 :   edge_growth_cache = NULL;
     146       230059 :   node_context_cache = NULL;
     147       230059 :   if (dump_file)
     148          178 :     fprintf (dump_file, "node context cache: %li hits, %li misses,"
     149              :                         " %li initializations\n",
     150              :              node_context_cache_hit, node_context_cache_miss,
     151              :              node_context_cache_clear);
     152       230059 :   node_context_cache_hit = 0;
     153       230059 :   node_context_cache_miss = 0;
     154       230059 :   node_context_cache_clear = 0;
     155       230059 : }
     156              : 
     157              : /* Return hints derived from EDGE.   */
     158              : 
     159              : int
     160      6670193 : simple_edge_hints (struct cgraph_edge *edge)
     161              : {
     162      6670193 :   int hints = 0;
     163      5159247 :   struct cgraph_node *to = (edge->caller->inlined_to
     164      6670193 :                             ? edge->caller->inlined_to : edge->caller);
     165      6670193 :   struct cgraph_node *callee = edge->callee->ultimate_alias_target ();
     166      6670193 :   int to_scc_no = ipa_fn_summaries->get (to)->scc_no;
     167      6670193 :   int callee_scc_no = ipa_fn_summaries->get (callee)->scc_no;
     168              : 
     169      6670193 :   if (to_scc_no && to_scc_no  == callee_scc_no && !edge->recursive_p ())
     170              :     hints |= INLINE_HINT_same_scc;
     171              : 
     172      6670193 :   if (cross_module_call_p (edge))
     173         4103 :     hints |= INLINE_HINT_cross_module;
     174              : 
     175      6670193 :   return hints;
     176              : }
     177              : 
/* Estimate the time cost for the caller when inlining EDGE.
   Only to be called via estimate_edge_time, that handles the
   caching mechanism.

   When caching, also update the cache entry.  Compute both time and
   size, since we always need both metrics eventually.  */

sreal
do_estimate_edge_time (struct cgraph_edge *edge, sreal *ret_nonspec_time)
{
  sreal time, nonspec_time;
  int size;
  ipa_hints hints;
  struct cgraph_node *callee;
  clause_t clause, nonspec_clause;
  ipa_auto_call_arg_values avals;
  class ipa_call_summary *es = ipa_call_summaries->get (edge);
  /* NOTE(review): min_size is never assigned after this initialization,
     so the "min_size >= 0" store below is currently dead — confirm
     against upstream history whether the updating code was removed
     intentionally.  */
  int min_size = -1;

  callee = edge->callee->ultimate_alias_target ();

  gcc_checking_assert (edge->inline_failed);
  /* Determine the known clauses and argument values for this call site.  */
  evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
				&avals, true);
  ipa_call_context ctx (callee, clause, nonspec_clause, es->param, &avals);
  if (node_context_cache != NULL)
    {
      /* Single-entry per-node cache: reuse the stored estimates when the
	 contexts compare equal.  */
      node_context_summary *e = node_context_cache->get_create (callee);
      if (e->entry.ctx.equal_to (ctx))
	{
	  node_context_cache_hit++;
	  size = e->entry.size;
	  time = e->entry.time;
	  nonspec_time = e->entry.nonspec_time;
	  hints = e->entry.hints;
	  /* With checking, recompute and verify the cached values (skipped
	     for IPA-profiled counts and partial training, where estimates
	     may not be stable).  */
	  if (flag_checking
	      && !opt_for_fn (callee->decl, flag_profile_partial_training)
	      && !callee->count.ipa_p ())
	    {
	      ipa_call_estimates chk_estimates;
	      ctx.estimate_size_and_time (&chk_estimates);
	      gcc_assert (chk_estimates.size == size
			  && chk_estimates.time == time
			  && chk_estimates.nonspecialized_time == nonspec_time
			  && chk_estimates.hints == hints);
	    }
	}
      else
	{
	  /* Cache miss: count it as a miss when an entry existed, as a
	     first-time initialization otherwise.  */
	  if (e->entry.ctx.exists_p ())
	    node_context_cache_miss++;
	  else
	    node_context_cache_clear++;
	  e->entry.ctx.release ();
	  ipa_call_estimates estimates;
	  ctx.estimate_size_and_time (&estimates);
	  size = estimates.size;
	  e->entry.size = size;
	  time = estimates.time;
	  e->entry.time = time;
	  nonspec_time = estimates.nonspecialized_time;
	  e->entry.nonspec_time = nonspec_time;
	  hints = estimates.hints;
	  e->entry.hints = hints;
	  /* Remember the context so the next query can hit.  */
	  e->entry.ctx.duplicate_from (ctx);
	}
    }
  else
    {
      /* No cache (early inliner): compute estimates directly.  */
      ipa_call_estimates estimates;
      ctx.estimate_size_and_time (&estimates);
      size = estimates.size;
      time = estimates.time;
      nonspec_time = estimates.nonspecialized_time;
      hints = estimates.hints;
    }

  /* When we have profile feedback or function attribute, we can quite safely
     identify hot edges and for those we disable size limits.  Don't do that
     when probability that caller will call the callee is low however, since it
     may hurt optimization of the caller's hot path.  */
  if ((edge->count.ipa ().initialized_p () && edge->maybe_hot_p ()
      && (edge->count.ipa () * 2
	  > (edge->caller->inlined_to
	     ? edge->caller->inlined_to->count.ipa ()
	     : edge->caller->count.ipa ())))
      || (lookup_attribute ("hot", DECL_ATTRIBUTES (edge->caller->decl))
	  != NULL
	 && lookup_attribute ("hot", DECL_ATTRIBUTES (edge->callee->decl))
	  != NULL))
    hints |= INLINE_HINT_known_hot;

  gcc_checking_assert (size >= 0);
  gcc_checking_assert (time >= 0);

  /* When caching, update the cache entry.  */
  if (edge_growth_cache != NULL)
    {
      /* Dead at present — min_size is still -1 here; see note above.  */
      if (min_size >= 0)
	ipa_fn_summaries->get (edge->callee->function_symbol ())->min_size
	   = min_size;
      edge_growth_cache_entry *entry
	= edge_growth_cache->get_create (edge);
      entry->time = time;
      entry->nonspec_time = nonspec_time;

      /* Size and hints are stored biased by one so that 0 can mean
	 "not cached" (see do_estimate_edge_size/do_estimate_edge_hints).  */
      entry->size = size + (size >= 0);
      hints |= simple_edge_hints (edge);
      entry->hints = hints + 1;
    }
  if (ret_nonspec_time)
    *ret_nonspec_time = nonspec_time;
  return time;
}
     292              : 
     293              : /* Reset cache for NODE.
     294              :    This must be done each time NODE body is modified.  */
     295              : void
     296      3353544 : reset_node_cache (struct cgraph_node *node)
     297              : {
     298      3353544 :   if (node_context_cache)
     299      3352063 :     node_context_cache->remove (node);
     300      3353544 : }
     301              : 
     302              : /* Remove EDGE from caches once it was inlined.  */
     303              : void
     304      3900094 : ipa_remove_from_growth_caches (struct cgraph_edge *edge)
     305              : {
     306      3900094 :   if (node_context_cache)
     307       873371 :     node_context_cache->remove (edge->callee);
     308      3900094 :   if (edge_growth_cache)
     309       873371 :     edge_growth_cache->remove (edge);
     310      3900094 : }
     311              : 
     312              : /* Return estimated callee growth after inlining EDGE.
     313              :    Only to be called via estimate_edge_size.  */
     314              : 
     315              : int
     316     18417638 : do_estimate_edge_size (struct cgraph_edge *edge)
     317              : {
     318     18417638 :   int size;
     319     18417638 :   struct cgraph_node *callee;
     320     18417638 :   clause_t clause, nonspec_clause;
     321              : 
     322              :   /* When we do caching, use do_estimate_edge_time to populate the entry.  */
     323              : 
     324     18417638 :   if (edge_growth_cache != NULL)
     325              :     {
     326      6476745 :       do_estimate_edge_time (edge);
     327      6476745 :       size = edge_growth_cache->get (edge)->size;
     328      6476745 :       gcc_checking_assert (size);
     329      6476745 :       return size - (size > 0);
     330              :     }
     331              : 
     332     11940893 :   callee = edge->callee->ultimate_alias_target ();
     333              : 
     334              :   /* Early inliner runs without caching, go ahead and do the dirty work.  */
     335     11940893 :   gcc_checking_assert (edge->inline_failed);
     336     11940893 :   ipa_auto_call_arg_values avals;
     337     11940893 :   evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
     338              :                                 &avals, true);
     339     11940893 :   ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
     340     11940893 :   ipa_call_estimates estimates;
     341     11940893 :   ctx.estimate_size_and_time (&estimates, false, false);
     342     11940893 :   return estimates.size;
     343     11940893 : }
     344              : 
     345              : 
     346              : /* Estimate the growth of the caller when inlining EDGE.
     347              :    Only to be called via estimate_edge_size.  */
     348              : 
     349              : ipa_hints
     350            0 : do_estimate_edge_hints (struct cgraph_edge *edge)
     351              : {
     352            0 :   struct cgraph_node *callee;
     353            0 :   clause_t clause, nonspec_clause;
     354              : 
     355              :   /* When we do caching, use do_estimate_edge_time to populate the entry.  */
     356              : 
     357            0 :   if (edge_growth_cache != NULL)
     358              :     {
     359            0 :       do_estimate_edge_time (edge);
     360            0 :       ipa_hints hints = edge_growth_cache->get (edge)->hints;
     361            0 :       gcc_checking_assert (hints);
     362            0 :       return hints - 1;
     363              :     }
     364              : 
     365            0 :   callee = edge->callee->ultimate_alias_target ();
     366              : 
     367              :   /* Early inliner runs without caching, go ahead and do the dirty work.  */
     368            0 :   gcc_checking_assert (edge->inline_failed);
     369            0 :   ipa_auto_call_arg_values avals;
     370            0 :   evaluate_properties_for_edge (edge, true, &clause, &nonspec_clause,
     371              :                                 &avals, true);
     372            0 :   ipa_call_context ctx (callee, clause, nonspec_clause, vNULL, &avals);
     373            0 :   ipa_call_estimates estimates;
     374            0 :   ctx.estimate_size_and_time (&estimates, false, true);
     375            0 :   ipa_hints hints = estimates.hints | simple_edge_hints (edge);
     376            0 :   return hints;
     377            0 : }
     378              : 
     379              : /* Estimate the size of NODE after inlining EDGE which should be an
     380              :    edge to either NODE or a call inlined into NODE.  */
     381              : 
     382              : int
     383      6504551 : estimate_size_after_inlining (struct cgraph_node *node,
     384              :                               struct cgraph_edge *edge)
     385              : {
     386      6504551 :   class ipa_call_summary *es = ipa_call_summaries->get (edge);
     387      6504551 :   ipa_size_summary *s = ipa_size_summaries->get (node);
     388      6504551 :   if (!es->predicate || *es->predicate != false)
     389              :     {
     390      6504551 :       int size = s->size + estimate_edge_growth (edge);
     391      6504551 :       gcc_assert (size >= 0);
     392              :       return size;
     393              :     }
     394            0 :   return s->size;
     395              : }
     396              : 
     397              : 
/* Accumulator used while summing growth of inlining NODE into all of its
   callers (see do_estimate_growth_1).  */
struct growth_data
{
  /* Node whose removal/inlining is being evaluated.  */
  struct cgraph_node *node;
  /* Set when a self-recursive caller edge was encountered.  */
  bool self_recursive;
  /* Set when some caller can never inline the node.  */
  bool uninlinable;
  /* Accumulated growth across caller edges.  */
  int growth;
  /* Early-exit threshold; INT_MAX requests the exact growth.  */
  int cap;
};
     406              : 
     407              : 
/* Worker for do_estimate_growth.  Collect growth for all callers.
   DATA points to a growth_data accumulator.  Returning true aborts the
   walk early: either the accumulated growth exceeded the cap, or — with
   a finite cap in effect — a condition was met that already decides the
   yes/no answer (uninlinable caller or self-recursion).  */

static bool
do_estimate_growth_1 (struct cgraph_node *node, void *data)
{
  struct cgraph_edge *e;
  struct growth_data *d = (struct growth_data *) data;

  for (e = node->callers; e; e = e->next_caller)
    {
      gcc_checking_assert (e->inline_failed);

      /* Don't count callback edges into growth, since they are never inlined
	 anyway.  */
      if (e->callback)
	continue;

      /* A caller that can never inline NODE keeps the offline copy alive.  */
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR
	  || !opt_for_fn (e->caller->decl, optimize))
	{
	  d->uninlinable = true;
	  /* With a finite cap only a yes/no answer is needed; the flag
	     alone settles it, so stop walking.  */
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}

      if (e->recursive_p ())
	{
	  d->self_recursive = true;
	  if (d->cap < INT_MAX)
	    return true;
	  continue;
	}
      d->growth += estimate_edge_growth (e);
      if (d->growth > d->cap)
	return true;
    }
  return false;
}
     447              : 
     448              : /* Return estimated savings for eliminating offline copy of NODE by inlining
     449              :    it everywhere.  */
     450              : 
     451              : static int
     452      3185835 : offline_size (struct cgraph_node *node, ipa_size_summary *info)
     453              : {
     454      3185835 :   if (!DECL_EXTERNAL (node->decl))
     455              :     {
     456      3150038 :       if (node->will_be_removed_from_program_if_no_direct_calls_p ())
     457       841397 :         return info->size;
     458              :       /* COMDAT functions are very often not shared across multiple units
     459              :          since they come from various template instantiations.
     460              :          Take this into account.  */
     461      2308641 :       else if (DECL_COMDAT (node->decl)
     462      2308641 :                && node->can_remove_if_no_direct_calls_p ())
     463              :         {
     464       631209 :           int prob = opt_for_fn (node->decl, param_comdat_sharing_probability);
     465       631209 :           return (info->size * (100 - prob) + 50) / 100;
     466              :         }
     467              :     }
     468              :   return 0;
     469              : }
     470              : 
     471              : /* Estimate the growth caused by inlining NODE into all callers.  */
     472              : 
     473              : int
     474      1357414 : estimate_growth (struct cgraph_node *node)
     475              : {
     476      1357414 :   struct growth_data d = { node, false, false, 0, INT_MAX };
     477      1357414 :   ipa_size_summary *info = ipa_size_summaries->get (node);
     478              : 
     479      1357414 :   if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
     480              :     return 1;
     481              : 
     482              :   /* For self recursive functions the growth estimation really should be
     483              :      infinity.  We don't want to return very large values because the growth
     484              :      plays various roles in badness computation fractions.  Be sure to not
     485              :      return zero or negative growths. */
     486      1357414 :   if (d.self_recursive)
     487         5087 :     d.growth = d.growth < info->size ? info->size : d.growth;
     488      1352327 :   else if (!d.uninlinable)
     489      1352136 :     d.growth -= offline_size (node, info);
     490              : 
     491      1357414 :   return d.growth;
     492              : }
     493              : 
/* Verify if there are fewer than MAX_CALLERS.
   Helper of growth_positive_p used for alias referrers: accumulate a
   lower bound of growth over callers of NODE into *GROWTH, spending at
   most *N "optimistic" (MIN_SIZE-based) edge estimates.  KNOWN_EDGE is
   skipped since its growth is already accounted for by the caller.
   Return true when growth is known to exceed OFFLINE or when the answer
   can not be decided cheaply; return false when the budget ran out or
   all callers were processed without exceeding OFFLINE.  */

static bool
check_callers (cgraph_node *node, int *growth, int *n, int offline,
	       int min_size, struct cgraph_edge *known_edge)
{
  ipa_ref *ref;

  /* If the node can not be removed anyway, there are no savings to weigh
     the growth against.  */
  if (!node->can_remove_if_no_direct_calls_and_refs_p ())
    return true;

  for (cgraph_edge *e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (e == known_edge)
	continue;
      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      /* Prefer the cached edge size; entries store size biased by one so
	 0 means "not cached".  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	*growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Optimistic estimate: assume the callee shrinks to MIN_SIZE.  */
	  *growth += min_size - es->call_stmt_size;
	  if (--(*n) < 0)
	    return false;
	}
      if (*growth > offline)
	return true;
    }

  /* Recurse into aliases while the optimistic-estimate budget lasts.  */
  if (*n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring), growth, n,
			 offline, min_size, known_edge))
	return true;

  return false;
}
     538              : 
     539              : 
/* Decide if growth of NODE is positive.  This is cheaper than calculating
   actual growth.  If edge growth of KNOWN_EDGE is known
   it is passed by EDGE_GROWTH.  */

bool
growth_positive_p (struct cgraph_node *node,
		   struct cgraph_edge * known_edge, int edge_growth)
{
  struct cgraph_edge *e;

  ipa_size_summary *s = ipa_size_summaries->get (node);

  /* First quickly check if NODE is removable at all.  */
  int offline = offline_size (node, s);
  if (offline <= 0 && known_edge && edge_growth > 0)
    return true;

  int min_size = ipa_fn_summaries->get (node)->min_size;
  /* Budget of optimistic (MIN_SIZE-based) edge estimates before falling
     back to the exact computation.  */
  int n = 10;

  /* Accumulate a lower bound of growth; once it tops OFFLINE the answer
     is known without the exact walk.  */
  int min_growth = known_edge ? edge_growth : 0;
  for (e = node->callers; e; e = e->next_caller)
    {
      edge_growth_cache_entry *entry;

      if (cgraph_inline_failed_type (e->inline_failed) == CIF_FINAL_ERROR)
	return true;
      if (e == known_edge)
	continue;
      /* Prefer cached edge sizes; they are biased by one so 0 means
	 "not cached".  */
      if (edge_growth_cache != NULL
	  && (entry = edge_growth_cache->get (e)) != NULL
	  && entry->size != 0)
	min_growth += entry->size - (entry->size > 0);
      else
	{
	  class ipa_call_summary *es = ipa_call_summaries->get (e);
	  if (!es)
	    return true;
	  /* Optimistic estimate: assume the callee shrinks to MIN_SIZE.  */
	  min_growth += min_size - es->call_stmt_size;
	  if (--n <= 0)
	    break;
	}
      if (min_growth > offline)
	return true;
    }

  /* Account callers reached through aliases as well while the budget
     lasts.  */
  ipa_ref *ref;
  if (n > 0)
    FOR_EACH_ALIAS (node, ref)
      if (check_callers (dyn_cast <cgraph_node *> (ref->referring),
			 &min_growth, &n, offline, min_size, known_edge))
	return true;

  /* The cheap bound was inconclusive; compute growth capped at OFFLINE.  */
  struct growth_data d = { node, false, false, 0, offline };
  if (node->call_for_symbol_and_aliases (do_estimate_growth_1, &d, true))
    return true;
  if (d.self_recursive || d.uninlinable)
    return true;
  return (d.growth > offline);
}
        

Generated by: LCOV version 2.4-beta

LCOV profile is generated on x86_64 machine using following configure options: configure --disable-bootstrap --enable-coverage=opt --enable-languages=c,c++,fortran,go,jit,lto,rust,m2 --enable-host-shared. GCC test suite is run with the built compiler.