LCOV - code coverage report
Current view: top level - gcc - ggc-page.cc (source / functions)
Test:      gcc.info
Test Date: 2024-04-20 14:03:02
Coverage:  Lines: 86.0 % (704 of 819)   Functions: 87.0 % (40 of 46)   Branches: - (0 of 0)
Legend:    Lines: hit / not hit   |   Branches: + taken  - not taken  # not executed

             Branch data     Line data    Source code
       1                 :             : /* "Bag-of-pages" garbage collector for the GNU compiler.
       2                 :             :    Copyright (C) 1999-2024 Free Software Foundation, Inc.
       3                 :             : 
       4                 :             : This file is part of GCC.
       5                 :             : 
       6                 :             : GCC is free software; you can redistribute it and/or modify it under
       7                 :             : the terms of the GNU General Public License as published by the Free
       8                 :             : Software Foundation; either version 3, or (at your option) any later
       9                 :             : version.
      10                 :             : 
      11                 :             : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
      12                 :             : WARRANTY; without even the implied warranty of MERCHANTABILITY or
      13                 :             : FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
      14                 :             : for more details.
      15                 :             : 
      16                 :             : You should have received a copy of the GNU General Public License
      17                 :             : along with GCC; see the file COPYING3.  If not see
      18                 :             : <http://www.gnu.org/licenses/>.  */
      19                 :             : 
      20                 :             : #include "config.h"
      21                 :             : #include "system.h"
      22                 :             : #include "coretypes.h"
      23                 :             : #include "backend.h"
      24                 :             : #include "alias.h"
      25                 :             : #include "tree.h"
      26                 :             : #include "rtl.h"
      27                 :             : #include "memmodel.h"
      28                 :             : #include "tm_p.h"
      29                 :             : #include "diagnostic-core.h"
      30                 :             : #include "flags.h"
      31                 :             : #include "ggc-internal.h"
      32                 :             : #include "timevar.h"
      33                 :             : #include "cgraph.h"
      34                 :             : #include "cfgloop.h"
      35                 :             : #include "plugin.h"
      36                 :             : 
      37                 :             : /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
      38                 :             :    file open.  Prefer either to valloc.  */
      39                 :             : #ifdef HAVE_MMAP_ANON
      40                 :             : # undef HAVE_MMAP_DEV_ZERO
      41                 :             : # define USING_MMAP
      42                 :             : #endif
      43                 :             : 
      44                 :             : #ifdef HAVE_MMAP_DEV_ZERO
      45                 :             : # define USING_MMAP
      46                 :             : #endif
      47                 :             : 
      48                 :             : #ifndef USING_MMAP
      49                 :             : #define USING_MALLOC_PAGE_GROUPS
      50                 :             : #endif
      51                 :             : 
      52                 :             : #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
      53                 :             :     && defined(USING_MMAP)
      54                 :             : # define USING_MADVISE
      55                 :             : #endif
      56                 :             : 
      57                 :             : /* Strategy:
      58                 :             : 
      59                 :             :    This garbage-collecting allocator allocates objects on one of a set
      60                 :             :    of pages.  Each page can allocate objects of a single size only;
      61                 :             :    available sizes are powers of two starting at four bytes.  The size
      62                 :             :    of an allocation request is rounded up to the next power of two
      63                 :             :    (`order'), and satisfied from the appropriate page.
      64                 :             : 
      65                 :             :    Each page is recorded in a page-entry, which also maintains an
      66                 :             :    in-use bitmap of object positions on the page.  This allows the
      67                 :             :    allocation state of a particular object to be flipped without
      68                 :             :    touching the page itself.
      69                 :             : 
      70                 :             :    Each page-entry also has a context depth, which is used to track
      71                 :             :    pushing and popping of allocation contexts.  Only objects allocated
      72                 :             :    in the current (highest-numbered) context may be collected.
      73                 :             : 
      74                 :             :    Page entries are arranged in an array of singly-linked lists.  The
      75                 :             :    array is indexed by the allocation size, in bits, of the pages on
      76                 :             :    it; i.e. all pages on a list allocate objects of the same size.
      77                 :             :    Pages are ordered on the list such that all non-full pages precede
      78                 :             :    all full pages, with non-full pages arranged in order of decreasing
      79                 :             :    context depth.
      80                 :             : 
      81                 :             :    Empty pages (of all orders) are kept on a single page cache list,
      82                 :             :    and are considered first when new pages are required; they are
      83                 :             :    deallocated at the start of the next collection if they haven't
      84                 :             :    been recycled by then.  */
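The size-to-order mapping described above can be made concrete with a small, self-contained sketch.  It is not taken from ggc-page.cc and ignores the extra non-power-of-two orders introduced further below; it only shows the basic rule that a request is rounded up to the next power of two no smaller than four bytes, whose exponent is the order.

  #include <stddef.h>

  /* Illustrative only: smallest power-of-two exponent ("order") whose
     slot size can hold an object of SIZE bytes, starting at 2^2 = 4.  */
  static unsigned
  example_order_for_size (size_t size)
  {
    unsigned order = 2;
    while (((size_t) 1 << order) < size)
      order++;
    return order;   /* e.g. a 24-byte request maps to order 5, a 32-byte slot.  */
  }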
      85                 :             : 
      86                 :             : /* Define GGC_DEBUG_LEVEL to print debugging information.
      87                 :             :      0: No debugging output.
      88                 :             :      1: GC statistics only.
      89                 :             :      2: Page-entry allocations/deallocations as well.
      90                 :             :      3: Object allocations as well.
      91                 :             :      4: Object marks as well.  */
      92                 :             : #define GGC_DEBUG_LEVEL (0)
      93                 :             : 
      94                 :             : /* A two-level tree is used to look up the page-entry for a given
      95                 :             :    pointer.  Two chunks of the pointer's bits are extracted to index
      96                 :             :    the first and second levels of the tree, as follows:
      97                 :             : 
      98                 :             :                                    HOST_PAGE_SIZE_BITS
      99                 :             :                            32           |      |
     100                 :             :        msb +----------------+----+------+------+ lsb
     101                 :             :                             |    |      |
     102                 :             :                          PAGE_L1_BITS   |
     103                 :             :                                  |      |
     104                 :             :                                PAGE_L2_BITS
     105                 :             : 
     106                 :             :    The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
     107                 :             :    pages are aligned on system page boundaries.  The next most
     108                 :             :    significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
     109                 :             :    index values in the lookup table, respectively.
     110                 :             : 
     111                 :             :    For 32-bit architectures and the settings below, there are no
     112                 :             :    leftover bits.  For architectures with wider pointers, there is a
     113                 :             :    linked list of these lookup trees, one per value of the pointer's
     114                 :             :    upper bits, which must be scanned to find the correct tree.  */
     115                 :             : 
     116                 :             : #define PAGE_L1_BITS    (8)
     117                 :             : #define PAGE_L2_BITS    (32 - PAGE_L1_BITS - G.lg_pagesize)
     118                 :             : #define PAGE_L1_SIZE    ((uintptr_t) 1 << PAGE_L1_BITS)
     119                 :             : #define PAGE_L2_SIZE    ((uintptr_t) 1 << PAGE_L2_BITS)
     120                 :             : 
     121                 :             : #define LOOKUP_L1(p) \
     122                 :             :   (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
     123                 :             : 
     124                 :             : #define LOOKUP_L2(p) \
     125                 :             :   (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
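As a worked example (assuming a 4 KB system page, i.e. G.lg_pagesize == 12, so PAGE_L2_BITS == 32 - 8 - 12 == 12), the low 32 bits 0x12345678 of a pointer split as:

  L1 = (0x12345678 >> 24) & 0xff  == 0x12
  L2 = (0x12345678 >> 12) & 0xfff == 0x345

and the bottom 12 bits (0x678) are ignored, since pages are aligned on system page boundaries.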
     126                 :             : 
     127                 :             : /* The number of objects per allocation page, for objects on a page of
     128                 :             :    the indicated ORDER.  */
     129                 :             : #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
     130                 :             : 
     131                 :             : /* The number of objects in P.  */
     132                 :             : #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
     133                 :             : 
     134                 :             : /* The size of an object on a page of the indicated ORDER.  */
     135                 :             : #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
     136                 :             : 
     137                 :             : /* For speed, we avoid doing a general integer divide to locate the
     138                 :             :    offset in the allocation bitmap, by precalculating numbers M, S
     139                 :             :    such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
     140                 :             :    within the page which is evenly divisible by the object size Z.  */
     141                 :             : #define DIV_MULT(ORDER) inverse_table[ORDER].mult
     142                 :             : #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
     143                 :             : #define OFFSET_TO_BIT(OFFSET, ORDER) \
     144                 :             :   (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
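To make the multiply-and-shift division concrete, take a hypothetical object size Z of 12 bytes and work modulo 2^32 as the comment does.  One pair satisfying the identity is M = 0xAAAAAAAB (the multiplicative inverse of 3 mod 2^32) and S = 2; the values actually stored in inverse_table are computed at run time by compute_inverse, so these particular numbers are only illustrative.  For an offset O = 36 on such a page:

  (36 * 0xAAAAAAAB) mod 2^32 == 12,  and 12 >> 2 == 3 == 36 / 12

so OFFSET_TO_BIT would map offset 36 to bit 3 of the in-use bitmap.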
     145                 :             : 
     146                 :             : /* We use this structure to determine the alignment required for
     147                 :             :    allocations.  For power-of-two sized allocations, that's not a
     148                 :             :    problem, but it does matter for odd-sized allocations.
     149                 :             :    We do not care about alignment for floating-point types.  */
     150                 :             : 
     151                 :             : struct max_alignment {
     152                 :             :   char c;
     153                 :             :   union {
     154                 :             :     int64_t i;
     155                 :             :     void *p;
     156                 :             :   } u;
     157                 :             : };
     158                 :             : 
     159                 :             : /* The biggest alignment required.  */
     160                 :             : 
     161                 :             : #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
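The offsetof trick works because the single leading char forces the compiler to pad the structure up to the union's alignment, so the union's offset equals the strictest alignment required by its members.  On a typical LP64 host, where both int64_t and void * want 8-byte alignment, MAX_ALIGNMENT would be 8; that is a host-specific observation, not something the code depends on.  A sanity check along these lines (not present in the file) could read:

  static_assert (MAX_ALIGNMENT >= alignof (void *),
                 "allocation alignment covers pointers");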
     162                 :             : 
     163                 :             : 
     164                 :             : /* The number of extra orders, not corresponding to power-of-two sized
     165                 :             :    objects.  */
     166                 :             : 
     167                 :             : #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
     168                 :             : 
     169                 :             : #define RTL_SIZE(NSLOTS) \
     170                 :             :   (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
     171                 :             : 
     172                 :             : #define TREE_EXP_SIZE(OPS) \
     173                 :             :   (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
     174                 :             : 
     175                 :             : /* The Ith entry is the maximum size of an object to be stored in the
     176                 :             :    Ith extra order.  Adding a new entry to this array is the *only*
     177                 :             :    thing you need to do to add a new special allocation size.  */
     178                 :             : 
     179                 :             : static const size_t extra_order_size_table[] = {
     180                 :             :   /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
     181                 :             :      There are a lot of structures with these sizes and explicitly
     182                 :             :      listing them risks orders being dropped because they changed size.  */
     183                 :             :   MAX_ALIGNMENT * 3,
     184                 :             :   MAX_ALIGNMENT * 5,
     185                 :             :   MAX_ALIGNMENT * 6,
     186                 :             :   MAX_ALIGNMENT * 7,
     187                 :             :   MAX_ALIGNMENT * 9,
     188                 :             :   MAX_ALIGNMENT * 10,
     189                 :             :   MAX_ALIGNMENT * 11,
     190                 :             :   MAX_ALIGNMENT * 12,
     191                 :             :   MAX_ALIGNMENT * 13,
     192                 :             :   MAX_ALIGNMENT * 14,
     193                 :             :   MAX_ALIGNMENT * 15,
     194                 :             :   sizeof (struct tree_decl_non_common),
     195                 :             :   sizeof (struct tree_field_decl),
     196                 :             :   sizeof (struct tree_parm_decl),
     197                 :             :   sizeof (struct tree_var_decl),
     198                 :             :   sizeof (struct tree_type_non_common),
     199                 :             :   sizeof (struct function),
     200                 :             :   sizeof (struct basic_block_def),
     201                 :             :   sizeof (struct cgraph_node),
     202                 :             :   sizeof (class loop),
     203                 :             : };
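The payoff of these extra orders is easy to see with a little arithmetic, assuming 4 KB pages and a MAX_ALIGNMENT of 8 (both host-dependent): a 24-byte object served from the plain power-of-two orders would be rounded up to a 32-byte slot, wasting a quarter of every allocation, whereas the MAX_ALIGNMENT * 3 order gives it a 24-byte slot and fits 170 objects on a 4096-byte page instead of 128.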
     204                 :             : 
     205                 :             : /* The total number of orders.  */
     206                 :             : 
     207                 :             : #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
     208                 :             : 
     209                 :             : /* Compute the smallest nonnegative number which when added to X gives
     210                 :             :    a multiple of F.  */
     211                 :             : 
     212                 :             : #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
     213                 :             : 
     214                 :             : /* Round X up to a multiple of the page size.  */
     215                 :             : 
     216                 :             : #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
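As a quick check of ROUND_UP_VALUE, take x = 10 and f = 8:

  (8 - 1) - ((8 - 1 + 10) % 8) == 7 - (17 % 8) == 7 - 1 == 6

and indeed 10 + 6 == 16 is a multiple of 8; when x is already a multiple, say 16, the result is 0.  Correspondingly, PAGE_ALIGN (5000) on a host with 4 KB pages (an assumption about G.pagesize) yields 8192.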
     217                 :             : 
     218                 :             : /* The Ith entry is the number of objects on a page of order I.  */
     219                 :             : 
     220                 :             : static unsigned objects_per_page_table[NUM_ORDERS];
     221                 :             : 
     222                 :             : /* The Ith entry is the size of an object on a page of order I.  */
     223                 :             : 
     224                 :             : static size_t object_size_table[NUM_ORDERS];
     225                 :             : 
     226                 :             : /* The Ith entry is a pair of numbers (mult, shift) such that
     227                 :             :    ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
     228                 :             :    for all k evenly divisible by OBJECT_SIZE(I).  */
     229                 :             : 
     230                 :             : static struct
     231                 :             : {
     232                 :             :   size_t mult;
     233                 :             :   unsigned int shift;
     234                 :             : }
     235                 :             : inverse_table[NUM_ORDERS];
     236                 :             : 
     237                 :             : /* A page_entry records the status of an allocation page.  This
     238                 :             :    structure is dynamically sized to fit the bitmap in_use_p.  */
     239                 :             : struct page_entry
     240                 :             : {
     241                 :             :   /* The next page-entry with objects of the same size, or NULL if
     242                 :             :      this is the last page-entry.  */
     243                 :             :   struct page_entry *next;
     244                 :             : 
     245                 :             :   /* The previous page-entry with objects of the same size, or NULL if
     246                 :             :      this is the first page-entry.   The PREV pointer exists solely to
     247                 :             :      keep the cost of ggc_free manageable.  */
     248                 :             :   struct page_entry *prev;
     249                 :             : 
     250                 :             :   /* The number of bytes allocated.  (This will always be a multiple
     251                 :             :      of the host system page size.)  */
     252                 :             :   size_t bytes;
     253                 :             : 
     254                 :             :   /* The address at which the memory is allocated.  */
     255                 :             :   char *page;
     256                 :             : 
     257                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     258                 :             :   /* Back pointer to the page group this page came from.  */
     259                 :             :   struct page_group *group;
     260                 :             : #endif
     261                 :             : 
     262                 :             :   /* This is the index in the by_depth varray where this page entry
     263                 :             :      can be found.  */
     264                 :             :   unsigned long index_by_depth;
     265                 :             : 
     266                 :             :   /* Context depth of this page.  */
     267                 :             :   unsigned short context_depth;
     268                 :             : 
     269                 :             :   /* The number of free objects remaining on this page.  */
     270                 :             :   unsigned short num_free_objects;
     271                 :             : 
     272                 :             :   /* A likely candidate for the bit position of a free object for the
     273                 :             :      next allocation from this page.  */
     274                 :             :   unsigned short next_bit_hint;
     275                 :             : 
     276                 :             :   /* The lg of size of objects allocated from this page.  */
     277                 :             :   unsigned char order;
     278                 :             : 
     279                 :             :   /* Discarded page? */
     280                 :             :   bool discarded;
     281                 :             : 
     282                 :             :   /* A bit vector indicating whether or not objects are in use.  The
     283                 :             :      Nth bit is one if the Nth object on this page is allocated.  This
     284                 :             :      array is dynamically sized.  */
     285                 :             :   unsigned long in_use_p[1];
     286                 :             : };
     287                 :             : 
     288                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     289                 :             : /* A page_group describes a large allocation from malloc, from which
     290                 :             :    we parcel out aligned pages.  */
     291                 :             : struct page_group
     292                 :             : {
     293                 :             :   /* A linked list of all extant page groups.  */
     294                 :             :   struct page_group *next;
     295                 :             : 
     296                 :             :   /* The address we received from malloc.  */
     297                 :             :   char *allocation;
     298                 :             : 
     299                 :             :   /* The size of the block.  */
     300                 :             :   size_t alloc_size;
     301                 :             : 
     302                 :             :   /* A bitmask of pages in use.  */
     303                 :             :   unsigned int in_use;
     304                 :             : };
     305                 :             : #endif
     306                 :             : 
     307                 :             : #if HOST_BITS_PER_PTR <= 32
     308                 :             : 
     309                 :             : /* On 32-bit hosts, we use a two level page table, as pictured above.  */
     310                 :             : typedef page_entry **page_table[PAGE_L1_SIZE];
     311                 :             : 
     312                 :             : #else
     313                 :             : 
     314                 :             : /* On 64-bit hosts, we use the same two level page tables plus a linked
     315                 :             :    list that disambiguates the top 32-bits.  There will almost always be
     316                 :             :    exactly one entry in the list.  */
     317                 :             : typedef struct page_table_chain
     318                 :             : {
     319                 :             :   struct page_table_chain *next;
     320                 :             :   size_t high_bits;
     321                 :             :   page_entry **table[PAGE_L1_SIZE];
     322                 :             : } *page_table;
     323                 :             : 
     324                 :             : #endif
     325                 :             : 
     326                 :             : class finalizer
     327                 :             : {
     328                 :             : public:
     329                 :    62086964 :   finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
     330                 :             : 
     331                 :   123271355 :   void *addr () const { return m_addr; }
     332                 :             : 
     333                 :    19976769 :   void call () const { m_function (m_addr); }
     334                 :             : 
     335                 :             : private:
     336                 :             :   void *m_addr;
     337                 :             :   void (*m_function)(void *);
     338                 :             : };
     339                 :             : 
     340                 :             : class vec_finalizer
     341                 :             : {
     342                 :             : public:
     343                 :           0 :   vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
     344                 :           0 :     m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
     345                 :             : 
     346                 :           0 :   void call () const
     347                 :             :     {
     348                 :           0 :       for (size_t i = 0; i < m_n_objects; i++)
     349                 :           0 :         m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
     350                 :           0 :     }
     351                 :             : 
     352                 :           0 :   void *addr () const { return reinterpret_cast<void *> (m_addr); }
     353                 :             : 
     354                 :             : private:
     355                 :             :   uintptr_t m_addr;
     356                 :             :   void (*m_function)(void *);
     357                 :             :   size_t m_object_size;
     358                 :             :   size_t m_n_objects;
     359                 :             : };
     360                 :             : 
     361                 :             : #ifdef ENABLE_GC_ALWAYS_COLLECT
     362                 :             : /* List of free objects to be verified as actually free on the
     363                 :             :    next collection.  */
     364                 :             : struct free_object
     365                 :             : {
     366                 :             :   void *object;
     367                 :             :   struct free_object *next;
     368                 :             : };
     369                 :             : #endif
     370                 :             : 
     371                 :             : /* The rest of the global variables.  */
     372                 :             : static struct ggc_globals
     373                 :             : {
     374                 :             :   /* The Nth element in this array is a page with objects of size 2^N.
     375                 :             :      If there are any pages with free objects, they will be at the
     376                 :             :      head of the list.  NULL if there are no page-entries for this
     377                 :             :      object size.  */
     378                 :             :   page_entry *pages[NUM_ORDERS];
     379                 :             : 
     380                 :             :   /* The Nth element in this array is the last page with objects of
     381                 :             :      size 2^N.  NULL if there are no page-entries for this object
     382                 :             :      size.  */
     383                 :             :   page_entry *page_tails[NUM_ORDERS];
     384                 :             : 
     385                 :             :   /* Lookup table for associating allocation pages with object addresses.  */
     386                 :             :   page_table lookup;
     387                 :             : 
     388                 :             :   /* The system's page size.  */
     389                 :             :   size_t pagesize;
     390                 :             :   size_t lg_pagesize;
     391                 :             : 
     392                 :             :   /* Bytes currently allocated.  */
     393                 :             :   size_t allocated;
     394                 :             : 
     395                 :             :   /* Bytes currently allocated at the end of the last collection.  */
     396                 :             :   size_t allocated_last_gc;
     397                 :             : 
     398                 :             :   /* Total amount of memory mapped.  */
     399                 :             :   size_t bytes_mapped;
     400                 :             : 
     401                 :             :   /* Bit N set if any allocations have been done at context depth N.  */
     402                 :             :   unsigned long context_depth_allocations;
     403                 :             : 
     404                 :             :   /* Bit N set if any collections have been done at context depth N.  */
     405                 :             :   unsigned long context_depth_collections;
     406                 :             : 
     407                 :             :   /* The current depth in the context stack.  */
     408                 :             :   unsigned short context_depth;
     409                 :             : 
     410                 :             :   /* A file descriptor open to /dev/zero for reading.  */
     411                 :             : #if defined (HAVE_MMAP_DEV_ZERO)
     412                 :             :   int dev_zero_fd;
     413                 :             : #endif
     414                 :             : 
     415                 :             :   /* A cache of free system pages.  */
     416                 :             :   page_entry *free_pages;
     417                 :             : 
     418                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     419                 :             :   page_group *page_groups;
     420                 :             : #endif
     421                 :             : 
     422                 :             :   /* The file stream used for debugging output.  */
     423                 :             :   FILE *debug_file;
     424                 :             : 
     425                 :             :   /* Current number of elements in use in depth below.  */
     426                 :             :   unsigned int depth_in_use;
     427                 :             : 
     428                 :             :   /* Maximum number of elements that can be used before resizing.  */
     429                 :             :   unsigned int depth_max;
     430                 :             : 
     431                 :             :   /* Each element of this array is an index in by_depth where the given
     432                 :             :      depth starts.  This array is indexed by the depth we are
     433                 :             :      interested in.  */
     434                 :             :   unsigned int *depth;
     435                 :             : 
     436                 :             :   /* Current number of elements in use in by_depth below.  */
     437                 :             :   unsigned int by_depth_in_use;
     438                 :             : 
     439                 :             :   /* Maximum number of elements that can be used before resizing.  */
     440                 :             :   unsigned int by_depth_max;
     441                 :             : 
     442                 :             :   /* Each element of this array is a pointer to a page_entry; all
     443                 :             :      page_entries can be found in here by increasing depth.
     444                 :             :      index_by_depth in the page_entry is the index into this data
     445                 :             :      structure where that page_entry can be found.  This is used to
     446                 :             :      speed up finding all page_entries at a particular depth.  */
     447                 :             :   page_entry **by_depth;
     448                 :             : 
     449                 :             :   /* Each element is a pointer to the saved in_use_p bits, if any,
     450                 :             :      zero otherwise.  We allocate them all together, to enable a
     451                 :             :      better runtime data access pattern.  */
     452                 :             :   unsigned long **save_in_use;
     453                 :             : 
     454                 :             :   /* Finalizers for single objects.  The first index is collection_depth.  */
     455                 :             :   vec<vec<finalizer> > finalizers;
     456                 :             : 
     457                 :             :   /* Finalizers for vectors of objects.  */
     458                 :             :   vec<vec<vec_finalizer> > vec_finalizers;
     459                 :             : 
     460                 :             : #ifdef ENABLE_GC_ALWAYS_COLLECT
     461                 :             :   /* List of free objects to be verified as actually free on the
     462                 :             :      next collection.  */
     463                 :             :   struct free_object *free_object_list;
     464                 :             : #endif
     465                 :             : 
     466                 :             :   struct
     467                 :             :   {
     468                 :             :     /* Total GC-allocated memory.  */
     469                 :             :     unsigned long long total_allocated;
     470                 :             :     /* Total overhead for GC-allocated memory.  */
     471                 :             :     unsigned long long total_overhead;
     472                 :             : 
     473                 :             :     /* Total allocations and overhead for sizes less than 32, 64 and 128.
     474                 :             :        These sizes are interesting because they are typical cache line
     475                 :             :        sizes.  */
     476                 :             : 
     477                 :             :     unsigned long long total_allocated_under32;
     478                 :             :     unsigned long long total_overhead_under32;
     479                 :             : 
     480                 :             :     unsigned long long total_allocated_under64;
     481                 :             :     unsigned long long total_overhead_under64;
     482                 :             : 
     483                 :             :     unsigned long long total_allocated_under128;
     484                 :             :     unsigned long long total_overhead_under128;
     485                 :             : 
     486                 :             :     /* The allocations for each of the allocation orders.  */
     487                 :             :     unsigned long long total_allocated_per_order[NUM_ORDERS];
     488                 :             : 
     489                 :             :     /* The overhead for each of the allocation orders.  */
     490                 :             :     unsigned long long total_overhead_per_order[NUM_ORDERS];
     491                 :             :   } stats;
     492                 :             : } G;
     493                 :             : 
     494                 :             : /* True if a gc is currently taking place.  */
     495                 :             : 
     496                 :             : static bool in_gc = false;
     497                 :             : 
     498                 :             : /* The size in bytes required to maintain a bitmap for the objects
     499                 :             :    on a page-entry.  */
     500                 :             : #define BITMAP_SIZE(Num_objects) \
     501                 :             :   (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
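For instance, on a 64-bit host where HOST_BITS_PER_LONG is 64 and sizeof (long) is 8 (host assumptions), BITMAP_SIZE (171) is CEIL (171, 64) * 8 == 3 * 8 == 24 bytes.  alloc_page below passes num_objects + 1, which is where a count such as 171 for a 170-object page would come from.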
     502                 :             : 
     503                 :             : /* Allocate pages in chunks of this size, to throttle calls to memory
     504                 :             :    allocation routines.  The first page is used, the rest go onto the
     505                 :             :    free list.  This cannot be larger than HOST_BITS_PER_INT for the
     506                 :             :    in_use bitmask for page_group.  Hosts that need a different value
     507                 :             :    can override this by defining GGC_QUIRE_SIZE explicitly.  */
     508                 :             : #ifndef GGC_QUIRE_SIZE
     509                 :             : # ifdef USING_MMAP
     510                 :             : #  define GGC_QUIRE_SIZE 512    /* 2MB for 4K pages */
     511                 :             : # else
     512                 :             : #  define GGC_QUIRE_SIZE 16
     513                 :             : # endif
     514                 :             : #endif
     515                 :             : 
     516                 :             : /* Initial guess as to how many page table entries we might need.  */
     517                 :             : #define INITIAL_PTE_COUNT 128
     518                 :             : 
     519                 :             : static page_entry *lookup_page_table_entry (const void *);
     520                 :             : static void set_page_table_entry (void *, page_entry *);
     521                 :             : #ifdef USING_MMAP
     522                 :             : static char *alloc_anon (char *, size_t, bool check);
     523                 :             : #endif
     524                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     525                 :             : static size_t page_group_index (char *, char *);
     526                 :             : static void set_page_group_in_use (page_group *, char *);
     527                 :             : static void clear_page_group_in_use (page_group *, char *);
     528                 :             : #endif
     529                 :             : static struct page_entry * alloc_page (unsigned);
     530                 :             : static void free_page (struct page_entry *);
     531                 :             : static void clear_marks (void);
     532                 :             : static void sweep_pages (void);
     533                 :             : static void ggc_recalculate_in_use_p (page_entry *);
     534                 :             : static void compute_inverse (unsigned);
     535                 :             : static inline void adjust_depth (void);
     536                 :             : static void move_ptes_to_front (int, int);
     537                 :             : 
     538                 :             : void debug_print_page_list (int);
     539                 :             : static void push_depth (unsigned int);
     540                 :             : static void push_by_depth (page_entry *, unsigned long *);
     541                 :             : 
     542                 :             : /* Push an entry onto G.depth.  */
     543                 :             : 
     544                 :             : inline static void
     545                 :      281181 : push_depth (unsigned int i)
     546                 :             : {
     547                 :      281181 :   if (G.depth_in_use >= G.depth_max)
     548                 :             :     {
     549                 :           0 :       G.depth_max *= 2;
     550                 :           0 :       G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
     551                 :             :     }
     552                 :      281181 :   G.depth[G.depth_in_use++] = i;
     553                 :      281181 : }
     554                 :             : 
     555                 :             : /* Push an entry onto G.by_depth and G.save_in_use.  */
     556                 :             : 
     557                 :             : inline static void
     558                 :   566970503 : push_by_depth (page_entry *p, unsigned long *s)
     559                 :             : {
     560                 :   566970503 :   if (G.by_depth_in_use >= G.by_depth_max)
     561                 :             :     {
     562                 :      769763 :       G.by_depth_max *= 2;
     563                 :      769763 :       G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
     564                 :      769763 :       G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
     565                 :             :                                   G.by_depth_max);
     566                 :             :     }
     567                 :   566970503 :   G.by_depth[G.by_depth_in_use] = p;
     568                 :   566970503 :   G.save_in_use[G.by_depth_in_use++] = s;
     569                 :   566970503 : }
     570                 :             : 
     571                 :             : #if (GCC_VERSION < 3001)
     572                 :             : #define prefetch(X) ((void) X)
     573                 :             : #else
     574                 :             : #define prefetch(X) __builtin_prefetch (X)
     575                 :             : #endif
     576                 :             : 
     577                 :             : #define save_in_use_p_i(__i) \
     578                 :             :   (G.save_in_use[__i])
     579                 :             : #define save_in_use_p(__p) \
     580                 :             :   (save_in_use_p_i (__p->index_by_depth))
     581                 :             : 
     582                 :             : /* Traverse the page table and find the entry for a page.
     583                 :             :    If the object wasn't allocated by the GC, return NULL.  */
     584                 :             : 
     585                 :             : static inline page_entry *
     586                 :  3665349274 : safe_lookup_page_table_entry (const void *p)
     587                 :             : {
     588                 :  3665349274 :   page_entry ***base;
     589                 :  3665349274 :   size_t L1, L2;
     590                 :             : 
     591                 :             : #if HOST_BITS_PER_PTR <= 32
     592                 :             :   base = &G.lookup[0];
     593                 :             : #else
     594                 :  3665349274 :   page_table table = G.lookup;
     595                 :  3665349274 :   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
     596                 :  5010957548 :   while (1)
     597                 :             :     {
     598                 :  4338153411 :       if (table == NULL)
     599                 :             :         return NULL;
     600                 :  3754671129 :       if (table->high_bits == high_bits)
     601                 :             :         break;
     602                 :   672804137 :       table = table->next;
     603                 :             :     }
     604                 :  3081866992 :   base = &table->table[0];
     605                 :             : #endif
     606                 :             : 
     607                 :             :   /* Extract the level 1 and 2 indices.  */
     608                 :  3081866992 :   L1 = LOOKUP_L1 (p);
     609                 :  3081866992 :   L2 = LOOKUP_L2 (p);
     610                 :  3081866992 :   if (! base[L1])
     611                 :             :     return NULL;
     612                 :             : 
     613                 :  3056457430 :   return base[L1][L2];
     614                 :             : }
     615                 :             : 
     616                 :             : /* Traverse the page table and find the entry for a page.
     617                 :             :    Die (probably) if the object wasn't allocated via GC.  */
     618                 :             : 
     619                 :             : static inline page_entry *
     620                 : >21071*10^7 : lookup_page_table_entry (const void *p)
     621                 :             : {
     622                 : >21071*10^7 :   page_entry ***base;
     623                 : >21071*10^7 :   size_t L1, L2;
     624                 :             : 
     625                 :             : #if HOST_BITS_PER_PTR <= 32
     626                 :             :   base = &G.lookup[0];
     627                 :             : #else
     628                 : >21071*10^7 :   page_table table = G.lookup;
     629                 : >21071*10^7 :   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
     630                 : >21663*10^7 :   while (table->high_bits != high_bits)
     631                 :  5917172460 :     table = table->next;
     632                 : >21071*10^7 :   base = &table->table[0];
     633                 :             : #endif
     634                 :             : 
     635                 :             :   /* Extract the level 1 and 2 indices.  */
     636                 : >21071*10^7 :   L1 = LOOKUP_L1 (p);
     637                 : >21071*10^7 :   L2 = LOOKUP_L2 (p);
     638                 :             : 
     639                 : >21071*10^7 :   return base[L1][L2];
     640                 :             : }
     641                 :             : 
     642                 :             : /* Set the page table entry for a page.  */
     643                 :             : 
     644                 :             : static void
     645                 :   587532556 : set_page_table_entry (void *p, page_entry *entry)
     646                 :             : {
     647                 :   587532556 :   page_entry ***base;
     648                 :   587532556 :   size_t L1, L2;
     649                 :             : 
     650                 :             : #if HOST_BITS_PER_PTR <= 32
     651                 :             :   base = &G.lookup[0];
     652                 :             : #else
     653                 :   587532556 :   page_table table;
     654                 :   587532556 :   uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
     655                 :   588543578 :   for (table = G.lookup; table; table = table->next)
     656                 :   588261080 :     if (table->high_bits == high_bits)
     657                 :   587250058 :       goto found;
     658                 :             : 
     659                 :             :   /* Not found -- allocate a new table.  */
     660                 :      282498 :   table = XCNEW (struct page_table_chain);
     661                 :      282498 :   table->next = G.lookup;
     662                 :      282498 :   table->high_bits = high_bits;
     663                 :      282498 :   G.lookup = table;
     664                 :   587532556 : found:
     665                 :   587532556 :   base = &table->table[0];
     666                 :             : #endif
     667                 :             : 
     668                 :             :   /* Extract the level 1 and 2 indices.  */
     669                 :   587532556 :   L1 = LOOKUP_L1 (p);
     670                 :   587532556 :   L2 = LOOKUP_L2 (p);
     671                 :             : 
     672                 :   587532556 :   if (base[L1] == NULL)
     673                 :      612048 :     base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
     674                 :             : 
     675                 :   587532556 :   base[L1][L2] = entry;
     676                 :   587532556 : }
     677                 :             : 
     678                 :             : /* Prints the page-entry for object size ORDER, for debugging.  */
     679                 :             : 
     680                 :             : DEBUG_FUNCTION void
     681                 :           0 : debug_print_page_list (int order)
     682                 :             : {
     683                 :           0 :   page_entry *p;
     684                 :           0 :   printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
     685                 :           0 :           (void *) G.page_tails[order]);
     686                 :           0 :   p = G.pages[order];
     687                 :           0 :   while (p != NULL)
     688                 :             :     {
     689                 :           0 :       printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
     690                 :           0 :               p->num_free_objects);
     691                 :           0 :       p = p->next;
     692                 :             :     }
     693                 :           0 :   printf ("NULL\n");
     694                 :           0 :   fflush (stdout);
     695                 :           0 : }
     696                 :             : 
     697                 :             : #ifdef USING_MMAP
     698                 :             : /* Allocate SIZE bytes of anonymous memory, preferably near PREF,
     699                 :             :    (if non-null).  The ifdef structure here is intended to cause a
     700                 :             :    compile error unless exactly one of the HAVE_* is defined.  */
     701                 :             : 
     702                 :             : static inline char *
     703                 :     5540894 : alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
     704                 :             : {
     705                 :             : #ifdef HAVE_MMAP_ANON
     706                 :     5540894 :   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
     707                 :             :                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
     708                 :             : #endif
     709                 :             : #ifdef HAVE_MMAP_DEV_ZERO
     710                 :             :   char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
     711                 :             :                               MAP_PRIVATE, G.dev_zero_fd, 0);
     712                 :             : #endif
     713                 :             : 
     714                 :     5540894 :   if (page == (char *) MAP_FAILED)
     715                 :             :     {
     716                 :           0 :       if (!check)
     717                 :             :         return NULL;
     718                 :           0 :       perror ("virtual memory exhausted");
     719                 :           0 :       exit (FATAL_EXIT_CODE);
     720                 :             :     }
     721                 :             : 
     722                 :             :   /* Remember that we allocated this memory.  */
     723                 :     5540894 :   G.bytes_mapped += size;
     724                 :             : 
     725                 :             :   /* Pretend we don't have access to the allocated pages.  We'll enable
     726                 :             :      access to smaller pieces of the area in ggc_internal_alloc.  Discard the
     727                 :             :      handle to avoid handle leak.  */
     728                 :     5540894 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
     729                 :             : 
     730                 :     5540894 :   return page;
     731                 :             : }
     732                 :             : #endif
     733                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     734                 :             : /* Compute the index for this page into the page group.  */
     735                 :             : 
     736                 :             : static inline size_t
     737                 :             : page_group_index (char *allocation, char *page)
     738                 :             : {
     739                 :             :   return (size_t) (page - allocation) >> G.lg_pagesize;
     740                 :             : }
     741                 :             : 
     742                 :             : /* Set and clear the in_use bit for this page in the page group.  */
     743                 :             : 
     744                 :             : static inline void
     745                 :             : set_page_group_in_use (page_group *group, char *page)
     746                 :             : {
     747                 :             :   group->in_use |= 1 << page_group_index (group->allocation, page);
     748                 :             : }
     749                 :             : 
     750                 :             : static inline void
     751                 :             : clear_page_group_in_use (page_group *group, char *page)
     752                 :             : {
     753                 :             :   group->in_use &= ~(1 << page_group_index (group->allocation, page));
     754                 :             : }
     755                 :             : #endif
     756                 :             : 
     757                 :             : /* Allocate a new page for allocating objects of size 2^ORDER,
     758                 :             :    and return an entry for it.  The entry is not added to the
     759                 :             :    appropriate page_table list.  */
     760                 :             : 
     761                 :             : static inline struct page_entry *
     762                 :   566961674 : alloc_page (unsigned order)
     763                 :             : {
     764                 :   566961674 :   struct page_entry *entry, *p, **pp;
     765                 :   566961674 :   char *page;
     766                 :   566961674 :   size_t num_objects;
     767                 :   566961674 :   size_t bitmap_size;
     768                 :   566961674 :   size_t page_entry_size;
     769                 :   566961674 :   size_t entry_size;
     770                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     771                 :             :   page_group *group;
     772                 :             : #endif
     773                 :             : 
     774                 :   566961674 :   num_objects = OBJECTS_PER_PAGE (order);
     775                 :   566961674 :   bitmap_size = BITMAP_SIZE (num_objects + 1);
     776                 :   566961674 :   page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
     777                 :   566961674 :   entry_size = num_objects * OBJECT_SIZE (order);
     778                 :   566961674 :   if (entry_size < G.pagesize)
     779                 :             :     entry_size = G.pagesize;
     780                 :   566961674 :   entry_size = PAGE_ALIGN (entry_size);
     781                 :             : 
     782                 :   566961674 :   entry = NULL;
     783                 :   566961674 :   page = NULL;
     784                 :             : 
     785                 :             :   /* Check the list of free pages for one we can use.  */
     786                 :  2277796619 :   for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
     787                 :  2272536571 :     if (p->bytes == entry_size)
     788                 :             :       break;
     789                 :             : 
     790                 :   566961674 :   if (p != NULL)
     791                 :             :     {
     792                 :   561701626 :       if (p->discarded)
     793                 :    11534178 :         G.bytes_mapped += p->bytes;
     794                 :   561701626 :       p->discarded = false;
     795                 :             : 
     796                 :             :       /* Recycle the allocated memory from this page ...  */
     797                 :   561701626 :       *pp = p->next;
     798                 :   561701626 :       page = p->page;
     799                 :             : 
     800                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     801                 :             :       group = p->group;
     802                 :             : #endif
     803                 :             : 
     804                 :             :       /* ... and, if possible, the page entry itself.  */
     805                 :   561701626 :       if (p->order == order)
     806                 :             :         {
     807                 :    53168506 :           entry = p;
     808                 :    53168506 :           memset (entry, 0, page_entry_size);
     809                 :             :         }
     810                 :             :       else
     811                 :   508533120 :         free (p);
     812                 :             :     }
     813                 :             : #ifdef USING_MMAP
     814                 :     5260048 :   else if (entry_size == G.pagesize)
     815                 :             :     {
     816                 :             :       /* We want just one page.  Allocate a bunch of them and put the
     817                 :             :          extras on the freelist.  (Can only do this optimization with
     818                 :             :          mmap for backing store.)  */
     819                 :     1250642 :       struct page_entry *e, *f = G.free_pages;
     820                 :     1250642 :       int i, entries = GGC_QUIRE_SIZE;
     821                 :             : 
     822                 :     1250642 :       page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
     823                 :     1250642 :       if (page == NULL)
     824                 :             :         {
     825                 :           0 :           page = alloc_anon (NULL, G.pagesize, true);
     826                 :           0 :           entries = 1;
     827                 :             :         }
     828                 :             : 
     829                 :             :       /* This loop counts down so that the chain will be in ascending
     830                 :             :          memory order.  */
     831                 :   640328704 :       for (i = entries - 1; i >= 1; i--)
     832                 :             :         {
     833                 :   639078062 :           e = XCNEWVAR (struct page_entry, page_entry_size);
     834                 :   639078062 :           e->order = order;
     835                 :   639078062 :           e->bytes = G.pagesize;
     836                 :   639078062 :           e->page = page + (i << G.lg_pagesize);
     837                 :   639078062 :           e->next = f;
     838                 :   639078062 :           f = e;
     839                 :             :         }
     840                 :             : 
     841                 :     1250642 :       G.free_pages = f;
     842                 :             :     }
     843                 :             :   else
     844                 :     4009406 :     page = alloc_anon (NULL, entry_size, true);
     845                 :             : #endif
     846                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     847                 :             :   else
     848                 :             :     {
     849                 :             :       /* Allocate a large block of memory and serve out the aligned
     850                 :             :          pages therein.  This results in much less memory wastage
     851                 :             :          than the traditional implementation of valloc.  */
     852                 :             : 
     853                 :             :       char *allocation, *a, *enda;
     854                 :             :       size_t alloc_size, head_slop, tail_slop;
     855                 :             :       int multiple_pages = (entry_size == G.pagesize);
     856                 :             : 
     857                 :             :       if (multiple_pages)
     858                 :             :         alloc_size = GGC_QUIRE_SIZE * G.pagesize;
     859                 :             :       else
     860                 :             :         alloc_size = entry_size + G.pagesize - 1;
     861                 :             :       allocation = XNEWVEC (char, alloc_size);
     862                 :             : 
     863                 :             :       page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
     864                 :             :       head_slop = page - allocation;
     865                 :             :       if (multiple_pages)
     866                 :             :         tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
     867                 :             :       else
     868                 :             :         tail_slop = alloc_size - entry_size - head_slop;
     869                 :             :       enda = allocation + alloc_size - tail_slop;
     870                 :             : 
     871                 :             :       /* We allocated N pages, which are likely not aligned, leaving
     872                 :             :          us with N-1 usable pages.  We plan to place the page_group
     873                 :             :          structure somewhere in the slop.  */
     874                 :             :       if (head_slop >= sizeof (page_group))
     875                 :             :         group = (page_group *)page - 1;
     876                 :             :       else
     877                 :             :         {
     878                 :             :           /* We magically got an aligned allocation.  Too bad, we have
     879                 :             :              to waste a page anyway.  */
     880                 :             :           if (tail_slop == 0)
     881                 :             :             {
     882                 :             :               enda -= G.pagesize;
     883                 :             :               tail_slop += G.pagesize;
     884                 :             :             }
     885                 :             :           gcc_assert (tail_slop >= sizeof (page_group));
     886                 :             :           group = (page_group *)enda;
     887                 :             :           tail_slop -= sizeof (page_group);
     888                 :             :         }
     889                 :             : 
     890                 :             :       /* Remember that we allocated this memory.  */
     891                 :             :       group->next = G.page_groups;
     892                 :             :       group->allocation = allocation;
     893                 :             :       group->alloc_size = alloc_size;
     894                 :             :       group->in_use = 0;
     895                 :             :       G.page_groups = group;
     896                 :             :       G.bytes_mapped += alloc_size;
     897                 :             : 
     898                 :             :       /* If we allocated multiple pages, put the rest on the free list.  */
     899                 :             :       if (multiple_pages)
     900                 :             :         {
     901                 :             :           struct page_entry *e, *f = G.free_pages;
     902                 :             :           for (a = enda - G.pagesize; a != page; a -= G.pagesize)
     903                 :             :             {
     904                 :             :               e = XCNEWVAR (struct page_entry, page_entry_size);
     905                 :             :               e->order = order;
     906                 :             :               e->bytes = G.pagesize;
     907                 :             :               e->page = a;
     908                 :             :               e->group = group;
     909                 :             :               e->next = f;
     910                 :             :               f = e;
     911                 :             :             }
     912                 :             :           G.free_pages = f;
     913                 :             :         }
     914                 :             :     }
     915                 :             : #endif
     916                 :             : 
     917                 :   566961674 :   if (entry == NULL)
     918                 :   513793168 :     entry = XCNEWVAR (struct page_entry, page_entry_size);
     919                 :             : 
     920                 :   566961674 :   entry->bytes = entry_size;
     921                 :   566961674 :   entry->page = page;
     922                 :   566961674 :   entry->context_depth = G.context_depth;
     923                 :   566961674 :   entry->order = order;
     924                 :   566961674 :   entry->num_free_objects = num_objects;
     925                 :   566961674 :   entry->next_bit_hint = 1;
     926                 :             : 
     927                 :   566961674 :   G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
     928                 :             : 
     929                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     930                 :             :   entry->group = group;
     931                 :             :   set_page_group_in_use (group, page);
     932                 :             : #endif
     933                 :             : 
     934                 :             :   /* Set the one-past-the-end in-use bit.  This acts as a sentry as we
     935                 :             :      increment the hint.  */
     936                 :   566961674 :   entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
     937                 :   566961674 :     = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
     938                 :             : 
     939                 :   566961674 :   set_page_table_entry (page, entry);
     940                 :             : 
     941                 :   566961674 :   if (GGC_DEBUG_LEVEL >= 2)
     942                 :             :     fprintf (G.debug_file,
     943                 :             :              "Allocating page at %p, object size="
     944                 :             :              HOST_SIZE_T_PRINT_UNSIGNED ", data %p-%p\n",
     945                 :             :              (void *) entry, (fmt_size_t) OBJECT_SIZE (order),
     946                 :             :              (void *) page, (void *) (page + entry_size - 1));
     947                 :             : 
     948                 :   566961674 :   return entry;
     949                 :             : }
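                          :             : 
                          :             : /* Editor's note: a standalone, illustrative sketch (not part of
                          :             :    ggc-page.cc; all names are hypothetical) of the trailing-bitmap trick
                          :             :    behind the page_entry_size arithmetic above: the struct declares a
                          :             :    one-element bitmap, and the allocation is over-sized so the in-use
                          :             :    bits live directly behind the fixed fields.  */
                          :             : 
                          :             : #include <limits.h>
                          :             : #include <stdlib.h>
                          :             : 
                          :             : struct demo_entry
                          :             : {
                          :             :   size_t num_objects;
                          :             :   unsigned long in_use_p[1];  /* grows to one bit per object plus the
                          :             :                                  one-past-the-end sentinel bit  */
                          :             : };
                          :             : 
                          :             : static struct demo_entry *
                          :             : demo_alloc_entry (size_t num_objects)
                          :             : {
                          :             :   /* One bit per object plus the sentinel, rounded up to whole longs;
                          :             :      subtract the single long already declared inside the struct.  */
                          :             :   size_t bits = num_objects + 1;
                          :             :   size_t bitmap_bytes = ((bits + CHAR_BIT * sizeof (long) - 1)
                          :             :                          / (CHAR_BIT * sizeof (long))) * sizeof (long);
                          :             :   struct demo_entry *e
                          :             :     = (struct demo_entry *) calloc (1, sizeof (struct demo_entry)
                          :             :                                        - sizeof (long) + bitmap_bytes);
                          :             :   if (e)
                          :             :     e->num_objects = num_objects;
                          :             :   return e;
                          :             : }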
     950                 :             : 
     951                 :             : /* Adjust the size of G.depth so that no index greater than the one
      952                 :             :    used by the top of G.by_depth is used.  */
     953                 :             : 
     954                 :             : static inline void
     955                 :    20354987 : adjust_depth (void)
     956                 :             : {
     957                 :    20354987 :   page_entry *top;
     958                 :             : 
     959                 :    20354987 :   if (G.by_depth_in_use)
     960                 :             :     {
     961                 :    20354987 :       top = G.by_depth[G.by_depth_in_use-1];
     962                 :             : 
     963                 :             :       /* Peel back indices in depth that index into by_depth, so that
     964                 :             :          as new elements are added to by_depth, we note the indices
     965                 :             :          of those elements, if they are for new context depths.  */
     966                 :    20354987 :       while (G.depth_in_use > (size_t)top->context_depth+1)
     967                 :           0 :         --G.depth_in_use;
     968                 :             :     }
     969                 :    20354987 : }
     970                 :             : 
     971                 :             : /* For a page that is no longer needed, put it on the free page list.  */
     972                 :             : 
     973                 :             : static void
     974                 :    20354987 : free_page (page_entry *entry)
     975                 :             : {
     976                 :    20354987 :   if (GGC_DEBUG_LEVEL >= 2)
     977                 :             :     fprintf (G.debug_file,
     978                 :             :              "Deallocating page at %p, data %p-%p\n", (void *) entry,
     979                 :             :              (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
     980                 :             : 
     981                 :             :   /* Mark the page as inaccessible.  Discard the handle to avoid handle
     982                 :             :      leak.  */
     983                 :    20354987 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
     984                 :             : 
     985                 :    20354987 :   set_page_table_entry (entry->page, NULL);
     986                 :             : 
     987                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
     988                 :             :   clear_page_group_in_use (entry->group, entry->page);
     989                 :             : #endif
     990                 :             : 
     991                 :    20354987 :   if (G.by_depth_in_use > 1)
     992                 :             :     {
     993                 :    20354987 :       page_entry *top = G.by_depth[G.by_depth_in_use-1];
     994                 :    20354987 :       int i = entry->index_by_depth;
     995                 :             : 
     996                 :             :       /* We cannot free a page from a context deeper than the current
     997                 :             :          one.  */
     998                 :    20354987 :       gcc_assert (entry->context_depth == top->context_depth);
     999                 :             : 
    1000                 :             :       /* Put top element into freed slot.  */
    1001                 :    20354987 :       G.by_depth[i] = top;
    1002                 :    20354987 :       G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
    1003                 :    20354987 :       top->index_by_depth = i;
    1004                 :             :     }
    1005                 :    20354987 :   --G.by_depth_in_use;
    1006                 :             : 
    1007                 :    20354987 :   adjust_depth ();
    1008                 :             : 
    1009                 :    20354987 :   entry->next = G.free_pages;
    1010                 :    20354987 :   G.free_pages = entry;
    1011                 :    20354987 : }
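                          :             : 
                          :             : /* Editor's note: free_page above removes ENTRY from the G.by_depth array
                          :             :    by overwriting its slot with the last element.  A minimal standalone
                          :             :    illustration of that O(1) unordered-array removal (hypothetical helper,
                          :             :    not part of ggc-page.cc):  */
                          :             : 
                          :             : #include <stddef.h>
                          :             : 
                          :             : /* Remove the element at index I from an unordered array of N pointers by
                          :             :    moving the last element into the freed slot; returns the new count.
                          :             :    The real code additionally updates index_by_depth and G.save_in_use.  */
                          :             : static size_t
                          :             : demo_remove_unordered (void **array, size_t n, size_t i)
                          :             : {
                          :             :   array[i] = array[n - 1];
                          :             :   return n - 1;
                          :             : }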
    1012                 :             : 
    1013                 :             : /* Release the free page cache to the system.  */
    1014                 :             : 
    1015                 :             : static void
    1016                 :      687411 : release_pages (void)
    1017                 :             : {
    1018                 :      687411 :   size_t n1 = 0;
    1019                 :      687411 :   size_t n2 = 0;
    1020                 :             : #ifdef USING_MADVISE
    1021                 :      687411 :   page_entry *p, *start_p;
    1022                 :      687411 :   char *start;
    1023                 :      687411 :   size_t len;
    1024                 :      687411 :   size_t mapped_len;
    1025                 :      687411 :   page_entry *next, *prev, *newprev;
    1026                 :      687411 :   size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
    1027                 :             : 
     1028                 :             :   /* First free larger contiguous areas to the OS.
    1029                 :             :      This allows other allocators to grab these areas if needed.
    1030                 :             :      This is only done on larger chunks to avoid fragmentation. 
    1031                 :             :      This does not always work because the free_pages list is only
    1032                 :             :      approximately sorted. */
    1033                 :             : 
    1034                 :      687411 :   p = G.free_pages;
    1035                 :      687411 :   prev = NULL;
    1036                 :    37632637 :   while (p)
    1037                 :             :     {
    1038                 :    36945226 :       start = p->page;
    1039                 :    36945226 :       start_p = p;
    1040                 :    36945226 :       len = 0;
    1041                 :    36945226 :       mapped_len = 0;
    1042                 :    36945226 :       newprev = prev;
    1043                 :   124017492 :       while (p && p->page == start + len)
    1044                 :             :         {
    1045                 :    87072266 :           len += p->bytes;
    1046                 :    87072266 :           if (!p->discarded)
    1047                 :    49486605 :               mapped_len += p->bytes;
    1048                 :    87072266 :           newprev = p;
    1049                 :    87072266 :           p = p->next;
    1050                 :             :         }
    1051                 :    36945226 :       if (len >= free_unit)
    1052                 :             :         {
    1053                 :    35246295 :           while (start_p != p)
    1054                 :             :             {
    1055                 :    35115142 :               next = start_p->next;
    1056                 :    35115142 :               free (start_p);
    1057                 :    35115142 :               start_p = next;
    1058                 :             :             }
    1059                 :      131153 :           munmap (start, len);
    1060                 :      131153 :           if (prev)
    1061                 :       45571 :             prev->next = p;
    1062                 :             :           else
    1063                 :       85582 :             G.free_pages = p;
    1064                 :      131153 :           G.bytes_mapped -= mapped_len;
    1065                 :      131153 :           n1 += len;
    1066                 :      131153 :           continue;
    1067                 :             :         }
    1068                 :             :       prev = newprev;
     1069                 :             :     }
    1070                 :             : 
    1071                 :             :   /* Now give back the fragmented pages to the OS, but keep the address 
    1072                 :             :      space to reuse it next time. */
    1073                 :             : 
    1074                 :    40747529 :   for (p = G.free_pages; p; )
    1075                 :             :     {
    1076                 :    40060118 :       if (p->discarded)
    1077                 :             :         {
    1078                 :    37579538 :           p = p->next;
    1079                 :    37579538 :           continue;
    1080                 :             :         }
    1081                 :     2480580 :       start = p->page;
    1082                 :     2480580 :       len = p->bytes;
    1083                 :     2480580 :       start_p = p;
    1084                 :     2480580 :       p = p->next;
    1085                 :    14377586 :       while (p && p->page == start + len)
    1086                 :             :         {
    1087                 :    11897006 :           len += p->bytes;
    1088                 :    11897006 :           p = p->next;
    1089                 :             :         }
    1090                 :             :       /* Give the page back to the kernel, but don't free the mapping.
    1091                 :             :          This avoids fragmentation in the virtual memory map of the 
    1092                 :             :          process. Next time we can reuse it by just touching it. */
    1093                 :     2480580 :       madvise (start, len, MADV_DONTNEED);
     1094                 :             :       /* Don't count those pages as mapped, so as not to disturb the garbage
     1095                 :             :          collector unnecessarily.  */
    1096                 :     2480580 :       G.bytes_mapped -= len;
    1097                 :     2480580 :       n2 += len;
    1098                 :    16858166 :       while (start_p != p)
    1099                 :             :         {
    1100                 :    14377586 :           start_p->discarded = true;
    1101                 :    14377586 :           start_p = start_p->next;
    1102                 :             :         }
    1103                 :             :     }
    1104                 :             : #endif
    1105                 :             : #if defined(USING_MMAP) && !defined(USING_MADVISE)
    1106                 :             :   page_entry *p, *next;
    1107                 :             :   char *start;
    1108                 :             :   size_t len;
    1109                 :             : 
    1110                 :             :   /* Gather up adjacent pages so they are unmapped together.  */
    1111                 :             :   p = G.free_pages;
    1112                 :             : 
    1113                 :             :   while (p)
    1114                 :             :     {
    1115                 :             :       start = p->page;
    1116                 :             :       next = p->next;
    1117                 :             :       len = p->bytes;
    1118                 :             :       free (p);
    1119                 :             :       p = next;
    1120                 :             : 
    1121                 :             :       while (p && p->page == start + len)
    1122                 :             :         {
    1123                 :             :           next = p->next;
    1124                 :             :           len += p->bytes;
    1125                 :             :           free (p);
    1126                 :             :           p = next;
    1127                 :             :         }
    1128                 :             : 
    1129                 :             :       munmap (start, len);
    1130                 :             :       n1 += len;
    1131                 :             :       G.bytes_mapped -= len;
    1132                 :             :     }
    1133                 :             : 
    1134                 :             :   G.free_pages = NULL;
    1135                 :             : #endif
    1136                 :             : #ifdef USING_MALLOC_PAGE_GROUPS
    1137                 :             :   page_entry **pp, *p;
    1138                 :             :   page_group **gp, *g;
    1139                 :             : 
    1140                 :             :   /* Remove all pages from free page groups from the list.  */
    1141                 :             :   pp = &G.free_pages;
    1142                 :             :   while ((p = *pp) != NULL)
    1143                 :             :     if (p->group->in_use == 0)
    1144                 :             :       {
    1145                 :             :         *pp = p->next;
    1146                 :             :         free (p);
    1147                 :             :       }
    1148                 :             :     else
    1149                 :             :       pp = &p->next;
    1150                 :             : 
    1151                 :             :   /* Remove all free page groups, and release the storage.  */
    1152                 :             :   gp = &G.page_groups;
    1153                 :             :   while ((g = *gp) != NULL)
    1154                 :             :     if (g->in_use == 0)
    1155                 :             :       {
    1156                 :             :         *gp = g->next;
    1157                 :             :         G.bytes_mapped -= g->alloc_size;
    1158                 :             :         n1 += g->alloc_size;
    1159                 :             :         free (g->allocation);
    1160                 :             :       }
    1161                 :             :     else
    1162                 :             :       gp = &g->next;
    1163                 :             : #endif
    1164                 :      687411 :   if (!quiet_flag && (n1 || n2))
    1165                 :             :     {
    1166                 :           0 :       fprintf (stderr, " {GC");
    1167                 :           0 :       if (n1)
    1168                 :           0 :         fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
    1169                 :           0 :       if (n2)
    1170                 :           0 :         fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
    1171                 :           0 :       fprintf (stderr, "}");
    1172                 :             :     }
    1173                 :      687411 : }
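                          :             : 
                          :             : /* Editor's note: both loops above walk G.free_pages and merge
                          :             :    address-contiguous entries into a single [start, start + len) run
                          :             :    before handing it to munmap or madvise.  A condensed standalone sketch
                          :             :    of that coalescing step (hypothetical struct, not part of
                          :             :    ggc-page.cc):  */
                          :             : 
                          :             : #include <stddef.h>
                          :             : 
                          :             : struct demo_page
                          :             : {
                          :             :   char *page;               /* start address of the free page        */
                          :             :   size_t bytes;             /* size of the free page                 */
                          :             :   struct demo_page *next;   /* list kept in roughly ascending order  */
                          :             : };
                          :             : 
                          :             : /* Starting at *P (which must be non-null, as in the callers above),
                          :             :    accumulate the run of entries whose addresses are contiguous with the
                          :             :    first one.  On return *P points past the run and *LEN holds its total
                          :             :    size, ready for munmap()/madvise().  */
                          :             : static char *
                          :             : demo_coalesce (struct demo_page **p, size_t *len)
                          :             : {
                          :             :   char *start = (*p)->page;
                          :             :   *len = 0;
                          :             :   while (*p && (*p)->page == start + *len)
                          :             :     {
                          :             :       *len += (*p)->bytes;
                          :             :       *p = (*p)->next;
                          :             :     }
                          :             :   return start;
                          :             : }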
    1174                 :             : 
    1175                 :             : /* This table provides a fast way to determine ceil(log_2(size)) for
    1176                 :             :    allocation requests.  The minimum allocation size is eight bytes.  */
    1177                 :             : #define NUM_SIZE_LOOKUP 512
    1178                 :             : static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
    1179                 :             : {
    1180                 :             :   3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
    1181                 :             :   4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
    1182                 :             :   5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    1183                 :             :   6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
    1184                 :             :   6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    1185                 :             :   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    1186                 :             :   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    1187                 :             :   7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
    1188                 :             :   7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1189                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1190                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1191                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1192                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1193                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1194                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1195                 :             :   8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
    1196                 :             :   8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1197                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1198                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1199                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1200                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1201                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1202                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1203                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1204                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1205                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1206                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1207                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1208                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1209                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1210                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
    1211                 :             :   9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
    1212                 :             : };
    1213                 :             : 
    1214                 :             : /* For a given size of memory requested for allocation, return the
    1215                 :             :    actual size that is going to be allocated, as well as the size
    1216                 :             :    order.  */
    1217                 :             : 
    1218                 :             : static void
    1219                 : 43064263054 : ggc_round_alloc_size_1 (size_t requested_size,
    1220                 :             :                         size_t *size_order,
    1221                 :             :                         size_t *alloced_size)
    1222                 :             : {
    1223                 : 43064263054 :   size_t order, object_size;
    1224                 :             : 
    1225                 : 43064263054 :   if (requested_size < NUM_SIZE_LOOKUP)
    1226                 :             :     {
    1227                 : 42886670239 :       order = size_lookup[requested_size];
    1228                 : 42886670239 :       object_size = OBJECT_SIZE (order);
    1229                 :             :     }
    1230                 :             :   else
    1231                 :             :     {
    1232                 :             :       order = 10;
    1233                 :   266489415 :       while (requested_size > (object_size = OBJECT_SIZE (order)))
    1234                 :    88896600 :         order++;
    1235                 :             :     }
    1236                 :             : 
    1237                 : 43064263054 :   if (size_order)
    1238                 : 40816517192 :     *size_order = order;
    1239                 : 43064263054 :   if (alloced_size)
    1240                 : 43064263054 :     *alloced_size = object_size;
    1241                 : 43064263054 : }
    1242                 :             : 
    1243                 :             : /* For a given size of memory requested for allocation, return the
    1244                 :             :    actual size that is going to be allocated.  */
    1245                 :             : 
    1246                 :             : size_t
    1247                 :  2247745862 : ggc_round_alloc_size (size_t requested_size)
    1248                 :             : {
    1249                 :  2247745862 :   size_t size = 0;
    1250                 :             :   
    1251                 :  2247745862 :   ggc_round_alloc_size_1 (requested_size, NULL, &size);
    1252                 :  2247745862 :   return size;
    1253                 :             : }
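                          :             : 
                          :             : /* Editor's note: a standalone illustration of the rounding performed
                          :             :    above, assuming (as the size_lookup comment states) that the table
                          :             :    holds ceil(log_2(size)) and that OBJECT_SIZE (order) is 1 << order for
                          :             :    these base orders; the real allocator also installs extra,
                          :             :    non-power-of-two orders at initialization time.  The helper below is
                          :             :    hypothetical and not part of ggc-page.cc.  */
                          :             : 
                          :             : #include <stddef.h>
                          :             : 
                          :             : /* Round REQUESTED_SIZE up to the next power-of-two bucket of at least
                          :             :    8 bytes; e.g. a 24-byte request maps to order 5 and 32 bytes, matching
                          :             :    size_lookup[24] == 5 in the table above.  */
                          :             : static size_t
                          :             : demo_round_alloc_size (size_t requested_size, size_t *order_out)
                          :             : {
                          :             :   size_t order = 3;                  /* minimum allocation is 8 bytes */
                          :             :   while (((size_t) 1 << order) < requested_size)
                          :             :     order++;
                          :             :   if (order_out)
                          :             :     *order_out = order;
                          :             :   return (size_t) 1 << order;
                          :             : }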
    1254                 :             : 
    1255                 :             : /* Push a finalizer onto the appropriate vec.  */
    1256                 :             : 
    1257                 :             : static void
    1258                 :    62086964 : add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
    1259                 :             : {
    1260                 :    62086964 :   if (f == NULL)
    1261                 :             :     /* No finalizer.  */;
    1262                 :    62086964 :   else if (n == 1)
    1263                 :             :     {
    1264                 :    62086964 :       finalizer fin (result, f);
    1265                 :    62086964 :       G.finalizers[G.context_depth].safe_push (fin);
    1266                 :             :     }
    1267                 :             :   else
    1268                 :             :     {
    1269                 :           0 :       vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
    1270                 :           0 :       G.vec_finalizers[G.context_depth].safe_push (fin);
    1271                 :             :     }
    1272                 :    62086964 : }
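                          :             : 
                          :             : /* Editor's note: a finalizer is recorded as a (pointer, function) pair
                          :             :    (plus a stride and count for vectors) and is invoked during collection
                          :             :    if its object turns out to be unreachable.  A minimal standalone sketch
                          :             :    of that pattern (hypothetical names, not part of ggc-page.cc; the real
                          :             :    bookkeeping lives in G.finalizers / G.vec_finalizers):  */
                          :             : 
                          :             : struct demo_finalizer
                          :             : {
                          :             :   void *object;             /* the GC-allocated object       */
                          :             :   void (*fn) (void *);      /* run when the object is swept  */
                          :             : };
                          :             : 
                          :             : /* Run F's callback if its object was not marked during this collection;
                          :             :    return nonzero when the record can be dropped.  */
                          :             : static int
                          :             : demo_maybe_finalize (const struct demo_finalizer *f, int object_is_marked)
                          :             : {
                          :             :   if (!object_is_marked)
                          :             :     {
                          :             :       f->fn (f->object);
                          :             :       return 1;
                          :             :     }
                          :             :   return 0;
                          :             : }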
    1273                 :             : 
    1274                 :             : /* Allocate a chunk of memory of SIZE bytes.  Its contents are undefined.  */
    1275                 :             : 
    1276                 :             : void *
    1277                 : 40816517192 : ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
    1278                 :             :                     MEM_STAT_DECL)
    1279                 :             : {
    1280                 : 40816517192 :   size_t order, word, bit, object_offset, object_size;
    1281                 : 40816517192 :   struct page_entry *entry;
    1282                 : 40816517192 :   void *result;
    1283                 :             : 
    1284                 : 40816517192 :   ggc_round_alloc_size_1 (size, &order, &object_size);
    1285                 :             : 
    1286                 :             :   /* If there are non-full pages for this size allocation, they are at
    1287                 :             :      the head of the list.  */
    1288                 : 40816517192 :   entry = G.pages[order];
    1289                 :             : 
    1290                 :             :   /* If there is no page for this object size, or all pages in this
    1291                 :             :      context are full, allocate a new page.  */
    1292                 : 40816517192 :   if (entry == NULL || entry->num_free_objects == 0)
    1293                 :             :     {
    1294                 :   566961674 :       struct page_entry *new_entry;
    1295                 :   566961674 :       new_entry = alloc_page (order);
    1296                 :             : 
    1297                 :   566961674 :       new_entry->index_by_depth = G.by_depth_in_use;
    1298                 :   566961674 :       push_by_depth (new_entry, 0);
    1299                 :             : 
    1300                 :             :       /* We can skip context depths, if we do, make sure we go all the
    1301                 :             :          way to the new depth.  */
    1302                 :  1134204194 :       while (new_entry->context_depth >= G.depth_in_use)
    1303                 :      280846 :         push_depth (G.by_depth_in_use-1);
    1304                 :             : 
    1305                 :             :       /* If this is the only entry, it's also the tail.  If it is not
    1306                 :             :          the only entry, then we must update the PREV pointer of the
    1307                 :             :          ENTRY (G.pages[order]) to point to our new page entry.  */
    1308                 :   566961674 :       if (entry == NULL)
    1309                 :     8503256 :         G.page_tails[order] = new_entry;
    1310                 :             :       else
    1311                 :   558458418 :         entry->prev = new_entry;
    1312                 :             : 
    1313                 :             :       /* Put new pages at the head of the page list.  By definition the
     1314                 :             :          entry at the head of the list always has a NULL PREV pointer.  */
    1315                 :   566961674 :       new_entry->next = entry;
    1316                 :   566961674 :       new_entry->prev = NULL;
    1317                 :   566961674 :       entry = new_entry;
    1318                 :   566961674 :       G.pages[order] = new_entry;
    1319                 :             : 
    1320                 :             :       /* For a new page, we know the word and bit positions (in the
    1321                 :             :          in_use bitmap) of the first available object -- they're zero.  */
    1322                 :   566961674 :       new_entry->next_bit_hint = 1;
    1323                 :   566961674 :       word = 0;
    1324                 :   566961674 :       bit = 0;
    1325                 :   566961674 :       object_offset = 0;
    1326                 :   566961674 :     }
    1327                 :             :   else
    1328                 :             :     {
    1329                 :             :       /* First try to use the hint left from the previous allocation
    1330                 :             :          to locate a clear bit in the in-use bitmap.  We've made sure
    1331                 :             :          that the one-past-the-end bit is always set, so if the hint
    1332                 :             :          has run over, this test will fail.  */
    1333                 : 40249555518 :       unsigned hint = entry->next_bit_hint;
    1334                 : 40249555518 :       word = hint / HOST_BITS_PER_LONG;
    1335                 : 40249555518 :       bit = hint % HOST_BITS_PER_LONG;
    1336                 :             : 
    1337                 :             :       /* If the hint didn't work, scan the bitmap from the beginning.  */
    1338                 : 40249555518 :       if ((entry->in_use_p[word] >> bit) & 1)
    1339                 :             :         {
    1340                 :  5559678649 :           word = bit = 0;
    1341                 :  5559678649 :           while (~entry->in_use_p[word] == 0)
    1342                 :  1555138486 :             ++word;
    1343                 :             : 
    1344                 :             : #if GCC_VERSION >= 3004
    1345                 :  4004540163 :           bit = __builtin_ctzl (~entry->in_use_p[word]);
    1346                 :             : #else
    1347                 :             :           while ((entry->in_use_p[word] >> bit) & 1)
    1348                 :             :             ++bit;
    1349                 :             : #endif
    1350                 :             : 
    1351                 :  4004540163 :           hint = word * HOST_BITS_PER_LONG + bit;
    1352                 :             :         }
    1353                 :             : 
    1354                 :             :       /* Next time, try the next bit.  */
    1355                 : 40249555518 :       entry->next_bit_hint = hint + 1;
    1356                 :             : 
    1357                 : 40249555518 :       object_offset = hint * object_size;
    1358                 :             :     }
    1359                 :             : 
    1360                 :             :   /* Set the in-use bit.  */
    1361                 : 40816517192 :   entry->in_use_p[word] |= ((unsigned long) 1 << bit);
    1362                 :             : 
    1363                 :             :   /* Keep a running total of the number of free objects.  If this page
    1364                 :             :      fills up, we may have to move it to the end of the list if the
    1365                 :             :      next page isn't full.  If the next page is full, all subsequent
    1366                 :             :      pages are full, so there's no need to move it.  */
    1367                 : 40816517192 :   if (--entry->num_free_objects == 0
    1368                 :  1123523855 :       && entry->next != NULL
    1369                 : 41933184386 :       && entry->next->num_free_objects > 0)
    1370                 :             :     {
    1371                 :             :       /* We have a new head for the list.  */
    1372                 :   515584235 :       G.pages[order] = entry->next;
    1373                 :             : 
    1374                 :             :       /* We are moving ENTRY to the end of the page table list.
    1375                 :             :          The new page at the head of the list will have NULL in
    1376                 :             :          its PREV field and ENTRY will have NULL in its NEXT field.  */
    1377                 :   515584235 :       entry->next->prev = NULL;
    1378                 :   515584235 :       entry->next = NULL;
    1379                 :             : 
    1380                 :             :       /* Append ENTRY to the tail of the list.  */
    1381                 :   515584235 :       entry->prev = G.page_tails[order];
    1382                 :   515584235 :       G.page_tails[order]->next = entry;
    1383                 :   515584235 :       G.page_tails[order] = entry;
    1384                 :             :     }
    1385                 :             : 
    1386                 :             :   /* Calculate the object's address.  */
    1387                 : 40816517192 :   result = entry->page + object_offset;
    1388                 : 40816517192 :   if (GATHER_STATISTICS)
    1389                 :             :     ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
    1390                 :             :                          result FINAL_PASS_MEM_STAT);
    1391                 :             : 
    1392                 :             : #ifdef ENABLE_GC_CHECKING
    1393                 :             :   /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
     1394                 :             :      exact same semantics in the presence of memory bugs, regardless of
    1395                 :             :      ENABLE_VALGRIND_CHECKING.  We override this request below.  Drop the
    1396                 :             :      handle to avoid handle leak.  */
    1397                 : 40816517192 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
    1398                 :             : 
    1399                 :             :   /* `Poison' the entire allocated object, including any padding at
    1400                 :             :      the end.  */
    1401                 : 40816517192 :   memset (result, 0xaf, object_size);
    1402                 :             : 
     1403                 :             :   /* Make the bytes after the end of the object inaccessible.  Discard the
    1404                 :             :      handle to avoid handle leak.  */
    1405                 :             :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
    1406                 : 40816517192 :                                                 object_size - size));
    1407                 :             : #endif
    1408                 :             : 
    1409                 :             :   /* Tell Valgrind that the memory is there, but its content isn't
    1410                 :             :      defined.  The bytes at the end of the object are still marked
     1411                 :             :      inaccessible.  */
    1412                 : 40816517192 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
    1413                 :             : 
    1414                 :             :   /* Keep track of how many bytes are being allocated.  This
    1415                 :             :      information is used in deciding when to collect.  */
    1416                 : 40816517192 :   G.allocated += object_size;
    1417                 :             : 
    1418                 :             :   /* For timevar statistics.  */
    1419                 : 40816517192 :   timevar_ggc_mem_total += object_size;
    1420                 :             : 
    1421                 : 40816517192 :   if (f)
    1422                 :    62086964 :     add_finalizer (result, f, s, n);
    1423                 :             : 
    1424                 : 40816517192 :   if (GATHER_STATISTICS)
    1425                 :             :     {
    1426                 :             :       size_t overhead = object_size - size;
    1427                 :             : 
    1428                 :             :       G.stats.total_overhead += overhead;
    1429                 :             :       G.stats.total_allocated += object_size;
    1430                 :             :       G.stats.total_overhead_per_order[order] += overhead;
    1431                 :             :       G.stats.total_allocated_per_order[order] += object_size;
    1432                 :             : 
    1433                 :             :       if (size <= 32)
    1434                 :             :         {
    1435                 :             :           G.stats.total_overhead_under32 += overhead;
    1436                 :             :           G.stats.total_allocated_under32 += object_size;
    1437                 :             :         }
    1438                 :             :       if (size <= 64)
    1439                 :             :         {
    1440                 :             :           G.stats.total_overhead_under64 += overhead;
    1441                 :             :           G.stats.total_allocated_under64 += object_size;
    1442                 :             :         }
    1443                 :             :       if (size <= 128)
    1444                 :             :         {
    1445                 :             :           G.stats.total_overhead_under128 += overhead;
    1446                 :             :           G.stats.total_allocated_under128 += object_size;
    1447                 :             :         }
    1448                 :             :     }
    1449                 :             : 
    1450                 : 40816517192 :   if (GGC_DEBUG_LEVEL >= 3)
    1451                 :             :     fprintf (G.debug_file,
    1452                 :             :              "Allocating object, requested size="
    1453                 :             :              HOST_SIZE_T_PRINT_UNSIGNED ", actual=" HOST_SIZE_T_PRINT_UNSIGNED
    1454                 :             :              " at %p on %p\n",
    1455                 :             :              (fmt_size_t) size, (fmt_size_t) object_size, result,
    1456                 :             :              (void *) entry);
    1457                 :             : 
    1458                 : 40816517192 :   return result;
    1459                 :             : }
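                          :             : 
                          :             : /* Editor's note: a standalone sketch of the free-slot search above.  The
                          :             :    hint is a bit index into in_use_p[]; when it misses, the code scans for
                          :             :    the first word that is not all-ones and uses __builtin_ctzl to find its
                          :             :    lowest clear bit.  The caller must guarantee a clear bit exists, which
                          :             :    ggc_internal_alloc does by checking num_free_objects first.  The helper
                          :             :    is hypothetical and not part of ggc-page.cc.  */
                          :             : 
                          :             : #include <limits.h>
                          :             : 
                          :             : /* Return the index of the first clear bit in BITMAP.  */
                          :             : static unsigned
                          :             : demo_find_free_slot (const unsigned long *bitmap)
                          :             : {
                          :             :   unsigned bits_per_word = (unsigned) (CHAR_BIT * sizeof (unsigned long));
                          :             :   unsigned word = 0;
                          :             :   while (~bitmap[word] == 0)              /* skip completely full words */
                          :             :     ++word;
                          :             :   unsigned bit = (unsigned) __builtin_ctzl (~bitmap[word]);
                          :             :   return word * bits_per_word + bit;
                          :             : }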
    1460                 :             : 
    1461                 :             : /* Mark function for strings.  */
    1462                 :             : 
    1463                 :             : void
    1464                 :  5748122685 : gt_ggc_m_S (const void *p)
    1465                 :             : {
    1466                 :  5748122685 :   page_entry *entry;
    1467                 :  5748122685 :   unsigned bit, word;
    1468                 :  5748122685 :   unsigned long mask;
    1469                 :  5748122685 :   unsigned long offset;
    1470                 :             : 
    1471                 :  5748122685 :   if (!p)
    1472                 :             :     return;
    1473                 :             : 
    1474                 :             :   /* Look up the page on which the object is alloced.  If it was not
    1475                 :             :      GC allocated, gracefully bail out.  */
    1476                 :  3665349274 :   entry = safe_lookup_page_table_entry (p);
    1477                 :  3665349274 :   if (!entry)
    1478                 :             :     return;
    1479                 :             : 
    1480                 :             :   /* Calculate the index of the object on the page; this is its bit
    1481                 :             :      position in the in_use_p bitmap.  Note that because a char* might
    1482                 :             :      point to the middle of an object, we need special code here to
    1483                 :             :      make sure P points to the start of an object.  */
    1484                 :  3056457430 :   offset = ((const char *) p - entry->page) % object_size_table[entry->order];
    1485                 :  3056457430 :   if (offset)
    1486                 :             :     {
    1487                 :             :       /* Here we've seen a char* which does not point to the beginning
    1488                 :             :          of an allocated object.  We assume it points to the middle of
    1489                 :             :          a STRING_CST.  */
    1490                 :       70523 :       gcc_assert (offset == offsetof (struct tree_string, str));
    1491                 :       70523 :       p = ((const char *) p) - offset;
    1492                 :       70523 :       gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
    1493                 :       70523 :       return;
    1494                 :             :     }
    1495                 :             : 
    1496                 :  3056386907 :   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
    1497                 :  3056386907 :   word = bit / HOST_BITS_PER_LONG;
    1498                 :  3056386907 :   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
    1499                 :             : 
    1500                 :             :   /* If the bit was previously set, skip it.  */
    1501                 :  3056386907 :   if (entry->in_use_p[word] & mask)
    1502                 :             :     return;
    1503                 :             : 
    1504                 :             :   /* Otherwise set it, and decrement the free object count.  */
    1505                 :  2958810730 :   entry->in_use_p[word] |= mask;
    1506                 :  2958810730 :   entry->num_free_objects -= 1;
    1507                 :             : 
    1508                 :  2958810730 :   if (GGC_DEBUG_LEVEL >= 4)
    1509                 :             :     fprintf (G.debug_file, "Marking %p\n", p);
    1510                 :             : 
    1511                 :  2958810730 :   return;
    1512                 :             : }
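                          :             : 
                          :             : /* Editor's note: the offset computation above snaps an interior char*
                          :             :    back to the first byte of its object by taking the distance from the
                          :             :    page base modulo the page's uniform object size.  A minimal standalone
                          :             :    illustration (hypothetical helper, not part of ggc-page.cc):  */
                          :             : 
                          :             : #include <stddef.h>
                          :             : 
                          :             : /* Given PAGE, the OBJECT_SIZE used on that page, and an interior pointer
                          :             :    P into one of its objects, return the start of that object.  */
                          :             : static const char *
                          :             : demo_object_start (const char *page, size_t object_size, const char *p)
                          :             : {
                          :             :   size_t offset = (size_t) (p - page) % object_size;
                          :             :   return p - offset;
                          :             : }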
    1513                 :             : 
    1514                 :             : 
    1515                 :             : /* User-callable entry points for marking string X.  */
    1516                 :             : 
    1517                 :             : void
    1518                 :           0 : gt_ggc_mx (const char *& x)
    1519                 :             : {
    1520                 :           0 :   gt_ggc_m_S (x);
    1521                 :           0 : }
    1522                 :             : 
    1523                 :             : void
    1524                 :      247608 : gt_ggc_mx (char *& x)
    1525                 :             : {
    1526                 :      247608 :   gt_ggc_m_S (x);
    1527                 :      247608 : }
    1528                 :             : 
    1529                 :             : void
    1530                 :           0 : gt_ggc_mx (unsigned char *& x)
    1531                 :             : {
    1532                 :           0 :   gt_ggc_m_S (x);
    1533                 :           0 : }
    1534                 :             : 
    1535                 :             : void
    1536                 :         352 : gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
    1537                 :             : {
    1538                 :         352 : }
    1539                 :             : 
     1540                 :             : /* If P is not marked, mark it and return false.  Otherwise return true.
    1541                 :             :    P must have been allocated by the GC allocator; it mustn't point to
    1542                 :             :    static objects, stack variables, or memory allocated with malloc.  */
    1543                 :             : 
    1544                 :             : bool
    1545                 : >20624*10^7 : ggc_set_mark (const void *p)
    1546                 :             : {
    1547                 : >20624*10^7 :   page_entry *entry;
    1548                 : >20624*10^7 :   unsigned bit, word;
    1549                 : >20624*10^7 :   unsigned long mask;
    1550                 :             : 
    1551                 :             :   /* Look up the page on which the object is alloced.  If the object
    1552                 :             :      wasn't allocated by the collector, we'll probably die.  */
    1553                 : >20624*10^7 :   entry = lookup_page_table_entry (p);
    1554                 : >20624*10^7 :   gcc_assert (entry);
    1555                 :             : 
    1556                 :             :   /* Calculate the index of the object on the page; this is its bit
    1557                 :             :      position in the in_use_p bitmap.  */
    1558                 : >20624*10^7 :   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
    1559                 : >20624*10^7 :   word = bit / HOST_BITS_PER_LONG;
    1560                 : >20624*10^7 :   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
    1561                 :             : 
    1562                 :             :   /* If the bit was previously set, skip it.  */
    1563                 : >20624*10^7 :   if (entry->in_use_p[word] & mask)
    1564                 :             :     return true;
    1565                 :             : 
    1566                 :             :   /* Otherwise set it, and decrement the free object count.  */
    1567                 : 58461324143 :   entry->in_use_p[word] |= mask;
    1568                 : 58461324143 :   entry->num_free_objects -= 1;
    1569                 :             : 
    1570                 : 58461324143 :   if (GGC_DEBUG_LEVEL >= 4)
    1571                 :             :     fprintf (G.debug_file, "Marking %p\n", p);
    1572                 :             : 
    1573                 : 58461324143 :   return false;
    1574                 :             : }
    1575                 :             : 
     1576                 :             : /* Return true if P has been marked, false otherwise.
    1577                 :             :    P must have been allocated by the GC allocator; it mustn't point to
    1578                 :             :    static objects, stack variables, or memory allocated with malloc.  */
    1579                 :             : 
    1580                 :             : bool
    1581                 :  1948161516 : ggc_marked_p (const void *p)
    1582                 :             : {
    1583                 :  1948161516 :   page_entry *entry;
    1584                 :  1948161516 :   unsigned bit, word;
    1585                 :  1948161516 :   unsigned long mask;
    1586                 :             : 
    1587                 :             :   /* Look up the page on which the object is alloced.  If the object
    1588                 :             :      wasn't allocated by the collector, we'll probably die.  */
    1589                 :  1948161516 :   entry = lookup_page_table_entry (p);
    1590                 :  1948161516 :   gcc_assert (entry);
    1591                 :             : 
    1592                 :             :   /* Calculate the index of the object on the page; this is its bit
    1593                 :             :      position in the in_use_p bitmap.  */
    1594                 :  1948161516 :   bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
    1595                 :  1948161516 :   word = bit / HOST_BITS_PER_LONG;
    1596                 :  1948161516 :   mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
    1597                 :             : 
    1598                 :  1948161516 :   return (entry->in_use_p[word] & mask) != 0;
    1599                 :             : }
    1600                 :             : 
    1601                 :             : /* Return the size of the gc-able object P.  */
    1602                 :             : 
    1603                 :             : size_t
    1604                 :   448767811 : ggc_get_size (const void *p)
    1605                 :             : {
    1606                 :   448767811 :   page_entry *pe = lookup_page_table_entry (p);
    1607                 :   448767811 :   return OBJECT_SIZE (pe->order);
    1608                 :             : }
    1609                 :             : 
    1610                 :             : /* Release the memory for object P.  */
    1611                 :             : 
    1612                 :             : void
    1613                 :  2097910343 : ggc_free (void *p)
    1614                 :             : {
    1615                 :  2097910343 :   if (in_gc)
    1616                 :             :     return;
    1617                 :             : 
    1618                 :  2078017853 :   page_entry *pe = lookup_page_table_entry (p);
    1619                 :  2078017853 :   size_t order = pe->order;
    1620                 :  2078017853 :   size_t size = OBJECT_SIZE (order);
    1621                 :             : 
    1622                 :  2078017853 :   if (GATHER_STATISTICS)
    1623                 :             :     ggc_free_overhead (p);
    1624                 :             : 
    1625                 :  2078017853 :   if (GGC_DEBUG_LEVEL >= 3)
    1626                 :             :     fprintf (G.debug_file,
    1627                 :             :              "Freeing object, actual size="
    1628                 :             :              HOST_SIZE_T_PRINT_UNSIGNED ", at %p on %p\n",
    1629                 :             :              (fmt_size_t) size, p, (void *) pe);
    1630                 :             : 
    1631                 :             : #ifdef ENABLE_GC_CHECKING
    1632                 :             :   /* Poison the data, to indicate the data is garbage.  */
    1633                 :  2078017853 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
    1634                 :  2078017853 :   memset (p, 0xa5, size);
    1635                 :             : #endif
    1636                 :             :   /* Let valgrind know the object is free.  */
    1637                 :  2078017853 :   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
    1638                 :             : 
    1639                 :             : #ifdef ENABLE_GC_ALWAYS_COLLECT
    1640                 :             :   /* In the completely-anal-checking mode, we do *not* immediately free
    1641                 :             :      the data, but instead verify that the data is *actually* not
    1642                 :             :      reachable the next time we collect.  */
    1643                 :             :   {
    1644                 :             :     struct free_object *fo = XNEW (struct free_object);
    1645                 :             :     fo->object = p;
    1646                 :             :     fo->next = G.free_object_list;
    1647                 :             :     G.free_object_list = fo;
    1648                 :             :   }
    1649                 :             : #else
    1650                 :  2078017853 :   {
    1651                 :  2078017853 :     unsigned int bit_offset, word, bit;
    1652                 :             : 
    1653                 :  2078017853 :     G.allocated -= size;
    1654                 :             : 
    1655                 :             :     /* Mark the object not-in-use.  */
    1656                 :  2078017853 :     bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
    1657                 :  2078017853 :     word = bit_offset / HOST_BITS_PER_LONG;
    1658                 :  2078017853 :     bit = bit_offset % HOST_BITS_PER_LONG;
    1659                 :  2078017853 :     pe->in_use_p[word] &= ~(1UL << bit);
    1660                 :             : 
    1661                 :  2078017853 :     if (pe->num_free_objects++ == 0)
    1662                 :             :       {
    1663                 :   222123307 :         page_entry *p, *q;
    1664                 :             : 
    1665                 :             :         /* If the page is completely full, then it's supposed to
    1666                 :             :            be after all pages that aren't.  Since we've freed one
    1667                 :             :            object from a page that was full, we need to move the
    1668                 :             :            page to the head of the list.
    1669                 :             : 
    1670                 :             :            PE is the node we want to move.  Q is the previous node
    1671                 :             :            and P is the next node in the list.  */
    1672                 :   222123307 :         q = pe->prev;
    1673                 :   222123307 :         if (q && q->num_free_objects == 0)
    1674                 :             :           {
    1675                 :   158558240 :             p = pe->next;
    1676                 :             : 
    1677                 :   158558240 :             q->next = p;
    1678                 :             : 
    1679                 :             :             /* If PE was at the end of the list, then Q becomes the
    1680                 :             :                new end of the list.  If PE was not the end of the
    1681                 :             :                list, then we need to update the PREV field for P.  */
    1682                 :   158558240 :             if (!p)
    1683                 :   104940938 :               G.page_tails[order] = q;
    1684                 :             :             else
    1685                 :    53617302 :               p->prev = q;
    1686                 :             : 
    1687                 :             :             /* Move PE to the head of the list.  */
    1688                 :   158558240 :             pe->next = G.pages[order];
    1689                 :   158558240 :             pe->prev = NULL;
    1690                 :   158558240 :             G.pages[order]->prev = pe;
    1691                 :   158558240 :             G.pages[order] = pe;
    1692                 :             :           }
    1693                 :             : 
    1694                 :             :         /* Reset the hint bit to point to the only free object.  */
    1695                 :   222123307 :         pe->next_bit_hint = bit_offset;
    1696                 :             :       }
    1697                 :             :   }
    1698                 :             : #endif
    1699                 :             : }
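
ggc_free keeps each order's page list partially sorted: pages with free objects come before completely full ones, so freeing the first object on a full page moves that page back to the head. A self-contained sketch of that move-to-head operation on a doubly linked list with separate head and tail pointers (struct node and move_to_head are illustrative names, not GCC code):

    struct node { struct node *prev, *next; };

    /* Move N to the front of the list with head *HEAD and tail *TAIL,
       mirroring what ggc_free does with G.pages[order] and
       G.page_tails[order].  */
    static void
    move_to_head (struct node **head, struct node **tail, struct node *n)
    {
      if (*head == n)
        return;

      /* Unlink N; it is not the head, so N->prev is non-null.  */
      n->prev->next = n->next;
      if (n->next)
        n->next->prev = n->prev;
      else
        *tail = n->prev;   /* N was the tail; its predecessor becomes the tail.  */

      /* Relink N at the head.  */
      n->next = *head;
      n->prev = NULL;
      (*head)->prev = n;
      *head = n;
    }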
    1700                 :             : 
    1701                 :             : /* Subroutine of init_ggc which computes the pair of numbers used to
    1702                 :             :    perform division by OBJECT_SIZE (order) and fills in inverse_table[].
    1703                 :             : 
    1704                 :             :    This algorithm is taken from Granlund and Montgomery's paper
    1705                 :             :    "Division by Invariant Integers using Multiplication"
    1706                 :             :    (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
    1707                 :             :    constants).  */
    1708                 :             : 
    1709                 :             : static void
    1710                 :    23591064 : compute_inverse (unsigned order)
    1711                 :             : {
    1712                 :    23591064 :   size_t size, inv;
    1713                 :    23591064 :   unsigned int e;
    1714                 :             : 
    1715                 :    23591064 :   size = OBJECT_SIZE (order);
    1716                 :    23591064 :   e = 0;
    1717                 :   611401742 :   while (size % 2 == 0)
    1718                 :             :     {
    1719                 :   587810678 :       e++;
    1720                 :   587810678 :       size >>= 1;
    1721                 :             :     }
    1722                 :             : 
    1723                 :             :   inv = size;
    1724                 :    48305512 :   while (inv * size != 1)
    1725                 :    24714448 :     inv = inv * (2 - inv*size);
    1726                 :             : 
    1727                 :    23591064 :   DIV_MULT (order) = inv;
    1728                 :    23591064 :   DIV_SHIFT (order) = e;
    1729                 :    23591064 : }
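
To make the inverse trick concrete, here is a self-contained sketch of exact division by a constant (exact_div is an illustrative helper, not GCC code; it assumes the division has no remainder, which always holds for an object's byte offset within its page). Strip the power-of-two factor, then multiply by the inverse of the odd part modulo 2^N, where N is the bit width of size_t; this is presumably how OFFSET_TO_BIT applies DIV_SHIFT and DIV_MULT:

    static size_t
    exact_div (size_t offset, size_t size)
    {
      /* Split SIZE into 2^E times an odd number, as compute_inverse does.  */
      unsigned e = 0;
      while (size % 2 == 0)
        {
          e++;
          size >>= 1;
        }

      /* Newton iteration: INV = SIZE is correct to 3 bits for any odd SIZE
         (odd squares are 1 mod 8), and each step doubles the number of
         correct low-order bits.  */
      size_t inv = size;
      while (inv * size != 1)
        inv = inv * (2 - inv * size);

      /* OFFSET >> E is still an exact multiple of the odd part; multiplying
         by its inverse modulo 2^N recovers the quotient.  */
      return (offset >> e) * inv;
    }

For example, exact_div (96, 32) computes e = 5, inv = 1, and returns 96 >> 5 = 3.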
    1730                 :             : 
    1731                 :             : /* Initialize the ggc-mmap allocator.  */
    1732                 :             : void
    1733                 :      281914 : init_ggc (void)
    1734                 :             : {
    1735                 :      281914 :   static bool init_p = false;
    1736                 :      281914 :   unsigned order;
    1737                 :             : 
    1738                 :      281914 :   if (init_p)
    1739                 :             :     return;
    1740                 :      280846 :   init_p = true;
    1741                 :             : 
    1742                 :      280846 :   G.pagesize = getpagesize ();
    1743                 :      280846 :   G.lg_pagesize = exact_log2 (G.pagesize);
    1744                 :             : 
    1745                 :             : #ifdef HAVE_MMAP_DEV_ZERO
    1746                 :             :   G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
    1747                 :             :   if (G.dev_zero_fd == -1)
    1748                 :             :     internal_error ("open /dev/zero: %m");
    1749                 :             : #endif
    1750                 :             : 
    1751                 :             : #if 0
    1752                 :             :   G.debug_file = fopen ("ggc-mmap.debug", "w");
    1753                 :             : #else
    1754                 :      280846 :   G.debug_file = stdout;
    1755                 :             : #endif
    1756                 :             : 
    1757                 :             : #ifdef USING_MMAP
    1758                 :             :   /* StunOS has an amazing off-by-one error for the first mmap allocation
    1759                 :             :      after fiddling with RLIMIT_STACK.  The result, as hard as it is to
    1760                 :             :      believe, is an unaligned page allocation, which would cause us to
    1761                 :             :      hork badly if we tried to use it.  */
    1762                 :      280846 :   {
    1763                 :      280846 :     char *p = alloc_anon (NULL, G.pagesize, true);
    1764                 :      280846 :     struct page_entry *e;
    1765                 :      280846 :     if ((uintptr_t)p & (G.pagesize - 1))
    1766                 :             :       {
    1767                 :             :         /* How losing.  Discard this one and try another.  If we still
    1768                 :             :            can't get something useful, give up.  */
    1769                 :             : 
    1770                 :           0 :         p = alloc_anon (NULL, G.pagesize, true);
    1771                 :           0 :         gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
    1772                 :             :       }
    1773                 :             : 
    1774                 :             :     /* We have a good page, might as well hold onto it...  */
    1775                 :      280846 :     e = XCNEW (struct page_entry);
    1776                 :      280846 :     e->bytes = G.pagesize;
    1777                 :      280846 :     e->page = p;
    1778                 :      280846 :     e->next = G.free_pages;
    1779                 :      280846 :     G.free_pages = e;
    1780                 :             :   }
    1781                 :             : #endif
    1782                 :             : 
    1783                 :             :   /* Initialize the object size table.  */
    1784                 :    18254990 :   for (order = 0; order < HOST_BITS_PER_PTR; ++order)
    1785                 :    17974144 :     object_size_table[order] = (size_t) 1 << order;
    1786                 :     5897766 :   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    1787                 :             :     {
    1788                 :     5616920 :       size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
    1789                 :             : 
    1790                 :             :       /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
    1791                 :             :          so that we're sure of getting aligned memory.  */
    1792                 :     5616920 :       s = ROUND_UP (s, MAX_ALIGNMENT);
    1793                 :     5616920 :       object_size_table[order] = s;
    1794                 :             :     }
    1795                 :             : 
    1796                 :             :   /* Initialize the objects-per-page and inverse tables.  */
    1797                 :    23871910 :   for (order = 0; order < NUM_ORDERS; ++order)
    1798                 :             :     {
    1799                 :    23591064 :       objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
    1800                 :    23591064 :       if (objects_per_page_table[order] == 0)
    1801                 :    14323146 :         objects_per_page_table[order] = 1;
    1802                 :    23591064 :       compute_inverse (order);
    1803                 :             :     }
    1804                 :             : 
    1805                 :             :   /* Reset the size_lookup array to put appropriately sized objects in
    1806                 :             :      the special orders.  All objects bigger than the previous power
    1807                 :             :      of two, but no greater than the special size, should go in the
    1808                 :             :      new order.  */
    1809                 :     5897766 :   for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
    1810                 :             :     {
    1811                 :     5616920 :       int o;
    1812                 :     5616920 :       int i;
    1813                 :             : 
    1814                 :     5616920 :       i = OBJECT_SIZE (order);
    1815                 :     5616920 :       if (i >= NUM_SIZE_LOOKUP)
    1816                 :           0 :         continue;
    1817                 :             : 
    1818                 :   104474712 :       for (o = size_lookup[i]; o == size_lookup [i]; --i)
    1819                 :    98857792 :         size_lookup[i] = order;
    1820                 :             :     }
    1821                 :             : 
    1822                 :      280846 :   G.depth_in_use = 0;
    1823                 :      280846 :   G.depth_max = 10;
    1824                 :      280846 :   G.depth = XNEWVEC (unsigned int, G.depth_max);
    1825                 :             : 
    1826                 :      280846 :   G.by_depth_in_use = 0;
    1827                 :      280846 :   G.by_depth_max = INITIAL_PTE_COUNT;
    1828                 :      280846 :   G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
    1829                 :      280846 :   G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
    1830                 :             : 
    1831                 :             :   /* Allocate space for the depth 0 finalizers.  */
    1832                 :      280846 :   G.finalizers.safe_push (vNULL);
    1833                 :      280846 :   G.vec_finalizers.safe_push (vNULL);
    1834                 :      280846 :   gcc_assert (G.finalizers.length() == 1);
    1835                 :             : }
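
A worked example of the size_lookup reset loop above, using a hypothetical special size (the real values live in extra_order_size_table, defined elsewhere in this file): suppose one extra order ends up with OBJECT_SIZE 88 after rounding. Entries size_lookup[65] through size_lookup[128] presumably start out mapping to the 128-byte power-of-two order, so the loop begins at i = 88 and walks downward while the entry still equals that order, rewriting size_lookup[65..88] to the new order; it stops at i = 64, which already maps to the 64-byte order. Allocation requests of 65-88 bytes then use the tighter 88-byte objects instead of 128-byte ones.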
    1836                 :             : 
    1837                 :             : /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
    1838                 :             :    reflects reality.  Recalculate NUM_FREE_OBJECTS as well.  */
    1839                 :             : 
    1840                 :             : static void
    1841                 :           0 : ggc_recalculate_in_use_p (page_entry *p)
    1842                 :             : {
    1843                 :           0 :   unsigned int i;
    1844                 :           0 :   size_t num_objects;
    1845                 :             : 
    1846                 :             :   /* Because the past-the-end bit in in_use_p is always set, we
    1847                 :             :      pretend there is one additional object.  */
    1848                 :           0 :   num_objects = OBJECTS_IN_PAGE (p) + 1;
    1849                 :             : 
    1850                 :             :   /* Reset the free object count.  */
    1851                 :           0 :   p->num_free_objects = num_objects;
    1852                 :             : 
    1853                 :             :   /* Combine the IN_USE_P and SAVE_IN_USE_P arrays.  */
    1854                 :           0 :   for (i = 0;
    1855                 :           0 :        i < CEIL (BITMAP_SIZE (num_objects),
    1856                 :             :                  sizeof (*p->in_use_p));
    1857                 :             :        ++i)
    1858                 :             :     {
    1859                 :           0 :       unsigned long j;
    1860                 :             : 
    1861                 :             :       /* Something is in use if it is marked, or if it was in use in a
    1862                 :             :          context further down the context stack.  */
    1863                 :           0 :       p->in_use_p[i] |= save_in_use_p (p)[i];
    1864                 :             : 
    1865                 :             :       /* Decrement the free object count for every object allocated.  */
    1866                 :           0 :       for (j = p->in_use_p[i]; j; j >>= 1)
    1867                 :           0 :         p->num_free_objects -= (j & 1);
    1868                 :             :     }
    1869                 :             : 
    1870                 :           0 :   gcc_assert (p->num_free_objects < num_objects);
    1871                 :           0 : }
    1872                 :             : 
    1873                 :             : /* Unmark all objects.  */
    1874                 :             : 
    1875                 :             : static void
    1876                 :      671812 : clear_marks (void)
    1877                 :             : {
    1878                 :      671812 :   unsigned order;
    1879                 :             : 
    1880                 :    55760396 :   for (order = 2; order < NUM_ORDERS; order++)
    1881                 :             :     {
    1882                 :    55088584 :       page_entry *p;
    1883                 :             : 
    1884                 :  1405442108 :       for (p = G.pages[order]; p != NULL; p = p->next)
    1885                 :             :         {
    1886                 :  1350353524 :           size_t num_objects = OBJECTS_IN_PAGE (p);
    1887                 :  1350353524 :           size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
    1888                 :             : 
    1889                 :             :           /* The data should be page-aligned.  */
    1890                 :  1350353524 :           gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
    1891                 :             : 
    1892                 :             :           /* Pages that aren't in the topmost context are not collected;
    1893                 :             :              nevertheless, we need their in-use bit vectors to store GC
    1894                 :             :              marks.  So, back them up first.  */
    1895                 :  1350353524 :           if (p->context_depth < G.context_depth)
    1896                 :             :             {
    1897                 :           0 :               if (! save_in_use_p (p))
    1898                 :           0 :                 save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
    1899                 :           0 :               memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
    1900                 :             :             }
    1901                 :             : 
    1902                 :             :           /* Reset the number of free objects and clear the
    1903                 :             :              in-use bits.  These will be adjusted by mark_obj.  */
    1904                 :  1350353524 :           p->num_free_objects = num_objects;
    1905                 :  1350353524 :           memset (p->in_use_p, 0, bitmap_size);
    1906                 :             : 
    1907                 :             :           /* Make sure the one-past-the-end bit is always set.  */
    1908                 :  1350353524 :           p->in_use_p[num_objects / HOST_BITS_PER_LONG]
    1909                 :  1350353524 :             = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
    1910                 :             :         }
    1911                 :             :     }
    1912                 :      671812 : }
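
A small worked example of the one-past-the-end sentinel, assuming HOST_BITS_PER_LONG is 64: a page holding 100 objects gets an in_use_p bitmap sized for 101 bits (two 64-bit words), and clear_marks sets bit 100 % 64 = 36 of word 100 / 64 = 1, so the position just past the last real object always reads as "in use". ggc_recalculate_in_use_p relies on the same convention when it pretends there is one additional object.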
    1913                 :             : 
    1914                 :             : /* Check if any blocks with a registered finalizer have become unmarked. If so
    1915                 :             :    run the finalizer and unregister it because the block is about to be freed.
    1916                 :             :    Note that no guarantee is made about what order finalizers will run in, so
    1917                 :             :    touching other objects in gc memory is extremely unwise.  */
    1918                 :             : 
    1919                 :             : static void
    1920                 :      671477 : ggc_handle_finalizers ()
    1921                 :             : {
    1922                 :      671477 :   unsigned dlen = G.finalizers.length();
    1923                 :     1342954 :   for (unsigned d = G.context_depth; d < dlen; ++d)
    1924                 :             :     {
    1925                 :      671477 :       vec<finalizer> &v = G.finalizers[d];
    1926                 :      671477 :       unsigned length = v.length ();
    1927                 :   123942832 :       for (unsigned int i = 0; i < length;)
    1928                 :             :         {
    1929                 :   123271355 :           finalizer &f = v[i];
    1930                 :   123271355 :           if (!ggc_marked_p (f.addr ()))
    1931                 :             :             {
    1932                 :    19976769 :               f.call ();
    1933                 :    19976769 :               v.unordered_remove (i);
    1934                 :    19976769 :               length--;
    1935                 :             :             }
    1936                 :             :           else
    1937                 :   103294586 :             i++;
    1938                 :             :         }
    1939                 :             :     }
    1940                 :             : 
    1941                 :     1342954 :   gcc_assert (dlen == G.vec_finalizers.length());
    1942                 :     1342954 :   for (unsigned d = G.context_depth; d < dlen; ++d)
    1943                 :             :     {
    1944                 :      671477 :       vec<vec_finalizer> &vv = G.vec_finalizers[d];
    1945                 :      671477 :       unsigned length = vv.length ();
    1946                 :      671477 :       for (unsigned int i = 0; i < length;)
    1947                 :             :         {
    1948                 :           0 :           vec_finalizer &f = vv[i];
    1949                 :           0 :           if (!ggc_marked_p (f.addr ()))
    1950                 :             :             {
    1951                 :           0 :               f.call ();
    1952                 :           0 :               vv.unordered_remove (i);
    1953                 :           0 :               length--;
    1954                 :             :             }
    1955                 :             :           else
    1956                 :           0 :             i++;
    1957                 :             :         }
    1958                 :             :     }
    1959                 :      671477 : }
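
The loops above use the usual swap-with-last removal idiom: when v.unordered_remove (i) pulls the final element into slot i, the index is deliberately not advanced, so the swapped-in element is examined on the next pass. A minimal sketch of the same pattern on a plain array (filter_dead and dead_p are hypothetical names, not part of GCC's vec API):

    /* Remove every element for which DEAD_P returns true, in no particular
       order, and return the new length.  */
    static unsigned
    filter_dead (int *a, unsigned length, bool (*dead_p) (int))
    {
      for (unsigned i = 0; i < length;)
        {
          if (dead_p (a[i]))
            a[i] = a[--length];   /* overwrite slot I with the last element */
          else
            i++;
        }
      return length;
    }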
    1960                 :             : 
    1961                 :             : /* Free all empty pages.  Partially empty pages need no attention
    1962                 :             :    because the `mark' bit doubles as an `unused' bit.  */
    1963                 :             : 
    1964                 :             : static void
    1965                 :      687411 : sweep_pages (void)
    1966                 :             : {
    1967                 :      687411 :   unsigned order;
    1968                 :             : 
    1969                 :    57055113 :   for (order = 2; order < NUM_ORDERS; order++)
    1970                 :             :     {
    1971                 :             :       /* The last page-entry to consider, regardless of entries
    1972                 :             :          placed at the end of the list.  */
    1973                 :    56367702 :       page_entry * const last = G.page_tails[order];
    1974                 :             : 
    1975                 :    56367702 :       size_t num_objects;
    1976                 :    56367702 :       size_t live_objects;
    1977                 :    56367702 :       page_entry *p, *previous;
    1978                 :    56367702 :       int done;
    1979                 :             : 
    1980                 :    56367702 :       p = G.pages[order];
    1981                 :    56367702 :       if (p == NULL)
    1982                 :    35616626 :         continue;
    1983                 :             : 
    1984                 :             :       previous = NULL;
    1985                 :  1360573506 :       do
    1986                 :             :         {
    1987                 :  1360573506 :           page_entry *next = p->next;
    1988                 :             : 
    1989                 :             :           /* Loop until all entries have been examined.  */
    1990                 :  1360573506 :           done = (p == last);
    1991                 :             : 
    1992                 :  1360573506 :           num_objects = OBJECTS_IN_PAGE (p);
    1993                 :             : 
    1994                 :             :           /* Add all live objects on this page to the count of
    1995                 :             :              allocated memory.  */
    1996                 :  1360573506 :           live_objects = num_objects - p->num_free_objects;
    1997                 :             : 
    1998                 :  1360573506 :           G.allocated += OBJECT_SIZE (order) * live_objects;
    1999                 :             : 
    2000                 :             :           /* Only objects on pages in the topmost context should get
    2001                 :             :              collected.  */
    2002                 :  1360573506 :           if (p->context_depth < G.context_depth)
    2003                 :             :             ;
    2004                 :             : 
    2005                 :             :           /* Remove the page if it's empty.  */
    2006                 :  1360573506 :           else if (live_objects == 0)
    2007                 :             :             {
    2008                 :             :               /* If P was the first page in the list, then NEXT
    2009                 :             :                  becomes the new first page in the list, otherwise
    2010                 :             :                  splice P out of the forward pointers.  */
    2011                 :    20354987 :               if (! previous)
    2012                 :     2948892 :                 G.pages[order] = next;
    2013                 :             :               else
    2014                 :    17406095 :                 previous->next = next;
    2015                 :             : 
    2016                 :             :               /* Splice P out of the back pointers too.  */
    2017                 :    20354987 :               if (next)
    2018                 :    20180534 :                 next->prev = previous;
    2019                 :             : 
    2020                 :             :               /* Are we removing the last element?  */
    2021                 :    20354987 :               if (p == G.page_tails[order])
    2022                 :      174453 :                 G.page_tails[order] = previous;
    2023                 :    20354987 :               free_page (p);
    2024                 :    20354987 :               p = previous;
    2025                 :             :             }
    2026                 :             : 
    2027                 :             :           /* If the page is full, move it to the end.  */
    2028                 :  1340218519 :           else if (p->num_free_objects == 0)
    2029                 :             :             {
    2030                 :             :               /* Don't move it if it's already at the end.  */
    2031                 :   875996446 :               if (p != G.page_tails[order])
    2032                 :             :                 {
    2033                 :             :                   /* Move p to the end of the list.  */
    2034                 :   873582864 :                   p->next = NULL;
    2035                 :   873582864 :                   p->prev = G.page_tails[order];
    2036                 :   873582864 :                   G.page_tails[order]->next = p;
    2037                 :             : 
    2038                 :             :                   /* Update the tail pointer...  */
    2039                 :   873582864 :                   G.page_tails[order] = p;
    2040                 :             : 
    2041                 :             :                   /* ... and the head pointer, if necessary.  */
    2042                 :   873582864 :                   if (! previous)
    2043                 :    30159299 :                     G.pages[order] = next;
    2044                 :             :                   else
    2045                 :   843423565 :                     previous->next = next;
    2046                 :             : 
    2047                 :             :                   /* And update the backpointer in NEXT if necessary.  */
    2048                 :   873582864 :                   if (next)
    2049                 :   873582864 :                     next->prev = previous;
    2050                 :             : 
    2051                 :             :                   p = previous;
    2052                 :             :                 }
    2053                 :             :             }
    2054                 :             : 
    2055                 :             :           /* If we've fallen through to here, it's a page in the
    2056                 :             :              topmost context that is neither full nor empty.  Such a
    2057                 :             :              page must precede pages at lesser context depth in the
    2058                 :             :              list, so move it to the head.  */
    2059                 :   464222073 :           else if (p != G.pages[order])
    2060                 :             :             {
    2061                 :   446858084 :               previous->next = p->next;
    2062                 :             : 
    2063                 :             :               /* Update the backchain in the next node if it exists.  */
    2064                 :   446858084 :               if (p->next)
    2065                 :   442600157 :                 p->next->prev = previous;
    2066                 :             : 
    2067                 :             :               /* Move P to the head of the list.  */
    2068                 :   446858084 :               p->next = G.pages[order];
    2069                 :   446858084 :               p->prev = NULL;
    2070                 :   446858084 :               G.pages[order]->prev = p;
    2071                 :             : 
    2072                 :             :               /* Update the head pointer.  */
    2073                 :   446858084 :               G.pages[order] = p;
    2074                 :             : 
    2075                 :             :               /* Are we moving the last element?  */
    2076                 :   446858084 :               if (G.page_tails[order] == p)
    2077                 :     4257927 :                 G.page_tails[order] = previous;
    2078                 :             :               p = previous;
    2079                 :             :             }
    2080                 :             : 
    2081                 :  1360573506 :           previous = p;
    2082                 :  1360573506 :           p = next;
    2083                 :             :         }
    2084                 :  1360573506 :       while (! done);
    2085                 :             : 
    2086                 :             :       /* Now, restore the in_use_p vectors for any pages from contexts
    2087                 :             :          other than the current one.  */
    2088                 :  1360969595 :       for (p = G.pages[order]; p; p = p->next)
    2089                 :  1340218519 :         if (p->context_depth != G.context_depth)
    2090                 :           0 :           ggc_recalculate_in_use_p (p);
    2091                 :             :     }
    2092                 :      687411 : }
    2093                 :             : 
    2094                 :             : #ifdef ENABLE_GC_CHECKING
    2095                 :             : /* Clobber all free objects.  */
    2096                 :             : 
    2097                 :             : static void
    2098                 :      671812 : poison_pages (void)
    2099                 :             : {
    2100                 :      671812 :   unsigned order;
    2101                 :             : 
    2102                 :    55760396 :   for (order = 2; order < NUM_ORDERS; order++)
    2103                 :             :     {
    2104                 :    55088584 :       size_t size = OBJECT_SIZE (order);
    2105                 :    55088584 :       page_entry *p;
    2106                 :             : 
    2107                 :  1405442108 :       for (p = G.pages[order]; p != NULL; p = p->next)
    2108                 :             :         {
    2109                 :  1350353524 :           size_t num_objects;
    2110                 :  1350353524 :           size_t i;
    2111                 :             : 
    2112                 :  1350353524 :           if (p->context_depth != G.context_depth)
    2113                 :             :             /* Since we don't do any collection for pages in pushed
    2114                 :             :                contexts, there's no need to do any poisoning.  And
    2115                 :             :                besides, the IN_USE_P array isn't valid until we pop
    2116                 :             :                contexts.  */
    2117                 :           0 :             continue;
    2118                 :             : 
    2119                 :  1350353524 :           num_objects = OBJECTS_IN_PAGE (p);
    2120                 : 75858345025 :           for (i = 0; i < num_objects; i++)
    2121                 :             :             {
    2122                 : 74507991501 :               size_t word, bit;
    2123                 : 74507991501 :               word = i / HOST_BITS_PER_LONG;
    2124                 : 74507991501 :               bit = i % HOST_BITS_PER_LONG;
    2125                 : 74507991501 :               if (((p->in_use_p[word] >> bit) & 1) == 0)
    2126                 :             :                 {
    2127                 : 13087856628 :                   char *object = p->page + i * size;
    2128                 :             : 
    2129                 :             :                   /* Keep poison-by-write when we expect to use Valgrind,
    2130                 :             :                      so the exact same memory semantics is kept, in case
    2131                 :             :                      there are memory errors.  We override this request
    2132                 :             :                      below.  */
    2133                 :             :                   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
    2134                 : 13087856628 :                                                                  size));
    2135                 : 13087856628 :                   memset (object, 0xa5, size);
    2136                 :             : 
    2137                 :             :                   /* Drop the handle to avoid handle leak.  */
    2138                 : 13087856628 :                   VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
    2139                 :             :                 }
    2140                 :             :             }
    2141                 :             :         }
    2142                 :             :     }
    2143                 :      671812 : }
    2144                 :             : #else
    2145                 :             : #define poison_pages()
    2146                 :             : #endif
    2147                 :             : 
    2148                 :             : #ifdef ENABLE_GC_ALWAYS_COLLECT
    2149                 :             : /* Validate that the reportedly free objects actually are.  */
    2150                 :             : 
    2151                 :             : static void
    2152                 :             : validate_free_objects (void)
    2153                 :             : {
    2154                 :             :   struct free_object *f, *next, *still_free = NULL;
    2155                 :             : 
    2156                 :             :   for (f = G.free_object_list; f ; f = next)
    2157                 :             :     {
    2158                 :             :       page_entry *pe = lookup_page_table_entry (f->object);
    2159                 :             :       size_t bit, word;
    2160                 :             : 
    2161                 :             :       bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
    2162                 :             :       word = bit / HOST_BITS_PER_LONG;
    2163                 :             :       bit = bit % HOST_BITS_PER_LONG;
    2164                 :             :       next = f->next;
    2165                 :             : 
    2166                 :             :       /* Make certain it isn't visible from any root.  Notice that we
    2167                 :             :          do this check before sweep_pages merges save_in_use_p.  */
    2168                 :             :       gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
    2169                 :             : 
    2170                 :             :       /* If the object comes from an outer context, then retain the
    2171                 :             :          free_object entry, so that we can verify that the address
    2172                 :             :          isn't live on the stack in some outer context.  */
    2173                 :             :       if (pe->context_depth != G.context_depth)
    2174                 :             :         {
    2175                 :             :           f->next = still_free;
    2176                 :             :           still_free = f;
    2177                 :             :         }
    2178                 :             :       else
    2179                 :             :         free (f);
    2180                 :             :     }
    2181                 :             : 
    2182                 :             :   G.free_object_list = still_free;
    2183                 :             : }
    2184                 :             : #else
    2185                 :             : #define validate_free_objects()
    2186                 :             : #endif
    2187                 :             : 
    2188                 :             : /* Top level mark-and-sweep routine.  */
    2189                 :             : 
    2190                 :             : void
    2191                 :   490554788 : ggc_collect (enum ggc_collect mode)
    2192                 :             : {
    2193                 :             :   /* Avoid frequent unnecessary work by skipping collection if the
    2194                 :             :      total allocations haven't expanded much since the last
    2195                 :             :      collection.  */
    2196                 :   490554788 :   float allocated_last_gc =
    2197                 :   490554788 :     MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
    2198                 :             : 
    2199                 :             :   /* It is also good time to get memory block pool into limits.  */
    2200                 :   490554788 :   memory_block_pool::trim ();
    2201                 :             : 
    2202                 :   490554788 :   float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
    2203                 :   490554788 :   if (mode == GGC_COLLECT_HEURISTIC
    2204                 :   490554744 :       && G.allocated < allocated_last_gc + min_expand)
    2205                 :             :     return;
    2206                 :             : 
    2207                 :      671477 :   timevar_push (TV_GC);
    2208                 :      671477 :   if (GGC_DEBUG_LEVEL >= 2)
    2209                 :             :     fprintf (G.debug_file, "BEGIN COLLECTING\n");
    2210                 :             : 
    2211                 :             :   /* Zero the total allocated bytes.  This will be recalculated in the
    2212                 :             :      sweep phase.  */
    2213                 :      671477 :   size_t allocated = G.allocated;
    2214                 :      671477 :   G.allocated = 0;
    2215                 :             : 
    2216                 :             :   /* Release the pages we freed the last time we collected, but didn't
    2217                 :             :      reuse in the interim.  */
    2218                 :      671477 :   release_pages ();
    2219                 :             : 
    2220                 :             :   /* Output this later so we do not interfere with release_pages.  */
    2221                 :      671477 :   if (!quiet_flag)
    2222                 :           0 :     fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
    2223                 :             : 
    2224                 :             :   /* Indicate that we've seen collections at this context depth.  */
    2225                 :      671477 :   G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
    2226                 :             : 
    2227                 :      671477 :   invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
    2228                 :             : 
    2229                 :      671477 :   in_gc = true;
    2230                 :      671477 :   clear_marks ();
    2231                 :      671477 :   ggc_mark_roots ();
    2232                 :      671477 :   ggc_handle_finalizers ();
    2233                 :             : 
    2234                 :      671477 :   if (GATHER_STATISTICS)
    2235                 :             :     ggc_prune_overhead_list ();
    2236                 :             : 
    2237                 :      671477 :   poison_pages ();
    2238                 :      671477 :   validate_free_objects ();
    2239                 :      671477 :   sweep_pages ();
    2240                 :             : 
    2241                 :      671477 :   in_gc = false;
    2242                 :      671477 :   G.allocated_last_gc = G.allocated;
    2243                 :             : 
    2244                 :      671477 :   invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
    2245                 :             : 
    2246                 :      671477 :   timevar_pop (TV_GC);
    2247                 :             : 
    2248                 :      671477 :   if (!quiet_flag)
    2249                 :           0 :     fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
    2250                 :             :   if (GGC_DEBUG_LEVEL >= 2)
    2251                 :             :     fprintf (G.debug_file, "END COLLECTING\n");
    2252                 :             : }
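
A worked example of the skip heuristic at the top of ggc_collect, with purely hypothetical parameter values (ONE_K is presumably 1024, i.e. the heap-size parameter is in kilobytes): if the previous collection left 8 MB live, --param ggc-min-heapsize is 4096 and --param ggc-min-expand is 30, then allocated_last_gc = MAX (8 MB, 4 MB) = 8 MB and min_expand = 8 MB * 30 / 100 = 2.4 MB, so a GGC_COLLECT_HEURISTIC call returns immediately until G.allocated exceeds roughly 10.4 MB.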
    2253                 :             : 
    2254                 :             : /* Return free pages to the system.  */
    2255                 :             : 
    2256                 :             : void
    2257                 :       15934 : ggc_trim ()
    2258                 :             : {
    2259                 :       15934 :   timevar_push (TV_GC);
    2260                 :       15934 :   G.allocated = 0;
    2261                 :       15934 :   sweep_pages ();
    2262                 :       15934 :   release_pages ();
    2263                 :       15934 :   if (!quiet_flag)
    2264                 :           0 :     fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
    2265                 :           0 :              SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
    2266                 :       15934 :   timevar_pop (TV_GC);
    2267                 :       15934 : }
    2268                 :             : 
    2269                 :             : /* Assume that all GGC memory is reachable and grow the limits for next
    2270                 :             :    collection.  With checking, trigger GGC so -Q compilation outputs how much
    2271                 :             :    memory really is reachable.  */
    2272                 :             : 
    2273                 :             : void
    2274                 :      264848 : ggc_grow (void)
    2275                 :             : {
    2276                 :      264848 :   if (!flag_checking)
    2277                 :           0 :     G.allocated_last_gc = MAX (G.allocated_last_gc,
    2278                 :             :                                G.allocated);
    2279                 :             :   else
    2280                 :      264848 :     ggc_collect ();
    2281                 :      264848 :   if (!quiet_flag)
    2282                 :           0 :     fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
    2283                 :      264848 : }
    2284                 :             : 
    2285                 :             : void
    2286                 :           0 : ggc_print_statistics (void)
    2287                 :             : {
    2288                 :           0 :   struct ggc_statistics stats;
    2289                 :           0 :   unsigned int i;
    2290                 :           0 :   size_t total_overhead = 0;
    2291                 :             : 
    2292                 :             :   /* Clear the statistics.  */
    2293                 :           0 :   memset (&stats, 0, sizeof (stats));
    2294                 :             : 
    2295                 :             :   /* Make sure collection will really occur.  */
    2296                 :           0 :   G.allocated_last_gc = 0;
    2297                 :             : 
    2298                 :             :   /* Collect and print the statistics common across collectors.  */
    2299                 :           0 :   ggc_print_common_statistics (stderr, &stats);
    2300                 :             : 
    2301                 :             :   /* Release free pages so that we will not count the bytes allocated
    2302                 :             :      there as part of the total allocated memory.  */
    2303                 :           0 :   release_pages ();
    2304                 :             : 
    2305                 :             :   /* Collect some information about the various sizes of
    2306                 :             :      allocation.  */
    2307                 :           0 :   fprintf (stderr,
    2308                 :             :            "Memory still allocated at the end of the compilation process\n");
    2309                 :           0 :   fprintf (stderr, "%-8s %10s  %10s  %10s\n",
    2310                 :             :            "Size", "Allocated", "Used", "Overhead");
    2311                 :           0 :   for (i = 0; i < NUM_ORDERS; ++i)
    2312                 :             :     {
    2313                 :           0 :       page_entry *p;
    2314                 :           0 :       size_t allocated;
    2315                 :           0 :       size_t in_use;
    2316                 :           0 :       size_t overhead;
    2317                 :             : 
    2318                 :             :       /* Skip empty entries.  */
    2319                 :           0 :       if (!G.pages[i])
    2320                 :           0 :         continue;
    2321                 :             : 
    2322                 :             :       overhead = allocated = in_use = 0;
    2323                 :             : 
    2324                 :             :       /* Figure out the total number of bytes allocated for objects of
    2325                 :             :          this size, and how many of them are actually in use.  Also figure
    2326                 :             :          out how much memory the page table is using.  */
    2327                 :           0 :       for (p = G.pages[i]; p; p = p->next)
    2328                 :             :         {
    2329                 :           0 :           allocated += p->bytes;
    2330                 :           0 :           in_use +=
    2331                 :           0 :             (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
    2332                 :             : 
    2333                 :           0 :           overhead += (sizeof (page_entry) - sizeof (long)
    2334                 :           0 :                        + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
    2335                 :             :         }
    2336                 :           0 :       fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
    2337                 :             :                PRsa (10) "\n",
    2338                 :           0 :                (uint64_t)OBJECT_SIZE (i),
    2339                 :           0 :                SIZE_AMOUNT (allocated),
    2340                 :           0 :                SIZE_AMOUNT (in_use),
    2341                 :           0 :                SIZE_AMOUNT (overhead));
    2342                 :           0 :       total_overhead += overhead;
    2343                 :             :     }
    2344                 :           0 :   fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
    2345                 :             :            "Total",
    2346                 :           0 :            SIZE_AMOUNT (G.bytes_mapped),
    2347                 :           0 :            SIZE_AMOUNT (G.allocated),
    2348                 :           0 :            SIZE_AMOUNT (total_overhead));
    2349                 :             : 
    2350                 :           0 :   if (GATHER_STATISTICS)
    2351                 :             :     {
    2352                 :             :       fprintf (stderr, "\nTotal allocations and overheads during "
    2353                 :             :                "the compilation process\n");
    2354                 :             : 
    2355                 :             :       fprintf (stderr, "Total Overhead:                          "
    2356                 :             :                PRsa (9) "\n",
    2357                 :             :                SIZE_AMOUNT (G.stats.total_overhead));
    2358                 :             :       fprintf (stderr, "Total Allocated:                         "
    2359                 :             :                PRsa (9) "\n",
    2360                 :             :                SIZE_AMOUNT (G.stats.total_allocated));
    2361                 :             : 
    2362                 :             :       fprintf (stderr, "Total Overhead  under  32B:              "
    2363                 :             :                PRsa (9) "\n",
    2364                 :             :                SIZE_AMOUNT (G.stats.total_overhead_under32));
    2365                 :             :       fprintf (stderr, "Total Allocated under  32B:              "
    2366                 :             :                PRsa (9) "\n",
    2367                 :             :                SIZE_AMOUNT (G.stats.total_allocated_under32));
    2368                 :             :       fprintf (stderr, "Total Overhead  under  64B:              "
    2369                 :             :                PRsa (9) "\n",
    2370                 :             :                SIZE_AMOUNT (G.stats.total_overhead_under64));
    2371                 :             :       fprintf (stderr, "Total Allocated under  64B:              "
    2372                 :             :                PRsa (9) "\n",
    2373                 :             :                SIZE_AMOUNT (G.stats.total_allocated_under64));
    2374                 :             :       fprintf (stderr, "Total Overhead  under 128B:              "
    2375                 :             :                PRsa (9) "\n",
    2376                 :             :                SIZE_AMOUNT (G.stats.total_overhead_under128));
    2377                 :             :       fprintf (stderr, "Total Allocated under 128B:              "
    2378                 :             :                PRsa (9) "\n",
    2379                 :             :                SIZE_AMOUNT (G.stats.total_allocated_under128));
    2380                 :             : 
    2381                 :             :       for (i = 0; i < NUM_ORDERS; i++)
    2382                 :             :         if (G.stats.total_allocated_per_order[i])
    2383                 :             :           {
    2384                 :             :             fprintf (stderr, "Total Overhead  page size %9" PRIu64 ":     "
    2385                 :             :                      PRsa (9) "\n",
    2386                 :             :                      (uint64_t)OBJECT_SIZE (i),
    2387                 :             :                      SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
    2388                 :             :             fprintf (stderr, "Total Allocated page size %9" PRIu64 ":     "
    2389                 :             :                      PRsa (9) "\n",
    2390                 :             :                      (uint64_t)OBJECT_SIZE (i),
    2391                 :             :                      SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
    2392                 :             :           }
    2393                 :             :   }
    2394                 :           0 : }
    2395                 :             : 
    2396                 :             : struct ggc_pch_ondisk
    2397                 :             : {
    2398                 :             :   unsigned totals[NUM_ORDERS];
    2399                 :             : };
    2400                 :             : 
    2401                 :             : struct ggc_pch_data
    2402                 :             : {
    2403                 :             :   struct ggc_pch_ondisk d;
    2404                 :             :   uintptr_t base[NUM_ORDERS];
    2405                 :             :   size_t written[NUM_ORDERS];
    2406                 :             : };
    2407                 :             : 
    2408                 :             : struct ggc_pch_data *
    2409                 :         426 : init_ggc_pch (void)
    2410                 :             : {
    2411                 :         426 :   return XCNEW (struct ggc_pch_data);
    2412                 :             : }
    2413                 :             : 
    2414                 :             : void
    2415                 :    23019784 : ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
    2416                 :             :                       size_t size)
    2417                 :             : {
    2418                 :    23019784 :   unsigned order;
    2419                 :             : 
    2420                 :    23019784 :   if (size < NUM_SIZE_LOOKUP)
    2421                 :    22968763 :     order = size_lookup[size];
    2422                 :             :   else
    2423                 :             :     {
    2424                 :             :       order = 10;
    2425                 :       82042 :       while (size > OBJECT_SIZE (order))
    2426                 :       31021 :         order++;
    2427                 :             :     }
    2428                 :             : 
    2429                 :    23019784 :   d->d.totals[order]++;
    2430                 :    23019784 : }
    2431                 :             : 
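/* Editor's note: the size-to-order mapping above is repeated verbatim in
   ggc_pch_alloc_object and ggc_pch_write_object below.  A minimal sketch of
   the shared logic as a standalone helper (the name pch_size_to_order is
   hypothetical, not part of this file):

     static unsigned
     pch_size_to_order (size_t size)
     {
       unsigned order;
       if (size < NUM_SIZE_LOOKUP)
         /* Small requests map directly through the precomputed table.  */
         order = size_lookup[size];
       else
         {
           /* Larger requests: scan upward from order 10 until the bucket
              is big enough.  */
           order = 10;
           while (size > OBJECT_SIZE (order))
             order++;
         }
       return order;
     }
*/
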
    2432                 :             : size_t
    2433                 :         426 : ggc_pch_total_size (struct ggc_pch_data *d)
    2434                 :             : {
    2435                 :         426 :   size_t a = 0;
    2436                 :         426 :   unsigned i;
    2437                 :             : 
    2438                 :       36210 :   for (i = 0; i < NUM_ORDERS; i++)
    2439                 :       35784 :     a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    2440                 :         426 :   return a;
    2441                 :             : }
    2442                 :             : 
    2443                 :             : void
    2444                 :         426 : ggc_pch_this_base (struct ggc_pch_data *d, void *base)
    2445                 :             : {
    2446                 :         426 :   uintptr_t a = (uintptr_t) base;
    2447                 :         426 :   unsigned i;
    2448                 :             : 
    2449                 :       36210 :   for (i = 0; i < NUM_ORDERS; i++)
    2450                 :             :     {
    2451                 :       35784 :       d->base[i] = a;
    2452                 :       35784 :       a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
    2453                 :             :     }
    2454                 :         426 : }
    2455                 :             : 
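/* Editor's note: ggc_pch_total_size and ggc_pch_this_base above describe one
   contiguous region carved into page-aligned, per-order sub-regions.  The
   layout invariant, written out (names as in the source; BASE is the argument
   to ggc_pch_this_base):

     base[0]   = (uintptr_t) BASE
     base[i+1] = base[i] + PAGE_ALIGN (totals[i] * OBJECT_SIZE (i))

   so the value returned by ggc_pch_total_size is exactly the distance from
   base[0] to the end of the last sub-region, and ggc_pch_alloc_object below
   hands out consecutive slots by bumping base[order] in steps of
   OBJECT_SIZE (order).  */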
    2456                 :             : 
    2457                 :             : char *
    2458                 :    23019784 : ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
    2459                 :             :                       size_t size)
    2460                 :             : {
    2461                 :    23019784 :   unsigned order;
    2462                 :    23019784 :   char *result;
    2463                 :             : 
    2464                 :    23019784 :   if (size < NUM_SIZE_LOOKUP)
    2465                 :    22968763 :     order = size_lookup[size];
    2466                 :             :   else
    2467                 :             :     {
    2468                 :             :       order = 10;
    2469                 :       82042 :       while (size > OBJECT_SIZE (order))
    2470                 :       31021 :         order++;
    2471                 :             :     }
    2472                 :             : 
    2473                 :    23019784 :   result = (char *) d->base[order];
    2474                 :    23019784 :   d->base[order] += OBJECT_SIZE (order);
    2475                 :    23019784 :   return result;
    2476                 :             : }
    2477                 :             : 
    2478                 :             : void
    2479                 :         426 : ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
    2480                 :             :                        FILE *f ATTRIBUTE_UNUSED)
    2481                 :             : {
    2482                 :             :   /* Nothing to do.  */
    2483                 :         426 : }
    2484                 :             : 
    2485                 :             : void
    2486                 :    23019784 : ggc_pch_write_object (struct ggc_pch_data *d,
    2487                 :             :                       FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
    2488                 :             :                       size_t size)
    2489                 :             : {
    2490                 :    23019784 :   unsigned order;
    2491                 :    23019784 :   static const char emptyBytes[256] = { 0 };
    2492                 :             : 
    2493                 :    23019784 :   if (size < NUM_SIZE_LOOKUP)
    2494                 :    22968763 :     order = size_lookup[size];
    2495                 :             :   else
    2496                 :             :     {
    2497                 :             :       order = 10;
    2498                 :       82042 :       while (size > OBJECT_SIZE (order))
    2499                 :       31021 :         order++;
    2500                 :             :     }
    2501                 :             : 
    2502                 :    23019784 :   if (fwrite (x, size, 1, f) != 1)
    2503                 :           0 :     fatal_error (input_location, "cannot write PCH file: %m");
    2504                 :             : 
    2505                 :             :   /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
    2506                 :             :      object out to OBJECT_SIZE(order).  This happens for strings.  */
    2507                 :             : 
    2508                 :    23019784 :   if (size != OBJECT_SIZE (order))
    2509                 :             :     {
    2510                 :     1709282 :       unsigned padding = OBJECT_SIZE (order) - size;
    2511                 :             : 
    2512                 :             :       /* To speed small writes, we use a nulled-out array that's larger
    2513                 :             :          than most padding requests as the source for our null bytes.  This
    2514                 :             :          permits us to do the padding with fwrite() rather than fseek(), and
    2515                 :             :          limits the chance the OS may try to flush any outstanding writes.  */
    2516                 :     1709282 :       if (padding <= sizeof (emptyBytes))
    2517                 :             :         {
    2518                 :     1695355 :           if (fwrite (emptyBytes, 1, padding, f) != padding)
    2519                 :           0 :             fatal_error (input_location, "cannot write PCH file");
    2520                 :             :         }
    2521                 :             :       else
    2522                 :             :         {
    2523                 :             :           /* Larger than our buffer?  Just default to fseek.  */
    2524                 :       13927 :           if (fseek (f, padding, SEEK_CUR) != 0)
    2525                 :           0 :             fatal_error (input_location, "cannot write PCH file");
    2526                 :             :         }
    2527                 :             :     }
    2528                 :             : 
    2529                 :    23019784 :   d->written[order]++;
    2530                 :    23019784 :   if (d->written[order] == d->d.totals[order]
    2531                 :    23019784 :       && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
    2532                 :             :                                    G.pagesize),
    2533                 :             :                 SEEK_CUR) != 0)
    2534                 :           0 :     fatal_error (input_location, "cannot write PCH file: %m");
    2535                 :    23019784 : }
    2536                 :             : 
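/* Editor's note: a minimal standalone sketch of the padding strategy used in
   ggc_pch_write_object above -- write the payload, then pad the record out to
   a fixed slot size with a static zero buffer when the gap is small, falling
   back to fseek for larger gaps.  The names pad_record and slot_size are
   hypothetical, not part of this file:

     #include <stdio.h>

     static int
     pad_record (FILE *f, const void *obj, size_t size, size_t slot_size)
     {
       static const char zeros[256] = { 0 };
       if (fwrite (obj, size, 1, f) != 1)
         return -1;
       if (size < slot_size)
         {
           size_t padding = slot_size - size;
           if (padding <= sizeof (zeros))
             {
               /* Small gap: emit literal zero bytes, keeping the stream
                  sequential.  */
               if (fwrite (zeros, 1, padding, f) != padding)
                 return -1;
             }
           else if (fseek (f, (long) padding, SEEK_CUR) != 0)
             /* Large gap: skip forward instead of writing zeros.  */
             return -1;
         }
       return 0;
     }
*/
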
    2537                 :             : void
    2538                 :         426 : ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
    2539                 :             : {
    2540                 :         426 :   if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
    2541                 :           0 :     fatal_error (input_location, "cannot write PCH file: %m");
    2542                 :         426 :   free (d);
    2543                 :         426 : }
    2544                 :             : 
    2545                 :             : /* Move the PCH PTE entries just added to the end of by_depth, to the
    2546                 :             :    front.  */
    2547                 :             : 
    2548                 :             : static void
    2549                 :         335 : move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
    2550                 :             : {
    2551                 :             :   /* First, we swap the new entries to the front of the varrays.  */
    2552                 :         335 :   page_entry **new_by_depth;
    2553                 :         335 :   unsigned long **new_save_in_use;
    2554                 :             : 
    2555                 :         335 :   new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
    2556                 :         335 :   new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
    2557                 :             : 
    2558                 :         335 :   memcpy (&new_by_depth[0],
    2559                 :         335 :           &G.by_depth[count_old_page_tables],
    2560                 :         335 :           count_new_page_tables * sizeof (void *));
    2561                 :         335 :   memcpy (&new_by_depth[count_new_page_tables],
    2562                 :             :           &G.by_depth[0],
    2563                 :             :           count_old_page_tables * sizeof (void *));
    2564                 :         335 :   memcpy (&new_save_in_use[0],
    2565                 :         335 :           &G.save_in_use[count_old_page_tables],
    2566                 :             :           count_new_page_tables * sizeof (void *));
    2567                 :         335 :   memcpy (&new_save_in_use[count_new_page_tables],
    2568                 :             :           &G.save_in_use[0],
    2569                 :             :           count_old_page_tables * sizeof (void *));
    2570                 :             : 
    2571                 :         335 :   free (G.by_depth);
    2572                 :         335 :   free (G.save_in_use);
    2573                 :             : 
    2574                 :         335 :   G.by_depth = new_by_depth;
    2575                 :         335 :   G.save_in_use = new_save_in_use;
    2576                 :             : 
    2577                 :             :   /* Now update all the index_by_depth fields.  */
    2578                 :      171863 :   for (unsigned i = G.by_depth_in_use; i--;)
    2579                 :             :     {
    2580                 :      171528 :       page_entry *p = G.by_depth[i];
    2581                 :      171528 :       p->index_by_depth = i;
    2582                 :             :     }
    2583                 :             : 
    2584                 :             :   /* And last, we update the depth pointers in G.depth.  The first
    2585                 :             :      entry is already 0, and context 0 entries always start at index
    2586                 :             :      0, so there is nothing to update in the first slot.  We need a
    2587                 :             :      second slot only if we have old ptes, and if we do, they start
    2588                 :             :      at index count_new_page_tables.  */
    2589                 :         335 :   if (count_old_page_tables)
    2590                 :         335 :     push_depth (count_new_page_tables);
    2591                 :         335 : }
    2592                 :             : 
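/* Editor's note: a before/after picture of what move_ptes_to_front does to
   the by_depth and save_in_use arrays (indices only; "old" entries are the
   pages that existed before the PCH was read, "new" entries are the PCH pages
   just pushed onto the end):

     before:  [ old_0 ... old_{k-1} | new_0 ... new_{m-1} ]
     after:   [ new_0 ... new_{m-1} | old_0 ... old_{k-1} ]

   with k = count_old_page_tables and m = count_new_page_tables.  The PCH
   pages live at context depth 0 and so must come first; the old pages now
   start at index m, which is what the final push_depth (count_new_page_tables)
   call records.  */
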
    2593                 :             : void
    2594                 :         335 : ggc_pch_read (FILE *f, void *addr)
    2595                 :             : {
    2596                 :         335 :   struct ggc_pch_ondisk d;
    2597                 :         335 :   unsigned i;
    2598                 :         335 :   char *offs = (char *) addr;
    2599                 :         335 :   unsigned long count_old_page_tables;
    2600                 :         335 :   unsigned long count_new_page_tables;
    2601                 :             : 
    2602                 :         335 :   count_old_page_tables = G.by_depth_in_use;
    2603                 :             : 
    2604                 :         335 :   if (fread (&d, sizeof (d), 1, f) != 1)
    2605                 :           0 :     fatal_error (input_location, "cannot read PCH file: %m");
    2606                 :             : 
    2607                 :             :   /* We've just read in a PCH file.  So, every object that used to be
    2608                 :             :      allocated is now free.  */
    2609                 :         335 :   clear_marks ();
    2610                 :             : #ifdef ENABLE_GC_CHECKING
    2611                 :         335 :   poison_pages ();
    2612                 :             : #endif
    2613                 :             :   /* Since we free all the allocated objects, the free list becomes
    2614                 :             :      useless.  Validate it now, which will also clear it.  */
    2615                 :         335 :   validate_free_objects ();
    2616                 :             : 
    2617                 :             :   /* No object read from a PCH file should ever be freed.  So, set the
    2618                 :             :      context depth to 1, and set the depth of all the currently-allocated
    2619                 :             :      pages to be 1 too.  PCH pages will have depth 0.  */
    2620                 :         335 :   gcc_assert (!G.context_depth);
    2621                 :         335 :   G.context_depth = 1;
    2622                 :             :   /* Allocate space for the depth 1 finalizers.  */
    2623                 :         335 :   G.finalizers.safe_push (vNULL);
    2624                 :         335 :   G.vec_finalizers.safe_push (vNULL);
    2625                 :         335 :   gcc_assert (G.finalizers.length() == 2);
    2626                 :       28475 :   for (i = 0; i < NUM_ORDERS; i++)
    2627                 :             :     {
    2628                 :       28140 :       page_entry *p;
    2629                 :      190839 :       for (p = G.pages[i]; p != NULL; p = p->next)
    2630                 :      162699 :         p->context_depth = G.context_depth;
    2631                 :             :     }
    2632                 :             : 
    2633                 :             :   /* Allocate the appropriate page-table entries for the pages read from
    2634                 :             :      the PCH file.  */
    2635                 :             : 
    2636                 :       28475 :   for (i = 0; i < NUM_ORDERS; i++)
    2637                 :             :     {
    2638                 :       28140 :       struct page_entry *entry;
    2639                 :       28140 :       char *pte;
    2640                 :       28140 :       size_t bytes;
    2641                 :       28140 :       size_t num_objs;
    2642                 :       28140 :       size_t j;
    2643                 :             : 
    2644                 :       28140 :       if (d.totals[i] == 0)
    2645                 :       19311 :         continue;
    2646                 :             : 
    2647                 :        8829 :       bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
    2648                 :        8829 :       num_objs = bytes / OBJECT_SIZE (i);
    2649                 :        8829 :       entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
    2650                 :             :                                             - sizeof (long)
    2651                 :             :                                             + BITMAP_SIZE (num_objs + 1)));
    2652                 :        8829 :       entry->bytes = bytes;
    2653                 :        8829 :       entry->page = offs;
    2654                 :        8829 :       entry->context_depth = 0;
    2655                 :        8829 :       offs += bytes;
    2656                 :        8829 :       entry->num_free_objects = 0;
    2657                 :        8829 :       entry->order = i;
    2658                 :             : 
    2659                 :        8829 :       for (j = 0;
    2660                 :      170526 :            j + HOST_BITS_PER_LONG <= num_objs + 1;
    2661                 :      161697 :            j += HOST_BITS_PER_LONG)
    2662                 :      161697 :         entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
    2663                 :      194781 :       for (; j < num_objs + 1; j++)
    2664                 :      185952 :         entry->in_use_p[j / HOST_BITS_PER_LONG]
    2665                 :      185952 :           |= 1L << (j % HOST_BITS_PER_LONG);
    2666                 :             : 
    2667                 :      215895 :       for (pte = entry->page;
    2668                 :      224724 :            pte < entry->page + entry->bytes;
    2669                 :      215895 :            pte += G.pagesize)
    2670                 :      215895 :         set_page_table_entry (pte, entry);
    2671                 :             : 
    2672                 :        8829 :       if (G.page_tails[i] != NULL)
    2673                 :        8168 :         G.page_tails[i]->next = entry;
    2674                 :             :       else
    2675                 :         661 :         G.pages[i] = entry;
    2676                 :        8829 :       G.page_tails[i] = entry;
    2677                 :             : 
    2678                 :             :       /* We start off by just adding all the new information to the
    2679                 :             :          end of the varrays; later, we will move the new information
    2680                 :             :          to the front of the varrays, as the PCH page tables are at
    2681                 :             :          context 0.  */
    2682                 :        8829 :       push_by_depth (entry, 0);
    2683                 :             :     }
    2684                 :             : 
    2685                 :             :   /* Now, we update the various data structures that speed page table
    2686                 :             :      handling.  */
    2687                 :         335 :   count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
    2688                 :             : 
    2689                 :         335 :   move_ptes_to_front (count_old_page_tables, count_new_page_tables);
    2690                 :             : 
    2691                 :             :   /* Update the statistics.  */
    2692                 :         335 :   G.allocated = G.allocated_last_gc = offs - (char *)addr;
    2693                 :         335 : }
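
/* Editor's note: a minimal standalone sketch of the bitmap fill performed in
   ggc_pch_read when a page entry is rebuilt -- set the first n_bits bits of a
   word-packed bitmap, whole words first, then the tail bit by bit.  The helper
   name mark_first_bits is hypothetical, not part of this file:

     static void
     mark_first_bits (unsigned long *bitmap, size_t n_bits)
     {
       size_t j;
       /* Full words: every bit set at once.  */
       for (j = 0; j + HOST_BITS_PER_LONG <= n_bits; j += HOST_BITS_PER_LONG)
         bitmap[j / HOST_BITS_PER_LONG] = ~0UL;
       /* Remaining bits in the final, partial word.  */
       for (; j < n_bits; j++)
         bitmap[j / HOST_BITS_PER_LONG] |= 1UL << (j % HOST_BITS_PER_LONG);
     }

   In ggc_pch_read, n_bits is num_objs + 1, matching the BITMAP_SIZE
   (num_objs + 1) allocation of the entry, so every object restored from the
   PCH image starts out marked as in use.  */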
        

Generated by: LCOV version 2.1-beta

The LCOV profile is generated on an x86_64 machine using the following configure options: configure --disable-bootstrap --enable-coverage=opt --enable-languages=c,c++,fortran,go,jit,lto,rust,m2 --enable-host-shared. The GCC test suite is run with the built compiler.