Branch data Line data Source code
1 : : /* "Bag-of-pages" garbage collector for the GNU compiler.
2 : : Copyright (C) 1999-2025 Free Software Foundation, Inc.
3 : :
4 : : This file is part of GCC.
5 : :
6 : : GCC is free software; you can redistribute it and/or modify it under
7 : : the terms of the GNU General Public License as published by the Free
8 : : Software Foundation; either version 3, or (at your option) any later
9 : : version.
10 : :
11 : : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 : : WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 : : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 : : for more details.
15 : :
16 : : You should have received a copy of the GNU General Public License
17 : : along with GCC; see the file COPYING3. If not see
18 : : <http://www.gnu.org/licenses/>. */
19 : :
20 : : #include "config.h"
21 : : #include "system.h"
22 : : #include "coretypes.h"
23 : : #include "backend.h"
24 : : #include "alias.h"
25 : : #include "tree.h"
26 : : #include "rtl.h"
27 : : #include "memmodel.h"
28 : : #include "tm_p.h"
29 : : #include "diagnostic-core.h"
30 : : #include "flags.h"
31 : : #include "ggc-internal.h"
32 : : #include "timevar.h"
33 : : #include "cgraph.h"
34 : : #include "cfgloop.h"
35 : : #include "plugin.h"
36 : :
37 : : /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38 : : file open. Prefer either to valloc. */
39 : : #ifdef HAVE_MMAP_ANON
40 : : # undef HAVE_MMAP_DEV_ZERO
41 : : # define USING_MMAP
42 : : #endif
43 : :
44 : : #ifdef HAVE_MMAP_DEV_ZERO
45 : : # define USING_MMAP
46 : : #endif
47 : :
48 : : #ifndef USING_MMAP
49 : : #define USING_MALLOC_PAGE_GROUPS
50 : : #endif
51 : :
52 : : #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53 : : && defined(USING_MMAP)
54 : : # define USING_MADVISE
55 : : #endif
56 : :
57 : : /* Strategy:
58 : :
59 : : This garbage-collecting allocator allocates objects on one of a set
60 : : of pages. Each page can allocate objects of a single size only;
61 : : available sizes are powers of two starting at four bytes. The size
62 : : of an allocation request is rounded up to the next power of two
63 : : (`order'), and satisfied from the appropriate page.
64 : :
65 : : Each page is recorded in a page-entry, which also maintains an
66 : : in-use bitmap of object positions on the page. This allows the
67 : : allocation state of a particular object to be flipped without
68 : : touching the page itself.
69 : :
70 : : Each page-entry also has a context depth, which is used to track
71 : : pushing and popping of allocation contexts. Only objects allocated
72 : : in the current (highest-numbered) context may be collected.
73 : :
74 : : Page entries are arranged in an array of singly-linked lists. The
75 : : array is indexed by the allocation size, in bits, of the pages on
76 : : it; i.e. all pages on a list allocate objects of the same size.
77 : : Pages are ordered on the list such that all non-full pages precede
78 : : all full pages, with non-full pages arranged in order of decreasing
79 : : context depth.
80 : :
81 : : Empty pages (of all orders) are kept on a single page cache list,
82 : : and are considered first when new pages are required; they are
83 : : deallocated at the start of the next collection if they haven't
84 : : been recycled by then. */
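As a worked illustration of the rounding described above, here is a minimal, self-contained sketch (not part of this file) of mapping a request size to its power-of-two `order'.  The real allocator uses the precomputed size_lookup[] and object_size_table[] shown further down, and also has extra non-power-of-two orders, so treat this only as the conceptual model.

    #include <stddef.h>

    /* Illustrative sketch only: smallest power-of-two order, starting at
       four bytes, that can hold a request of REQUESTED bytes.  */
    static size_t
    sketch_order_for_size (size_t requested)
    {
      size_t order = 2;                        /* 1 << 2 == 4 bytes.  */
      while (((size_t) 1 << order) < requested)
        order++;
      return order;
    }

    /* Example: sketch_order_for_size (24) == 5, i.e. the request would be
       served from a page of 32-byte objects (ignoring the extra orders).  */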
85 : :
86 : : /* Define GGC_DEBUG_LEVEL to print debugging information.
87 : : 0: No debugging output.
88 : : 1: GC statistics only.
89 : : 2: Page-entry allocations/deallocations as well.
90 : : 3: Object allocations as well.
91 : : 4: Object marks as well. */
92 : : #define GGC_DEBUG_LEVEL (0)
93 : :
94 : : /* A two-level tree is used to look up the page-entry for a given
95 : : pointer. Two chunks of the pointer's bits are extracted to index
96 : : the first and second levels of the tree, as follows:
97 : :
 98 : :                                    HOST_PAGE_SIZE_BITS
 99 : :                             32            |      |
100 : :         msb +----------------+----+------+------+ lsb
101 : :                                 |    |
102 : :                      PAGE_L1_BITS    |
103 : :                                      |
104 : :                           PAGE_L2_BITS
105 : :
106 : : The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
107 : : pages are aligned on system page boundaries. The next most
108 : : significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
109 : : index values in the lookup table, respectively.
110 : :
111 : : For 32-bit architectures and the settings below, there are no
112 : : leftover bits. For architectures with wider pointers, the lookup
113 : : tree points to a list of page tables, which must be scanned to find the
114 : : correct one. */
115 : :
116 : : #define PAGE_L1_BITS (8)
117 : : #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
118 : : #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
119 : : #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
120 : :
121 : : #define LOOKUP_L1(p) \
122 : : (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
123 : :
124 : : #define LOOKUP_L2(p) \
125 : : (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
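A hedged sketch of how these macros split a pointer, assuming a 4 KiB host page (so G.lg_pagesize == 12 and, with PAGE_L1_BITS == 8, PAGE_L2_BITS == 12); the real macros read G.lg_pagesize at run time, so the split varies with the host page size.

    #include <stdint.h>
    #include <stddef.h>

    /* Illustrative only: mirror LOOKUP_L1 / LOOKUP_L2 for 4 KiB pages.  */
    static void
    sketch_split_pointer (uintptr_t p, size_t *l1, size_t *l2)
    {
      const unsigned lg_pagesize = 12;               /* assumed host page size */
      const unsigned l1_bits = 8;                    /* PAGE_L1_BITS */
      const unsigned l2_bits = 32 - l1_bits - lg_pagesize;

      *l1 = (p >> (32 - l1_bits)) & (((uintptr_t) 1 << l1_bits) - 1);
      *l2 = (p >> lg_pagesize) & (((uintptr_t) 1 << l2_bits) - 1);
      /* Bits below lg_pagesize are the offset within the page; on 64-bit
         hosts the bits above bit 31 select an entry in the page_table
         chain (see the typedefs below).  */
    }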
126 : :
127 : : /* The number of objects per allocation page, for objects on a page of
128 : : the indicated ORDER. */
129 : : #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
130 : :
131 : : /* The number of objects in P. */
132 : : #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
133 : :
134 : : /* The size of an object on a page of the indicated ORDER. */
135 : : #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
136 : :
137 : : /* For speed, we avoid doing a general integer divide to locate the
138 : : offset in the allocation bitmap, by precalculating numbers M, S
139 : : such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
140 : : within the page which is evenly divisible by the object size Z. */
141 : : #define DIV_MULT(ORDER) inverse_table[ORDER].mult
142 : : #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
143 : : #define OFFSET_TO_BIT(OFFSET, ORDER) \
144 : : (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
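A sketch of how such a (mult, shift) pair can be derived, in the spirit of compute_inverse (declared further down; its body is outside this excerpt): factor the powers of two out of the object size into SHIFT, and take the multiplicative inverse of the remaining odd part modulo the word size.

    #include <stddef.h>

    /* Illustrative sketch only.  */
    static void
    sketch_div_magic (size_t z, size_t *mult, unsigned int *shift)
    {
      unsigned int e = 0;
      while ((z & 1) == 0)
        {
          e++;
          z >>= 1;
        }
      size_t inv = z;
      while (inv * z != 1)            /* Newton iteration; each step doubles
                                         the number of correct low bits.  */
        inv *= 2 - inv * z;
      *mult = inv;
      *shift = e;
    }

    /* Worked example: for Z == 24 == 2^3 * 3 this gives shift == 3 and mult
       equal to the inverse of 3 (0xAAAAAAAB with a 32-bit size_t).  For the
       offset O == 48, O * mult wraps around to 16, and 16 >> 3 == 2 == 48 / 24.  */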
145 : :
146 : : /* We use this structure to determine the alignment required for
147 : : allocations. For power-of-two sized allocations, that's not a
148 : : problem, but it does matter for odd-sized allocations.
149 : : We do not care about alignment for floating-point types. */
150 : :
151 : : struct max_alignment {
152 : : char c;
153 : : union {
154 : : int64_t i;
155 : : void *p;
156 : : } u;
157 : : };
158 : :
159 : : /* The biggest alignment required. */
160 : :
161 : : #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
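The offsetof trick above measures the strictest alignment among the union's members: the padding the compiler inserts after C is exactly that alignment.  A runnable check (illustrative, not part of this file, assuming a typical LP64 host where the answer is 8):

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct probe { char c; union { int64_t i; void *p; } u; };

    int
    main (void)
    {
      /* Prints 8 on a typical LP64 host.  */
      printf ("%zu\n", offsetof (struct probe, u));
      return 0;
    }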
162 : :
163 : :
164 : : /* The number of extra orders, not corresponding to power-of-two sized
165 : : objects. */
166 : :
167 : : #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
168 : :
169 : : #define RTL_SIZE(NSLOTS) \
170 : : (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
171 : :
172 : : #define TREE_EXP_SIZE(OPS) \
173 : : (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
174 : :
175 : : /* The Ith entry is the maximum size of an object to be stored in the
176 : : Ith extra order. Adding a new entry to this array is the *only*
177 : : thing you need to do to add a new special allocation size. */
178 : :
179 : : static const size_t extra_order_size_table[] = {
180 : : /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
181 : : There are a lot of structures with these sizes and explicitly
182 : : listing them risks orders being dropped because they changed size. */
183 : : MAX_ALIGNMENT * 3,
184 : : MAX_ALIGNMENT * 5,
185 : : MAX_ALIGNMENT * 6,
186 : : MAX_ALIGNMENT * 7,
187 : : MAX_ALIGNMENT * 9,
188 : : MAX_ALIGNMENT * 10,
189 : : MAX_ALIGNMENT * 11,
190 : : MAX_ALIGNMENT * 12,
191 : : MAX_ALIGNMENT * 13,
192 : : MAX_ALIGNMENT * 14,
193 : : MAX_ALIGNMENT * 15,
194 : : sizeof (struct tree_decl_non_common),
195 : : sizeof (struct tree_field_decl),
196 : : sizeof (struct tree_parm_decl),
197 : : sizeof (struct tree_var_decl),
198 : : sizeof (struct tree_type_non_common),
199 : : sizeof (struct function),
200 : : sizeof (struct basic_block_def),
201 : : sizeof (struct cgraph_node),
202 : : sizeof (class loop),
203 : : };
204 : :
205 : : /* The total number of orders. */
206 : :
207 : : #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
208 : :
209 : : /* Compute the smallest nonnegative number which when added to X gives
210 : : a multiple of F. */
211 : :
212 : : #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
213 : :
214 : : /* Round X to the next multiple of the page size.  */
215 : :
216 : : #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
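Worked example (illustrative): with x == 10 and f == 8, ROUND_UP_VALUE (10, 8) == 8 - 1 - ((8 - 1 + 10) % 8) == 7 - 1 == 6, and 10 + 6 == 16, the next multiple of 8.  PAGE_ALIGN returns the rounded value directly, with f == G.pagesize; assuming 4 KiB pages, PAGE_ALIGN (5000) == 8192.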
217 : :
218 : : /* The Ith entry is the number of objects on a page of order I. */
219 : :
220 : : static unsigned objects_per_page_table[NUM_ORDERS];
221 : :
222 : : /* The Ith entry is the size of an object on a page of order I. */
223 : :
224 : : static size_t object_size_table[NUM_ORDERS];
225 : :
226 : : /* The Ith entry is a pair of numbers (mult, shift) such that
227 : : ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
228 : : for all k evenly divisible by OBJECT_SIZE(I). */
229 : :
230 : : static struct
231 : : {
232 : : size_t mult;
233 : : unsigned int shift;
234 : : }
235 : : inverse_table[NUM_ORDERS];
236 : :
237 : : /* A page_entry records the status of an allocation page. This
238 : : structure is dynamically sized to fit the bitmap in_use_p. */
239 : : struct page_entry
240 : : {
241 : : /* The next page-entry with objects of the same size, or NULL if
242 : : this is the last page-entry. */
243 : : struct page_entry *next;
244 : :
245 : : /* The previous page-entry with objects of the same size, or NULL if
246 : : this is the first page-entry. The PREV pointer exists solely to
247 : : keep the cost of ggc_free manageable. */
248 : : struct page_entry *prev;
249 : :
250 : : /* The number of bytes allocated. (This will always be a multiple
251 : : of the host system page size.) */
252 : : size_t bytes;
253 : :
254 : : /* The address at which the memory is allocated. */
255 : : char *page;
256 : :
257 : : #ifdef USING_MALLOC_PAGE_GROUPS
258 : : /* Back pointer to the page group this page came from. */
259 : : struct page_group *group;
260 : : #endif
261 : :
262 : : /* This is the index in the by_depth varray where this page entry
263 : : can be found. */
264 : : unsigned long index_by_depth;
265 : :
266 : : /* Context depth of this page. */
267 : : unsigned short context_depth;
268 : :
269 : : /* The number of free objects remaining on this page. */
270 : : unsigned short num_free_objects;
271 : :
272 : : /* A likely candidate for the bit position of a free object for the
273 : : next allocation from this page. */
274 : : unsigned short next_bit_hint;
275 : :
276 : : /* The lg of size of objects allocated from this page. */
277 : : unsigned char order;
278 : :
279 : : /* Discarded page? */
280 : : bool discarded;
281 : :
282 : : /* A bit vector indicating whether or not objects are in use. The
283 : : Nth bit is one if the Nth object on this page is allocated. This
284 : : array is dynamically sized. */
285 : : unsigned long in_use_p[1];
286 : : };
287 : :
288 : : #ifdef USING_MALLOC_PAGE_GROUPS
289 : : /* A page_group describes a large allocation from malloc, from which
290 : : we parcel out aligned pages. */
291 : : struct page_group
292 : : {
293 : : /* A linked list of all extant page groups. */
294 : : struct page_group *next;
295 : :
296 : : /* The address we received from malloc. */
297 : : char *allocation;
298 : :
299 : : /* The size of the block. */
300 : : size_t alloc_size;
301 : :
302 : : /* A bitmask of pages in use. */
303 : : unsigned int in_use;
304 : : };
305 : : #endif
306 : :
307 : : #if HOST_BITS_PER_PTR <= 32
308 : :
309 : : /* On 32-bit hosts, we use a two level page table, as pictured above. */
310 : : typedef page_entry **page_table[PAGE_L1_SIZE];
311 : :
312 : : #else
313 : :
314 : : /* On 64-bit hosts, we use the same two level page tables plus a linked
315 : : list that disambiguates the top 32-bits. There will almost always be
316 : : exactly one entry in the list. */
317 : : typedef struct page_table_chain
318 : : {
319 : : struct page_table_chain *next;
320 : : size_t high_bits;
321 : : page_entry **table[PAGE_L1_SIZE];
322 : : } *page_table;
323 : :
324 : : #endif
325 : :
326 : : class finalizer
327 : : {
328 : : public:
329 : 63867357 : finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
330 : :
331 : 135010275 : void *addr () const { return m_addr; }
332 : :
333 : 22594538 : void call () const { m_function (m_addr); }
334 : :
335 : : private:
336 : : void *m_addr;
337 : : void (*m_function)(void *);
338 : : };
339 : :
340 : : class vec_finalizer
341 : : {
342 : : public:
343 : 0 : vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
344 : 0 : m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
345 : :
346 : 0 : void call () const
347 : : {
348 : 0 : for (size_t i = 0; i < m_n_objects; i++)
349 : 0 : m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
350 : 0 : }
351 : :
352 : 0 : void *addr () const { return reinterpret_cast<void *> (m_addr); }
353 : :
354 : : private:
355 : : uintptr_t m_addr;
356 : : void (*m_function)(void *);
357 : : size_t m_object_size;
358 : : size_t m_n_objects;
359 : : };
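A hedged illustration of the kind of callback these classes store.  The callbacks are supplied by callers of ggc_internal_alloc (see its F, S and N parameters below); the sketch only shows the shape such a function takes and is not code from this file.

    /* Hypothetical object whose destructor releases non-GC resources.  */
    struct sample { ~sample () { /* release non-GC resources here */ } };

    /* A single-object finalizer callback: P is the object's start.  */
    static void
    sample_finalizer (void *p)
    {
      static_cast<sample *> (p)->~sample ();
    }

A finalizer (P, sample_finalizer) registered when P is allocated is invoked through finalizer::call once the collector later finds P dead; vec_finalizer does the same for an array, calling the function once per element, stepping m_object_size bytes at a time for m_n_objects elements.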
360 : :
361 : : #ifdef ENABLE_GC_ALWAYS_COLLECT
362 : : /* List of free objects to be verified as actually free on the
363 : : next collection. */
364 : : struct free_object
365 : : {
366 : : void *object;
367 : : struct free_object *next;
368 : : };
369 : : #endif
370 : :
371 : : /* The rest of the global variables. */
372 : : static struct ggc_globals
373 : : {
374 : : /* The Nth element in this array is a page with objects of size 2^N.
375 : : If there are any pages with free objects, they will be at the
376 : : head of the list. NULL if there are no page-entries for this
377 : : object size. */
378 : : page_entry *pages[NUM_ORDERS];
379 : :
380 : : /* The Nth element in this array is the last page with objects of
381 : : size 2^N. NULL if there are no page-entries for this object
382 : : size. */
383 : : page_entry *page_tails[NUM_ORDERS];
384 : :
385 : : /* Lookup table for associating allocation pages with object addresses. */
386 : : page_table lookup;
387 : :
388 : : /* The system's page size. */
389 : : size_t pagesize;
390 : : size_t lg_pagesize;
391 : :
392 : : /* Bytes currently allocated. */
393 : : size_t allocated;
394 : :
395 : : /* Bytes currently allocated at the end of the last collection. */
396 : : size_t allocated_last_gc;
397 : :
398 : : /* Total amount of memory mapped. */
399 : : size_t bytes_mapped;
400 : :
401 : : /* Bit N set if any allocations have been done at context depth N. */
402 : : unsigned long context_depth_allocations;
403 : :
404 : : /* Bit N set if any collections have been done at context depth N. */
405 : : unsigned long context_depth_collections;
406 : :
407 : : /* The current depth in the context stack. */
408 : : unsigned short context_depth;
409 : :
410 : : /* A file descriptor open to /dev/zero for reading. */
411 : : #if defined (HAVE_MMAP_DEV_ZERO)
412 : : int dev_zero_fd;
413 : : #endif
414 : :
415 : : /* A cache of free system pages. */
416 : : page_entry *free_pages;
417 : :
418 : : #ifdef USING_MALLOC_PAGE_GROUPS
419 : : page_group *page_groups;
420 : : #endif
421 : :
422 : : /* The stream used for debugging output. */
423 : : FILE *debug_file;
424 : :
425 : : /* Current number of elements in use in depth below. */
426 : : unsigned int depth_in_use;
427 : :
428 : : /* Maximum number of elements that can be used before resizing. */
429 : : unsigned int depth_max;
430 : :
431 : : /* Each element of this array is an index in by_depth where the given
432 : : depth starts.  This array is indexed by the depth we are
433 : : interested in. */
434 : : unsigned int *depth;
435 : :
436 : : /* Current number of elements in use in by_depth below. */
437 : : unsigned int by_depth_in_use;
438 : :
439 : : /* Maximum number of elements that can be used before resizing. */
440 : : unsigned int by_depth_max;
441 : :
442 : : /* Each element of this array is a pointer to a page_entry, all
443 : : page_entries can be found in here by increasing depth.
444 : : index_by_depth in the page_entry is the index into this data
445 : : structure where that page_entry can be found. This is used to
446 : : speed up finding all page_entries at a particular depth. */
447 : : page_entry **by_depth;
448 : :
449 : : /* Each element is a pointer to the saved in_use_p bits, if any,
450 : : zero otherwise. We allocate them all together, to enable a
451 : : better runtime data access pattern. */
452 : : unsigned long **save_in_use;
453 : :
454 : : /* Finalizers for single objects. The first index is collection_depth. */
455 : : vec<vec<finalizer> > finalizers;
456 : :
457 : : /* Finalizers for vectors of objects. */
458 : : vec<vec<vec_finalizer> > vec_finalizers;
459 : :
460 : : #ifdef ENABLE_GC_ALWAYS_COLLECT
461 : : /* List of free objects to be verified as actually free on the
462 : : next collection. */
463 : : struct free_object *free_object_list;
464 : : #endif
465 : :
466 : : struct
467 : : {
468 : : /* Total GC-allocated memory. */
469 : : unsigned long long total_allocated;
470 : : /* Total overhead for GC-allocated memory. */
471 : : unsigned long long total_overhead;
472 : :
473 : : /* Total allocations and overhead for sizes less than 32, 64 and 128.
474 : : These sizes are interesting because they are typical cache line
475 : : sizes. */
476 : :
477 : : unsigned long long total_allocated_under32;
478 : : unsigned long long total_overhead_under32;
479 : :
480 : : unsigned long long total_allocated_under64;
481 : : unsigned long long total_overhead_under64;
482 : :
483 : : unsigned long long total_allocated_under128;
484 : : unsigned long long total_overhead_under128;
485 : :
486 : : /* The allocations for each of the allocation orders. */
487 : : unsigned long long total_allocated_per_order[NUM_ORDERS];
488 : :
489 : : /* The overhead for each of the allocation orders. */
490 : : unsigned long long total_overhead_per_order[NUM_ORDERS];
491 : : } stats;
492 : : } G;
493 : :
494 : : /* True if a gc is currently taking place. */
495 : :
496 : : static bool in_gc = false;
497 : :
498 : : /* The size in bytes required to maintain a bitmap for the objects
499 : : on a page-entry. */
500 : : #define BITMAP_SIZE(Num_objects) \
501 : : (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
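Worked example of the dynamic sizing of page_entry mentioned above (illustrative, assuming a 64-bit host with 4 KiB pages): a page of 32-byte objects holds 128 objects, so alloc_page allocates its entry as sizeof (page_entry) - sizeof (long) + BITMAP_SIZE (128 + 1), where BITMAP_SIZE (129) == CEIL (129, 64) * sizeof (long) == 3 * 8 == 24 bytes.  The extra "+ 1" object accounts for the one-past-the-end sentinel bit that alloc_page sets in in_use_p.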
502 : :
503 : : /* Allocate pages in chunks of this size, to throttle calls to memory
504 : : allocation routines. The first page is used, the rest go onto the
505 : : free list. This cannot be larger than HOST_BITS_PER_INT for the
506 : : in_use bitmask for page_group. Hosts that need a different value
507 : : can override this by defining GGC_QUIRE_SIZE explicitly. */
508 : : #ifndef GGC_QUIRE_SIZE
509 : : # ifdef USING_MMAP
510 : : # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
511 : : # else
512 : : # define GGC_QUIRE_SIZE 16
513 : : # endif
514 : : #endif
515 : :
516 : : /* Initial guess as to how many page table entries we might need. */
517 : : #define INITIAL_PTE_COUNT 128
518 : :
519 : : static page_entry *lookup_page_table_entry (const void *);
520 : : static void set_page_table_entry (void *, page_entry *);
521 : : #ifdef USING_MMAP
522 : : static char *alloc_anon (char *, size_t, bool check);
523 : : #endif
524 : : #ifdef USING_MALLOC_PAGE_GROUPS
525 : : static size_t page_group_index (char *, char *);
526 : : static void set_page_group_in_use (page_group *, char *);
527 : : static void clear_page_group_in_use (page_group *, char *);
528 : : #endif
529 : : static struct page_entry * alloc_page (unsigned);
530 : : static void free_page (struct page_entry *);
531 : : static void clear_marks (void);
532 : : static void sweep_pages (void);
533 : : static void ggc_recalculate_in_use_p (page_entry *);
534 : : static void compute_inverse (unsigned);
535 : : static inline void adjust_depth (void);
536 : : static void move_ptes_to_front (int, int);
537 : :
538 : : void debug_print_page_list (int);
539 : : static void push_depth (unsigned int);
540 : : static void push_by_depth (page_entry *, unsigned long *);
541 : :
542 : : /* Push an entry onto G.depth. */
543 : :
544 : : inline static void
545 : 282374 : push_depth (unsigned int i)
546 : : {
547 : 282374 : if (G.depth_in_use >= G.depth_max)
548 : : {
549 : 0 : G.depth_max *= 2;
550 : 0 : G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
551 : : }
552 : 282374 : G.depth[G.depth_in_use++] = i;
553 : 282374 : }
554 : :
555 : : /* Push an entry onto G.by_depth and G.save_in_use. */
556 : :
557 : : inline static void
558 : 610139613 : push_by_depth (page_entry *p, unsigned long *s)
559 : : {
560 : 610139613 : if (G.by_depth_in_use >= G.by_depth_max)
561 : : {
562 : 826917 : G.by_depth_max *= 2;
563 : 826917 : G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
564 : 826917 : G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
565 : : G.by_depth_max);
566 : : }
567 : 610139613 : G.by_depth[G.by_depth_in_use] = p;
568 : 610139613 : G.save_in_use[G.by_depth_in_use++] = s;
569 : 610139613 : }
570 : :
571 : : #if (GCC_VERSION < 3001)
572 : : #define prefetch(X) ((void) X)
573 : : #else
574 : : #define prefetch(X) __builtin_prefetch (X)
575 : : #endif
576 : :
577 : : #define save_in_use_p_i(__i) \
578 : : (G.save_in_use[__i])
579 : : #define save_in_use_p(__p) \
580 : : (save_in_use_p_i (__p->index_by_depth))
581 : :
582 : : /* Traverse the page table and find the entry for a page.
583 : : If the object wasn't allocated by the GC, return NULL. */
584 : :
585 : : static inline page_entry *
586 : 5659828809 : safe_lookup_page_table_entry (const void *p)
587 : : {
588 : 5659828809 : page_entry ***base;
589 : 5659828809 : size_t L1, L2;
590 : :
591 : : #if HOST_BITS_PER_PTR <= 32
592 : : base = &G.lookup[0];
593 : : #else
594 : 5659828809 : page_table table = G.lookup;
595 : 5659828809 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
596 : 7881104983 : while (1)
597 : : {
598 : 6770466896 : if (table == NULL)
599 : : return NULL;
600 : 6200830609 : if (table->high_bits == high_bits)
601 : : break;
602 : 1110638087 : table = table->next;
603 : : }
604 : 5090192522 : base = &table->table[0];
605 : : #endif
606 : :
607 : : /* Extract the level 1 and 2 indices. */
608 : 5090192522 : L1 = LOOKUP_L1 (p);
609 : 5090192522 : L2 = LOOKUP_L2 (p);
610 : 5090192522 : if (! base[L1])
611 : : return NULL;
612 : :
613 : 5069692524 : return base[L1][L2];
614 : : }
615 : :
616 : : /* Traverse the page table and find the entry for a page.
617 : : Die (probably) if the object wasn't allocated via GC. */
618 : :
619 : : static inline page_entry *
620 : >22849*10^7 : lookup_page_table_entry (const void *p)
621 : : {
622 : >22849*10^7 : page_entry ***base;
623 : >22849*10^7 : size_t L1, L2;
624 : :
625 : : #if HOST_BITS_PER_PTR <= 32
626 : : base = &G.lookup[0];
627 : : #else
628 : >22849*10^7 : page_table table = G.lookup;
629 : >22849*10^7 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
630 : >23898*10^7 : while (table->high_bits != high_bits)
631 : 10488779131 : table = table->next;
632 : >22849*10^7 : base = &table->table[0];
633 : : #endif
634 : :
635 : : /* Extract the level 1 and 2 indices. */
636 : >22849*10^7 : L1 = LOOKUP_L1 (p);
637 : >22849*10^7 : L2 = LOOKUP_L2 (p);
638 : :
639 : >22849*10^7 : return base[L1][L2];
640 : : }
641 : :
642 : : /* Set the page table entry for a page. */
643 : :
644 : : static void
645 : 635912881 : set_page_table_entry (void *p, page_entry *entry)
646 : : {
647 : 635912881 : page_entry ***base;
648 : 635912881 : size_t L1, L2;
649 : :
650 : : #if HOST_BITS_PER_PTR <= 32
651 : : base = &G.lookup[0];
652 : : #else
653 : 635912881 : page_table table;
654 : 635912881 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
655 : 636662120 : for (table = G.lookup; table; table = table->next)
656 : 636378418 : if (table->high_bits == high_bits)
657 : 635629179 : goto found;
658 : :
659 : : /* Not found -- allocate a new table. */
660 : 283702 : table = XCNEW (struct page_table_chain);
661 : 283702 : table->next = G.lookup;
662 : 283702 : table->high_bits = high_bits;
663 : 283702 : G.lookup = table;
664 : 635912881 : found:
665 : 635912881 : base = &table->table[0];
666 : : #endif
667 : :
668 : : /* Extract the level 1 and 2 indices. */
669 : 635912881 : L1 = LOOKUP_L1 (p);
670 : 635912881 : L2 = LOOKUP_L2 (p);
671 : :
672 : 635912881 : if (base[L1] == NULL)
673 : 628067 : base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
674 : :
675 : 635912881 : base[L1][L2] = entry;
676 : 635912881 : }
677 : :
678 : : /* Prints the page-entry for object size ORDER, for debugging. */
679 : :
680 : : DEBUG_FUNCTION void
681 : 0 : debug_print_page_list (int order)
682 : : {
683 : 0 : page_entry *p;
684 : 0 : printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
685 : 0 : (void *) G.page_tails[order]);
686 : 0 : p = G.pages[order];
687 : 0 : while (p != NULL)
688 : : {
689 : 0 : printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
690 : 0 : p->num_free_objects);
691 : 0 : p = p->next;
692 : : }
693 : 0 : printf ("NULL\n");
694 : 0 : fflush (stdout);
695 : 0 : }
696 : :
697 : : #ifdef USING_MMAP
698 : : /* Allocate SIZE bytes of anonymous memory, preferably near PREF
699 : : (if non-null).  The ifdef structure here is intended to cause a
700 : : compile error unless exactly one of the HAVE_* is defined. */
701 : :
702 : : static inline char *
703 : 6282417 : alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
704 : : {
705 : : #ifdef HAVE_MMAP_ANON
706 : 6282417 : char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
707 : : MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
708 : : #endif
709 : : #ifdef HAVE_MMAP_DEV_ZERO
710 : : char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
711 : : MAP_PRIVATE, G.dev_zero_fd, 0);
712 : : #endif
713 : :
714 : 6282417 : if (page == (char *) MAP_FAILED)
715 : : {
716 : 0 : if (!check)
717 : : return NULL;
718 : 0 : perror ("virtual memory exhausted");
719 : 0 : exit (FATAL_EXIT_CODE);
720 : : }
721 : :
722 : : /* Remember that we allocated this memory. */
723 : 6282417 : G.bytes_mapped += size;
724 : :
725 : : /* Pretend we don't have access to the allocated pages. We'll enable
726 : : access to smaller pieces of the area in ggc_internal_alloc. Discard the
727 : : handle to avoid handle leak. */
728 : 6282417 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
729 : :
730 : 6282417 : return page;
731 : : }
732 : : #endif
733 : : #ifdef USING_MALLOC_PAGE_GROUPS
734 : : /* Compute the index for this page into the page group. */
735 : :
736 : : static inline size_t
737 : : page_group_index (char *allocation, char *page)
738 : : {
739 : : return (size_t) (page - allocation) >> G.lg_pagesize;
740 : : }
741 : :
742 : : /* Set and clear the in_use bit for this page in the page group. */
743 : :
744 : : static inline void
745 : : set_page_group_in_use (page_group *group, char *page)
746 : : {
747 : : group->in_use |= 1 << page_group_index (group->allocation, page);
748 : : }
749 : :
750 : : static inline void
751 : : clear_page_group_in_use (page_group *group, char *page)
752 : : {
753 : : group->in_use &= ~(1 << page_group_index (group->allocation, page));
754 : : }
755 : : #endif
756 : :
757 : : /* Allocate a new page for allocating objects of size 2^ORDER,
758 : : and return an entry for it. The entry is not added to the
759 : : appropriate page_table list. */
760 : :
761 : : static inline struct page_entry *
762 : 610130050 : alloc_page (unsigned order)
763 : : {
764 : 610130050 : struct page_entry *entry, *p, **pp;
765 : 610130050 : char *page;
766 : 610130050 : size_t num_objects;
767 : 610130050 : size_t bitmap_size;
768 : 610130050 : size_t page_entry_size;
769 : 610130050 : size_t entry_size;
770 : : #ifdef USING_MALLOC_PAGE_GROUPS
771 : : page_group *group;
772 : : #endif
773 : :
774 : 610130050 : num_objects = OBJECTS_PER_PAGE (order);
775 : 610130050 : bitmap_size = BITMAP_SIZE (num_objects + 1);
776 : 610130050 : page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
777 : 610130050 : entry_size = num_objects * OBJECT_SIZE (order);
778 : 610130050 : if (entry_size < G.pagesize)
779 : : entry_size = G.pagesize;
780 : 610130050 : entry_size = PAGE_ALIGN (entry_size);
781 : :
782 : 610130050 : entry = NULL;
783 : 610130050 : page = NULL;
784 : :
785 : : /* Check the list of free pages for one we can use. */
786 : 2579886763 : for (pp = &G.free_pages, p = *pp; p; pp = &p->next, p = *pp)
787 : 2573886373 : if (p->bytes == entry_size)
788 : : break;
789 : :
790 : 610130050 : if (p != NULL)
791 : : {
792 : 604129660 : if (p->discarded)
793 : 10764731 : G.bytes_mapped += p->bytes;
794 : 604129660 : p->discarded = false;
795 : :
796 : : /* Recycle the allocated memory from this page ... */
797 : 604129660 : *pp = p->next;
798 : 604129660 : page = p->page;
799 : :
800 : : #ifdef USING_MALLOC_PAGE_GROUPS
801 : : group = p->group;
802 : : #endif
803 : :
804 : : /* ... and, if possible, the page entry itself. */
805 : 604129660 : if (p->order == order)
806 : : {
807 : 68914807 : entry = p;
808 : 68914807 : memset (entry, 0, page_entry_size);
809 : : }
810 : : else
811 : 535214853 : free (p);
812 : : }
813 : : #ifdef USING_MMAP
814 : 6000390 : else if (entry_size == G.pagesize)
815 : : {
816 : : /* We want just one page. Allocate a bunch of them and put the
817 : : extras on the freelist. (Can only do this optimization with
818 : : mmap for backing store.) */
819 : 1350292 : struct page_entry *e, *f = G.free_pages;
820 : 1350292 : int i, entries = GGC_QUIRE_SIZE;
821 : :
822 : 1350292 : page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
823 : 1350292 : if (page == NULL)
824 : : {
825 : 0 : page = alloc_anon (NULL, G.pagesize, true);
826 : 0 : entries = 1;
827 : : }
828 : :
829 : : /* This loop counts down so that the chain will be in ascending
830 : : memory order. */
831 : 691349504 : for (i = entries - 1; i >= 1; i--)
832 : : {
833 : 689999212 : e = XCNEWVAR (struct page_entry, page_entry_size);
834 : 689999212 : e->order = order;
835 : 689999212 : e->bytes = G.pagesize;
836 : 689999212 : e->page = page + (i << G.lg_pagesize);
837 : 689999212 : e->next = f;
838 : 689999212 : f = e;
839 : : }
840 : :
841 : 1350292 : G.free_pages = f;
842 : : }
843 : : else
844 : 4650098 : page = alloc_anon (NULL, entry_size, true);
845 : : #endif
846 : : #ifdef USING_MALLOC_PAGE_GROUPS
847 : : else
848 : : {
849 : : /* Allocate a large block of memory and serve out the aligned
850 : : pages therein. This results in much less memory wastage
851 : : than the traditional implementation of valloc. */
852 : :
853 : : char *allocation, *a, *enda;
854 : : size_t alloc_size, head_slop, tail_slop;
855 : : int multiple_pages = (entry_size == G.pagesize);
856 : :
857 : : if (multiple_pages)
858 : : alloc_size = GGC_QUIRE_SIZE * G.pagesize;
859 : : else
860 : : alloc_size = entry_size + G.pagesize - 1;
861 : : allocation = XNEWVEC (char, alloc_size);
862 : :
863 : : page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
864 : : head_slop = page - allocation;
865 : : if (multiple_pages)
866 : : tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
867 : : else
868 : : tail_slop = alloc_size - entry_size - head_slop;
869 : : enda = allocation + alloc_size - tail_slop;
870 : :
871 : : /* We allocated N pages, which are likely not aligned, leaving
872 : : us with N-1 usable pages. We plan to place the page_group
873 : : structure somewhere in the slop. */
874 : : if (head_slop >= sizeof (page_group))
875 : : group = (page_group *)page - 1;
876 : : else
877 : : {
878 : : /* We magically got an aligned allocation. Too bad, we have
879 : : to waste a page anyway. */
880 : : if (tail_slop == 0)
881 : : {
882 : : enda -= G.pagesize;
883 : : tail_slop += G.pagesize;
884 : : }
885 : : gcc_assert (tail_slop >= sizeof (page_group));
886 : : group = (page_group *)enda;
887 : : tail_slop -= sizeof (page_group);
888 : : }
889 : :
890 : : /* Remember that we allocated this memory. */
891 : : group->next = G.page_groups;
892 : : group->allocation = allocation;
893 : : group->alloc_size = alloc_size;
894 : : group->in_use = 0;
895 : : G.page_groups = group;
896 : : G.bytes_mapped += alloc_size;
897 : :
898 : : /* If we allocated multiple pages, put the rest on the free list. */
899 : : if (multiple_pages)
900 : : {
901 : : struct page_entry *e, *f = G.free_pages;
902 : : for (a = enda - G.pagesize; a != page; a -= G.pagesize)
903 : : {
904 : : e = XCNEWVAR (struct page_entry, page_entry_size);
905 : : e->order = order;
906 : : e->bytes = G.pagesize;
907 : : e->page = a;
908 : : e->group = group;
909 : : e->next = f;
910 : : f = e;
911 : : }
912 : : G.free_pages = f;
913 : : }
914 : : }
915 : : #endif
916 : :
917 : 610130050 : if (entry == NULL)
918 : 541215243 : entry = XCNEWVAR (struct page_entry, page_entry_size);
919 : :
920 : 610130050 : entry->bytes = entry_size;
921 : 610130050 : entry->page = page;
922 : 610130050 : entry->context_depth = G.context_depth;
923 : 610130050 : entry->order = order;
924 : 610130050 : entry->num_free_objects = num_objects;
925 : 610130050 : entry->next_bit_hint = 1;
926 : :
927 : 610130050 : G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
928 : :
929 : : #ifdef USING_MALLOC_PAGE_GROUPS
930 : : entry->group = group;
931 : : set_page_group_in_use (group, page);
932 : : #endif
933 : :
934 : : /* Set the one-past-the-end in-use bit. This acts as a sentry as we
935 : : increment the hint. */
936 : 610130050 : entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
937 : 610130050 : = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
938 : :
939 : 610130050 : set_page_table_entry (page, entry);
940 : :
941 : 610130050 : if (GGC_DEBUG_LEVEL >= 2)
942 : : fprintf (G.debug_file,
943 : : "Allocating page at %p, object size="
944 : : HOST_SIZE_T_PRINT_UNSIGNED ", data %p-%p\n",
945 : : (void *) entry, (fmt_size_t) OBJECT_SIZE (order),
946 : : (void *) page, (void *) (page + entry_size - 1));
947 : :
948 : 610130050 : return entry;
949 : : }
950 : :
951 : : /* Adjust the size of G.depth so that no index greater than the one
952 : : used by the top of the G.by_depth is used. */
953 : :
954 : : static inline void
955 : 25548969 : adjust_depth (void)
956 : : {
957 : 25548969 : page_entry *top;
958 : :
959 : 25548969 : if (G.by_depth_in_use)
960 : : {
961 : 25548969 : top = G.by_depth[G.by_depth_in_use-1];
962 : :
963 : : /* Peel back indices in depth that index into by_depth, so that
964 : : as new elements are added to by_depth, we note the indices
965 : : of those elements, if they are for new context depths. */
966 : 25548969 : while (G.depth_in_use > (size_t)top->context_depth+1)
967 : 0 : --G.depth_in_use;
968 : : }
969 : 25548969 : }
970 : :
971 : : /* For a page that is no longer needed, put it on the free page list. */
972 : :
973 : : static void
974 : 25548969 : free_page (page_entry *entry)
975 : : {
976 : 25548969 : if (GGC_DEBUG_LEVEL >= 2)
977 : : fprintf (G.debug_file,
978 : : "Deallocating page at %p, data %p-%p\n", (void *) entry,
979 : : (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
980 : :
981 : : /* Mark the page as inaccessible. Discard the handle to avoid handle
982 : : leak. */
983 : 25548969 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
984 : :
985 : 25548969 : set_page_table_entry (entry->page, NULL);
986 : :
987 : : #ifdef USING_MALLOC_PAGE_GROUPS
988 : : clear_page_group_in_use (entry->group, entry->page);
989 : : #endif
990 : :
991 : 25548969 : if (G.by_depth_in_use > 1)
992 : : {
993 : 25548969 : page_entry *top = G.by_depth[G.by_depth_in_use-1];
994 : 25548969 : int i = entry->index_by_depth;
995 : :
996 : : /* We cannot free a page from a context deeper than the current
997 : : one. */
998 : 25548969 : gcc_assert (entry->context_depth == top->context_depth);
999 : :
1000 : : /* Put top element into freed slot. */
1001 : 25548969 : G.by_depth[i] = top;
1002 : 25548969 : G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1003 : 25548969 : top->index_by_depth = i;
1004 : : }
1005 : 25548969 : --G.by_depth_in_use;
1006 : :
1007 : 25548969 : adjust_depth ();
1008 : :
1009 : 25548969 : entry->next = G.free_pages;
1010 : 25548969 : G.free_pages = entry;
1011 : 25548969 : }
1012 : :
1013 : : /* Release the free page cache to the system. */
1014 : :
1015 : : static void
1016 : 714284 : release_pages (void)
1017 : : {
1018 : 714284 : size_t n1 = 0;
1019 : 714284 : size_t n2 = 0;
1020 : : #ifdef USING_MADVISE
1021 : 714284 : page_entry *p, *start_p;
1022 : 714284 : char *start;
1023 : 714284 : size_t len;
1024 : 714284 : size_t mapped_len;
1025 : 714284 : page_entry *next, *prev, *newprev;
1026 : 714284 : size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1027 : :
1028 : : /* First free larger contiguous areas to the OS.
1029 : : This allows other allocators to grab these areas if needed.
1030 : : This is only done on larger chunks to avoid fragmentation.
1031 : : This does not always work because the free_pages list is only
1032 : : approximately sorted. */
1033 : :
1034 : 714284 : p = G.free_pages;
1035 : 714284 : prev = NULL;
1036 : 137974115 : while (p)
1037 : : {
1038 : 137259831 : start = p->page;
1039 : 137259831 : start_p = p;
1040 : 137259831 : len = 0;
1041 : 137259831 : mapped_len = 0;
1042 : 137259831 : newprev = prev;
1043 : 395761433 : while (p && p->page == start + len)
1044 : : {
1045 : 258501602 : len += p->bytes;
1046 : 258501602 : if (!p->discarded)
1047 : 51641204 : mapped_len += p->bytes;
1048 : 258501602 : newprev = p;
1049 : 258501602 : p = p->next;
1050 : : }
1051 : 137259831 : if (len >= free_unit)
1052 : : {
1053 : 37910086 : while (start_p != p)
1054 : : {
1055 : 37750001 : next = start_p->next;
1056 : 37750001 : free (start_p);
1057 : 37750001 : start_p = next;
1058 : : }
1059 : 160085 : munmap (start, len);
1060 : 160085 : if (prev)
1061 : 33231 : prev->next = p;
1062 : : else
1063 : 126854 : G.free_pages = p;
1064 : 160085 : G.bytes_mapped -= mapped_len;
1065 : 160085 : n1 += len;
1066 : 160085 : continue;
1067 : : }
1068 : : prev = newprev;
1069 : : }
1070 : :
1071 : : /* Now give back the fragmented pages to the OS, but keep the address
1072 : : space to reuse it next time. */
1073 : :
1074 : 210399262 : for (p = G.free_pages; p; )
1075 : : {
1076 : 209684978 : if (p->discarded)
1077 : : {
1078 : 206844966 : p = p->next;
1079 : 206844966 : continue;
1080 : : }
1081 : 2840012 : start = p->page;
1082 : 2840012 : len = p->bytes;
1083 : 2840012 : start_p = p;
1084 : 2840012 : p = p->next;
1085 : 13906635 : while (p && p->page == start + len)
1086 : : {
1087 : 11066623 : len += p->bytes;
1088 : 11066623 : p = p->next;
1089 : : }
1090 : : /* Give the page back to the kernel, but don't free the mapping.
1091 : : This avoids fragmentation in the virtual memory map of the
1092 : : process. Next time we can reuse it by just touching it. */
1093 : 2840012 : madvise (start, len, MADV_DONTNEED);
1094 : : /* Don't count those pages as mapped to not touch the garbage collector
1095 : : unnecessarily. */
1096 : 2840012 : G.bytes_mapped -= len;
1097 : 2840012 : n2 += len;
1098 : 16746647 : while (start_p != p)
1099 : : {
1100 : 13906635 : start_p->discarded = true;
1101 : 13906635 : start_p = start_p->next;
1102 : : }
1103 : : }
1104 : : #endif
1105 : : #if defined(USING_MMAP) && !defined(USING_MADVISE)
1106 : : page_entry *p, *next;
1107 : : char *start;
1108 : : size_t len;
1109 : :
1110 : : /* Gather up adjacent pages so they are unmapped together. */
1111 : : p = G.free_pages;
1112 : :
1113 : : while (p)
1114 : : {
1115 : : start = p->page;
1116 : : next = p->next;
1117 : : len = p->bytes;
1118 : : free (p);
1119 : : p = next;
1120 : :
1121 : : while (p && p->page == start + len)
1122 : : {
1123 : : next = p->next;
1124 : : len += p->bytes;
1125 : : free (p);
1126 : : p = next;
1127 : : }
1128 : :
1129 : : munmap (start, len);
1130 : : n1 += len;
1131 : : G.bytes_mapped -= len;
1132 : : }
1133 : :
1134 : : G.free_pages = NULL;
1135 : : #endif
1136 : : #ifdef USING_MALLOC_PAGE_GROUPS
1137 : : page_entry **pp, *p;
1138 : : page_group **gp, *g;
1139 : :
1140 : : /* Remove all pages from free page groups from the list. */
1141 : : pp = &G.free_pages;
1142 : : while ((p = *pp) != NULL)
1143 : : if (p->group->in_use == 0)
1144 : : {
1145 : : *pp = p->next;
1146 : : free (p);
1147 : : }
1148 : : else
1149 : : pp = &p->next;
1150 : :
1151 : : /* Remove all free page groups, and release the storage. */
1152 : : gp = &G.page_groups;
1153 : : while ((g = *gp) != NULL)
1154 : : if (g->in_use == 0)
1155 : : {
1156 : : *gp = g->next;
1157 : : G.bytes_mapped -= g->alloc_size;
1158 : : n1 += g->alloc_size;
1159 : : free (g->allocation);
1160 : : }
1161 : : else
1162 : : gp = &g->next;
1163 : : #endif
1164 : 714284 : if (!quiet_flag && (n1 || n2))
1165 : : {
1166 : 0 : fprintf (stderr, " {GC");
1167 : 0 : if (n1)
1168 : 0 : fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
1169 : 0 : if (n2)
1170 : 0 : fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
1171 : 0 : fprintf (stderr, "}");
1172 : : }
1173 : 714284 : }
1174 : :
1175 : : /* This table provides a fast way to determine ceil(log_2(size)) for
1176 : : allocation requests. The minimum allocation size is eight bytes. */
1177 : : #define NUM_SIZE_LOOKUP 512
1178 : : static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1179 : : {
1180 : : 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1181 : : 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1182 : : 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1183 : : 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1184 : : 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1185 : : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1186 : : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1187 : : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1188 : : 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1189 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1190 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1191 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1192 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1193 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1194 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1195 : : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1196 : : 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1197 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1198 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1199 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1200 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1201 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1202 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1203 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1204 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1205 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1206 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1207 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1208 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1209 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1210 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1211 : : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1212 : : };
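A sketch of how the static values above can be generated (illustrative, not part of the build): every request size maps to the smallest power-of-two order that holds it, with a floor of order 3, i.e. eight bytes.  The non-power-of-two extra orders are not reflected in this sketch.

    #include <stdio.h>

    int
    main (void)
    {
      for (int sz = 0; sz < 512; sz++)
        {
          int order = 3;
          while ((1 << order) < sz)
            order++;
          printf ("%d,%s", order, (sz % 16 == 15) ? "\n" : " ");
        }
      return 0;
    }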
1213 : :
1214 : : /* For a given size of memory requested for allocation, return the
1215 : : actual size that is going to be allocated, as well as the size
1216 : : order. */
1217 : :
1218 : : static void
1219 : 44409949000 : ggc_round_alloc_size_1 (size_t requested_size,
1220 : : size_t *size_order,
1221 : : size_t *alloced_size)
1222 : : {
1223 : 44409949000 : size_t order, object_size;
1224 : :
1225 : 44409949000 : if (requested_size < NUM_SIZE_LOOKUP)
1226 : : {
1227 : 44205221112 : order = size_lookup[requested_size];
1228 : 44205221112 : object_size = OBJECT_SIZE (order);
1229 : : }
1230 : : else
1231 : : {
1232 : : order = 10;
1233 : 306526323 : while (requested_size > (object_size = OBJECT_SIZE (order)))
1234 : 101798435 : order++;
1235 : : }
1236 : :
1237 : 44409949000 : if (size_order)
1238 : 42104907168 : *size_order = order;
1239 : 44409949000 : if (alloced_size)
1240 : 44409949000 : *alloced_size = object_size;
1241 : 44409949000 : }
1242 : :
1243 : : /* For a given size of memory requested for allocation, return the
1244 : : actual size that is going to be allocated. */
1245 : :
1246 : : size_t
1247 : 2305041832 : ggc_round_alloc_size (size_t requested_size)
1248 : : {
1249 : 2305041832 : size_t size = 0;
1250 : :
1251 : 2305041832 : ggc_round_alloc_size_1 (requested_size, NULL, &size);
1252 : 2305041832 : return size;
1253 : : }
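Usage note (illustrative fragment): a caller can ask how much usable space a request will really get and size its object to fill the whole allocation instead of leaving the rounding slop unused.

    size_t requested = 12;
    size_t usable = ggc_round_alloc_size (requested);
    /* With the tables above, USABLE comes back as 16: the request is
       rounded up to the next available object size before a page of that
       order is chosen.  */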
1254 : :
1255 : : /* Push a finalizer onto the appropriate vec. */
1256 : :
1257 : : static void
1258 : 63867357 : add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1259 : : {
1260 : 63867357 : if (f == NULL)
1261 : : /* No finalizer. */;
1262 : 63867357 : else if (n == 1)
1263 : : {
1264 : 63867357 : finalizer fin (result, f);
1265 : 63867357 : G.finalizers[G.context_depth].safe_push (fin);
1266 : : }
1267 : : else
1268 : : {
1269 : 0 : vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1270 : 0 : G.vec_finalizers[G.context_depth].safe_push (fin);
1271 : : }
1272 : 63867357 : }
1273 : :
1274 : : /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1275 : :
1276 : : #ifdef HAVE_ATTRIBUTE_ALIAS
1277 : : extern "C" void *
1278 : 42104907168 : ggc_internal_alloc_ (size_t size, void (*f)(void *), size_t s, size_t n
1279 : : MEM_STAT_DECL)
1280 : : #else
1281 : : void *
1282 : : ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1283 : : MEM_STAT_DECL)
1284 : : #endif
1285 : : {
1286 : 42104907168 : size_t order, word, bit, object_offset, object_size;
1287 : 42104907168 : struct page_entry *entry;
1288 : 42104907168 : void *result;
1289 : :
1290 : 42104907168 : ggc_round_alloc_size_1 (size, &order, &object_size);
1291 : :
1292 : : /* If there are non-full pages for this size allocation, they are at
1293 : : the head of the list. */
1294 : 42104907168 : entry = G.pages[order];
1295 : :
1296 : : /* If there is no page for this object size, or all pages in this
1297 : : context are full, allocate a new page. */
1298 : 42104907168 : if (entry == NULL || entry->num_free_objects == 0)
1299 : : {
1300 : 610130050 : struct page_entry *new_entry;
1301 : 610130050 : new_entry = alloc_page (order);
1302 : :
1303 : 610130050 : new_entry->index_by_depth = G.by_depth_in_use;
1304 : 610130050 : push_by_depth (new_entry, 0);
1305 : :
1306 : : /* We can skip context depths, if we do, make sure we go all the
1307 : : way to the new depth. */
1308 : 1220542127 : while (new_entry->context_depth >= G.depth_in_use)
1309 : 282027 : push_depth (G.by_depth_in_use-1);
1310 : :
1311 : : /* If this is the only entry, it's also the tail. If it is not
1312 : : the only entry, then we must update the PREV pointer of the
1313 : : ENTRY (G.pages[order]) to point to our new page entry. */
1314 : 610130050 : if (entry == NULL)
1315 : 8832879 : G.page_tails[order] = new_entry;
1316 : : else
1317 : 601297171 : entry->prev = new_entry;
1318 : :
1319 : : /* Put new pages at the head of the page list. By definition the
1320 : : entry at the head of the list always has a NULL pointer. */
1321 : 610130050 : new_entry->next = entry;
1322 : 610130050 : new_entry->prev = NULL;
1323 : 610130050 : entry = new_entry;
1324 : 610130050 : G.pages[order] = new_entry;
1325 : :
1326 : : /* For a new page, we know the word and bit positions (in the
1327 : : in_use bitmap) of the first available object -- they're zero. */
1328 : 610130050 : new_entry->next_bit_hint = 1;
1329 : 610130050 : word = 0;
1330 : 610130050 : bit = 0;
1331 : 610130050 : object_offset = 0;
1332 : 610130050 : }
1333 : : else
1334 : : {
1335 : : /* First try to use the hint left from the previous allocation
1336 : : to locate a clear bit in the in-use bitmap. We've made sure
1337 : : that the one-past-the-end bit is always set, so if the hint
1338 : : has run over, this test will fail. */
1339 : 41494777118 : unsigned hint = entry->next_bit_hint;
1340 : 41494777118 : word = hint / HOST_BITS_PER_LONG;
1341 : 41494777118 : bit = hint % HOST_BITS_PER_LONG;
1342 : :
1343 : : /* If the hint didn't work, scan the bitmap from the beginning. */
1344 : 41494777118 : if ((entry->in_use_p[word] >> bit) & 1)
1345 : : {
1346 : 5501507693 : word = bit = 0;
1347 : 5501507693 : while (~entry->in_use_p[word] == 0)
1348 : 1545865044 : ++word;
1349 : :
1350 : : #if GCC_VERSION >= 3004
1351 : 3955642649 : bit = __builtin_ctzl (~entry->in_use_p[word]);
1352 : : #else
1353 : : while ((entry->in_use_p[word] >> bit) & 1)
1354 : : ++bit;
1355 : : #endif
1356 : :
1357 : 3955642649 : hint = word * HOST_BITS_PER_LONG + bit;
1358 : : }
1359 : :
1360 : : /* Next time, try the next bit. */
1361 : 41494777118 : entry->next_bit_hint = hint + 1;
1362 : :
1363 : 41494777118 : object_offset = hint * object_size;
1364 : : }
1365 : :
1366 : : /* Set the in-use bit. */
1367 : 42104907168 : entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1368 : :
1369 : : /* Keep a running total of the number of free objects. If this page
1370 : : fills up, we may have to move it to the end of the list if the
1371 : : next page isn't full. If the next page is full, all subsequent
1372 : : pages are full, so there's no need to move it. */
1373 : 42104907168 : if (--entry->num_free_objects == 0
1374 : 1157063822 : && entry->next != NULL
1375 : 43255176749 : && entry->next->num_free_objects > 0)
1376 : : {
1377 : : /* We have a new head for the list. */
1378 : 510880436 : G.pages[order] = entry->next;
1379 : :
1380 : : /* We are moving ENTRY to the end of the page table list.
1381 : : The new page at the head of the list will have NULL in
1382 : : its PREV field and ENTRY will have NULL in its NEXT field. */
1383 : 510880436 : entry->next->prev = NULL;
1384 : 510880436 : entry->next = NULL;
1385 : :
1386 : : /* Append ENTRY to the tail of the list. */
1387 : 510880436 : entry->prev = G.page_tails[order];
1388 : 510880436 : G.page_tails[order]->next = entry;
1389 : 510880436 : G.page_tails[order] = entry;
1390 : : }
1391 : :
1392 : : /* Calculate the object's address. */
1393 : 42104907168 : result = entry->page + object_offset;
1394 : 42104907168 : if (GATHER_STATISTICS)
1395 : : ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1396 : : result FINAL_PASS_MEM_STAT);
1397 : :
1398 : : #ifdef ENABLE_GC_CHECKING
1399 : : /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1400 : : exact same semantics in presence of memory bugs, regardless of
1401 : : ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1402 : : handle to avoid handle leak. */
1403 : 42104907168 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1404 : :
1405 : : /* `Poison' the entire allocated object, including any padding at
1406 : : the end. */
1407 : 42104907168 : memset (result, 0xaf, object_size);
1408 : :
1409 : : /* Make the bytes after the end of the object inaccessible. Discard the
1410 : : handle to avoid handle leak. */
1411 : : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1412 : 42104907168 : object_size - size));
1413 : : #endif
1414 : :
1415 : : /* Tell Valgrind that the memory is there, but its content isn't
1416 : : defined. The bytes at the end of the object are still marked
1417 : : unaccessible. */
1418 : 42104907168 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1419 : :
1420 : : /* Keep track of how many bytes are being allocated. This
1421 : : information is used in deciding when to collect. */
1422 : 42104907168 : G.allocated += object_size;
1423 : :
1424 : : /* For timevar statistics. */
1425 : 42104907168 : timevar_ggc_mem_total += object_size;
1426 : :
1427 : 42104907168 : if (f)
1428 : 63867357 : add_finalizer (result, f, s, n);
1429 : :
1430 : 42104907168 : if (GATHER_STATISTICS)
1431 : : {
1432 : : size_t overhead = object_size - size;
1433 : :
1434 : : G.stats.total_overhead += overhead;
1435 : : G.stats.total_allocated += object_size;
1436 : : G.stats.total_overhead_per_order[order] += overhead;
1437 : : G.stats.total_allocated_per_order[order] += object_size;
1438 : :
1439 : : if (size <= 32)
1440 : : {
1441 : : G.stats.total_overhead_under32 += overhead;
1442 : : G.stats.total_allocated_under32 += object_size;
1443 : : }
1444 : : if (size <= 64)
1445 : : {
1446 : : G.stats.total_overhead_under64 += overhead;
1447 : : G.stats.total_allocated_under64 += object_size;
1448 : : }
1449 : : if (size <= 128)
1450 : : {
1451 : : G.stats.total_overhead_under128 += overhead;
1452 : : G.stats.total_allocated_under128 += object_size;
1453 : : }
1454 : : }
1455 : :
1456 : 42104907168 : if (GGC_DEBUG_LEVEL >= 3)
1457 : : fprintf (G.debug_file,
1458 : : "Allocating object, requested size="
1459 : : HOST_SIZE_T_PRINT_UNSIGNED ", actual=" HOST_SIZE_T_PRINT_UNSIGNED
1460 : : " at %p on %p\n",
1461 : : (fmt_size_t) size, (fmt_size_t) object_size, result,
1462 : : (void *) entry);
1463 : :
1464 : 42104907168 : return result;
1465 : : }
1466 : :
1467 : : #ifdef HAVE_ATTRIBUTE_ALIAS
1468 : : extern void *
1469 : : ggc_internal_alloc (size_t size, void (*f)(void *), size_t s,
1470 : : size_t n MEM_STAT_DECL)
1471 : : __attribute__((__alias__ ("ggc_internal_alloc_")));
1472 : : extern void *
1473 : : ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1474 : : size_t n MEM_STAT_DECL)
1475 : : __attribute__((__alias__ ("ggc_internal_alloc_")));
1476 : : #else
1477 : : #ifdef __GNUC__
1478 : : __attribute__ ((__noinline__))
1479 : : #endif
1480 : : void *
1481 : : ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1482 : : size_t n MEM_STAT_DECL)
1483 : : {
1484 : : return ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
1485 : : }
1486 : : #endif
1487 : :
1488 : : /* Mark function for strings. */
1489 : :
1490 : : void
1491 : 7784803756 : gt_ggc_m_S (const void *p)
1492 : : {
1493 : 7784803756 : page_entry *entry;
1494 : 7784803756 : unsigned bit, word;
1495 : 7784803756 : unsigned long mask;
1496 : 7784803756 : unsigned long offset;
1497 : :
1498 : 7784803756 : if (!p)
1499 : : return;
1500 : :
1501 : : /* Look up the page on which the object is alloced. If it was not
1502 : : GC allocated, gracefully bail out. */
1503 : 5659828809 : entry = safe_lookup_page_table_entry (p);
1504 : 5659828809 : if (!entry)
1505 : : return;
1506 : :
1507 : : /* Calculate the index of the object on the page; this is its bit
1508 : : position in the in_use_p bitmap. Note that because a char* might
1509 : : point to the middle of an object, we need special code here to
1510 : : make sure P points to the start of an object. */
1511 : 5069692524 : offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1512 : 5069692524 : if (offset)
1513 : : {
1514 : : /* Here we've seen a char* which does not point to the beginning
1515 : : of an allocated object. We assume it points to the middle of
1516 : : a STRING_CST. */
1517 : 85836 : gcc_assert (offset == offsetof (struct tree_string, str));
1518 : 85836 : p = ((const char *) p) - offset;
1519 : 85836 : gt_ggc_mx_lang_tree_node (CONST_CAST (void *, p));
1520 : 85836 : return;
1521 : : }
1522 : :
1523 : 5069606688 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1524 : 5069606688 : word = bit / HOST_BITS_PER_LONG;
1525 : 5069606688 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1526 : :
1527 : : /* If the bit was previously set, skip it. */
1528 : 5069606688 : if (entry->in_use_p[word] & mask)
1529 : : return;
1530 : :
1531 : : /* Otherwise set it, and decrement the free object count. */
1532 : 4969199972 : entry->in_use_p[word] |= mask;
1533 : 4969199972 : entry->num_free_objects -= 1;
1534 : :
1535 : 4969199972 : if (GGC_DEBUG_LEVEL >= 4)
1536 : : fprintf (G.debug_file, "Marking %p\n", p);
1537 : :
1538 : 4969199972 : return;
1539 : : }
1540 : :
1541 : :
1542 : : /* User-callable entry points for marking string X. */
1543 : :
1544 : : void
1545 : 0 : gt_ggc_mx (const char *& x)
1546 : : {
1547 : 0 : gt_ggc_m_S (x);
1548 : 0 : }
1549 : :
1550 : : void
1551 : 274611 : gt_ggc_mx (char *& x)
1552 : : {
1553 : 274611 : gt_ggc_m_S (x);
1554 : 274611 : }
1555 : :
1556 : : void
1557 : 0 : gt_ggc_mx (unsigned char *& x)
1558 : : {
1559 : 0 : gt_ggc_m_S (x);
1560 : 0 : }
1561 : :
1562 : : void
1563 : 424 : gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1564 : : {
1565 : 424 : }
1566 : :
1567 : : /* If P is not marked, mark it and return false. Otherwise return true.
1568 : : P must have been allocated by the GC allocator; it mustn't point to
1569 : : static objects, stack variables, or memory allocated with malloc. */
1570 : :
1571 : : bool
1572 : >22145*10^7 : ggc_set_mark (const void *p)
1573 : : {
1574 : >22145*10^7 : page_entry *entry;
1575 : >22145*10^7 : unsigned bit, word;
1576 : >22145*10^7 : unsigned long mask;
1577 : :
1578 : : /* Look up the page on which the object is alloced. If the object
1579 : : wasn't allocated by the collector, we'll probably die. */
1580 : >22145*10^7 : entry = lookup_page_table_entry (p);
1581 : >22145*10^7 : gcc_assert (entry);
1582 : :
1583 : : /* Calculate the index of the object on the page; this is its bit
1584 : : position in the in_use_p bitmap. */
1585 : >22145*10^7 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1586 : >22145*10^7 : word = bit / HOST_BITS_PER_LONG;
1587 : >22145*10^7 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1588 : :
1589 : : /* If the bit was previously set, skip it. */
1590 : >22145*10^7 : if (entry->in_use_p[word] & mask)
1591 : : return true;
1592 : :
1593 : : /* Otherwise set it, and decrement the free object count. */
1594 : 64768382943 : entry->in_use_p[word] |= mask;
1595 : 64768382943 : entry->num_free_objects -= 1;
1596 : :
1597 : 64768382943 : if (GGC_DEBUG_LEVEL >= 4)
1598 : : fprintf (G.debug_file, "Marking %p\n", p);
1599 : :
1600 : 64768382943 : return false;
1601 : : }
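The same word/mask arithmetic recurs in gt_ggc_m_S, ggc_set_mark, ggc_marked_p and ggc_free. A minimal stand-alone sketch of the idiom follows; mark_bit and BITS_PER_LONG are illustrative names only, not symbols from this file, and nothing beyond what the code above shows is assumed about the in_use_p layout.

  #include <assert.h>
  #include <limits.h>

  #define BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))

  /* Return 1 if the object's bit was already set, otherwise set it and
     return 0.  */
  static int
  mark_bit (unsigned long *bitmap, unsigned object_index)
  {
    unsigned word = object_index / BITS_PER_LONG;
    unsigned long mask = 1UL << (object_index % BITS_PER_LONG);
    if (bitmap[word] & mask)
      return 1;
    bitmap[word] |= mask;
    return 0;
  }

  int main (void)
  {
    unsigned long bitmap[3] = { 0, 0, 0 };
    assert (mark_bit (bitmap, 40) == 0);   /* first visit sets the bit */
    assert (mark_bit (bitmap, 40) == 1);   /* a second visit sees it set */
    return 0;
  }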
1602 : :
1603 : : /* Return true if P has been marked, false otherwise.
1604 : : P must have been allocated by the GC allocator; it mustn't point to
1605 : : static objects, stack variables, or memory allocated with malloc. */
1606 : :
1607 : : bool
1608 : 4487115135 : ggc_marked_p (const void *p)
1609 : : {
1610 : 4487115135 : page_entry *entry;
1611 : 4487115135 : unsigned bit, word;
1612 : 4487115135 : unsigned long mask;
1613 : :
1614 : : /* Look up the page on which the object is alloced. If the object
1615 : : wasn't allocated by the collector, we'll probably die. */
1616 : 4487115135 : entry = lookup_page_table_entry (p);
1617 : 4487115135 : gcc_assert (entry);
1618 : :
1619 : : /* Calculate the index of the object on the page; this is its bit
1620 : : position in the in_use_p bitmap. */
1621 : 4487115135 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1622 : 4487115135 : word = bit / HOST_BITS_PER_LONG;
1623 : 4487115135 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1624 : :
1625 : 4487115135 : return (entry->in_use_p[word] & mask) != 0;
1626 : : }
1627 : :
1628 : : /* Return the size of the gc-able object P. */
1629 : :
1630 : : size_t
1631 : 459381882 : ggc_get_size (const void *p)
1632 : : {
1633 : 459381882 : page_entry *pe = lookup_page_table_entry (p);
1634 : 459381882 : return OBJECT_SIZE (pe->order);
1635 : : }
1636 : :
1637 : : /* Release the memory for object P. */
1638 : :
1639 : : void
1640 : 2118690545 : ggc_free (void *p)
1641 : : {
1642 : 2118690545 : if (in_gc)
1643 : : return;
1644 : :
1645 : 2096237380 : page_entry *pe = lookup_page_table_entry (p);
1646 : 2096237380 : size_t order = pe->order;
1647 : 2096237380 : size_t size = OBJECT_SIZE (order);
1648 : :
1649 : 2096237380 : if (GATHER_STATISTICS)
1650 : : ggc_free_overhead (p);
1651 : :
1652 : 2096237380 : if (GGC_DEBUG_LEVEL >= 3)
1653 : : fprintf (G.debug_file,
1654 : : "Freeing object, actual size="
1655 : : HOST_SIZE_T_PRINT_UNSIGNED ", at %p on %p\n",
1656 : : (fmt_size_t) size, p, (void *) pe);
1657 : :
1658 : : #ifdef ENABLE_GC_CHECKING
1659 : : /* Poison the data, to indicate the data is garbage. */
1660 : 2096237380 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1661 : 2096237380 : memset (p, 0xa5, size);
1662 : : #endif
1663 : : /* Let valgrind know the object is free. */
1664 : 2096237380 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1665 : :
1666 : : #ifdef ENABLE_GC_ALWAYS_COLLECT
1667 : : /* In the completely-anal-checking mode, we do *not* immediately free
1668 : : the data, but instead verify that the data is *actually* not
1669 : : reachable the next time we collect. */
1670 : : {
1671 : : struct free_object *fo = XNEW (struct free_object);
1672 : : fo->object = p;
1673 : : fo->next = G.free_object_list;
1674 : : G.free_object_list = fo;
1675 : : }
1676 : : #else
1677 : 2096237380 : {
1678 : 2096237380 : unsigned int bit_offset, word, bit;
1679 : :
1680 : 2096237380 : G.allocated -= size;
1681 : :
1682 : : /* Mark the object not-in-use. */
1683 : 2096237380 : bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1684 : 2096237380 : word = bit_offset / HOST_BITS_PER_LONG;
1685 : 2096237380 : bit = bit_offset % HOST_BITS_PER_LONG;
1686 : 2096237380 : pe->in_use_p[word] &= ~(1UL << bit);
1687 : :
1688 : 2096237380 : if (pe->num_free_objects++ == 0)
1689 : : {
1690 : 225379280 : page_entry *p, *q;
1691 : :
1692 : : /* If the page is completely full, then it's supposed to
1693 : : be after all pages that aren't. Since we've freed one
1694 : : object from a page that was full, we need to move the
1695 : : page to the head of the list.
1696 : :
1697 : : PE is the node we want to move. Q is the previous node
1698 : : and P is the next node in the list. */
1699 : 225379280 : q = pe->prev;
1700 : 225379280 : if (q && q->num_free_objects == 0)
1701 : : {
1702 : 164259335 : p = pe->next;
1703 : :
1704 : 164259335 : q->next = p;
1705 : :
1706 : : /* If PE was at the end of the list, then Q becomes the
1707 : : new end of the list. If PE was not the end of the
1708 : : list, then we need to update the PREV field for P. */
1709 : 164259335 : if (!p)
1710 : 103656196 : G.page_tails[order] = q;
1711 : : else
1712 : 60603139 : p->prev = q;
1713 : :
1714 : : /* Move PE to the head of the list. */
1715 : 164259335 : pe->next = G.pages[order];
1716 : 164259335 : pe->prev = NULL;
1717 : 164259335 : G.pages[order]->prev = pe;
1718 : 164259335 : G.pages[order] = pe;
1719 : : }
1720 : :
1721 : : /* Reset the hint bit to point to the only free object. */
1722 : 225379280 : pe->next_bit_hint = bit_offset;
1723 : : }
1724 : : }
1725 : : #endif
1726 : : }
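The splice performed when a previously full page regains a free object is the usual doubly-linked-list move-to-head. Below is a self-contained sketch under assumed names (struct node and move_to_head are stand-ins, not types or functions from this file):

  #include <assert.h>
  #include <stddef.h>

  struct node { struct node *prev, *next; int id; };

  /* Move PE to the front of the list rooted at *HEAD; *TAIL tracks the
     last node, much as G.page_tails does above.  */
  static void
  move_to_head (struct node **head, struct node **tail, struct node *pe)
  {
    struct node *q = pe->prev, *p = pe->next;
    if (!q)
      return;                       /* already at the head */
    q->next = p;
    if (!p)
      *tail = q;                    /* PE was last, so Q becomes the tail */
    else
      p->prev = q;
    pe->next = *head;
    pe->prev = NULL;
    (*head)->prev = pe;
    *head = pe;
  }

  int main (void)
  {
    struct node a = { NULL, NULL, 0 }, b = { NULL, NULL, 1 }, c = { NULL, NULL, 2 };
    a.next = &b; b.prev = &a; b.next = &c; c.prev = &b;
    struct node *head = &a, *tail = &c;

    move_to_head (&head, &tail, &c);
    assert (head == &c && tail == &b && head->next == &a);
    return 0;
  }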
1727 : :
1728 : : /* Subroutine of init_ggc which computes the pair of numbers used to
1729 : : perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1730 : :
1731 : : This algorithm is taken from Granlund and Montgomery's paper
1732 : : "Division by Invariant Integers using Multiplication"
1733 : : (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1734 : : constants). */
1735 : :
1736 : : static void
1737 : 23690268 : compute_inverse (unsigned order)
1738 : : {
1739 : 23690268 : size_t size, inv;
1740 : 23690268 : unsigned int e;
1741 : :
1742 : 23690268 : size = OBJECT_SIZE (order);
1743 : 23690268 : e = 0;
1744 : 613126698 : while (size % 2 == 0)
1745 : : {
1746 : 589436430 : e++;
1747 : 589436430 : size >>= 1;
1748 : : }
1749 : :
1750 : : inv = size;
1751 : 49918779 : while (inv * size != 1)
1752 : 26228511 : inv = inv * (2 - inv*size);
1753 : :
1754 : 23690268 : DIV_MULT (order) = inv;
1755 : 23690268 : DIV_SHIFT (order) = e;
1756 : 23690268 : }
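A worked example may help: for an object size of 24 bytes the odd part is 3 and the shift is 3, and the Newton-style iteration above converges to the multiplicative inverse of 3 modulo 2^N in a handful of steps. The sketch below reproduces that computation and checks one exact division; it only illustrates how a (DIV_MULT, DIV_SHIFT)-style pair can be consumed and does not claim to mirror the exact form of OFFSET_TO_BIT.

  #include <assert.h>
  #include <stddef.h>

  int main (void)
  {
    size_t size = 24;          /* hypothetical object size for some order */
    unsigned e = 0;
    while (size % 2 == 0)      /* strip the power-of-two part: e = 3 */
      {
        e++;
        size >>= 1;            /* size is now the odd part, 3 */
      }

    size_t inv = size;
    while (inv * size != 1)    /* each step doubles the correct low bits */
      inv = inv * (2 - inv * size);

    /* Exact division of a multiple of 24: shift out the even part, then
       multiply by the inverse of the odd part (all modulo 2^N).  */
    size_t offset = 72;
    assert (((offset >> e) * inv) == offset / 24);
    return 0;
  }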
1757 : :
1758 : : /* Initialize the ggc-mmap allocator. */
1759 : : void
1760 : 283157 : init_ggc (void)
1761 : : {
1762 : 283157 : static bool init_p = false;
1763 : 283157 : unsigned order;
1764 : :
1765 : 283157 : if (init_p)
1766 : : return;
1767 : 282027 : init_p = true;
1768 : :
1769 : 282027 : G.pagesize = getpagesize ();
1770 : 282027 : G.lg_pagesize = exact_log2 (G.pagesize);
1771 : :
1772 : : #ifdef HAVE_MMAP_DEV_ZERO
1773 : : G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1774 : : if (G.dev_zero_fd == -1)
1775 : : internal_error ("open /dev/zero: %m");
1776 : : #endif
1777 : :
1778 : : #if 0
1779 : : G.debug_file = fopen ("ggc-mmap.debug", "w");
1780 : : #else
1781 : 282027 : G.debug_file = stdout;
1782 : : #endif
1783 : :
1784 : : #ifdef USING_MMAP
1785 : : /* StunOS has an amazing off-by-one error for the first mmap allocation
1786 : : after fiddling with RLIMIT_STACK. The result, as hard as it is to
1787 : : believe, is an unaligned page allocation, which would cause us to
1788 : : hork badly if we tried to use it. */
1789 : 282027 : {
1790 : 282027 : char *p = alloc_anon (NULL, G.pagesize, true);
1791 : 282027 : struct page_entry *e;
1792 : 282027 : if ((uintptr_t)p & (G.pagesize - 1))
1793 : : {
1794 : : /* How losing. Discard this one and try another. If we still
1795 : : can't get something useful, give up. */
1796 : :
1797 : 0 : p = alloc_anon (NULL, G.pagesize, true);
1798 : 0 : gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1799 : : }
1800 : :
1801 : : /* We have a good page, might as well hold onto it... */
1802 : 282027 : e = XCNEW (struct page_entry);
1803 : 282027 : e->bytes = G.pagesize;
1804 : 282027 : e->page = p;
1805 : 282027 : e->next = G.free_pages;
1806 : 282027 : G.free_pages = e;
1807 : : }
1808 : : #endif
1809 : :
1810 : : /* Initialize the object size table. */
1811 : 18331755 : for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1812 : 18049728 : object_size_table[order] = (size_t) 1 << order;
1813 : 5922567 : for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1814 : : {
1815 : 5640540 : size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1816 : :
1817 : : /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1818 : : so that we're sure of getting aligned memory. */
1819 : 5640540 : s = ROUND_UP (s, MAX_ALIGNMENT);
1820 : 5640540 : object_size_table[order] = s;
1821 : : }
1822 : :
1823 : : /* Initialize the objects-per-page and inverse tables. */
1824 : 23972295 : for (order = 0; order < NUM_ORDERS; ++order)
1825 : : {
1826 : 23690268 : objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1827 : 23690268 : if (objects_per_page_table[order] == 0)
1828 : 14383377 : objects_per_page_table[order] = 1;
1829 : 23690268 : compute_inverse (order);
1830 : : }
1831 : :
1832 : : /* Reset the size_lookup array to put appropriately sized objects in
1833 : : the special orders. All objects bigger than the previous power
1834 : : of two, but no greater than the special size, should go in the
1835 : : new order. */
1836 : 5922567 : for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1837 : : {
1838 : 5640540 : int o;
1839 : 5640540 : int i;
1840 : :
1841 : 5640540 : i = OBJECT_SIZE (order);
1842 : 5640540 : if (i >= NUM_SIZE_LOOKUP)
1843 : 0 : continue;
1844 : :
1845 : 111682692 : for (o = size_lookup[i]; o == size_lookup [i]; --i)
1846 : 106042152 : size_lookup[i] = order;
1847 : : }
1848 : :
1849 : 282027 : G.depth_in_use = 0;
1850 : 282027 : G.depth_max = 10;
1851 : 282027 : G.depth = XNEWVEC (unsigned int, G.depth_max);
1852 : :
1853 : 282027 : G.by_depth_in_use = 0;
1854 : 282027 : G.by_depth_max = INITIAL_PTE_COUNT;
1855 : 282027 : G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1856 : 282027 : G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1857 : :
1858 : : /* Allocate space for the depth 0 finalizers. */
1859 : 282027 : G.finalizers.safe_push (vNULL);
1860 : 282027 : G.vec_finalizers.safe_push (vNULL);
1861 : 282027 : gcc_assert (G.finalizers.length() == 1);
1862 : : }
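The size_lookup reset loop is easiest to follow with concrete numbers. The sketch below uses a hypothetical miniature table in which power-of-two orders 0..7 cover 1..128 bytes and a made-up special order 12 holds 76-byte objects; none of these constants come from the real order tables.

  #include <assert.h>

  int main (void)
  {
    unsigned char size_lookup[129];

    /* Default: round every request up to the next power of two.  */
    for (int i = 0; i <= 128; i++)
      {
        unsigned char o = 0;
        while ((1u << o) < (unsigned) i)
          o++;
        size_lookup[i] = o;
      }

    /* The redirect loop from init_ggc: every size that used to round up
       to 128 bytes but fits in the 76-byte special order now maps to it.  */
    for (int i = 76, o = size_lookup[76]; o == size_lookup[i]; --i)
      size_lookup[i] = 12;

    assert (size_lookup[64] == 6);    /* still the 64-byte order */
    assert (size_lookup[65] == 12);   /* redirected to the special order */
    assert (size_lookup[76] == 12);
    assert (size_lookup[77] == 7);    /* too big for 76 bytes: 128-byte order */
    return 0;
  }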
1863 : :
1864 : : /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1865 : : reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1866 : :
1867 : : static void
1868 : 0 : ggc_recalculate_in_use_p (page_entry *p)
1869 : : {
1870 : 0 : unsigned int i;
1871 : 0 : size_t num_objects;
1872 : :
1873 : : /* Because the past-the-end bit in in_use_p is always set, we
1874 : : pretend there is one additional object. */
1875 : 0 : num_objects = OBJECTS_IN_PAGE (p) + 1;
1876 : :
1877 : : /* Reset the free object count. */
1878 : 0 : p->num_free_objects = num_objects;
1879 : :
1880 : : /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1881 : 0 : for (i = 0;
1882 : 0 : i < CEIL (BITMAP_SIZE (num_objects),
1883 : : sizeof (*p->in_use_p));
1884 : : ++i)
1885 : : {
1886 : 0 : unsigned long j;
1887 : :
1888 : : /* Something is in use if it is marked, or if it was in use in a
1889 : : context further down the context stack. */
1890 : 0 : p->in_use_p[i] |= save_in_use_p (p)[i];
1891 : :
1892 : : /* Decrement the free object count for every object allocated. */
1893 : 0 : for (j = p->in_use_p[i]; j; j >>= 1)
1894 : 0 : p->num_free_objects -= (j & 1);
1895 : : }
1896 : :
1897 : 0 : gcc_assert (p->num_free_objects < num_objects);
1898 : 0 : }
1899 : :
1900 : : /* Unmark all objects. */
1901 : :
1902 : : static void
1903 : 697459 : clear_marks (void)
1904 : : {
1905 : 697459 : unsigned order;
1906 : :
1907 : 57889097 : for (order = 2; order < NUM_ORDERS; order++)
1908 : : {
1909 : 57191638 : page_entry *p;
1910 : :
1911 : 1734680879 : for (p = G.pages[order]; p != NULL; p = p->next)
1912 : : {
1913 : 1677489241 : size_t num_objects = OBJECTS_IN_PAGE (p);
1914 : 1677489241 : size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1915 : :
1916 : : /* The data should be page-aligned. */
1917 : 1677489241 : gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1918 : :
1919 : : /* Pages that aren't in the topmost context are not collected;
1920 : : nevertheless, we need their in-use bit vectors to store GC
1921 : : marks. So, back them up first. */
1922 : 1677489241 : if (p->context_depth < G.context_depth)
1923 : : {
1924 : 0 : if (! save_in_use_p (p))
1925 : 0 : save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
1926 : 0 : memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
1927 : : }
1928 : :
1929 : : /* Reset the number of free objects and clear the
1930 : : in-use bits. These will be adjusted by mark_obj. */
1931 : 1677489241 : p->num_free_objects = num_objects;
1932 : 1677489241 : memset (p->in_use_p, 0, bitmap_size);
1933 : :
1934 : : /* Make sure the one-past-the-end bit is always set. */
1935 : 1677489241 : p->in_use_p[num_objects / HOST_BITS_PER_LONG]
1936 : 1677489241 : = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
1937 : : }
1938 : : }
1939 : 697459 : }
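The one-past-the-end bit that clear_marks re-establishes acts as a sentinel: a scan of in_use_p for a set bit always terminates, even on a page with no live objects. A small sketch, with NUM_OBJECTS chosen arbitrarily:

  #include <assert.h>
  #include <limits.h>
  #include <string.h>

  #define BITS_PER_LONG (CHAR_BIT * sizeof (unsigned long))
  #define NUM_OBJECTS 70   /* arbitrary number of objects on a page */

  int main (void)
  {
    /* The bitmap is sized for one extra bit past the last object.  */
    unsigned long in_use[(NUM_OBJECTS + 1 + BITS_PER_LONG - 1) / BITS_PER_LONG];

    memset (in_use, 0, sizeof (in_use));
    in_use[NUM_OBJECTS / BITS_PER_LONG]
      |= 1UL << (NUM_OBJECTS % BITS_PER_LONG);   /* the sentinel bit */

    /* A scan for the next set bit terminates even on an all-free page.  */
    unsigned bit = 0;
    while (!(in_use[bit / BITS_PER_LONG] & (1UL << (bit % BITS_PER_LONG))))
      bit++;
    assert (bit == NUM_OBJECTS);
    return 0;
  }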
1940 : :
1941 : : /* Check if any blocks with a registered finalizer have become unmarked. If so
1942 : : run the finalizer and unregister it because the block is about to be freed.
1943 : : Note that no garantee is made about what order finalizers will run in so
1944 : : touching other objects in gc memory is extremely unwise. */
1945 : :
1946 : : static void
1947 : 697112 : ggc_handle_finalizers ()
1948 : : {
1949 : 697112 : unsigned dlen = G.finalizers.length();
1950 : 1394224 : for (unsigned d = G.context_depth; d < dlen; ++d)
1951 : : {
1952 : 697112 : vec<finalizer> &v = G.finalizers[d];
1953 : 697112 : unsigned length = v.length ();
1954 : 135707387 : for (unsigned int i = 0; i < length;)
1955 : : {
1956 : 135010275 : finalizer &f = v[i];
1957 : 135010275 : if (!ggc_marked_p (f.addr ()))
1958 : : {
1959 : 22594538 : f.call ();
1960 : 22594538 : v.unordered_remove (i);
1961 : 22594538 : length--;
1962 : : }
1963 : : else
1964 : 112415737 : i++;
1965 : : }
1966 : : }
1967 : :
1968 : 1394224 : gcc_assert (dlen == G.vec_finalizers.length());
1969 : 1394224 : for (unsigned d = G.context_depth; d < dlen; ++d)
1970 : : {
1971 : 697112 : vec<vec_finalizer> &vv = G.vec_finalizers[d];
1972 : 697112 : unsigned length = vv.length ();
1973 : 697112 : for (unsigned int i = 0; i < length;)
1974 : : {
1975 : 0 : vec_finalizer &f = vv[i];
1976 : 0 : if (!ggc_marked_p (f.addr ()))
1977 : : {
1978 : 0 : f.call ();
1979 : 0 : vv.unordered_remove (i);
1980 : 0 : length--;
1981 : : }
1982 : : else
1983 : 0 : i++;
1984 : : }
1985 : : }
1986 : 697112 : }
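The loops above remove elements with unordered_remove, which drops the last element into the vacated slot; that is why the index is not advanced and length is decremented instead. A plain-array sketch of the same idiom (GCC's vec<> template is not reproduced here):

  #include <assert.h>

  int main (void)
  {
    int v[5] = { 1, 2, 3, 4, 5 };
    unsigned length = 5;

    for (unsigned i = 0; i < length;)
      if (v[i] % 2 == 0)             /* stands in for "not marked" */
        {
          v[i] = v[length - 1];      /* unordered remove: last element
                                        drops into the vacated slot */
          length--;                  /* slot i is re-examined next pass */
        }
      else
        i++;

    assert (length == 3);            /* 1, 5, 3 remain; order not preserved */
    assert (v[0] == 1 && v[1] == 5 && v[2] == 3);
    return 0;
  }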
1987 : :
1988 : : /* Free all empty pages. Partially empty pages need no attention
1989 : : because the `mark' bit doubles as an `unused' bit. */
1990 : :
1991 : : static void
1992 : 714284 : sweep_pages (void)
1993 : : {
1994 : 714284 : unsigned order;
1995 : :
1996 : 59285572 : for (order = 2; order < NUM_ORDERS; order++)
1997 : : {
1998 : : /* The last page-entry to consider, regardless of entries
1999 : : placed at the end of the list. */
2000 : 58571288 : page_entry * const last = G.page_tails[order];
2001 : :
2002 : 58571288 : size_t num_objects;
2003 : 58571288 : size_t live_objects;
2004 : 58571288 : page_entry *p, *previous;
2005 : 58571288 : int done;
2006 : :
2007 : 58571288 : p = G.pages[order];
2008 : 58571288 : if (p == NULL)
2009 : 36142931 : continue;
2010 : :
2011 : : previous = NULL;
2012 : 1689415344 : do
2013 : : {
2014 : 1689415344 : page_entry *next = p->next;
2015 : :
2016 : : /* Loop until all entries have been examined. */
2017 : 1689415344 : done = (p == last);
2018 : :
2019 : 1689415344 : num_objects = OBJECTS_IN_PAGE (p);
2020 : :
2021 : : /* Add all live objects on this page to the count of
2022 : : allocated memory. */
2023 : 1689415344 : live_objects = num_objects - p->num_free_objects;
2024 : :
2025 : 1689415344 : G.allocated += OBJECT_SIZE (order) * live_objects;
2026 : :
2027 : : /* Only objects on pages in the topmost context should get
2028 : : collected. */
2029 : 1689415344 : if (p->context_depth < G.context_depth)
2030 : : ;
2031 : :
2032 : : /* Remove the page if it's empty. */
2033 : 1689415344 : else if (live_objects == 0)
2034 : : {
2035 : : /* If P was the first page in the list, then NEXT
2036 : : becomes the new first page in the list; otherwise
2037 : : splice P out of the forward pointers. */
2038 : 25548969 : if (! previous)
2039 : 4016902 : G.pages[order] = next;
2040 : : else
2041 : 21532067 : previous->next = next;
2042 : :
2043 : : /* Splice P out of the back pointers too. */
2044 : 25548969 : if (next)
2045 : 25301540 : next->prev = previous;
2046 : :
2047 : : /* Are we removing the last element? */
2048 : 25548969 : if (p == G.page_tails[order])
2049 : 247429 : G.page_tails[order] = previous;
2050 : 25548969 : free_page (p);
2051 : 25548969 : p = previous;
2052 : : }
2053 : :
2054 : : /* If the page is full, move it to the end. */
2055 : 1663866375 : else if (p->num_free_objects == 0)
2056 : : {
2057 : : /* Don't move it if it's already at the end. */
2058 : 1091421558 : if (p != G.page_tails[order])
2059 : : {
2060 : : /* Move p to the end of the list. */
2061 : 1089274357 : p->next = NULL;
2062 : 1089274357 : p->prev = G.page_tails[order];
2063 : 1089274357 : G.page_tails[order]->next = p;
2064 : :
2065 : : /* Update the tail pointer... */
2066 : 1089274357 : G.page_tails[order] = p;
2067 : :
2068 : : /* ... and the head pointer, if necessary. */
2069 : 1089274357 : if (! previous)
2070 : 39776244 : G.pages[order] = next;
2071 : : else
2072 : 1049498113 : previous->next = next;
2073 : :
2074 : : /* And update the backpointer in NEXT if necessary. */
2075 : 1089274357 : if (next)
2076 : 1089274357 : next->prev = previous;
2077 : :
2078 : : p = previous;
2079 : : }
2080 : : }
2081 : :
2082 : : /* If we've fallen through to here, it's a page in the
2083 : : topmost context that is neither full nor empty. Such a
2084 : : page must precede pages at lesser context depth in the
2085 : : list, so move it to the head. */
2086 : 572444817 : else if (p != G.pages[order])
2087 : : {
2088 : 553647546 : previous->next = p->next;
2089 : :
2090 : : /* Update the backchain in the next node if it exists. */
2091 : 553647546 : if (p->next)
2092 : 549724404 : p->next->prev = previous;
2093 : :
2094 : : /* Move P to the head of the list. */
2095 : 553647546 : p->next = G.pages[order];
2096 : 553647546 : p->prev = NULL;
2097 : 553647546 : G.pages[order]->prev = p;
2098 : :
2099 : : /* Update the head pointer. */
2100 : 553647546 : G.pages[order] = p;
2101 : :
2102 : : /* Are we moving the last element? */
2103 : 553647546 : if (G.page_tails[order] == p)
2104 : 3923142 : G.page_tails[order] = previous;
2105 : : p = previous;
2106 : : }
2107 : :
2108 : 1689415344 : previous = p;
2109 : 1689415344 : p = next;
2110 : : }
2111 : 1689415344 : while (! done);
2112 : :
2113 : : /* Now, restore the in_use_p vectors for any pages from contexts
2114 : : other than the current one. */
2115 : 1686294732 : for (p = G.pages[order]; p; p = p->next)
2116 : 1663866375 : if (p->context_depth != G.context_depth)
2117 : 0 : ggc_recalculate_in_use_p (p);
2118 : : }
2119 : 714284 : }
2120 : :
2121 : : #ifdef ENABLE_GC_CHECKING
2122 : : /* Clobber all free objects. */
2123 : :
2124 : : static void
2125 : 697459 : poison_pages (void)
2126 : : {
2127 : 697459 : unsigned order;
2128 : :
2129 : 57889097 : for (order = 2; order < NUM_ORDERS; order++)
2130 : : {
2131 : 57191638 : size_t size = OBJECT_SIZE (order);
2132 : 57191638 : page_entry *p;
2133 : :
2134 : 1734680879 : for (p = G.pages[order]; p != NULL; p = p->next)
2135 : : {
2136 : 1677489241 : size_t num_objects;
2137 : 1677489241 : size_t i;
2138 : :
2139 : 1677489241 : if (p->context_depth != G.context_depth)
2140 : : /* Since we don't do any collection for pages in pushed
2141 : : contexts, there's no need to do any poisoning. And
2142 : : besides, the IN_USE_P array isn't valid until we pop
2143 : : contexts. */
2144 : 0 : continue;
2145 : :
2146 : 1677489241 : num_objects = OBJECTS_IN_PAGE (p);
2147 : 88167374259 : for (i = 0; i < num_objects; i++)
2148 : : {
2149 : 86489885018 : size_t word, bit;
2150 : 86489885018 : word = i / HOST_BITS_PER_LONG;
2151 : 86489885018 : bit = i % HOST_BITS_PER_LONG;
2152 : 86489885018 : if (((p->in_use_p[word] >> bit) & 1) == 0)
2153 : : {
2154 : 16752302103 : char *object = p->page + i * size;
2155 : :
2156 : : /* Keep poison-by-write when we expect to use Valgrind,
2157 : : so the exact same memory semantics is kept, in case
2158 : : there are memory errors. We override this request
2159 : : below. */
2160 : : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2161 : 16752302103 : size));
2162 : 16752302103 : memset (object, 0xa5, size);
2163 : :
2164 : : /* Drop the handle to avoid handle leak. */
2165 : 16752302103 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2166 : : }
2167 : : }
2168 : : }
2169 : : }
2170 : 697459 : }
2171 : : #else
2172 : : #define poison_pages()
2173 : : #endif
2174 : :
2175 : : #ifdef ENABLE_GC_ALWAYS_COLLECT
2176 : : /* Validate that the reportedly free objects actually are. */
2177 : :
2178 : : static void
2179 : : validate_free_objects (void)
2180 : : {
2181 : : struct free_object *f, *next, *still_free = NULL;
2182 : :
2183 : : for (f = G.free_object_list; f ; f = next)
2184 : : {
2185 : : page_entry *pe = lookup_page_table_entry (f->object);
2186 : : size_t bit, word;
2187 : :
2188 : : bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2189 : : word = bit / HOST_BITS_PER_LONG;
2190 : : bit = bit % HOST_BITS_PER_LONG;
2191 : : next = f->next;
2192 : :
2193 : : /* Make certain it isn't visible from any root. Notice that we
2194 : : do this check before sweep_pages merges save_in_use_p. */
2195 : : gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2196 : :
2197 : : /* If the object comes from an outer context, then retain the
2198 : : free_object entry, so that we can verify that the address
2199 : : isn't live on the stack in some outer context. */
2200 : : if (pe->context_depth != G.context_depth)
2201 : : {
2202 : : f->next = still_free;
2203 : : still_free = f;
2204 : : }
2205 : : else
2206 : : free (f);
2207 : : }
2208 : :
2209 : : G.free_object_list = still_free;
2210 : : }
2211 : : #else
2212 : : #define validate_free_objects()
2213 : : #endif
2214 : :
2215 : : /* Top level mark-and-sweep routine. */
2216 : :
2217 : : void
2218 : 517262109 : ggc_collect (enum ggc_collect mode)
2219 : : {
2220 : : /* Avoid frequent unnecessary work by skipping collection if the
2221 : : total allocations haven't expanded much since the last
2222 : : collection. */
2223 : 517262109 : float allocated_last_gc =
2224 : 517262109 : MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
2225 : :
2226 : : /* It is also a good time to bring the memory block pool back within its limits. */
2227 : 517262109 : memory_block_pool::trim ();
2228 : :
2229 : 517262109 : float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
2230 : 517262109 : if (mode == GGC_COLLECT_HEURISTIC
2231 : 517262065 : && G.allocated < allocated_last_gc + min_expand)
2232 : : return;
2233 : :
2234 : 697112 : timevar_push (TV_GC);
2235 : 697112 : if (GGC_DEBUG_LEVEL >= 2)
2236 : : fprintf (G.debug_file, "BEGIN COLLECTING\n");
2237 : :
2238 : : /* Zero the total allocated bytes. This will be recalculated in the
2239 : : sweep phase. */
2240 : 697112 : size_t allocated = G.allocated;
2241 : 697112 : G.allocated = 0;
2242 : :
2243 : : /* Release the pages we freed the last time we collected, but didn't
2244 : : reuse in the interim. */
2245 : 697112 : release_pages ();
2246 : :
2247 : : /* Output this later so we do not interfere with release_pages. */
2248 : 697112 : if (!quiet_flag)
2249 : 0 : fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
2250 : :
2251 : : /* Indicate that we've seen collections at this context depth. */
2252 : 697112 : G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2253 : :
2254 : 697112 : invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2255 : :
2256 : 697112 : in_gc = true;
2257 : 697112 : clear_marks ();
2258 : 697112 : ggc_mark_roots ();
2259 : 697112 : ggc_handle_finalizers ();
2260 : :
2261 : 697112 : if (GATHER_STATISTICS)
2262 : : ggc_prune_overhead_list ();
2263 : :
2264 : 697112 : poison_pages ();
2265 : 697112 : validate_free_objects ();
2266 : 697112 : sweep_pages ();
2267 : :
2268 : 697112 : in_gc = false;
2269 : 697112 : G.allocated_last_gc = G.allocated;
2270 : :
2271 : 697112 : invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2272 : :
2273 : 697112 : timevar_pop (TV_GC);
2274 : :
2275 : 697112 : if (!quiet_flag)
2276 : 0 : fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
2277 : : if (GGC_DEBUG_LEVEL >= 2)
2278 : : fprintf (G.debug_file, "END COLLECTING\n");
2279 : : }
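To make the heuristic concrete: with a hypothetical minimum heap size of 4096 KB and a hypothetical minimum expansion of 30 percent, a heuristic collection is skipped until roughly 5.2 MB is live. The sketch below redoes that arithmetic in plain integers; the code above uses float, and the real defaults for param_ggc_min_heapsize and param_ggc_min_expand are defined elsewhere.

  #include <assert.h>
  #include <stddef.h>

  int main (void)
  {
    const size_t ONE_K = 1024;
    size_t allocated_last_gc = 0;       /* nothing collected yet */
    size_t min_heapsize_kb = 4096;      /* hypothetical parameter value */
    size_t min_expand_percent = 30;     /* hypothetical parameter value */

    /* Floor the baseline at the minimum heap size, then require growth of
       at least min_expand_percent before a heuristic collection runs.  */
    size_t baseline = allocated_last_gc > min_heapsize_kb * ONE_K
                      ? allocated_last_gc : min_heapsize_kb * ONE_K;
    size_t min_expand = baseline * min_expand_percent / 100;
    assert (min_expand == 1258291);     /* 30% of 4 MB, truncated */

    size_t allocated = 5 * 1024 * 1024; /* 5 MB currently live */
    assert (allocated < baseline + min_expand);   /* below ~5.2 MB: skip */
    return 0;
  }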
2280 : :
2281 : : /* Return free pages to the system. */
2282 : :
2283 : : void
2284 : 17172 : ggc_trim ()
2285 : : {
2286 : 17172 : timevar_push (TV_GC);
2287 : 17172 : G.allocated = 0;
2288 : 17172 : sweep_pages ();
2289 : 17172 : release_pages ();
2290 : 17172 : if (!quiet_flag)
2291 : 0 : fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
2292 : 0 : SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
2293 : 17172 : timevar_pop (TV_GC);
2294 : 17172 : }
2295 : :
2296 : : /* Assume that all GGC memory is reachable and grow the limits for the next
2297 : : collection. With checking, trigger GGC so -Q compilation outputs how much
2298 : : memory really is reachable. */
2299 : :
2300 : : void
2301 : 282830 : ggc_grow (void)
2302 : : {
2303 : 282830 : if (!flag_checking)
2304 : 0 : G.allocated_last_gc = MAX (G.allocated_last_gc,
2305 : : G.allocated);
2306 : : else
2307 : 282830 : ggc_collect ();
2308 : 282830 : if (!quiet_flag)
2309 : 0 : fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
2310 : 282830 : }
2311 : :
2312 : : void
2313 : 0 : ggc_print_statistics (void)
2314 : : {
2315 : 0 : struct ggc_statistics stats;
2316 : 0 : unsigned int i;
2317 : 0 : size_t total_overhead = 0;
2318 : :
2319 : : /* Clear the statistics. */
2320 : 0 : memset (&stats, 0, sizeof (stats));
2321 : :
2322 : : /* Make sure collection will really occur. */
2323 : 0 : G.allocated_last_gc = 0;
2324 : :
2325 : : /* Collect and print the statistics common across collectors. */
2326 : 0 : ggc_print_common_statistics (stderr, &stats);
2327 : :
2328 : : /* Release free pages so that we will not count the bytes allocated
2329 : : there as part of the total allocated memory. */
2330 : 0 : release_pages ();
2331 : :
2332 : : /* Collect some information about the various sizes of
2333 : : allocation. */
2334 : 0 : fprintf (stderr,
2335 : : "Memory still allocated at the end of the compilation process\n");
2336 : 0 : fprintf (stderr, "%-8s %10s %10s %10s\n",
2337 : : "Size", "Allocated", "Used", "Overhead");
2338 : 0 : for (i = 0; i < NUM_ORDERS; ++i)
2339 : : {
2340 : 0 : page_entry *p;
2341 : 0 : size_t allocated;
2342 : 0 : size_t in_use;
2343 : 0 : size_t overhead;
2344 : :
2345 : : /* Skip empty entries. */
2346 : 0 : if (!G.pages[i])
2347 : 0 : continue;
2348 : :
2349 : : overhead = allocated = in_use = 0;
2350 : :
2351 : : /* Figure out the total number of bytes allocated for objects of
2352 : : this size, and how many of them are actually in use. Also figure
2353 : : out how much memory the page table is using. */
2354 : 0 : for (p = G.pages[i]; p; p = p->next)
2355 : : {
2356 : 0 : allocated += p->bytes;
2357 : 0 : in_use +=
2358 : 0 : (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2359 : :
2360 : 0 : overhead += (sizeof (page_entry) - sizeof (long)
2361 : 0 : + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2362 : : }
2363 : 0 : fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2364 : : PRsa (10) "\n",
2365 : 0 : (uint64_t)OBJECT_SIZE (i),
2366 : 0 : SIZE_AMOUNT (allocated),
2367 : 0 : SIZE_AMOUNT (in_use),
2368 : 0 : SIZE_AMOUNT (overhead));
2369 : 0 : total_overhead += overhead;
2370 : : }
2371 : 0 : fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2372 : : "Total",
2373 : 0 : SIZE_AMOUNT (G.bytes_mapped),
2374 : 0 : SIZE_AMOUNT (G.allocated),
2375 : 0 : SIZE_AMOUNT (total_overhead));
2376 : :
2377 : 0 : if (GATHER_STATISTICS)
2378 : : {
2379 : : fprintf (stderr, "\nTotal allocations and overheads during "
2380 : : "the compilation process\n");
2381 : :
2382 : : fprintf (stderr, "Total Overhead: "
2383 : : PRsa (9) "\n",
2384 : : SIZE_AMOUNT (G.stats.total_overhead));
2385 : : fprintf (stderr, "Total Allocated: "
2386 : : PRsa (9) "\n",
2387 : : SIZE_AMOUNT (G.stats.total_allocated));
2388 : :
2389 : : fprintf (stderr, "Total Overhead under 32B: "
2390 : : PRsa (9) "\n",
2391 : : SIZE_AMOUNT (G.stats.total_overhead_under32));
2392 : : fprintf (stderr, "Total Allocated under 32B: "
2393 : : PRsa (9) "\n",
2394 : : SIZE_AMOUNT (G.stats.total_allocated_under32));
2395 : : fprintf (stderr, "Total Overhead under 64B: "
2396 : : PRsa (9) "\n",
2397 : : SIZE_AMOUNT (G.stats.total_overhead_under64));
2398 : : fprintf (stderr, "Total Allocated under 64B: "
2399 : : PRsa (9) "\n",
2400 : : SIZE_AMOUNT (G.stats.total_allocated_under64));
2401 : : fprintf (stderr, "Total Overhead under 128B: "
2402 : : PRsa (9) "\n",
2403 : : SIZE_AMOUNT (G.stats.total_overhead_under128));
2404 : : fprintf (stderr, "Total Allocated under 128B: "
2405 : : PRsa (9) "\n",
2406 : : SIZE_AMOUNT (G.stats.total_allocated_under128));
2407 : :
2408 : : for (i = 0; i < NUM_ORDERS; i++)
2409 : : if (G.stats.total_allocated_per_order[i])
2410 : : {
2411 : : fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
2412 : : PRsa (9) "\n",
2413 : : (uint64_t)OBJECT_SIZE (i),
2414 : : SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2415 : : fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
2416 : : PRsa (9) "\n",
2417 : : (uint64_t)OBJECT_SIZE (i),
2418 : : SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2419 : : }
2420 : : }
2421 : 0 : }
2422 : :
2423 : : struct ggc_pch_ondisk
2424 : : {
2425 : : unsigned totals[NUM_ORDERS];
2426 : : };
2427 : :
2428 : : struct ggc_pch_data
2429 : : {
2430 : : struct ggc_pch_ondisk d;
2431 : : uintptr_t base[NUM_ORDERS];
2432 : : size_t written[NUM_ORDERS];
2433 : : };
2434 : :
2435 : : struct ggc_pch_data *
2436 : 460 : init_ggc_pch (void)
2437 : : {
2438 : 460 : return XCNEW (struct ggc_pch_data);
2439 : : }
2440 : :
2441 : : void
2442 : 24103939 : ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2443 : : size_t size)
2444 : : {
2445 : 24103939 : unsigned order;
2446 : :
2447 : 24103939 : if (size < NUM_SIZE_LOOKUP)
2448 : 24025915 : order = size_lookup[size];
2449 : : else
2450 : : {
2451 : : order = 10;
2452 : 114113 : while (size > OBJECT_SIZE (order))
2453 : 36089 : order++;
2454 : : }
2455 : :
2456 : 24103939 : d->d.totals[order]++;
2457 : 24103939 : }
2458 : :
2459 : : size_t
2460 : 460 : ggc_pch_total_size (struct ggc_pch_data *d)
2461 : : {
2462 : 460 : size_t a = 0;
2463 : 460 : unsigned i;
2464 : :
2465 : 39100 : for (i = 0; i < NUM_ORDERS; i++)
2466 : 38640 : a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2467 : 460 : return a;
2468 : : }
2469 : :
2470 : : void
2471 : 460 : ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2472 : : {
2473 : 460 : uintptr_t a = (uintptr_t) base;
2474 : 460 : unsigned i;
2475 : :
2476 : 39100 : for (i = 0; i < NUM_ORDERS; i++)
2477 : : {
2478 : 38640 : d->base[i] = a;
2479 : 38640 : a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2480 : : }
2481 : 460 : }
2482 : :
2483 : :
2484 : : char *
2485 : 24103939 : ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2486 : : size_t size)
2487 : : {
2488 : 24103939 : unsigned order;
2489 : 24103939 : char *result;
2490 : :
2491 : 24103939 : if (size < NUM_SIZE_LOOKUP)
2492 : 24025915 : order = size_lookup[size];
2493 : : else
2494 : : {
2495 : : order = 10;
2496 : 114113 : while (size > OBJECT_SIZE (order))
2497 : 36089 : order++;
2498 : : }
2499 : :
2500 : 24103939 : result = (char *) d->base[order];
2501 : 24103939 : d->base[order] += OBJECT_SIZE (order);
2502 : 24103939 : return result;
2503 : : }
2504 : :
2505 : : void
2506 : 460 : ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2507 : : FILE *f ATTRIBUTE_UNUSED)
2508 : : {
2509 : : /* Nothing to do. */
2510 : 460 : }
2511 : :
2512 : : void
2513 : 24103939 : ggc_pch_write_object (struct ggc_pch_data *d,
2514 : : FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2515 : : size_t size)
2516 : : {
2517 : 24103939 : unsigned order;
2518 : 24103939 : static const char emptyBytes[256] = { 0 };
2519 : :
2520 : 24103939 : if (size < NUM_SIZE_LOOKUP)
2521 : 24025915 : order = size_lookup[size];
2522 : : else
2523 : : {
2524 : : order = 10;
2525 : 114113 : while (size > OBJECT_SIZE (order))
2526 : 36089 : order++;
2527 : : }
2528 : :
2529 : 24103939 : if (fwrite (x, size, 1, f) != 1)
2530 : 0 : fatal_error (input_location, "cannot write PCH file: %m");
2531 : :
2532 : : /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2533 : : object out to OBJECT_SIZE(order). This happens for strings. */
2534 : :
2535 : 24103939 : if (size != OBJECT_SIZE (order))
2536 : : {
2537 : 1887790 : unsigned padding = OBJECT_SIZE (order) - size;
2538 : :
2539 : : /* To speed small writes, we use a nulled-out array that's larger
2540 : : than most padding requests as the source for our null bytes. This
2541 : : permits us to do the padding with fwrite() rather than fseek(), and
2542 : : limits the chance the OS may try to flush any outstanding writes. */
2543 : 1887790 : if (padding <= sizeof (emptyBytes))
2544 : : {
2545 : 1859401 : if (fwrite (emptyBytes, 1, padding, f) != padding)
2546 : 0 : fatal_error (input_location, "cannot write PCH file");
2547 : : }
2548 : : else
2549 : : {
2550 : : /* Larger than our buffer? Just default to fseek. */
2551 : 28389 : if (fseek (f, padding, SEEK_CUR) != 0)
2552 : 0 : fatal_error (input_location, "cannot write PCH file");
2553 : : }
2554 : : }
2555 : :
2556 : 24103939 : d->written[order]++;
2557 : 24103939 : if (d->written[order] == d->d.totals[order]
2558 : 24103939 : && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2559 : : G.pagesize),
2560 : : SEEK_CUR) != 0)
2561 : 0 : fatal_error (input_location, "cannot write PCH file: %m");
2562 : 24103939 : }
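The padding decision above writes zeros for small pads and seeks for large ones. A stand-alone sketch of the same choice against a temporary file, using a hypothetical 512-byte object size and a 500-byte payload (neither value comes from the real order tables):

  #include <assert.h>
  #include <stdio.h>
  #include <string.h>

  int main (void)
  {
    static const char emptyBytes[256] = { 0 };
    const size_t object_size = 512;     /* hypothetical OBJECT_SIZE (order) */
    const size_t size = 500;            /* bytes of real object data */
    size_t padding = object_size - size;

    FILE *f = tmpfile ();
    assert (f != NULL);

    char buf[500];
    memset (buf, 'x', sizeof (buf));
    assert (fwrite (buf, size, 1, f) == 1);

    if (padding <= sizeof (emptyBytes))            /* small pad: write zeros */
      assert (fwrite (emptyBytes, 1, padding, f) == padding);
    else                                           /* large pad: seek past it */
      assert (fseek (f, (long) padding, SEEK_CUR) == 0);

    assert ((size_t) ftell (f) == object_size);    /* next slot starts aligned */
    fclose (f);
    return 0;
  }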
2563 : :
2564 : : void
2565 : 460 : ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2566 : : {
2567 : 460 : if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2568 : 0 : fatal_error (input_location, "cannot write PCH file: %m");
2569 : 460 : free (d);
2570 : 460 : }
2571 : :
2572 : : /* Move the PCH PTE entries, just added to the end of by_depth, to the
2573 : : front. */
2574 : :
2575 : : static void
2576 : 347 : move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2577 : : {
2578 : : /* First, we swap the new entries to the front of the varrays. */
2579 : 347 : page_entry **new_by_depth;
2580 : 347 : unsigned long **new_save_in_use;
2581 : :
2582 : 347 : new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2583 : 347 : new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2584 : :
2585 : 347 : memcpy (&new_by_depth[0],
2586 : 347 : &G.by_depth[count_old_page_tables],
2587 : 347 : count_new_page_tables * sizeof (void *));
2588 : 347 : memcpy (&new_by_depth[count_new_page_tables],
2589 : : &G.by_depth[0],
2590 : : count_old_page_tables * sizeof (void *));
2591 : 347 : memcpy (&new_save_in_use[0],
2592 : 347 : &G.save_in_use[count_old_page_tables],
2593 : : count_new_page_tables * sizeof (void *));
2594 : 347 : memcpy (&new_save_in_use[count_new_page_tables],
2595 : : &G.save_in_use[0],
2596 : : count_old_page_tables * sizeof (void *));
2597 : :
2598 : 347 : free (G.by_depth);
2599 : 347 : free (G.save_in_use);
2600 : :
2601 : 347 : G.by_depth = new_by_depth;
2602 : 347 : G.save_in_use = new_save_in_use;
2603 : :
2604 : : /* Now update all the index_by_depth fields. */
2605 : 184887 : for (unsigned i = G.by_depth_in_use; i--;)
2606 : : {
2607 : 184540 : page_entry *p = G.by_depth[i];
2608 : 184540 : p->index_by_depth = i;
2609 : : }
2610 : :
2611 : : /* And last, we update the depth pointers in G.depth. The first
2612 : : entry is already 0, and context 0 entries always start at index
2613 : : 0, so there is nothing to update in the first slot. We need a
2614 : : second slot only if we have old ptes; if we do, they start
2615 : : at index count_new_page_tables. */
2616 : 347 : if (count_old_page_tables)
2617 : 347 : push_depth (count_new_page_tables);
2618 : 347 : }
2619 : :
2620 : : void
2621 : 347 : ggc_pch_read (FILE *f, void *addr)
2622 : : {
2623 : 347 : struct ggc_pch_ondisk d;
2624 : 347 : unsigned i;
2625 : 347 : char *offs = (char *) addr;
2626 : 347 : unsigned long count_old_page_tables;
2627 : 347 : unsigned long count_new_page_tables;
2628 : :
2629 : 347 : count_old_page_tables = G.by_depth_in_use;
2630 : :
2631 : 347 : if (fread (&d, sizeof (d), 1, f) != 1)
2632 : 0 : fatal_error (input_location, "cannot read PCH file: %m");
2633 : :
2634 : : /* We've just read in a PCH file. So, every object that used to be
2635 : : allocated is now free. */
2636 : 347 : clear_marks ();
2637 : : #ifdef ENABLE_GC_CHECKING
2638 : 347 : poison_pages ();
2639 : : #endif
2640 : : /* Since we free all the allocated objects, the free list becomes
2641 : : useless. Validate it now, which will also clear it. */
2642 : 347 : validate_free_objects ();
2643 : :
2644 : : /* No object read from a PCH file should ever be freed. So, set the
2645 : : context depth to 1, and set the depth of all the currently-allocated
2646 : : pages to be 1 too. PCH pages will have depth 0. */
2647 : 347 : gcc_assert (!G.context_depth);
2648 : 347 : G.context_depth = 1;
2649 : : /* Allocate space for the depth 1 finalizers. */
2650 : 347 : G.finalizers.safe_push (vNULL);
2651 : 347 : G.vec_finalizers.safe_push (vNULL);
2652 : 347 : gcc_assert (G.finalizers.length() == 2);
2653 : 29495 : for (i = 0; i < NUM_ORDERS; i++)
2654 : : {
2655 : 29148 : page_entry *p;
2656 : 204125 : for (p = G.pages[i]; p != NULL; p = p->next)
2657 : 174977 : p->context_depth = G.context_depth;
2658 : : }
2659 : :
2660 : : /* Allocate the appropriate page-table entries for the pages read from
2661 : : the PCH file. */
2662 : :
2663 : 29495 : for (i = 0; i < NUM_ORDERS; i++)
2664 : : {
2665 : 29148 : struct page_entry *entry;
2666 : 29148 : char *pte;
2667 : 29148 : size_t bytes;
2668 : 29148 : size_t num_objs;
2669 : 29148 : size_t j;
2670 : :
2671 : 29148 : if (d.totals[i] == 0)
2672 : 19585 : continue;
2673 : :
2674 : 9563 : bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2675 : 9563 : num_objs = bytes / OBJECT_SIZE (i);
2676 : 9563 : entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2677 : : - sizeof (long)
2678 : : + BITMAP_SIZE (num_objs + 1)));
2679 : 9563 : entry->bytes = bytes;
2680 : 9563 : entry->page = offs;
2681 : 9563 : entry->context_depth = 0;
2682 : 9563 : offs += bytes;
2683 : 9563 : entry->num_free_objects = 0;
2684 : 9563 : entry->order = i;
2685 : :
2686 : 9563 : for (j = 0;
2687 : 178667 : j + HOST_BITS_PER_LONG <= num_objs + 1;
2688 : 169104 : j += HOST_BITS_PER_LONG)
2689 : 169104 : entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2690 : 228575 : for (; j < num_objs + 1; j++)
2691 : 219012 : entry->in_use_p[j / HOST_BITS_PER_LONG]
2692 : 219012 : |= 1L << (j % HOST_BITS_PER_LONG);
2693 : :
2694 : 233862 : for (pte = entry->page;
2695 : 243425 : pte < entry->page + entry->bytes;
2696 : 233862 : pte += G.pagesize)
2697 : 233862 : set_page_table_entry (pte, entry);
2698 : :
2699 : 9563 : if (G.page_tails[i] != NULL)
2700 : 8861 : G.page_tails[i]->next = entry;
2701 : : else
2702 : 702 : G.pages[i] = entry;
2703 : 9563 : G.page_tails[i] = entry;
2704 : :
2705 : : /* We start off by just adding all the new information to the
2706 : : end of the varrays; later, we will move the new information
2707 : : to the front of the varrays, as the PCH page tables are at
2708 : : context 0. */
2709 : 9563 : push_by_depth (entry, 0);
2710 : : }
2711 : :
2712 : : /* Now, we update the various data structures that speed page table
2713 : : handling. */
2714 : 347 : count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2715 : :
2716 : 347 : move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2717 : :
2718 : : /* Update the statistics. */
2719 : 347 : G.allocated = G.allocated_last_gc = offs - (char *)addr;
2720 : 347 : }
|