1 : /* "Bag-of-pages" garbage collector for the GNU compiler.
2 : Copyright (C) 1999-2026 Free Software Foundation, Inc.
3 :
4 : This file is part of GCC.
5 :
6 : GCC is free software; you can redistribute it and/or modify it under
7 : the terms of the GNU General Public License as published by the Free
8 : Software Foundation; either version 3, or (at your option) any later
9 : version.
10 :
11 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 : for more details.
15 :
16 : You should have received a copy of the GNU General Public License
17 : along with GCC; see the file COPYING3. If not see
18 : <http://www.gnu.org/licenses/>. */
19 :
20 : #include "config.h"
21 : #include "system.h"
22 : #include "coretypes.h"
23 : #include "backend.h"
24 : #include "alias.h"
25 : #include "tree.h"
26 : #include "rtl.h"
27 : #include "memmodel.h"
28 : #include "tm_p.h"
29 : #include "diagnostic-core.h"
30 : #include "flags.h"
31 : #include "ggc-internal.h"
32 : #include "timevar.h"
33 : #include "cgraph.h"
34 : #include "cfgloop.h"
35 : #include "plugin.h"
36 :
37 : /* Prefer MAP_ANON(YMOUS) to /dev/zero, since we don't need to keep a
38 : file open. Prefer either to valloc. */
39 : #ifdef HAVE_MMAP_ANON
40 : # undef HAVE_MMAP_DEV_ZERO
41 : # define USING_MMAP
42 : #endif
43 :
44 : #ifdef HAVE_MMAP_DEV_ZERO
45 : # define USING_MMAP
46 : #endif
47 :
48 : #ifndef USING_MMAP
49 : #define USING_MALLOC_PAGE_GROUPS
50 : #endif
51 :
52 : #if defined(HAVE_MADVISE) && HAVE_DECL_MADVISE && defined(MADV_DONTNEED) \
53 : && defined(USING_MMAP)
54 : # define USING_MADVISE
55 : #endif
56 :
57 : /* Strategy:
58 :
59 : This garbage-collecting allocator allocates objects on one of a set
60 : of pages. Each page can allocate objects of a single size only;
61 : available sizes are powers of two starting at eight bytes. The size
62 : of an allocation request is rounded up to the next power of two
63 : (`order'), and satisfied from the appropriate page.
64 :
65 : Each page is recorded in a page-entry, which also maintains an
66 : in-use bitmap of object positions on the page. This allows the
67 : allocation state of a particular object to be flipped without
68 : touching the page itself.
69 :
70 : Each page-entry also has a context depth, which is used to track
71 : pushing and popping of allocation contexts. Only objects allocated
72 : in the current (highest-numbered) context may be collected.
73 :
74 : Page entries are arranged in an array of singly-linked lists. The
75 : array is indexed by the allocation size, in bits, of the pages on
76 : it; i.e. all pages on a list allocate objects of the same size.
77 : Pages are ordered on the list such that all non-full pages precede
78 : all full pages, with non-full pages arranged in order of decreasing
79 : context depth.
80 :
81 : Empty pages (of all orders) are kept on a single page cache list,
82 : and are considered first when new pages are required; they are
83 : deallocated at the start of the next collection if they haven't
84 : been recycled by then. */
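
/* An illustrative sketch, not part of this file: ignoring the extra
   non-power-of-two orders defined further down, the size-to-order
   mapping described above is a clamped ceil(log2).  The helper name
   and the minimum order are assumptions drawn from the tables below. */
static unsigned
example_size_to_order (size_t size)
{
  unsigned order = 3;			/* Smallest object is 8 bytes.  */
  while (((size_t) 1 << order) < size)	/* Round up to a power of two.  */
    ++order;
  return order;
}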
85 :
86 : /* Define GGC_DEBUG_LEVEL to print debugging information.
87 : 0: No debugging output.
88 : 1: GC statistics only.
89 : 2: Page-entry allocations/deallocations as well.
90 : 3: Object allocations as well.
91 : 4: Object marks as well. */
92 : #define GGC_DEBUG_LEVEL (0)
93 :
94 : /* A two-level tree is used to look up the page-entry for a given
95 : pointer. Two chunks of the pointer's bits are extracted to index
96 : the first and second levels of the tree, as follows:
97 :
98 :                              HOST_PAGE_SIZE_BITS
99 :                      32           |      |
100 : msb +----------------+----+------+------+ lsb
101 :                         |     |
102 :                   PAGE_L1_BITS
103 :                               |
104 :                        PAGE_L2_BITS
105 :
106 : The bottommost HOST_PAGE_SIZE_BITS are ignored, since page-entry
107 : pages are aligned on system page boundaries. The next most
108 : significant PAGE_L2_BITS and PAGE_L1_BITS are the second and first
109 : index values in the lookup table, respectively.
110 :
111 : For 32-bit architectures and the settings below, there are no
112 : leftover bits. For architectures with wider pointers, the lookup
113 : tree points to a chained list of page tables, which must be scanned
114 : to find the correct one. */
115 :
116 : #define PAGE_L1_BITS (8)
117 : #define PAGE_L2_BITS (32 - PAGE_L1_BITS - G.lg_pagesize)
118 : #define PAGE_L1_SIZE ((uintptr_t) 1 << PAGE_L1_BITS)
119 : #define PAGE_L2_SIZE ((uintptr_t) 1 << PAGE_L2_BITS)
120 :
121 : #define LOOKUP_L1(p) \
122 : (((uintptr_t) (p) >> (32 - PAGE_L1_BITS)) & ((1 << PAGE_L1_BITS) - 1))
123 :
124 : #define LOOKUP_L2(p) \
125 : (((uintptr_t) (p) >> G.lg_pagesize) & ((1 << PAGE_L2_BITS) - 1))
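
/* A worked example, assuming 4K pages so that G.lg_pagesize == 12 and
   PAGE_L2_BITS == 32 - 8 - 12 == 12: for p == 0x0804a123,
   LOOKUP_L1 (p) == (0x0804a123 >> 24) & 0xff == 0x08 and
   LOOKUP_L2 (p) == (0x0804a123 >> 12) & 0xfff == 0x04a; the low
   12 bits (0x123) are the offset within the page and are never
   consulted.  */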
126 :
127 : /* The number of objects per allocation page, for objects on a page of
128 : the indicated ORDER. */
129 : #define OBJECTS_PER_PAGE(ORDER) objects_per_page_table[ORDER]
130 :
131 : /* The number of objects in P. */
132 : #define OBJECTS_IN_PAGE(P) ((P)->bytes / OBJECT_SIZE ((P)->order))
133 :
134 : /* The size of an object on a page of the indicated ORDER. */
135 : #define OBJECT_SIZE(ORDER) object_size_table[ORDER]
136 :
137 : /* For speed, we avoid doing a general integer divide to locate the
138 : offset in the allocation bitmap, by precalculating numbers M, S
139 : such that (O * M) >> S == O / Z (modulo 2^32), for any offset O
140 : within the page which is evenly divisible by the object size Z. */
141 : #define DIV_MULT(ORDER) inverse_table[ORDER].mult
142 : #define DIV_SHIFT(ORDER) inverse_table[ORDER].shift
143 : #define OFFSET_TO_BIT(OFFSET, ORDER) \
144 : (((OFFSET) * DIV_MULT (ORDER)) >> DIV_SHIFT (ORDER))
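
/* An illustrative sketch of how such a pair (M, S) can be found,
   along the lines of compute_inverse declared further down; the
   function name here is ours, not the file's.  Factor Z into
   odd * 2^e: the shift disposes of 2^e, and the multiplier is the
   multiplicative inverse of the odd part modulo the word size,
   obtained by Newton iteration (any odd z satisfies z*z == 1 mod 8,
   so z seeds the iteration with three correct bits, and each step
   doubles them).  */
static void
example_compute_inverse (size_t z, size_t *mult, unsigned int *shift)
{
  unsigned int e = 0;
  while ((z & 1) == 0)		/* Move factors of two into the shift.  */
    {
      ++e;
      z >>= 1;
    }
  size_t inv = z;		/* Seed: correct modulo 8.  */
  while (inv * z != 1)		/* Unsigned wraparound supplies the mod.  */
    inv = inv * (2 - inv * z);
  *mult = inv;
  *shift = e;
}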
145 :
146 : /* We use this structure to determine the alignment required for
147 : allocations. For power-of-two sized allocations, that's not a
148 : problem, but it does matter for odd-sized allocations.
149 : We do not care about alignment for floating-point types. */
150 :
151 : struct max_alignment {
152 : char c;
153 : union {
154 : int64_t i;
155 : void *p;
156 : } u;
157 : };
158 :
159 : /* The biggest alignment required. */
160 :
161 : #define MAX_ALIGNMENT (offsetof (struct max_alignment, u))
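
/* For example, on a typical LP64 host (an assumption, not something
   this file guarantees) both int64_t and void * require 8-byte
   alignment, so the leading char forces u to offset 8 and
   MAX_ALIGNMENT evaluates to 8.  */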
162 :
163 :
164 : /* The number of extra orders, not corresponding to power-of-two sized
165 : objects. */
166 :
167 : #define NUM_EXTRA_ORDERS ARRAY_SIZE (extra_order_size_table)
168 :
169 : #define RTL_SIZE(NSLOTS) \
170 : (RTX_HDR_SIZE + (NSLOTS) * sizeof (rtunion))
171 :
172 : #define TREE_EXP_SIZE(OPS) \
173 : (sizeof (struct tree_exp) + ((OPS) - 1) * sizeof (tree))
174 :
175 : /* The Ith entry is the maximum size of an object to be stored in the
176 : Ith extra order. Adding a new entry to this array is the *only*
177 : thing you need to do to add a new special allocation size. */
178 :
179 : static const size_t extra_order_size_table[] = {
180 : /* Extra orders for small non-power-of-two multiples of MAX_ALIGNMENT.
181 : There are a lot of structures with these sizes and explicitly
182 : listing them risks orders being dropped because they changed size. */
183 : MAX_ALIGNMENT * 3,
184 : MAX_ALIGNMENT * 5,
185 : MAX_ALIGNMENT * 6,
186 : MAX_ALIGNMENT * 7,
187 : MAX_ALIGNMENT * 9,
188 : MAX_ALIGNMENT * 10,
189 : MAX_ALIGNMENT * 11,
190 : MAX_ALIGNMENT * 12,
191 : MAX_ALIGNMENT * 13,
192 : MAX_ALIGNMENT * 14,
193 : MAX_ALIGNMENT * 15,
194 : sizeof (struct tree_decl_non_common),
195 : sizeof (struct tree_field_decl),
196 : sizeof (struct tree_parm_decl),
197 : sizeof (struct tree_var_decl),
198 : sizeof (struct tree_type_non_common),
199 : sizeof (struct function),
200 : sizeof (struct basic_block_def),
201 : sizeof (struct cgraph_node),
202 : sizeof (class loop),
203 : };
204 :
205 : /* The total number of orders. */
206 :
207 : #define NUM_ORDERS (HOST_BITS_PER_PTR + NUM_EXTRA_ORDERS)
208 :
209 : /* Compute the smallest nonnegative number which when added to X gives
210 : a multiple of F. */
211 :
212 : #define ROUND_UP_VALUE(x, f) ((f) - 1 - ((f) - 1 + (x)) % (f))
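
/* For example, ROUND_UP_VALUE (10, 8) == 8 - 1 - ((8 - 1 + 10) % 8)
   == 7 - 1 == 6, and indeed 10 + 6 == 16 is a multiple of 8.  */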
213 :
214 : /* Round X to next multiple of the page size */
215 :
216 : #define PAGE_ALIGN(x) ROUND_UP ((x), G.pagesize)
217 :
218 : /* The Ith entry is the number of objects on a page of order I. */
219 :
220 : static unsigned objects_per_page_table[NUM_ORDERS];
221 :
222 : /* The Ith entry is the size of an object on a page of order I. */
223 :
224 : static size_t object_size_table[NUM_ORDERS];
225 :
226 : /* The Ith entry is a pair of numbers (mult, shift) such that
227 : ((k * mult) >> shift) mod 2^32 == (k / OBJECT_SIZE(I)) mod 2^32,
228 : for all k evenly divisible by OBJECT_SIZE(I). */
229 :
230 : static struct
231 : {
232 : size_t mult;
233 : unsigned int shift;
234 : }
235 : inverse_table[NUM_ORDERS];
236 :
237 : struct free_list;
238 :
239 : /* A page_entry records the status of an allocation page. This
240 : structure is dynamically sized to fit the bitmap in_use_p. */
241 : struct page_entry
242 : {
243 : /* The next page-entry with objects of the same size, or NULL if
244 : this is the last page-entry. */
245 : struct page_entry *next;
246 :
247 : /* The previous page-entry with objects of the same size, or NULL if
248 : this is the first page-entry. The PREV pointer exists solely to
249 : keep the cost of ggc_free manageable. */
250 : struct page_entry *prev;
251 :
252 : /* The number of bytes allocated. (This will always be a multiple
253 : of the host system page size.) */
254 : size_t bytes;
255 :
256 : /* Free list of this page size. */
257 : struct free_list *free_list;
258 :
259 : /* The address at which the memory is allocated. */
260 : char *page;
261 :
262 : #ifdef USING_MALLOC_PAGE_GROUPS
263 : /* Back pointer to the page group this page came from. */
264 : struct page_group *group;
265 : #endif
266 :
267 : /* This is the index in the by_depth array where this page entry
268 : can be found. */
269 : unsigned long index_by_depth;
270 :
271 : /* Context depth of this page. */
272 : unsigned short context_depth;
273 :
274 : /* The number of free objects remaining on this page. */
275 : unsigned short num_free_objects;
276 :
277 : /* A likely candidate for the bit position of a free object for the
278 : next allocation from this page. */
279 : unsigned short next_bit_hint;
280 :
281 : /* The lg of the size of objects allocated from this page. */
282 : unsigned char order;
283 :
284 : /* Discarded page? */
285 : bool discarded;
286 :
287 : /* A bit vector indicating whether or not objects are in use. The
288 : Nth bit is one if the Nth object on this page is allocated. This
289 : array is dynamically sized. */
290 : unsigned long in_use_p[1];
291 : };
292 :
293 : #ifdef USING_MALLOC_PAGE_GROUPS
294 : /* A page_group describes a large allocation from malloc, from which
295 : we parcel out aligned pages. */
296 : struct page_group
297 : {
298 : /* A linked list of all extant page groups. */
299 : struct page_group *next;
300 :
301 : /* The address we received from malloc. */
302 : char *allocation;
303 :
304 : /* The size of the block. */
305 : size_t alloc_size;
306 :
307 : /* A bitmask of pages in use. */
308 : unsigned int in_use;
309 : };
310 : #endif
311 :
312 : #if HOST_BITS_PER_PTR <= 32
313 :
314 : /* On 32-bit hosts, we use a two level page table, as pictured above. */
315 : typedef page_entry **page_table[PAGE_L1_SIZE];
316 :
317 : #else
318 :
319 : /* On 64-bit hosts, we use the same two level page tables plus a linked
320 : list that disambiguates the top 32 bits. There will almost always be
321 : exactly one entry in the list. */
322 : typedef struct page_table_chain
323 : {
324 : struct page_table_chain *next;
325 : size_t high_bits;
326 : page_entry **table[PAGE_L1_SIZE];
327 : } *page_table;
328 :
329 : #endif
330 :
331 : class finalizer
332 : {
333 : public:
334 71626396 : finalizer (void *addr, void (*f)(void *)) : m_addr (addr), m_function (f) {}
335 :
336 147380472 : void *addr () const { return m_addr; }
337 :
338 25240916 : void call () const { m_function (m_addr); }
339 :
340 : private:
341 : void *m_addr;
342 : void (*m_function)(void *);
343 : };
344 :
345 : class vec_finalizer
346 : {
347 : public:
348 0 : vec_finalizer (uintptr_t addr, void (*f)(void *), size_t s, size_t n) :
349 0 : m_addr (addr), m_function (f), m_object_size (s), m_n_objects (n) {}
350 :
351 0 : void call () const
352 : {
353 0 : for (size_t i = 0; i < m_n_objects; i++)
354 0 : m_function (reinterpret_cast<void *> (m_addr + (i * m_object_size)));
355 0 : }
356 :
357 0 : void *addr () const { return reinterpret_cast<void *> (m_addr); }
358 :
359 : private:
360 : uintptr_t m_addr;
361 : void (*m_function)(void *);
362 : size_t m_object_size;
363 : size_t m_n_objects;
364 : };
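
/* An illustrative sketch of how these records arise; the types and
   the wrapper call below are ours, standing in for the GTY machinery
   that normally supplies the finalizer.  When ggc_internal_alloc
   (defined later in this file) receives a non-null function F, it
   registers F against the new object, and the collector runs F
   before sweeping the object.  */
struct example_obj
{
  ~example_obj () {}		/* Stand-in for real cleanup work.  */
};
static void
example_finalize (void *p)
{
  static_cast<example_obj *> (p)->~example_obj ();
}
/* A call such as
     ggc_internal_alloc (sizeof (example_obj), example_finalize, 0, 1)
   would then arrange for exactly one finalizer invocation.  */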
365 :
366 : #ifdef ENABLE_GC_ALWAYS_COLLECT
367 : /* List of free objects to be verified as actually free on the
368 : next collection. */
369 : struct free_object
370 : {
371 : void *object;
372 : struct free_object *next;
373 : };
374 : #endif
375 :
376 : constexpr int num_free_list = 8;
377 :
378 : /* A free_list of pages of size BYTES. */
379 : struct free_list
380 : {
381 : size_t bytes;
382 : page_entry *free_pages;
383 : };
384 :
385 : /* The rest of the global variables. */
386 : static struct ggc_globals
387 : {
388 : /* The Nth element in this array is a page with objects of size 2^N.
389 : If there are any pages with free objects, they will be at the
390 : head of the list. NULL if there are no page-entries for this
391 : object size. */
392 : page_entry *pages[NUM_ORDERS];
393 :
394 : /* The Nth element in this array is the last page with objects of
395 : size 2^N. NULL if there are no page-entries for this object
396 : size. */
397 : page_entry *page_tails[NUM_ORDERS];
398 :
399 : /* Lookup table for associating allocation pages with object addresses. */
400 : page_table lookup;
401 :
402 : /* The system's page size. */
403 : size_t pagesize;
404 : size_t lg_pagesize;
405 :
406 : /* Bytes currently allocated. */
407 : size_t allocated;
408 :
409 : /* Bytes currently allocated at the end of the last collection. */
410 : size_t allocated_last_gc;
411 :
412 : /* Total amount of memory mapped. */
413 : size_t bytes_mapped;
414 :
415 : /* Bit N set if any allocations have been done at context depth N. */
416 : unsigned long context_depth_allocations;
417 :
418 : /* Bit N set if any collections have been done at context depth N. */
419 : unsigned long context_depth_collections;
420 :
421 : /* The current depth in the context stack. */
422 : unsigned short context_depth;
423 :
424 : /* A file descriptor open to /dev/zero for reading. */
425 : #if defined (HAVE_MMAP_DEV_ZERO)
426 : int dev_zero_fd;
427 : #endif
428 :
429 : /* A cache of free system pages. Entry 0 is fallback. */
430 : struct free_list free_lists[num_free_list];
431 :
432 : #ifdef USING_MALLOC_PAGE_GROUPS
433 : page_group *page_groups;
434 : #endif
435 :
436 : /* The stream used for debugging output. */
437 : FILE *debug_file;
438 :
439 : /* Current number of elements in use in depth below. */
440 : unsigned int depth_in_use;
441 :
442 : /* Maximum number of elements that can be used before resizing. */
443 : unsigned int depth_max;
444 :
445 : /* Each element of this array is an index into by_depth at which the
446 : given depth starts. It is indexed by the depth we are
447 : interested in. */
448 : unsigned int *depth;
449 :
450 : /* Current number of elements in use in by_depth below. */
451 : unsigned int by_depth_in_use;
452 :
453 : /* Maximum number of elements that can be used before resizing. */
454 : unsigned int by_depth_max;
455 :
456 : /* Each element of this array is a pointer to a page_entry; all
457 : page_entries can be found in here, ordered by increasing depth.
458 : index_by_depth in the page_entry is the index into this data
459 : structure where that page_entry can be found. This is used to
460 : speed up finding all page_entries at a particular depth. */
461 : page_entry **by_depth;
462 :
463 : /* Each element is a pointer to the saved in_use_p bits, if any,
464 : zero otherwise. We allocate them all together, to enable a
465 : better runtime data access pattern. */
466 : unsigned long **save_in_use;
467 :
468 : /* Finalizers for single objects. The first index is collection_depth. */
469 : vec<vec<finalizer> > finalizers;
470 :
471 : /* Finalizers for vectors of objects. */
472 : vec<vec<vec_finalizer> > vec_finalizers;
473 :
474 : #ifdef ENABLE_GC_ALWAYS_COLLECT
475 : /* List of free objects to be verified as actually free on the
476 : next collection. */
477 : struct free_object *free_object_list;
478 : #endif
479 :
480 : struct
481 : {
482 : /* Total GC-allocated memory. */
483 : unsigned long long total_allocated;
484 : /* Total overhead for GC-allocated memory. */
485 : unsigned long long total_overhead;
486 :
487 : /* Total allocations and overhead for sizes less than 32, 64 and 128.
488 : These sizes are interesting because they are typical cache line
489 : sizes. */
490 :
491 : unsigned long long total_allocated_under32;
492 : unsigned long long total_overhead_under32;
493 :
494 : unsigned long long total_allocated_under64;
495 : unsigned long long total_overhead_under64;
496 :
497 : unsigned long long total_allocated_under128;
498 : unsigned long long total_overhead_under128;
499 :
500 : /* The allocations for each of the allocation orders. */
501 : unsigned long long total_allocated_per_order[NUM_ORDERS];
502 :
503 : /* The overhead for each of the allocation orders. */
504 : unsigned long long total_overhead_per_order[NUM_ORDERS];
505 :
506 : /* Number of fallbacks. */
507 : unsigned long long fallback;
508 : } stats;
509 : } G;
510 :
511 : /* True if a gc is currently taking place. */
512 :
513 : static bool in_gc = false;
514 :
515 : /* The size in bytes required to maintain a bitmap for the objects
516 : on a page-entry. */
517 : #define BITMAP_SIZE(Num_objects) \
518 : (CEIL ((Num_objects), HOST_BITS_PER_LONG) * sizeof (long))
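
/* A worked example, assuming 4K pages and a 64-bit host: a page of
   16-byte objects holds 256 of them, and alloc_page below requests
   BITMAP_SIZE (256 + 1) == CEIL (257, 64) * sizeof (long)
   == 5 * 8 == 40 bytes; the extra bit is the one-past-the-end
   sentinel it sets.  Because the trailing in_use_p[1] member of
   page_entry already covers one long, the allocation size is
   sizeof (page_entry) - sizeof (long) + bitmap_size.  */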
519 :
520 : /* Allocate pages in chunks of this size, to throttle calls to memory
521 : allocation routines. The first page is used, the rest go onto the
522 : free list. This cannot be larger than HOST_BITS_PER_INT for the
523 : in_use bitmask for page_group. Hosts that need a different value
524 : can override this by defining GGC_QUIRE_SIZE explicitly. */
525 : #ifndef GGC_QUIRE_SIZE
526 : # ifdef USING_MMAP
527 : # define GGC_QUIRE_SIZE 512 /* 2MB for 4K pages */
528 : # else
529 : # define GGC_QUIRE_SIZE 16
530 : # endif
531 : #endif
532 :
533 : /* Initial guess as to how many page table entries we might need. */
534 : #define INITIAL_PTE_COUNT 128
535 :
536 : static page_entry *lookup_page_table_entry (const void *);
537 : static void set_page_table_entry (void *, page_entry *);
538 : #ifdef USING_MMAP
539 : static char *alloc_anon (char *, size_t, bool check);
540 : #endif
541 : #ifdef USING_MALLOC_PAGE_GROUPS
542 : static size_t page_group_index (char *, char *);
543 : static void set_page_group_in_use (page_group *, char *);
544 : static void clear_page_group_in_use (page_group *, char *);
545 : #endif
546 : static struct page_entry * alloc_page (unsigned);
547 : static void free_page (struct page_entry *);
548 : static void clear_marks (void);
549 : static void sweep_pages (void);
550 : static void ggc_recalculate_in_use_p (page_entry *);
551 : static void compute_inverse (unsigned);
552 : static inline void adjust_depth (void);
553 : static void move_ptes_to_front (int, int);
554 :
555 : void debug_print_page_list (int);
556 : static void push_depth (unsigned int);
557 : static void push_by_depth (page_entry *, unsigned long *);
558 :
559 : /* Push an entry onto G.depth. */
560 :
561 : inline static void
562 284872 : push_depth (unsigned int i)
563 : {
564 284872 : if (G.depth_in_use >= G.depth_max)
565 : {
566 0 : G.depth_max *= 2;
567 0 : G.depth = XRESIZEVEC (unsigned int, G.depth, G.depth_max);
568 : }
569 284872 : G.depth[G.depth_in_use++] = i;
570 284872 : }
571 :
572 : /* Push an entry onto G.by_depth and G.save_in_use. */
573 :
574 : inline static void
575 757850856 : push_by_depth (page_entry *p, unsigned long *s)
576 : {
577 757850856 : if (G.by_depth_in_use >= G.by_depth_max)
578 : {
579 859297 : G.by_depth_max *= 2;
580 859297 : G.by_depth = XRESIZEVEC (page_entry *, G.by_depth, G.by_depth_max);
581 859297 : G.save_in_use = XRESIZEVEC (unsigned long *, G.save_in_use,
582 : G.by_depth_max);
583 : }
584 757850856 : G.by_depth[G.by_depth_in_use] = p;
585 757850856 : G.save_in_use[G.by_depth_in_use++] = s;
586 757850856 : }
587 :
588 : #if (GCC_VERSION < 3001)
589 : #define prefetch(X) ((void) X)
590 : #else
591 : #define prefetch(X) __builtin_prefetch (X)
592 : #endif
593 :
594 : #define save_in_use_p_i(__i) \
595 : (G.save_in_use[__i])
596 : #define save_in_use_p(__p) \
597 : (save_in_use_p_i (__p->index_by_depth))
598 :
599 : /* Traverse the page table and find the entry for a page.
600 : If the object wasn't allocated by the GC, return NULL. */
601 :
602 : static inline page_entry *
603 5947276005 : safe_lookup_page_table_entry (const void *p)
604 : {
605 5947276005 : page_entry ***base;
606 5947276005 : size_t L1, L2;
607 :
608 : #if HOST_BITS_PER_PTR <= 32
609 : base = &G.lookup[0];
610 : #else
611 5947276005 : page_table table = G.lookup;
612 5947276005 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
613 7316360947 : while (1)
614 : {
615 6631818476 : if (table == NULL)
616 : return NULL;
617 6035080129 : if (table->high_bits == high_bits)
618 : break;
619 684542471 : table = table->next;
620 : }
621 5350537658 : base = &table->table[0];
622 : #endif
623 :
624 : /* Extract the level 1 and 2 indices. */
625 5350537658 : L1 = LOOKUP_L1 (p);
626 5350537658 : L2 = LOOKUP_L2 (p);
627 5350537658 : if (! base[L1])
628 : return NULL;
629 :
630 5323708758 : return base[L1][L2];
631 : }
632 :
633 : /* Traverse the page table and find the entry for a page.
634 : Die (probably) if the object wasn't allocated via GC. */
635 :
636 : static inline page_entry *
637 >27532*10^7 : lookup_page_table_entry (const void *p)
638 : {
639 >27532*10^7 : page_entry ***base;
640 >27532*10^7 : size_t L1, L2;
641 :
642 : #if HOST_BITS_PER_PTR <= 32
643 : base = &G.lookup[0];
644 : #else
645 >27532*10^7 : page_table table = G.lookup;
646 >27532*10^7 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
647 >28145*10^7 : while (table->high_bits != high_bits)
648 6132865333 : table = table->next;
649 >27532*10^7 : base = &table->table[0];
650 : #endif
651 :
652 : /* Extract the level 1 and 2 indices. */
653 >27532*10^7 : L1 = LOOKUP_L1 (p);
654 >27532*10^7 : L2 = LOOKUP_L2 (p);
655 :
656 >27532*10^7 : return base[L1][L2];
657 : }
658 :
659 : /* Set the page table entry for a page. */
660 :
661 : static void
662 789328950 : set_page_table_entry (void *p, page_entry *entry)
663 : {
664 789328950 : page_entry ***base;
665 789328950 : size_t L1, L2;
666 :
667 : #if HOST_BITS_PER_PTR <= 32
668 : base = &G.lookup[0];
669 : #else
670 789328950 : page_table table;
671 789328950 : uintptr_t high_bits = (uintptr_t) p & ~ (uintptr_t) 0xffffffff;
672 790906287 : for (table = G.lookup; table; table = table->next)
673 790619814 : if (table->high_bits == high_bits)
674 789042477 : goto found;
675 :
676 : /* Not found -- allocate a new table. */
677 286473 : table = XCNEW (struct page_table_chain);
678 286473 : table->next = G.lookup;
679 286473 : table->high_bits = high_bits;
680 286473 : G.lookup = table;
681 789328950 : found:
682 789328950 : base = &table->table[0];
683 : #endif
684 :
685 : /* Extract the level 1 and 2 indices. */
686 789328950 : L1 = LOOKUP_L1 (p);
687 789328950 : L2 = LOOKUP_L2 (p);
688 :
689 789328950 : if (base[L1] == NULL)
690 672788 : base[L1] = XCNEWVEC (page_entry *, PAGE_L2_SIZE);
691 :
692 789328950 : base[L1][L2] = entry;
693 789328950 : }
694 :
695 : /* Print the list of page-entries for object size ORDER, for debugging. */
696 :
697 : DEBUG_FUNCTION void
698 0 : debug_print_page_list (int order)
699 : {
700 0 : page_entry *p;
701 0 : printf ("Head=%p, Tail=%p:\n", (void *) G.pages[order],
702 0 : (void *) G.page_tails[order]);
703 0 : p = G.pages[order];
704 0 : while (p != NULL)
705 : {
706 0 : printf ("%p(%1d|%3d) -> ", (void *) p, p->context_depth,
707 0 : p->num_free_objects);
708 0 : p = p->next;
709 : }
710 0 : printf ("NULL\n");
711 0 : fflush (stdout);
712 0 : }
713 :
714 : #ifdef USING_MMAP
715 : /* Allocate SIZE bytes of anonymous memory, preferably near PREF
716 : (if non-null). The ifdef structure here is intended to cause a
717 : compile error unless exactly one of the HAVE_* is defined. */
718 :
719 : static inline char *
720 6843758 : alloc_anon (char *pref ATTRIBUTE_UNUSED, size_t size, bool check)
721 : {
722 : #ifdef HAVE_MMAP_ANON
723 6843758 : char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
724 : MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
725 : #endif
726 : #ifdef HAVE_MMAP_DEV_ZERO
727 : char *page = (char *) mmap (pref, size, PROT_READ | PROT_WRITE,
728 : MAP_PRIVATE, G.dev_zero_fd, 0);
729 : #endif
730 :
731 6843758 : if (page == (char *) MAP_FAILED)
732 : {
733 0 : if (!check)
734 : return NULL;
735 0 : perror ("virtual memory exhausted");
736 0 : exit (FATAL_EXIT_CODE);
737 : }
738 :
739 : /* Remember that we allocated this memory. */
740 6843758 : G.bytes_mapped += size;
741 :
742 : /* Pretend we don't have access to the allocated pages. We'll enable
743 : access to smaller pieces of the area in ggc_internal_alloc. Discard the
744 : handle to avoid a handle leak. */
745 6843758 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (page, size));
746 :
747 6843758 : return page;
748 : }
749 : #endif
750 : #ifdef USING_MALLOC_PAGE_GROUPS
751 : /* Compute the index for this page into the page group. */
752 :
753 : static inline size_t
754 : page_group_index (char *allocation, char *page)
755 : {
756 : return (size_t) (page - allocation) >> G.lg_pagesize;
757 : }
758 :
759 : /* Set and clear the in_use bit for this page in the page group. */
760 :
761 : static inline void
762 : set_page_group_in_use (page_group *group, char *page)
763 : {
764 : group->in_use |= 1 << page_group_index (group->allocation, page);
765 : }
766 :
767 : static inline void
768 : clear_page_group_in_use (page_group *group, char *page)
769 : {
770 : group->in_use &= ~(1 << page_group_index (group->allocation, page));
771 : }
772 : #endif
773 :
774 : /* Find a free list for ENTRY_SIZE. */
775 :
776 : static inline struct free_list *
777 9088385 : find_free_list (size_t entry_size)
778 : {
779 9088385 : int i;
780 12295873 : for (i = 1; i < num_free_list; i++)
781 : {
782 12174002 : if (G.free_lists[i].bytes == entry_size)
783 7721686 : return &G.free_lists[i];
784 4452316 : if (G.free_lists[i].bytes == 0)
785 : {
786 1244828 : G.free_lists[i].bytes = entry_size;
787 1244828 : return &G.free_lists[i];
788 : }
789 : }
790 : /* Fallback. Does this happen? */
791 : if (GATHER_STATISTICS)
792 : G.stats.fallback++;
793 : return &G.free_lists[0];
794 : }
795 :
796 : /* Fast lookup of free_list by order. */
797 :
798 : static struct free_list *cache_free_list[NUM_ORDERS];
799 :
800 : /* Faster way to find a free list by ORDER for BYTES. */
801 :
802 : static inline struct free_list *
803 757841196 : find_free_list_order (unsigned order, size_t bytes)
804 : {
805 757841196 : if (cache_free_list[order] == NULL)
806 8794203 : cache_free_list[order] = find_free_list (bytes);
807 757841196 : return cache_free_list[order];
808 : }
809 :
810 : /* Allocate a new page for allocating objects of size 2^ORDER,
811 : and return an entry for it. The entry is not added to the
812 : appropriate page_table list. */
813 :
814 : static inline struct page_entry *
815 757841196 : alloc_page (unsigned order)
816 : {
817 757841196 : struct page_entry *entry, *p, **pp;
818 757841196 : char *page;
819 757841196 : size_t num_objects;
820 757841196 : size_t bitmap_size;
821 757841196 : size_t page_entry_size;
822 757841196 : size_t entry_size;
823 : #ifdef USING_MALLOC_PAGE_GROUPS
824 : page_group *group;
825 : #endif
826 757841196 : struct free_list *free_list;
827 :
828 757841196 : num_objects = OBJECTS_PER_PAGE (order);
829 757841196 : bitmap_size = BITMAP_SIZE (num_objects + 1);
830 757841196 : page_entry_size = sizeof (page_entry) - sizeof (long) + bitmap_size;
831 757841196 : entry_size = num_objects * OBJECT_SIZE (order);
832 757841196 : if (entry_size < G.pagesize)
833 : entry_size = G.pagesize;
834 757841196 : entry_size = PAGE_ALIGN (entry_size);
835 :
836 757841196 : entry = NULL;
837 757841196 : page = NULL;
838 :
839 757841196 : free_list = find_free_list_order (order, entry_size);
840 :
841 : /* Check the list of free pages for one we can use. */
842 757959508 : for (pp = &free_list->free_pages, p = *pp; p; pp = &p->next, p = *pp)
843 751400272 : if (p->bytes == entry_size)
844 : break;
845 :
846 757841196 : if (p != NULL)
847 : {
848 751281960 : if (p->discarded)
849 11842913 : G.bytes_mapped += p->bytes;
850 751281960 : p->discarded = false;
851 :
852 : /* Recycle the allocated memory from this page ... */
853 751281960 : *pp = p->next;
854 751281960 : page = p->page;
855 :
856 : #ifdef USING_MALLOC_PAGE_GROUPS
857 : group = p->group;
858 : #endif
859 :
860 : /* ... and, if possible, the page entry itself. */
861 751281960 : if (p->order == order)
862 : {
863 82744815 : entry = p;
864 82744815 : memset (entry, 0, page_entry_size);
865 : }
866 : else
867 668537145 : free (p);
868 : }
869 : #ifdef USING_MMAP
870 6559236 : else if (entry_size == G.pagesize)
871 : {
872 : /* We want just one page. Allocate a bunch of them and put the
873 : extras on the freelist. (Can only do this optimization with
874 : mmap for backing store.) */
875 1647992 : struct page_entry *e, *f = free_list->free_pages;
876 1647992 : int i, entries = GGC_QUIRE_SIZE;
877 :
878 1647992 : page = alloc_anon (NULL, G.pagesize * GGC_QUIRE_SIZE, false);
879 1647992 : if (page == NULL)
880 : {
881 0 : page = alloc_anon (NULL, G.pagesize, true);
882 0 : entries = 1;
883 : }
884 :
885 : /* This loop counts down so that the chain will be in ascending
886 : memory order. */
887 843771904 : for (i = entries - 1; i >= 1; i--)
888 : {
889 842123912 : e = XCNEWVAR (struct page_entry, page_entry_size);
890 842123912 : e->order = order;
891 842123912 : e->bytes = G.pagesize;
892 842123912 : e->free_list = free_list;
893 842123912 : e->page = page + (i << G.lg_pagesize);
894 842123912 : e->next = f;
895 842123912 : f = e;
896 : }
897 :
898 1647992 : free_list->free_pages = f;
899 : }
900 : else
901 4911244 : page = alloc_anon (NULL, entry_size, true);
902 : #endif
903 : #ifdef USING_MALLOC_PAGE_GROUPS
904 : else
905 : {
906 : /* Allocate a large block of memory and serve out the aligned
907 : pages therein. This results in much less memory wastage
908 : than the traditional implementation of valloc. */
909 :
910 : char *allocation, *a, *enda;
911 : size_t alloc_size, head_slop, tail_slop;
912 : int multiple_pages = (entry_size == G.pagesize);
913 :
914 : if (multiple_pages)
915 : alloc_size = GGC_QUIRE_SIZE * G.pagesize;
916 : else
917 : alloc_size = entry_size + G.pagesize - 1;
918 : allocation = XNEWVEC (char, alloc_size);
919 :
920 : page = (char *) (((uintptr_t) allocation + G.pagesize - 1) & -G.pagesize);
921 : head_slop = page - allocation;
922 : if (multiple_pages)
923 : tail_slop = ((size_t) allocation + alloc_size) & (G.pagesize - 1);
924 : else
925 : tail_slop = alloc_size - entry_size - head_slop;
926 : enda = allocation + alloc_size - tail_slop;
927 :
928 : /* We allocated N pages, which are likely not aligned, leaving
929 : us with N-1 usable pages. We plan to place the page_group
930 : structure somewhere in the slop. */
931 : if (head_slop >= sizeof (page_group))
932 : group = (page_group *)page - 1;
933 : else
934 : {
935 : /* We magically got an aligned allocation. Too bad, we have
936 : to waste a page anyway. */
937 : if (tail_slop == 0)
938 : {
939 : enda -= G.pagesize;
940 : tail_slop += G.pagesize;
941 : }
942 : gcc_assert (tail_slop >= sizeof (page_group));
943 : group = (page_group *)enda;
944 : tail_slop -= sizeof (page_group);
945 : }
946 :
947 : /* Remember that we allocated this memory. */
948 : group->next = G.page_groups;
949 : group->allocation = allocation;
950 : group->alloc_size = alloc_size;
951 : group->in_use = 0;
952 : G.page_groups = group;
953 : G.bytes_mapped += alloc_size;
954 :
955 : /* If we allocated multiple pages, put the rest on the free list. */
956 : if (multiple_pages)
957 : {
958 : struct page_entry *e, *f = free_list->free_pages;
959 : for (a = enda - G.pagesize; a != page; a -= G.pagesize)
960 : {
961 : e = XCNEWVAR (struct page_entry, page_entry_size);
962 : e->order = order;
963 : e->bytes = G.pagesize;
964 : e->free_list = free_list;
965 : e->page = a;
966 : e->group = group;
967 : e->next = f;
968 : f = e;
969 : }
970 : free_list->free_pages = f;
971 : }
972 : }
973 : #endif
974 :
975 757841196 : if (entry == NULL)
976 675096381 : entry = XCNEWVAR (struct page_entry, page_entry_size);
977 :
978 757841196 : entry->bytes = entry_size;
979 757841196 : entry->free_list = free_list;
980 757841196 : entry->page = page;
981 757841196 : entry->context_depth = G.context_depth;
982 757841196 : entry->order = order;
983 757841196 : entry->num_free_objects = num_objects;
984 757841196 : entry->next_bit_hint = 1;
985 :
986 757841196 : G.context_depth_allocations |= (unsigned long)1 << G.context_depth;
987 :
988 : #ifdef USING_MALLOC_PAGE_GROUPS
989 : entry->group = group;
990 : set_page_group_in_use (group, page);
991 : #endif
992 :
993 : /* Set the one-past-the-end in-use bit. This acts as a sentinel as we
994 : increment the hint. */
995 757841196 : entry->in_use_p[num_objects / HOST_BITS_PER_LONG]
996 757841196 : = (unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG);
997 :
998 757841196 : set_page_table_entry (page, entry);
999 :
1000 757841196 : if (GGC_DEBUG_LEVEL >= 2)
1001 : fprintf (G.debug_file,
1002 : "Allocating page at %p, object size="
1003 : HOST_SIZE_T_PRINT_UNSIGNED ", data %p-%p\n",
1004 : (void *) entry, (fmt_size_t) OBJECT_SIZE (order),
1005 : (void *) page, (void *) (page + entry_size - 1));
1006 :
1007 757841196 : return entry;
1008 : }
1009 :
1010 : /* Adjust the size of G.depth so that no index greater than the one
1011 : used by the top of G.by_depth is used. */
1012 :
1013 : static inline void
1014 31234151 : adjust_depth (void)
1015 : {
1016 31234151 : page_entry *top;
1017 :
1018 31234151 : if (G.by_depth_in_use)
1019 : {
1020 31234151 : top = G.by_depth[G.by_depth_in_use-1];
1021 :
1022 : /* Peel back indices in depth that index into by_depth, so that
1023 : as new elements are added to by_depth, we note the indices
1024 : of those elements, if they are for new context depths. */
1025 31234151 : while (G.depth_in_use > (size_t)top->context_depth+1)
1026 0 : --G.depth_in_use;
1027 : }
1028 31234151 : }
1029 :
1030 : /* For a page that is no longer needed, put it on the free page list. */
1031 :
1032 : static void
1033 31234151 : free_page (page_entry *entry)
1034 : {
1035 31234151 : if (GGC_DEBUG_LEVEL >= 2)
1036 : fprintf (G.debug_file,
1037 : "Deallocating page at %p, data %p-%p\n", (void *) entry,
1038 : (void *) entry->page, (void *) (entry->page + entry->bytes - 1));
1039 :
1040 : /* Mark the page as inaccessible. Discard the handle to avoid a
1041 : handle leak. */
1042 31234151 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (entry->page, entry->bytes));
1043 :
1044 31234151 : set_page_table_entry (entry->page, NULL);
1045 :
1046 : #ifdef USING_MALLOC_PAGE_GROUPS
1047 : clear_page_group_in_use (entry->group, entry->page);
1048 : #endif
1049 :
1050 31234151 : if (G.by_depth_in_use > 1)
1051 : {
1052 31234151 : page_entry *top = G.by_depth[G.by_depth_in_use-1];
1053 31234151 : int i = entry->index_by_depth;
1054 :
1055 : /* We cannot free a page from a context deeper than the current
1056 : one. */
1057 31234151 : gcc_assert (entry->context_depth == top->context_depth);
1058 :
1059 : /* Put top element into freed slot. */
1060 31234151 : G.by_depth[i] = top;
1061 31234151 : G.save_in_use[i] = G.save_in_use[G.by_depth_in_use-1];
1062 31234151 : top->index_by_depth = i;
1063 : }
1064 31234151 : --G.by_depth_in_use;
1065 :
1066 31234151 : adjust_depth ();
1067 :
1068 31234151 : struct free_list *free_list = entry->free_list;
1069 31234151 : entry->next = free_list->free_pages;
1070 31234151 : free_list->free_pages = entry;
1071 31234151 : }
1072 :
1073 : /* Release the free page cache for FREE_LIST to the system. */
1074 :
1075 : static void
1076 5878192 : do_release_pages (struct free_list *free_list, size_t &n1, size_t &n2)
1077 : {
1078 5878192 : (void) n1;
1079 5878192 : (void) n2;
1080 : #ifdef USING_MADVISE
1081 5878192 : page_entry *p, *start_p;
1082 5878192 : char *start;
1083 5878192 : size_t len;
1084 5878192 : size_t mapped_len;
1085 5878192 : page_entry *next, *prev, *newprev;
1086 5878192 : size_t free_unit = (GGC_QUIRE_SIZE/2) * G.pagesize;
1087 :
1088 : /* First free larger contiguous areas to the OS.
1089 : This allows other allocators to grab these areas if needed.
1090 : This is only done on larger chunks to avoid fragmentation.
1091 : This does not always work because the free_pages list is only
1092 : approximately sorted. */
1093 :
1094 5878192 : p = free_list->free_pages;
1095 5878192 : prev = NULL;
1096 240445967 : while (p)
1097 : {
1098 234567775 : start = p->page;
1099 234567775 : start_p = p;
1100 234567775 : len = 0;
1101 234567775 : mapped_len = 0;
1102 234567775 : newprev = prev;
1103 568478274 : while (p && p->page == start + len)
1104 : {
1105 333910499 : len += p->bytes;
1106 333910499 : if (!p->discarded)
1107 47099139 : mapped_len += p->bytes;
1108 333910499 : newprev = p;
1109 333910499 : p = p->next;
1110 : }
1111 234567775 : if (len >= free_unit)
1112 : {
1113 31755317 : while (start_p != p)
1114 : {
1115 31607124 : next = start_p->next;
1116 31607124 : free (start_p);
1117 31607124 : start_p = next;
1118 : }
1119 148193 : munmap (start, len);
1120 148193 : if (prev)
1121 1940 : prev->next = p;
1122 : else
1123 146253 : free_list->free_pages = p;
1124 148193 : G.bytes_mapped -= mapped_len;
1125 148193 : n1 += len;
1126 148193 : continue;
1127 : }
1128 : prev = newprev;
1129 : }
1130 :
1131 : /* Now give back the fragmented pages to the OS, but keep the address
1132 : space to reuse it next time. */
1133 :
1134 296220688 : for (p = free_list->free_pages; p; )
1135 : {
1136 290342496 : if (p->discarded)
1137 : {
1138 286793612 : p = p->next;
1139 286793612 : continue;
1140 : }
1141 3548884 : start = p->page;
1142 3548884 : len = p->bytes;
1143 3548884 : start_p = p;
1144 3548884 : p = p->next;
1145 15509763 : while (p && p->page == start + len)
1146 : {
1147 11960879 : len += p->bytes;
1148 11960879 : p = p->next;
1149 : }
1150 : /* Give the page back to the kernel, but don't free the mapping.
1151 : This avoids fragmentation in the virtual memory map of the
1152 : process. Next time we can reuse it by just touching it. */
1153 3548884 : madvise (start, len, MADV_DONTNEED);
1154 : /* Don't count those pages as mapped, so as not to trigger the garbage
1155 : collector unnecessarily. */
1156 3548884 : G.bytes_mapped -= len;
1157 3548884 : n2 += len;
1158 19058647 : while (start_p != p)
1159 : {
1160 15509763 : start_p->discarded = true;
1161 15509763 : start_p = start_p->next;
1162 : }
1163 : }
1164 : #endif
1165 : #if defined(USING_MMAP) && !defined(USING_MADVISE)
1166 : page_entry *p, *next;
1167 : char *start;
1168 : size_t len;
1169 :
1170 : /* Gather up adjacent pages so they are unmapped together. */
1171 : p = free_list->free_pages;
1172 :
1173 : while (p)
1174 : {
1175 : start = p->page;
1176 : next = p->next;
1177 : len = p->bytes;
1178 : free (p);
1179 : p = next;
1180 :
1181 : while (p && p->page == start + len)
1182 : {
1183 : next = p->next;
1184 : len += p->bytes;
1185 : free (p);
1186 : p = next;
1187 : }
1188 :
1189 : munmap (start, len);
1190 : n1 += len;
1191 : G.bytes_mapped -= len;
1192 : }
1193 :
1194 : free_list->free_pages = NULL;
1195 : #endif
1196 : #ifdef USING_MALLOC_PAGE_GROUPS
1197 : page_entry **pp, *p;
1198 :
1199 : /* Remove all pages from free page groups from the list. */
1200 : pp = &free_list->free_pages;
1201 : while ((p = *pp) != NULL)
1202 : if (p->group->in_use == 0)
1203 : {
1204 : *pp = p->next;
1205 : free (p);
1206 : }
1207 : else
1208 : pp = &p->next;
1209 : #endif
1210 5878192 : }
1211 :
1212 : /* Release the free page cache to the system. */
1213 :
1214 : static void
1215 734774 : release_pages ()
1216 : {
1217 734774 : size_t n1 = 0;
1218 734774 : size_t n2 = 0;
1219 6612966 : for (int i = 0; i < num_free_list; i++)
1220 5878192 : do_release_pages (&G.free_lists[i], n1, n2);
1221 : #ifdef USING_MALLOC_PAGE_GROUPS
1222 : page_group **gp, *g;
1223 :
1224 : /* Remove all free page groups, and release the storage. */
1225 : gp = &G.page_groups;
1226 : while ((g = *gp) != NULL)
1227 : if (g->in_use == 0)
1228 : {
1229 : *gp = g->next;
1230 : G.bytes_mapped -= g->alloc_size;
1231 : n1 += g->alloc_size;
1232 : free (g->allocation);
1233 : }
1234 : else
1235 : gp = &g->next;
1236 : #endif
1237 734774 : if (!quiet_flag && (n1 || n2))
1238 : {
1239 0 : fprintf (stderr, " {GC");
1240 0 : if (n1)
1241 0 : fprintf (stderr, " released " PRsa (0), SIZE_AMOUNT (n1));
1242 0 : if (n2)
1243 0 : fprintf (stderr, " madv_dontneed " PRsa (0), SIZE_AMOUNT (n2));
1244 0 : fprintf (stderr, "}");
1245 : }
1246 734774 : }
1247 :
1248 : /* This table provides a fast way to determine ceil(log_2(size)) for
1249 : allocation requests. The minimum allocation size is eight bytes. */
1250 : #define NUM_SIZE_LOOKUP 512
1251 : static unsigned char size_lookup[NUM_SIZE_LOOKUP] =
1252 : {
1253 : 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4,
1254 : 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
1255 : 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1256 : 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
1257 : 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1258 : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1259 : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1260 : 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
1261 : 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1262 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1263 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1264 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1265 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1266 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1267 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1268 : 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
1269 : 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1270 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1271 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1272 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1273 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1274 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1275 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1276 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1277 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1278 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1279 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1280 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1281 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1282 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1283 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
1284 : 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9
1285 : };
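
/* A worked example: size_lookup[24] is 5 in the static table above,
   so a 24-byte request initially maps to 32-byte objects.  Note that
   initialization code outside this excerpt retargets entries covered
   by extra_order_size_table, so on a host with 8-byte MAX_ALIGNMENT
   a 24-byte request would typically land in the exact-fit
   MAX_ALIGNMENT * 3 order instead.  */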
1286 :
1287 : /* For a given size of memory requested for allocation, return the
1288 : actual size that is going to be allocated, as well as the size
1289 : order. */
1290 :
1291 : static void
1292 57144671573 : ggc_round_alloc_size_1 (size_t requested_size,
1293 : size_t *size_order,
1294 : size_t *alloced_size)
1295 : {
1296 57144671573 : size_t order, object_size;
1297 :
1298 57144671573 : if (requested_size < NUM_SIZE_LOOKUP)
1299 : {
1300 56795835489 : order = size_lookup[requested_size];
1301 56795835489 : object_size = OBJECT_SIZE (order);
1302 : }
1303 : else
1304 : {
1305 : order = 10;
1306 480030727 : while (requested_size > (object_size = OBJECT_SIZE (order)))
1307 131194643 : order++;
1308 : }
1309 :
1310 57144671573 : if (size_order)
1311 53972825276 : *size_order = order;
1312 57144671573 : if (alloced_size)
1313 57144671573 : *alloced_size = object_size;
1314 57144671573 : }
1315 :
1316 : /* For a given size of memory requested for allocation, return the
1317 : actual size that is going to be allocated. */
1318 :
1319 : size_t
1320 3171846297 : ggc_round_alloc_size (size_t requested_size)
1321 : {
1322 3171846297 : size_t size = 0;
1323 :
1324 3171846297 : ggc_round_alloc_size_1 (requested_size, NULL, &size);
1325 3171846297 : return size;
1326 : }
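
/* A worked example, hedged on which extra orders the initialization
   code (outside this excerpt) installs: with the MAX_ALIGNMENT * 3
   order present and 8-byte MAX_ALIGNMENT, ggc_round_alloc_size (24)
   returns 24; with only the power-of-two orders it would return 32.  */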
1327 :
1328 : /* Push a finalizer onto the appropriate vec. */
1329 :
1330 : static void
1331 71626396 : add_finalizer (void *result, void (*f)(void *), size_t s, size_t n)
1332 : {
1333 71626396 : if (f == NULL)
1334 : /* No finalizer. */;
1335 71626396 : else if (n == 1)
1336 : {
1337 71626396 : finalizer fin (result, f);
1338 71626396 : G.finalizers[G.context_depth].safe_push (fin);
1339 : }
1340 : else
1341 : {
1342 0 : vec_finalizer fin (reinterpret_cast<uintptr_t> (result), f, s, n);
1343 0 : G.vec_finalizers[G.context_depth].safe_push (fin);
1344 : }
1345 71626396 : }
1346 :
1347 : /* Allocate a chunk of memory of SIZE bytes. Its contents are undefined. */
1348 :
1349 : #ifdef HAVE_ATTRIBUTE_ALIAS
1350 : extern "C" void *
1351 53972825276 : ggc_internal_alloc_ (size_t size, void (*f)(void *), size_t s, size_t n
1352 : MEM_STAT_DECL)
1353 : #else
1354 : void *
1355 : ggc_internal_alloc (size_t size, void (*f)(void *), size_t s, size_t n
1356 : MEM_STAT_DECL)
1357 : #endif
1358 : {
1359 53972825276 : size_t order, word, bit, object_offset, object_size;
1360 53972825276 : struct page_entry *entry;
1361 53972825276 : void *result;
1362 :
1363 53972825276 : ggc_round_alloc_size_1 (size, &order, &object_size);
1364 :
1365 : /* If there are non-full pages for this size allocation, they are at
1366 : the head of the list. */
1367 53972825276 : entry = G.pages[order];
1368 :
1369 : /* If there is no page for this object size, or all pages in this
1370 : context are full, allocate a new page. */
1371 53972825276 : if (entry == NULL || entry->num_free_objects == 0)
1372 : {
1373 757841196 : struct page_entry *new_entry;
1374 757841196 : new_entry = alloc_page (order);
1375 :
1376 757841196 : new_entry->index_by_depth = G.by_depth_in_use;
1377 757841196 : push_by_depth (new_entry, 0);
1378 :
1379 : /* We can skip context depths; if we do, make sure we go all the
1380 : way to the new depth. */
1381 1515966914 : while (new_entry->context_depth >= G.depth_in_use)
1382 284522 : push_depth (G.by_depth_in_use-1);
1383 :
1384 : /* If this is the only entry, it's also the tail. If it is not
1385 : the only entry, then we must update the PREV pointer of the
1386 : ENTRY (G.pages[order]) to point to our new page entry. */
1387 757841196 : if (entry == NULL)
1388 8944144 : G.page_tails[order] = new_entry;
1389 : else
1390 748897052 : entry->prev = new_entry;
1391 :
1392 : /* Put new pages at the head of the page list. By definition the
1393 : entry at the head of the list always has a NULL PREV pointer. */
1394 757841196 : new_entry->next = entry;
1395 757841196 : new_entry->prev = NULL;
1396 757841196 : entry = new_entry;
1397 757841196 : G.pages[order] = new_entry;
1398 :
1399 : /* For a new page, we know the word and bit positions (in the
1400 : in_use bitmap) of the first available object -- they're zero. */
1401 757841196 : new_entry->next_bit_hint = 1;
1402 757841196 : word = 0;
1403 757841196 : bit = 0;
1404 757841196 : object_offset = 0;
1405 757841196 : }
1406 : else
1407 : {
1408 : /* First try to use the hint left from the previous allocation
1409 : to locate a clear bit in the in-use bitmap. We've made sure
1410 : that the one-past-the-end bit is always set, so if the hint
1411 : has run over, this test will fail. */
1412 53214984080 : unsigned hint = entry->next_bit_hint;
1413 53214984080 : word = hint / HOST_BITS_PER_LONG;
1414 53214984080 : bit = hint % HOST_BITS_PER_LONG;
1415 :
1416 : /* If the hint didn't work, scan the bitmap from the beginning. */
1417 53214984080 : if ((entry->in_use_p[word] >> bit) & 1)
1418 : {
1419 7439823730 : word = bit = 0;
1420 7439823730 : while (~entry->in_use_p[word] == 0)
1421 2105954914 : ++word;
1422 :
1423 : #if GCC_VERSION >= 3004
1424 5333868816 : bit = __builtin_ctzl (~entry->in_use_p[word]);
1425 : #else
1426 : while ((entry->in_use_p[word] >> bit) & 1)
1427 : ++bit;
1428 : #endif
1429 :
1430 5333868816 : hint = word * HOST_BITS_PER_LONG + bit;
1431 : }
1432 :
1433 : /* Next time, try the next bit. */
1434 53214984080 : entry->next_bit_hint = hint + 1;
1435 :
1436 53214984080 : object_offset = hint * object_size;
1437 : }
1438 :
1439 : /* Set the in-use bit. */
1440 53972825276 : entry->in_use_p[word] |= ((unsigned long) 1 << bit);
1441 :
1442 : /* Keep a running total of the number of free objects. If this page
1443 : fills up, we may have to move it to the end of the list if the
1444 : next page isn't full. If the next page is full, all subsequent
1445 : pages are full, so there's no need to move it. */
1446 53972825276 : if (--entry->num_free_objects == 0
1447 1466275450 : && entry->next != NULL
1448 55432058778 : && entry->next->num_free_objects > 0)
1449 : {
1450 : /* We have a new head for the list. */
1451 664154045 : G.pages[order] = entry->next;
1452 :
1453 : /* We are moving ENTRY to the end of the page table list.
1454 : The new page at the head of the list will have NULL in
1455 : its PREV field and ENTRY will have NULL in its NEXT field. */
1456 664154045 : entry->next->prev = NULL;
1457 664154045 : entry->next = NULL;
1458 :
1459 : /* Append ENTRY to the tail of the list. */
1460 664154045 : entry->prev = G.page_tails[order];
1461 664154045 : G.page_tails[order]->next = entry;
1462 664154045 : G.page_tails[order] = entry;
1463 : }
1464 :
1465 : /* Calculate the object's address. */
1466 53972825276 : result = entry->page + object_offset;
1467 53972825276 : if (GATHER_STATISTICS)
1468 : ggc_record_overhead (OBJECT_SIZE (order), OBJECT_SIZE (order) - size,
1469 : result FINAL_PASS_MEM_STAT);
1470 :
1471 : #ifdef ENABLE_GC_CHECKING
1472 : /* Keep poisoning-by-writing-0xaf the object, in an attempt to keep the
1473 : exact same semantics in the presence of memory bugs, regardless of
1474 : ENABLE_VALGRIND_CHECKING. We override this request below. Drop the
1475 : handle to avoid a handle leak. */
1476 53972825276 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, object_size));
1477 :
1478 : /* `Poison' the entire allocated object, including any padding at
1479 : the end. */
1480 53972825276 : memset (result, 0xaf, object_size);
1481 :
1482 : /* Make the bytes after the end of the object inaccessible. Discard the
1483 : handle to avoid a handle leak. */
1484 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS ((char *) result + size,
1485 53972825276 : object_size - size));
1486 : #endif
1487 :
1488 : /* Tell Valgrind that the memory is there, but its content isn't
1489 : defined. The bytes at the end of the object are still marked
1490 : inaccessible. */
1491 53972825276 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (result, size));
1492 :
1493 : /* Keep track of how many bytes are being allocated. This
1494 : information is used in deciding when to collect. */
1495 53972825276 : G.allocated += object_size;
1496 :
1497 : /* For timevar statistics. */
1498 53972825276 : timevar_ggc_mem_total += object_size;
1499 :
1500 53972825276 : if (f)
1501 71626396 : add_finalizer (result, f, s, n);
1502 :
1503 53972825276 : if (GATHER_STATISTICS)
1504 : {
1505 : size_t overhead = object_size - size;
1506 :
1507 : G.stats.total_overhead += overhead;
1508 : G.stats.total_allocated += object_size;
1509 : G.stats.total_overhead_per_order[order] += overhead;
1510 : G.stats.total_allocated_per_order[order] += object_size;
1511 :
1512 : if (size <= 32)
1513 : {
1514 : G.stats.total_overhead_under32 += overhead;
1515 : G.stats.total_allocated_under32 += object_size;
1516 : }
1517 : if (size <= 64)
1518 : {
1519 : G.stats.total_overhead_under64 += overhead;
1520 : G.stats.total_allocated_under64 += object_size;
1521 : }
1522 : if (size <= 128)
1523 : {
1524 : G.stats.total_overhead_under128 += overhead;
1525 : G.stats.total_allocated_under128 += object_size;
1526 : }
1527 : }
1528 :
1529 53972825276 : if (GGC_DEBUG_LEVEL >= 3)
1530 : fprintf (G.debug_file,
1531 : "Allocating object, requested size="
1532 : HOST_SIZE_T_PRINT_UNSIGNED ", actual=" HOST_SIZE_T_PRINT_UNSIGNED
1533 : " at %p on %p\n",
1534 : (fmt_size_t) size, (fmt_size_t) object_size, result,
1535 : (void *) entry);
1536 :
1537 53972825276 : return result;
1538 : }
1539 :
1540 : #ifdef HAVE_ATTRIBUTE_ALIAS
1541 : extern void *
1542 : ggc_internal_alloc (size_t size, void (*f)(void *), size_t s,
1543 : size_t n MEM_STAT_DECL)
1544 : __attribute__((__alias__ ("ggc_internal_alloc_")));
1545 : extern void *
1546 : ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1547 : size_t n MEM_STAT_DECL)
1548 : __attribute__((__alias__ ("ggc_internal_alloc_")));
1549 : #else
1550 : #ifdef __GNUC__
1551 : __attribute__ ((__noinline__))
1552 : #endif
1553 : void *
1554 : ggc_internal_alloc_no_dtor (size_t size, void (*f)(void *), size_t s,
1555 : size_t n MEM_STAT_DECL)
1556 : {
1557 : return ggc_internal_alloc (size, f, s, n PASS_MEM_STAT);
1558 : }
1559 : #endif
1560 :
1561 : /* Mark function for strings. */
1562 :
1563 : void
1564 8520619390 : gt_ggc_m_S (const void *p)
1565 : {
1566 8520619390 : page_entry *entry;
1567 8520619390 : unsigned bit, word;
1568 8520619390 : unsigned long mask;
1569 8520619390 : unsigned long offset;
1570 :
1571 8520619390 : if (!p)
1572 : return;
1573 :
1574 : /* Look up the page on which the object is allocated. If it was not
1575 : GC allocated, gracefully bail out. */
1576 5947276005 : entry = safe_lookup_page_table_entry (p);
1577 5947276005 : if (!entry)
1578 : return;
1579 :
1580 : /* Calculate the index of the object on the page; this is its bit
1581 : position in the in_use_p bitmap. Note that because a char* might
1582 : point to the middle of an object, we need special code here to
1583 : make sure P points to the start of an object. */
1584 5323708758 : offset = ((const char *) p - entry->page) % object_size_table[entry->order];
1585 5323708758 : if (offset)
1586 : {
1587 : /* Here we've seen a char* which does not point to the beginning
1588 : of an allocated object. We assume it points to the middle of
1589 : a STRING_CST. */
1590 53043 : gcc_assert (offset == offsetof (struct tree_string, str));
1591 53043 : p = ((const char *) p) - offset;
1592 53043 : gt_ggc_mx_lang_tree_node (const_cast<void *> (p));
1593 53043 : return;
1594 : }
1595 :
1596 5323655715 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1597 5323655715 : word = bit / HOST_BITS_PER_LONG;
1598 5323655715 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1599 :
1600 : /* If the bit was previously set, skip it. */
1601 5323655715 : if (entry->in_use_p[word] & mask)
1602 : return;
1603 :
1604 : /* Otherwise set it, and decrement the free object count. */
1605 5213895827 : entry->in_use_p[word] |= mask;
1606 5213895827 : entry->num_free_objects -= 1;
1607 :
1608 5213895827 : if (GGC_DEBUG_LEVEL >= 4)
1609 : fprintf (G.debug_file, "Marking %p\n", p);
1610 :
1611 5213895827 : return;
1612 : }
1613 :
1614 :
1615 : /* User-callable entry points for marking string X. */
1616 :
1617 : void
1618 0 : gt_ggc_mx (const char *& x)
1619 : {
1620 0 : gt_ggc_m_S (x);
1621 0 : }
1622 :
1623 : void
1624 280301 : gt_ggc_mx (char *& x)
1625 : {
1626 280301 : gt_ggc_m_S (x);
1627 280301 : }
1628 :
1629 : void
1630 0 : gt_ggc_mx (unsigned char *& x)
1631 : {
1632 0 : gt_ggc_m_S (x);
1633 0 : }
1634 :
1635 : void
1636 443 : gt_ggc_mx (unsigned char& x ATTRIBUTE_UNUSED)
1637 : {
1638 443 : }
1639 :
1640 : /* If P is not marked, mark it and return false. Otherwise return true.
1641 : P must have been allocated by the GC allocator; it mustn't point to
1642 : static objects, stack variables, or memory allocated with malloc. */
1643 :
1644 : bool
1645 >26658*10^7 : ggc_set_mark (const void *p)
1646 : {
1647 >26658*10^7 : page_entry *entry;
1648 >26658*10^7 : unsigned bit, word;
1649 >26658*10^7 : unsigned long mask;
1650 :
1651 : /* Look up the page on which the object is allocated. If the object
1652 : wasn't allocated by the collector, we'll probably die. */
1653 >26658*10^7 : entry = lookup_page_table_entry (p);
1654 >26658*10^7 : gcc_assert (entry);
1655 :
1656 : /* Calculate the index of the object on the page; this is its bit
1657 : position in the in_use_p bitmap. */
1658 >26658*10^7 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1659 >26658*10^7 : word = bit / HOST_BITS_PER_LONG;
1660 >26658*10^7 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1661 :
1662 : /* If the bit was previously set, skip it. */
1663 >26658*10^7 : if (entry->in_use_p[word] & mask)
1664 : return true;
1665 :
1666 : /* Otherwise set it, and decrement the free object count. */
1667 77937956006 : entry->in_use_p[word] |= mask;
1668 77937956006 : entry->num_free_objects -= 1;
1669 :
1670 77937956006 : if (GGC_DEBUG_LEVEL >= 4)
1671 : fprintf (G.debug_file, "Marking %p\n", p);
1672 :
1673 77937956006 : return false;
1674 : }
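/* Illustrative sketch (not GCC code): the word/mask arithmetic shared by
   ggc_set_mark and gt_ggc_m_S is the standard test-and-set idiom on a
   bitmap of unsigned longs.  A minimal standalone version: */
#include <limits.h>

#define ULONG_BITS (CHAR_BIT * sizeof (unsigned long))

static int
test_and_set_bit (unsigned long *bitmap, unsigned bit)
{
  unsigned word = bit / ULONG_BITS;
  unsigned long mask = 1UL << (bit % ULONG_BITS);
  int was_set = (bitmap[word] & mask) != 0;
  bitmap[word] |= mask;
  return was_set;
}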
1675 :
1676 : /* Return true if P has been marked, false otherwise.
1677 : P must have been allocated by the GC allocator; it mustn't point to
1678 : static objects, stack variables, or memory allocated with malloc. */
1679 :
1680 : bool
1681 5176428697 : ggc_marked_p (const void *p)
1682 : {
1683 5176428697 : page_entry *entry;
1684 5176428697 : unsigned bit, word;
1685 5176428697 : unsigned long mask;
1686 :
1687 : /* Look up the page on which the object is alloced. If the object
1688 : wasn't allocated by the collector, we'll probably die. */
1689 5176428697 : entry = lookup_page_table_entry (p);
1690 5176428697 : gcc_assert (entry);
1691 :
1692 : /* Calculate the index of the object on the page; this is its bit
1693 : position in the in_use_p bitmap. */
1694 5176428697 : bit = OFFSET_TO_BIT (((const char *) p) - entry->page, entry->order);
1695 5176428697 : word = bit / HOST_BITS_PER_LONG;
1696 5176428697 : mask = (unsigned long) 1 << (bit % HOST_BITS_PER_LONG);
1697 :
1698 5176428697 : return (entry->in_use_p[word] & mask) != 0;
1699 : }
1700 :
1701 : /* Return the size of the gc-able object P. */
1702 :
1703 : size_t
1704 725154374 : ggc_get_size (const void *p)
1705 : {
1706 725154374 : page_entry *pe = lookup_page_table_entry (p);
1707 725154374 : return OBJECT_SIZE (pe->order);
1708 : }
1709 :
1710 : /* Release the memory for object P. */
1711 :
1712 : void
1713 2860519342 : ggc_free (void *p)
1714 : {
1715 2860519342 : if (in_gc)
1716 : return;
1717 :
1718 2835397696 : page_entry *pe = lookup_page_table_entry (p);
1719 2835397696 : size_t order = pe->order;
1720 2835397696 : size_t size = OBJECT_SIZE (order);
1721 :
1722 2835397696 : if (GATHER_STATISTICS)
1723 : ggc_free_overhead (p);
1724 :
1725 2835397696 : if (GGC_DEBUG_LEVEL >= 3)
1726 : fprintf (G.debug_file,
1727 : "Freeing object, actual size="
1728 : HOST_SIZE_T_PRINT_UNSIGNED ", at %p on %p\n",
1729 : (fmt_size_t) size, p, (void *) pe);
1730 :
1731 : #ifdef ENABLE_GC_CHECKING
1732 : /* Poison the data, to indicate the data is garbage. */
1733 2835397696 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (p, size));
1734 2835397696 : memset (p, 0xa5, size);
1735 : #endif
1736 : /* Let valgrind know the object is free. */
1737 2835397696 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (p, size));
1738 :
1739 : #ifdef ENABLE_GC_ALWAYS_COLLECT
1740 : /* In the completely-anal-checking mode, we do *not* immediately free
1741 : the data, but instead verify that the data is *actually* not
1742 : reachable the next time we collect. */
1743 : {
1744 : struct free_object *fo = XNEW (struct free_object);
1745 : fo->object = p;
1746 : fo->next = G.free_object_list;
1747 : G.free_object_list = fo;
1748 : }
1749 : #else
1750 2835397696 : {
1751 2835397696 : unsigned int bit_offset, word, bit;
1752 :
1753 2835397696 : G.allocated -= size;
1754 :
1755 : /* Mark the object not-in-use. */
1756 2835397696 : bit_offset = OFFSET_TO_BIT (((const char *) p) - pe->page, order);
1757 2835397696 : word = bit_offset / HOST_BITS_PER_LONG;
1758 2835397696 : bit = bit_offset % HOST_BITS_PER_LONG;
1759 2835397696 : pe->in_use_p[word] &= ~(1UL << bit);
1760 :
1761 2835397696 : if (pe->num_free_objects++ == 0)
1762 : {
1763 286266213 : page_entry *p, *q;
1764 :
1765 : /* If the page is completely full, then it's supposed to
1766 : be after all pages that aren't. Since we've freed one
1767 : object from a page that was full, we need to move the
1768 : page to the head of the list.
1769 :
1770 : PE is the node we want to move. Q is the previous node
1771 : and P is the next node in the list. */
1772 286266213 : q = pe->prev;
1773 286266213 : if (q && q->num_free_objects == 0)
1774 : {
1775 213171251 : p = pe->next;
1776 :
1777 213171251 : q->next = p;
1778 :
1779 : /* If PE was at the end of the list, then Q becomes the
1780 : new end of the list. If PE was not the end of the
1781 : list, then we need to update the PREV field for P. */
1782 213171251 : if (!p)
1783 132758054 : G.page_tails[order] = q;
1784 : else
1785 80413197 : p->prev = q;
1786 :
1787 : /* Move PE to the head of the list. */
1788 213171251 : pe->next = G.pages[order];
1789 213171251 : pe->prev = NULL;
1790 213171251 : G.pages[order]->prev = pe;
1791 213171251 : G.pages[order] = pe;
1792 : }
1793 :
1794 : /* Reset the hint bit to point to the only free object. */
1795 286266213 : pe->next_bit_hint = bit_offset;
1796 : }
1797 : }
1798 : #endif
1799 : }
1800 :
1801 : /* Subroutine of init_ggc which computes the pair of numbers used to
1802 : perform division by OBJECT_SIZE (order) and fills in inverse_table[].
1803 :
1804 : This algorithm is taken from Granlund and Montgomery's paper
1805 : "Division by Invariant Integers using Multiplication"
1806 : (Proc. SIGPLAN PLDI, 1994), section 9 (Exact division by
1807 : constants). */
1808 :
1809 : static void
1810 23899848 : compute_inverse (unsigned order)
1811 : {
1812 23899848 : size_t size, inv;
1813 23899848 : unsigned int e;
1814 :
1815 23899848 : size = OBJECT_SIZE (order);
1816 23899848 : e = 0;
1817 618550828 : while (size % 2 == 0)
1818 : {
1819 594650980 : e++;
1820 594650980 : size >>= 1;
1821 : }
1822 :
1823 : inv = size;
1824 50360394 : while (inv * size != 1)
1825 26460546 : inv = inv * (2 - inv*size);
1826 :
1827 23899848 : DIV_MULT (order) = inv;
1828 23899848 : DIV_SHIFT (order) = e;
1829 23899848 : }
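/* Illustrative sketch (not GCC code): the exact-division trick that
   compute_inverse sets up, end to end.  Write SIZE = D << E with D odd;
   the Newton iteration INV <- INV * (2 - INV * SIZE) doubles the number
   of correct low-order bits per step (the seed INV = D is already
   correct to three bits, since D * D == 1 mod 8 for every odd D).
   Dividing an exact multiple X of SIZE is then a shift and a multiply.  */
static unsigned long
exact_div (unsigned long x, unsigned long size)
{
  unsigned e = 0;
  while (size % 2 == 0)          /* Split SIZE into D * 2^E.  */
    {
      e++;
      size >>= 1;
    }

  unsigned long inv = size;      /* Seed, correct to 3 bits.  */
  while (inv * size != 1)        /* Newton iteration, as above.  */
    inv = inv * (2 - inv * size);

  return (x >> e) * inv;         /* Shift out 2^E, multiply by D^-1.  */
}

/* For example, with SIZE = 24 (D = 3, E = 3) on a 64-bit host, INV is
   0xAAAAAAAAAAAAAAAB and exact_div (72, 24) yields 3.  The result is
   only meaningful when X is an exact multiple of SIZE.  */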
1830 :
1831 : /* Initialize the ggc-mmap allocator. */
1832 : void
1833 285722 : init_ggc (void)
1834 : {
1835 285722 : static bool init_p = false;
1836 285722 : unsigned order;
1837 :
1838 285722 : if (init_p)
1839 : return;
1840 284522 : init_p = true;
1841 :
1842 284522 : G.pagesize = getpagesize ();
1843 284522 : G.lg_pagesize = exact_log2 (G.pagesize);
1844 :
1845 : #ifdef HAVE_MMAP_DEV_ZERO
1846 : G.dev_zero_fd = open ("/dev/zero", O_RDONLY);
1847 : if (G.dev_zero_fd == -1)
1848 : internal_error ("open /dev/zero: %m");
1849 : #endif
1850 :
1851 : #if 0
1852 : G.debug_file = fopen ("ggc-mmap.debug", "w");
1853 : #else
1854 284522 : G.debug_file = stdout;
1855 : #endif
1856 :
1857 : #ifdef USING_MMAP
1858 : /* StunOS has an amazing off-by-one error for the first mmap allocation
1859 : after fiddling with RLIMIT_STACK. The result, as hard as it is to
1860 : believe, is an unaligned page allocation, which would cause us to
1861 : hork badly if we tried to use it. */
1862 284522 : {
1863 284522 : char *p = alloc_anon (NULL, G.pagesize, true);
1864 284522 : struct page_entry *e;
1865 284522 : if ((uintptr_t)p & (G.pagesize - 1))
1866 : {
1867 : /* How losing. Discard this one and try another. If we still
1868 : can't get something useful, give up. */
1869 :
1870 0 : p = alloc_anon (NULL, G.pagesize, true);
1871 0 : gcc_assert (!((uintptr_t)p & (G.pagesize - 1)));
1872 : }
1873 :
1874 : /* We have a good page, might as well hold onto it... */
1875 284522 : e = XCNEW (struct page_entry);
1876 284522 : e->bytes = G.pagesize;
1877 284522 : e->free_list = find_free_list (G.pagesize);
1878 284522 : e->page = p;
1879 284522 : e->next = e->free_list->free_pages;
1880 284522 : e->free_list->free_pages = e;
1881 : }
1882 : #endif
1883 :
1884 : /* Initialize the object size table. */
1885 18493930 : for (order = 0; order < HOST_BITS_PER_PTR; ++order)
1886 18209408 : object_size_table[order] = (size_t) 1 << order;
1887 5974962 : for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1888 : {
1889 5690440 : size_t s = extra_order_size_table[order - HOST_BITS_PER_PTR];
1890 :
1891 : /* If S is not a multiple of the MAX_ALIGNMENT, then round it up
1892 : so that we're sure of getting aligned memory. */
1893 5690440 : s = ROUND_UP (s, MAX_ALIGNMENT);
1894 5690440 : object_size_table[order] = s;
1895 : }
1896 :
1897 : /* Initialize the objects-per-page and inverse tables. */
1898 24184370 : for (order = 0; order < NUM_ORDERS; ++order)
1899 : {
1900 23899848 : objects_per_page_table[order] = G.pagesize / OBJECT_SIZE (order);
1901 23899848 : if (objects_per_page_table[order] == 0)
1902 14510622 : objects_per_page_table[order] = 1;
1903 23899848 : compute_inverse (order);
1904 : }
1905 :
1906 : /* Reset the size_lookup array to put appropriately sized objects in
1907 : the special orders. All objects bigger than the previous power
1908 : of two, but no greater than the special size, should go in the
1909 : new order. */
1910 5974962 : for (order = HOST_BITS_PER_PTR; order < NUM_ORDERS; ++order)
1911 : {
1912 5690440 : int o;
1913 5690440 : int i;
1914 :
1915 5690440 : i = OBJECT_SIZE (order);
1916 5690440 : if (i >= NUM_SIZE_LOOKUP)
1917 0 : continue;
1918 :
1919 112670712 : for (o = size_lookup[i]; o == size_lookup [i]; --i)
1920 106980272 : size_lookup[i] = order;
1921 : }
1922 :
1923 284522 : G.depth_in_use = 0;
1924 284522 : G.depth_max = 10;
1925 284522 : G.depth = XNEWVEC (unsigned int, G.depth_max);
1926 :
1927 284522 : G.by_depth_in_use = 0;
1928 284522 : G.by_depth_max = INITIAL_PTE_COUNT;
1929 284522 : G.by_depth = XNEWVEC (page_entry *, G.by_depth_max);
1930 284522 : G.save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
1931 :
1932 : /* Allocate space for the depth 0 finalizers. */
1933 284522 : G.finalizers.safe_push (vNULL);
1934 284522 : G.vec_finalizers.safe_push (vNULL);
1935 284522 : gcc_assert (G.finalizers.length() == 1);
1936 : }
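/* Illustrative sketch (not GCC code): the downward re-seeding loop used
   for size_lookup above, in isolation.  Capture the order currently
   stored at the special size S, then walk downward retargeting every
   entry that still holds that order; this maps exactly the sizes in
   (previous boundary, S] to NEW_ORDER.  Assumes, as in init_ggc, that
   some smaller entry holds a different order, which stops the walk.  */
static void
retarget_size_lookup (unsigned char *size_lookup, int s,
                      unsigned char new_order)
{
  unsigned char old_order = size_lookup[s];
  for (int i = s; size_lookup[i] == old_order; --i)
    size_lookup[i] = new_order;
}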
1937 :
1938 : /* Merge the SAVE_IN_USE_P and IN_USE_P arrays in P so that IN_USE_P
1939 : reflects reality. Recalculate NUM_FREE_OBJECTS as well. */
1940 :
1941 : static void
1942 0 : ggc_recalculate_in_use_p (page_entry *p)
1943 : {
1944 0 : unsigned int i;
1945 0 : size_t num_objects;
1946 :
1947 : /* Because the past-the-end bit in in_use_p is always set, we
1948 : pretend there is one additional object. */
1949 0 : num_objects = OBJECTS_IN_PAGE (p) + 1;
1950 :
1951 : /* Reset the free object count. */
1952 0 : p->num_free_objects = num_objects;
1953 :
1954 : /* Combine the IN_USE_P and SAVE_IN_USE_P arrays. */
1955 0 : for (i = 0;
1956 0 : i < CEIL (BITMAP_SIZE (num_objects),
1957 : sizeof (*p->in_use_p));
1958 : ++i)
1959 : {
1960 0 : unsigned long j;
1961 :
1962 : /* Something is in use if it is marked, or if it was in use in a
1963 : context further down the context stack. */
1964 0 : p->in_use_p[i] |= save_in_use_p (p)[i];
1965 :
1966 : /* Decrement the free object count for every object allocated. */
1967 0 : for (j = p->in_use_p[i]; j; j >>= 1)
1968 0 : p->num_free_objects -= (j & 1);
1969 : }
1970 :
1971 0 : gcc_assert (p->num_free_objects < num_objects);
1972 0 : }
1973 :
1974 : /* Unmark all objects. */
1975 :
1976 : static void
1977 719490 : clear_marks (void)
1978 : {
1979 719490 : unsigned order;
1980 :
1981 59717670 : for (order = 2; order < NUM_ORDERS; order++)
1982 : {
1983 58998180 : page_entry *p;
1984 :
1985 2024278774 : for (p = G.pages[order]; p != NULL; p = p->next)
1986 : {
1987 1965280594 : size_t num_objects = OBJECTS_IN_PAGE (p);
1988 1965280594 : size_t bitmap_size = BITMAP_SIZE (num_objects + 1);
1989 :
1990 : /* The data should be page-aligned. */
1991 1965280594 : gcc_assert (!((uintptr_t) p->page & (G.pagesize - 1)));
1992 :
1993 : /* Pages that aren't in the topmost context are not collected;
1994 : nevertheless, we need their in-use bit vectors to store GC
1995 : marks. So, back them up first. */
1996 1965280594 : if (p->context_depth < G.context_depth)
1997 : {
1998 0 : if (! save_in_use_p (p))
1999 0 : save_in_use_p (p) = XNEWVAR (unsigned long, bitmap_size);
2000 0 : memcpy (save_in_use_p (p), p->in_use_p, bitmap_size);
2001 : }
2002 :
2003 : /* Reset the number of free objects and clear the
2004 : in-use bits. These will be adjusted by the mark routines. */
2005 1965280594 : p->num_free_objects = num_objects;
2006 1965280594 : memset (p->in_use_p, 0, bitmap_size);
2007 :
2008 : /* Make sure the one-past-the-end bit is always set. */
2009 1965280594 : p->in_use_p[num_objects / HOST_BITS_PER_LONG]
2010 1965280594 : = ((unsigned long) 1 << (num_objects % HOST_BITS_PER_LONG));
2011 : }
2012 : }
2013 719490 : }
2014 :
2015 : /* Check if any blocks with a registered finalizer have become unmarked. If
2016 : so, run the finalizer and unregister it, because the block is about to be
2017 : freed. Note that no guarantee is made about the order in which finalizers
2018 : run, so touching other objects in GC memory is extremely unwise. */
2019 :
2020 : static void
2021 719140 : ggc_handle_finalizers ()
2022 : {
2023 719140 : unsigned dlen = G.finalizers.length();
2024 1438280 : for (unsigned d = G.context_depth; d < dlen; ++d)
2025 : {
2026 719140 : vec<finalizer> &v = G.finalizers[d];
2027 719140 : unsigned length = v.length ();
2028 148099612 : for (unsigned int i = 0; i < length;)
2029 : {
2030 147380472 : finalizer &f = v[i];
2031 147380472 : if (!ggc_marked_p (f.addr ()))
2032 : {
2033 25240916 : f.call ();
2034 25240916 : v.unordered_remove (i);
2035 25240916 : length--;
2036 : }
2037 : else
2038 122139556 : i++;
2039 : }
2040 : }
2041 :
2042 1438280 : gcc_assert (dlen == G.vec_finalizers.length());
2043 1438280 : for (unsigned d = G.context_depth; d < dlen; ++d)
2044 : {
2045 719140 : vec<vec_finalizer> &vv = G.vec_finalizers[d];
2046 719140 : unsigned length = vv.length ();
2047 719140 : for (unsigned int i = 0; i < length;)
2048 : {
2049 0 : vec_finalizer &f = vv[i];
2050 0 : if (!ggc_marked_p (f.addr ()))
2051 : {
2052 0 : f.call ();
2053 0 : vv.unordered_remove (i);
2054 0 : length--;
2055 : }
2056 : else
2057 0 : i++;
2058 : }
2059 : }
2060 719140 : }
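/* Illustrative sketch (not GCC code): the compaction loop above in
   miniature.  unordered_remove replaces element I with the last element
   and shrinks the vector, so the index only advances when an element is
   kept.  This is also why finalizer ordering is unspecified: removals
   reorder the vector.  */
static size_t
filter_unordered (int *v, size_t len, int (*keep) (int))
{
  for (size_t i = 0; i < len;)
    {
      if (!keep (v[i]))
        v[i] = v[--len];   /* Move the last element into slot I.  */
      else
        i++;
    }
  return len;              /* New length; entries past it are stale.  */
}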
2061 :
2062 : /* Free all empty pages. Partially empty pages need no attention
2063 : because the `mark' bit doubles as an `unused' bit. */
2064 :
2065 : static void
2066 734774 : sweep_pages (void)
2067 : {
2068 734774 : unsigned order;
2069 :
2070 60986242 : for (order = 2; order < NUM_ORDERS; order++)
2071 : {
2072 : /* The last page-entry to consider, regardless of entries
2073 : placed at the end of the list. */
2074 60251468 : page_entry * const last = G.page_tails[order];
2075 :
2076 60251468 : size_t num_objects;
2077 60251468 : size_t live_objects;
2078 60251468 : page_entry *p, *previous;
2079 60251468 : int done;
2080 :
2081 60251468 : p = G.pages[order];
2082 60251468 : if (p == NULL)
2083 37150454 : continue;
2084 :
2085 : previous = NULL;
2086 1975953100 : do
2087 : {
2088 1975953100 : page_entry *next = p->next;
2089 :
2090 : /* Loop until all entries have been examined. */
2091 1975953100 : done = (p == last);
2092 :
2093 1975953100 : num_objects = OBJECTS_IN_PAGE (p);
2094 :
2095 : /* Add all live objects on this page to the count of
2096 : allocated memory. */
2097 1975953100 : live_objects = num_objects - p->num_free_objects;
2098 :
2099 1975953100 : G.allocated += OBJECT_SIZE (order) * live_objects;
2100 :
2101 : /* Only objects on pages in the topmost context should get
2102 : collected. */
2103 1975953100 : if (p->context_depth < G.context_depth)
2104 : ;
2105 :
2106 : /* Remove the page if it's empty. */
2107 1975953100 : else if (live_objects == 0)
2108 : {
2109 : /* If P was the first page in the list, then NEXT
2110 : becomes the new first page in the list, otherwise
2111 : splice P out of the forward pointers. */
2112 31234151 : if (! previous)
2113 4440927 : G.pages[order] = next;
2114 : else
2115 26793224 : previous->next = next;
2116 :
2117 : /* Splice P out of the back pointers too. */
2118 31234151 : if (next)
2119 30982694 : next->prev = previous;
2120 :
2121 : /* Are we removing the last element? */
2122 31234151 : if (p == G.page_tails[order])
2123 251457 : G.page_tails[order] = previous;
2124 31234151 : free_page (p);
2125 31234151 : p = previous;
2126 : }
2127 :
2128 : /* If the page is full, move it to the end. */
2129 1944718949 : else if (p->num_free_objects == 0)
2130 : {
2131 : /* Don't move it if it's already at the end. */
2132 1284380416 : if (p != G.page_tails[order])
2133 : {
2134 : /* Move p to the end of the list. */
2135 1282336771 : p->next = NULL;
2136 1282336771 : p->prev = G.page_tails[order];
2137 1282336771 : G.page_tails[order]->next = p;
2138 :
2139 : /* Update the tail pointer... */
2140 1282336771 : G.page_tails[order] = p;
2141 :
2142 : /* ... and the head pointer, if necessary. */
2143 1282336771 : if (! previous)
2144 43407039 : G.pages[order] = next;
2145 : else
2146 1238929732 : previous->next = next;
2147 :
2148 : /* And update the backpointer in NEXT if necessary. */
2149 1282336771 : if (next)
2150 1282336771 : next->prev = previous;
2151 :
2152 : p = previous;
2153 : }
2154 : }
2155 :
2156 : /* If we've fallen through to here, it's a page in the
2157 : topmost context that is neither full nor empty. Such a
2158 : page must precede pages at lesser context depth in the
2159 : list, so move it to the head. */
2160 660338533 : else if (p != G.pages[order])
2161 : {
2162 640975123 : previous->next = p->next;
2163 :
2164 : /* Update the backchain in the next node if it exists. */
2165 640975123 : if (p->next)
2166 637487085 : p->next->prev = previous;
2167 :
2168 : /* Move P to the head of the list. */
2169 640975123 : p->next = G.pages[order];
2170 640975123 : p->prev = NULL;
2171 640975123 : G.pages[order]->prev = p;
2172 :
2173 : /* Update the head pointer. */
2174 640975123 : G.pages[order] = p;
2175 :
2176 : /* Are we moving the last element? */
2177 640975123 : if (G.page_tails[order] == p)
2178 3488038 : G.page_tails[order] = previous;
2179 : p = previous;
2180 : }
2181 :
2182 1975953100 : previous = p;
2183 1975953100 : p = next;
2184 : }
2185 1975953100 : while (! done);
2186 :
2187 : /* Now, restore the in_use_p vectors for any pages from contexts
2188 : other than the current one. */
2189 1967819963 : for (p = G.pages[order]; p; p = p->next)
2190 1944718949 : if (p->context_depth != G.context_depth)
2191 0 : ggc_recalculate_in_use_p (p);
2192 : }
2193 734774 : }
2194 :
2195 : #ifdef ENABLE_GC_CHECKING
2196 : /* Clobber all free objects. */
2197 :
2198 : static void
2199 719490 : poison_pages (void)
2200 : {
2201 719490 : unsigned order;
2202 :
2203 59717670 : for (order = 2; order < NUM_ORDERS; order++)
2204 : {
2205 58998180 : size_t size = OBJECT_SIZE (order);
2206 58998180 : page_entry *p;
2207 :
2208 2024278774 : for (p = G.pages[order]; p != NULL; p = p->next)
2209 : {
2210 1965280594 : size_t num_objects;
2211 1965280594 : size_t i;
2212 :
2213 1965280594 : if (p->context_depth != G.context_depth)
2214 : /* Since we don't do any collection for pages in pushed
2215 : contexts, there's no need to do any poisoning. And
2216 : besides, the IN_USE_P array isn't valid until we pop
2217 : contexts. */
2218 0 : continue;
2219 :
2220 1965280594 : num_objects = OBJECTS_IN_PAGE (p);
2221 >10433*10^7 : for (i = 0; i < num_objects; i++)
2222 : {
2223 >10236*10^7 : size_t word, bit;
2224 >10236*10^7 : word = i / HOST_BITS_PER_LONG;
2225 >10236*10^7 : bit = i % HOST_BITS_PER_LONG;
2226 >10236*10^7 : if (((p->in_use_p[word] >> bit) & 1) == 0)
2227 : {
2228 19217637990 : char *object = p->page + i * size;
2229 :
2230 : /* Keep poison-by-write when we expect to use Valgrind,
2231 : so the exact same memory semantics is kept, in case
2232 : there are memory errors. We override this request
2233 : below. */
2234 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_UNDEFINED (object,
2235 19217637990 : size));
2236 19217637990 : memset (object, 0xa5, size);
2237 :
2238 : /* Drop the handle to avoid handle leak. */
2239 19217637990 : VALGRIND_DISCARD (VALGRIND_MAKE_MEM_NOACCESS (object, size));
2240 : }
2241 : }
2242 : }
2243 : }
2244 719490 : }
2245 : #else
2246 : #define poison_pages()
2247 : #endif
2248 :
2249 : #ifdef ENABLE_GC_ALWAYS_COLLECT
2250 : /* Validate that the reportedly free objects actually are. */
2251 :
2252 : static void
2253 : validate_free_objects (void)
2254 : {
2255 : struct free_object *f, *next, *still_free = NULL;
2256 :
2257 : for (f = G.free_object_list; f ; f = next)
2258 : {
2259 : page_entry *pe = lookup_page_table_entry (f->object);
2260 : size_t bit, word;
2261 :
2262 : bit = OFFSET_TO_BIT ((char *)f->object - pe->page, pe->order);
2263 : word = bit / HOST_BITS_PER_LONG;
2264 : bit = bit % HOST_BITS_PER_LONG;
2265 : next = f->next;
2266 :
2267 : /* Make certain it isn't visible from any root. Notice that we
2268 : do this check before sweep_pages merges save_in_use_p. */
2269 : gcc_assert (!(pe->in_use_p[word] & (1UL << bit)));
2270 :
2271 : /* If the object comes from an outer context, then retain the
2272 : free_object entry, so that we can verify that the address
2273 : isn't live on the stack in some outer context. */
2274 : if (pe->context_depth != G.context_depth)
2275 : {
2276 : f->next = still_free;
2277 : still_free = f;
2278 : }
2279 : else
2280 : free (f);
2281 : }
2282 :
2283 : G.free_object_list = still_free;
2284 : }
2285 : #else
2286 : #define validate_free_objects()
2287 : #endif
2288 :
2289 : /* Top level mark-and-sweep routine. */
2290 :
2291 : void
2292 551085536 : ggc_collect (enum ggc_collect mode)
2293 : {
2294 : /* Avoid frequent unnecessary work by skipping collection if the
2295 : total allocations haven't expanded much since the last
2296 : collection. */
2297 551085536 : float allocated_last_gc =
2298 551085536 : MAX (G.allocated_last_gc, (size_t)param_ggc_min_heapsize * ONE_K);
2299 :
2300 : /* It is also a good time to bring the memory block pool back within
2300 : its limits. */
2301 551085536 : memory_block_pool::trim ();
2302 :
2303 551085536 : float min_expand = allocated_last_gc * param_ggc_min_expand / 100;
2304 551085536 : if (mode == GGC_COLLECT_HEURISTIC
2305 551085492 : && G.allocated < allocated_last_gc + min_expand)
2306 : return;
2307 :
2308 719140 : timevar_push (TV_GC);
2309 719140 : if (GGC_DEBUG_LEVEL >= 2)
2310 : fprintf (G.debug_file, "BEGIN COLLECTING\n");
2311 :
2312 : /* Zero the total allocated bytes. This will be recalculated in the
2313 : sweep phase. */
2314 719140 : size_t allocated = G.allocated;
2315 719140 : G.allocated = 0;
2316 :
2317 : /* Release the pages we freed the last time we collected, but didn't
2318 : reuse in the interim. */
2319 719140 : release_pages ();
2320 :
2321 : /* Output this later so we do not interfere with release_pages. */
2322 719140 : if (!quiet_flag)
2323 0 : fprintf (stderr, " {GC " PRsa (0) " -> ", SIZE_AMOUNT (allocated));
2324 :
2325 : /* Indicate that we've seen collections at this context depth. */
2326 719140 : G.context_depth_collections = ((unsigned long)1 << (G.context_depth + 1)) - 1;
2327 :
2328 719140 : invoke_plugin_callbacks (PLUGIN_GGC_START, NULL);
2329 :
2330 719140 : in_gc = true;
2331 719140 : clear_marks ();
2332 719140 : ggc_mark_roots ();
2333 719140 : ggc_handle_finalizers ();
2334 :
2335 719140 : if (GATHER_STATISTICS)
2336 : ggc_prune_overhead_list ();
2337 :
2338 719140 : poison_pages ();
2339 719140 : validate_free_objects ();
2340 719140 : sweep_pages ();
2341 :
2342 719140 : in_gc = false;
2343 719140 : G.allocated_last_gc = G.allocated;
2344 :
2345 719140 : invoke_plugin_callbacks (PLUGIN_GGC_END, NULL);
2346 :
2347 719140 : timevar_pop (TV_GC);
2348 :
2349 719140 : if (!quiet_flag)
2350 0 : fprintf (stderr, PRsa (0) "}", SIZE_AMOUNT (G.allocated));
2351 : if (GGC_DEBUG_LEVEL >= 2)
2352 : fprintf (G.debug_file, "END COLLECTING\n");
2353 : }
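/* Illustrative sketch (not GCC code): the trigger heuristic at the top
   of ggc_collect, as plain arithmetic.  MIN_HEAPSIZE_KB and
   MIN_EXPAND_PCT play the roles of param_ggc_min_heapsize and
   param_ggc_min_expand; the concrete values below are illustrative,
   not GCC's tuned defaults.  */
#include <stdbool.h>
#include <stddef.h>

static bool
should_collect (size_t allocated, size_t allocated_last_gc,
                size_t min_heapsize_kb, unsigned min_expand_pct)
{
  size_t base = allocated_last_gc;
  if (base < min_heapsize_kb * 1024)
    base = min_heapsize_kb * 1024;
  return allocated >= base + base * min_expand_pct / 100;
}

/* E.g. with a 4 MiB floor and 30% expansion, a heap that last settled
   at 10 MiB triggers the next collection at 13 MiB, not before.  */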
2354 :
2355 : /* Return free pages to the system. */
2356 :
2357 : void
2358 15634 : ggc_trim ()
2359 : {
2360 15634 : timevar_push (TV_GC);
2361 15634 : G.allocated = 0;
2362 15634 : sweep_pages ();
2363 15634 : release_pages ();
2364 15634 : if (!quiet_flag)
2365 0 : fprintf (stderr, " {GC trimmed to " PRsa (0) ", " PRsa (0) " mapped}",
2366 0 : SIZE_AMOUNT (G.allocated), SIZE_AMOUNT (G.bytes_mapped));
2367 15634 : timevar_pop (TV_GC);
2368 15634 : }
2369 :
2370 : /* Assume that all GGC memory is reachable and grow the limits for the next
2371 : collection. With checking, trigger GGC so that compiling with -Q reports
2372 : how much memory really is reachable. */
2373 :
2374 : void
2375 265331 : ggc_grow (void)
2376 : {
2377 265331 : if (!flag_checking)
2378 0 : G.allocated_last_gc = MAX (G.allocated_last_gc,
2379 : G.allocated);
2380 : else
2381 265331 : ggc_collect ();
2382 265331 : if (!quiet_flag)
2383 0 : fprintf (stderr, " {GC " PRsa (0) "} ", SIZE_AMOUNT (G.allocated));
2384 265331 : }
2385 :
2386 : void
2387 0 : ggc_print_statistics (void)
2388 : {
2389 0 : struct ggc_statistics stats;
2390 0 : unsigned int i;
2391 0 : size_t total_overhead = 0;
2392 :
2393 : /* Clear the statistics. */
2394 0 : memset (&stats, 0, sizeof (stats));
2395 :
2396 : /* Make sure collection will really occur. */
2397 0 : G.allocated_last_gc = 0;
2398 :
2399 : /* Collect and print the statistics common across collectors. */
2400 0 : ggc_print_common_statistics (stderr, &stats);
2401 :
2402 : /* Release free pages so that we will not count the bytes allocated
2403 : there as part of the total allocated memory. */
2404 0 : release_pages ();
2405 :
2406 : /* Collect some information about the various sizes of
2407 : allocation. */
2408 0 : fprintf (stderr,
2409 : "Memory still allocated at the end of the compilation process\n");
2410 0 : fprintf (stderr, "%-8s %10s %10s %10s\n",
2411 : "Size", "Allocated", "Used", "Overhead");
2412 0 : for (i = 0; i < NUM_ORDERS; ++i)
2413 : {
2414 0 : page_entry *p;
2415 0 : size_t allocated;
2416 0 : size_t in_use;
2417 0 : size_t overhead;
2418 :
2419 : /* Skip empty entries. */
2420 0 : if (!G.pages[i])
2421 0 : continue;
2422 :
2423 : overhead = allocated = in_use = 0;
2424 :
2425 : /* Figure out the total number of bytes allocated for objects of
2426 : this size, and how many of them are actually in use. Also figure
2427 : out how much memory the page table is using. */
2428 0 : for (p = G.pages[i]; p; p = p->next)
2429 : {
2430 0 : allocated += p->bytes;
2431 0 : in_use +=
2432 0 : (OBJECTS_IN_PAGE (p) - p->num_free_objects) * OBJECT_SIZE (i);
2433 :
2434 0 : overhead += (sizeof (page_entry) - sizeof (long)
2435 0 : + BITMAP_SIZE (OBJECTS_IN_PAGE (p) + 1));
2436 : }
2437 0 : fprintf (stderr, "%-8" PRIu64 " " PRsa (10) " " PRsa (10) " "
2438 : PRsa (10) "\n",
2439 0 : (uint64_t)OBJECT_SIZE (i),
2440 0 : SIZE_AMOUNT (allocated),
2441 0 : SIZE_AMOUNT (in_use),
2442 0 : SIZE_AMOUNT (overhead));
2443 0 : total_overhead += overhead;
2444 : }
2445 0 : fprintf (stderr, "%-8s " PRsa (10) " " PRsa (10) " " PRsa (10) "\n",
2446 : "Total",
2447 0 : SIZE_AMOUNT (G.bytes_mapped),
2448 0 : SIZE_AMOUNT (G.allocated),
2449 0 : SIZE_AMOUNT (total_overhead));
2450 :
2451 0 : if (GATHER_STATISTICS)
2452 : {
2453 : fprintf (stderr, "\nTotal allocations and overheads during "
2454 : "the compilation process\n");
2455 :
2456 : fprintf (stderr, "Total Overhead: "
2457 : PRsa (9) "\n",
2458 : SIZE_AMOUNT (G.stats.total_overhead));
2459 : fprintf (stderr, "Total Allocated: "
2460 : PRsa (9) "\n",
2461 : SIZE_AMOUNT (G.stats.total_allocated));
2462 :
2463 : fprintf (stderr, "Total Overhead under 32B: "
2464 : PRsa (9) "\n",
2465 : SIZE_AMOUNT (G.stats.total_overhead_under32));
2466 : fprintf (stderr, "Total Allocated under 32B: "
2467 : PRsa (9) "\n",
2468 : SIZE_AMOUNT (G.stats.total_allocated_under32));
2469 : fprintf (stderr, "Total Overhead under 64B: "
2470 : PRsa (9) "\n",
2471 : SIZE_AMOUNT (G.stats.total_overhead_under64));
2472 : fprintf (stderr, "Total Allocated under 64B: "
2473 : PRsa (9) "\n",
2474 : SIZE_AMOUNT (G.stats.total_allocated_under64));
2475 : fprintf (stderr, "Total Overhead under 128B: "
2476 : PRsa (9) "\n",
2477 : SIZE_AMOUNT (G.stats.total_overhead_under128));
2478 : fprintf (stderr, "Total Allocated under 128B: "
2479 : PRsa (9) "\n",
2480 : SIZE_AMOUNT (G.stats.total_allocated_under128));
2481 : fprintf (stderr, "Number of free list fallbacks: "
2482 : PRsa (9) "\n",
2483 : SIZE_AMOUNT (G.stats.fallback));
2484 :
2485 : for (i = 0; i < NUM_ORDERS; i++)
2486 : if (G.stats.total_allocated_per_order[i])
2487 : {
2488 : fprintf (stderr, "Total Overhead page size %9" PRIu64 ": "
2489 : PRsa (9) "\n",
2490 : (uint64_t)OBJECT_SIZE (i),
2491 : SIZE_AMOUNT (G.stats.total_overhead_per_order[i]));
2492 : fprintf (stderr, "Total Allocated page size %9" PRIu64 ": "
2493 : PRsa (9) "\n",
2494 : (uint64_t)OBJECT_SIZE (i),
2495 : SIZE_AMOUNT (G.stats.total_allocated_per_order[i]));
2496 : }
2497 : }
2498 0 : }
2499 :
2500 : struct ggc_pch_ondisk
2501 : {
2502 : unsigned totals[NUM_ORDERS];
2503 : };
2504 :
2505 : struct ggc_pch_data
2506 : {
2507 : struct ggc_pch_ondisk d;
2508 : uintptr_t base[NUM_ORDERS];
2509 : size_t written[NUM_ORDERS];
2510 : };
2511 :
2512 : struct ggc_pch_data *
2513 428 : init_ggc_pch (void)
2514 : {
2515 428 : return XCNEW (struct ggc_pch_data);
2516 : }
2517 :
2518 : void
2519 27065646 : ggc_pch_count_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2520 : size_t size)
2521 : {
2522 27065646 : unsigned order;
2523 :
2524 27065646 : if (size < NUM_SIZE_LOOKUP)
2525 26948727 : order = size_lookup[size];
2526 : else
2527 : {
2528 : order = 10;
2529 151046 : while (size > OBJECT_SIZE (order))
2530 34127 : order++;
2531 : }
2532 :
2533 27065646 : d->d.totals[order]++;
2534 27065646 : }
2535 :
2536 : size_t
2537 428 : ggc_pch_total_size (struct ggc_pch_data *d)
2538 : {
2539 428 : size_t a = 0;
2540 428 : unsigned i;
2541 :
2542 36380 : for (i = 0; i < NUM_ORDERS; i++)
2543 35952 : a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2544 428 : return a;
2545 : }
2546 :
2547 : void
2548 428 : ggc_pch_this_base (struct ggc_pch_data *d, void *base)
2549 : {
2550 428 : uintptr_t a = (uintptr_t) base;
2551 428 : unsigned i;
2552 :
2553 36380 : for (i = 0; i < NUM_ORDERS; i++)
2554 : {
2555 35952 : d->base[i] = a;
2556 35952 : a += PAGE_ALIGN (d->d.totals[i] * OBJECT_SIZE (i));
2557 : }
2558 428 : }
2559 :
2560 :
2561 : char *
2562 27065646 : ggc_pch_alloc_object (struct ggc_pch_data *d, void *x ATTRIBUTE_UNUSED,
2563 : size_t size)
2564 : {
2565 27065646 : unsigned order;
2566 27065646 : char *result;
2567 :
2568 27065646 : if (size < NUM_SIZE_LOOKUP)
2569 26948727 : order = size_lookup[size];
2570 : else
2571 : {
2572 : order = 10;
2573 151046 : while (size > OBJECT_SIZE (order))
2574 34127 : order++;
2575 : }
2576 :
2577 27065646 : result = (char *) d->base[order];
2578 27065646 : d->base[order] += OBJECT_SIZE (order);
2579 27065646 : return result;
2580 : }
2581 :
2582 : void
2583 428 : ggc_pch_prepare_write (struct ggc_pch_data *d ATTRIBUTE_UNUSED,
2584 : FILE *f ATTRIBUTE_UNUSED)
2585 : {
2586 : /* Nothing to do. */
2587 428 : }
2588 :
2589 : void
2590 27065646 : ggc_pch_write_object (struct ggc_pch_data *d,
2591 : FILE *f, void *x, void *newx ATTRIBUTE_UNUSED,
2592 : size_t size)
2593 : {
2594 27065646 : unsigned order;
2595 27065646 : static const char emptyBytes[256] = { 0 };
2596 :
2597 27065646 : if (size < NUM_SIZE_LOOKUP)
2598 26948727 : order = size_lookup[size];
2599 : else
2600 : {
2601 : order = 10;
2602 151046 : while (size > OBJECT_SIZE (order))
2603 34127 : order++;
2604 : }
2605 :
2606 27065646 : if (fwrite (x, size, 1, f) != 1)
2607 0 : fatal_error (input_location, "cannot write PCH file: %m");
2608 :
2609 : /* If SIZE is not the same as OBJECT_SIZE(order), then we need to pad the
2610 : object out to OBJECT_SIZE(order). This happens for strings. */
2611 :
2612 27065646 : if (size != OBJECT_SIZE (order))
2613 : {
2614 1894089 : unsigned padding = OBJECT_SIZE (order) - size;
2615 :
2616 : /* To speed small writes, we use a nulled-out array that's larger
2617 : than most padding requests as the source for our null bytes. This
2618 : permits us to do the padding with fwrite() rather than fseek(), and
2619 : limits the chance the OS may try to flush any outstanding writes. */
2620 1894089 : if (padding <= sizeof (emptyBytes))
2621 : {
2622 1860974 : if (fwrite (emptyBytes, 1, padding, f) != padding)
2623 0 : fatal_error (input_location, "cannot write PCH file");
2624 : }
2625 : else
2626 : {
2627 : /* Larger than our buffer? Just default to fseek. */
2628 33115 : if (fseek (f, padding, SEEK_CUR) != 0)
2629 0 : fatal_error (input_location, "cannot write PCH file");
2630 : }
2631 : }
2632 :
2633 27065646 : d->written[order]++;
2634 27065646 : if (d->written[order] == d->d.totals[order]
2635 27065646 : && fseek (f, ROUND_UP_VALUE (d->d.totals[order] * OBJECT_SIZE (order),
2636 : G.pagesize),
2637 : SEEK_CUR) != 0)
2638 0 : fatal_error (input_location, "cannot write PCH file: %m");
2639 27065646 : }
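/* Illustrative sketch (not GCC code): the padding strategy above in
   isolation -- small pads are written from a static zero buffer so the
   stream keeps advancing with fwrite; larger pads fall back to fseek
   and rely on the hole reading back as zeros.  */
#include <stdio.h>

static int
pad_stream (FILE *f, size_t padding)
{
  static const char zeros[256];
  if (padding <= sizeof zeros)
    return fwrite (zeros, 1, padding, f) == padding ? 0 : -1;
  return fseek (f, (long) padding, SEEK_CUR) == 0 ? 0 : -1;
}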
2640 :
2641 : void
2642 428 : ggc_pch_finish (struct ggc_pch_data *d, FILE *f)
2643 : {
2644 428 : if (fwrite (&d->d, sizeof (d->d), 1, f) != 1)
2645 0 : fatal_error (input_location, "cannot write PCH file: %m");
2646 428 : free (d);
2647 428 : }
2648 :
2649 : /* Move the PCH PTE entries just added to the end of by_depth to the
2650 : front. */
2651 :
2652 : static void
2653 350 : move_ptes_to_front (int count_old_page_tables, int count_new_page_tables)
2654 : {
2655 : /* First, we swap the new entries to the front of the varrays. */
2656 350 : page_entry **new_by_depth;
2657 350 : unsigned long **new_save_in_use;
2658 :
2659 350 : new_by_depth = XNEWVEC (page_entry *, G.by_depth_max);
2660 350 : new_save_in_use = XNEWVEC (unsigned long *, G.by_depth_max);
2661 :
2662 350 : memcpy (&new_by_depth[0],
2663 350 : &G.by_depth[count_old_page_tables],
2664 350 : count_new_page_tables * sizeof (void *));
2665 350 : memcpy (&new_by_depth[count_new_page_tables],
2666 : &G.by_depth[0],
2667 : count_old_page_tables * sizeof (void *));
2668 350 : memcpy (&new_save_in_use[0],
2669 350 : &G.save_in_use[count_old_page_tables],
2670 : count_new_page_tables * sizeof (void *));
2671 350 : memcpy (&new_save_in_use[count_new_page_tables],
2672 : &G.save_in_use[0],
2673 : count_old_page_tables * sizeof (void *));
2674 :
2675 350 : free (G.by_depth);
2676 350 : free (G.save_in_use);
2677 :
2678 350 : G.by_depth = new_by_depth;
2679 350 : G.save_in_use = new_save_in_use;
2680 :
2681 : /* Now update all the index_by_depth fields. */
2682 191104 : for (unsigned i = G.by_depth_in_use; i--;)
2683 : {
2684 190754 : page_entry *p = G.by_depth[i];
2685 190754 : p->index_by_depth = i;
2686 : }
2687 :
2688 : /* And last, we update the depth pointers in G.depth. The first
2689 : entry is already 0, and context 0 entries always start at index
2690 : 0, so there is nothing to update in the first slot. We need a
2691 : second slot only if we have old PTEs, and if we do, they start
2692 : at index count_new_page_tables. */
2693 350 : if (count_old_page_tables)
2694 350 : push_depth (count_new_page_tables);
2695 350 : }
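/* Illustrative sketch (not GCC code): move_ptes_to_front's rotation in
   isolation -- the last N_NEW entries of a pointer array are copied to
   the front of a fresh buffer, followed by the N_OLD original entries.
   Error handling is omitted.  */
#include <stdlib.h>
#include <string.h>

static void **
rotate_new_to_front (void **old, size_t n_old, size_t n_new)
{
  void **fresh = (void **) malloc ((n_old + n_new) * sizeof (void *));
  memcpy (fresh, old + n_old, n_new * sizeof (void *));  /* New first...  */
  memcpy (fresh + n_new, old, n_old * sizeof (void *));  /* ...then old.  */
  free (old);
  return fresh;
}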
2696 :
2697 : void
2698 350 : ggc_pch_read (FILE *f, void *addr)
2699 : {
2700 350 : struct ggc_pch_ondisk d;
2701 350 : unsigned i;
2702 350 : char *offs = (char *) addr;
2703 350 : unsigned long count_old_page_tables;
2704 350 : unsigned long count_new_page_tables;
2705 :
2706 350 : count_old_page_tables = G.by_depth_in_use;
2707 :
2708 350 : if (fread (&d, sizeof (d), 1, f) != 1)
2709 0 : fatal_error (input_location, "cannot read PCH file: %m");
2710 :
2711 : /* We've just read in a PCH file. So, every object that used to be
2712 : allocated is now free. */
2713 350 : clear_marks ();
2714 : #ifdef ENABLE_GC_CHECKING
2715 350 : poison_pages ();
2716 : #endif
2717 : /* Since we free all the allocated objects, the free list becomes
2718 : useless. Validate it now, which will also clear it. */
2719 350 : validate_free_objects ();
2720 :
2721 : /* No object read from a PCH file should ever be freed. So, set the
2722 : context depth to 1, and set the depth of all the currently-allocated
2723 : pages to be 1 too. PCH pages will have depth 0. */
2724 350 : gcc_assert (!G.context_depth);
2725 350 : G.context_depth = 1;
2726 : /* Allocate space for the depth 1 finalizers. */
2727 350 : G.finalizers.safe_push (vNULL);
2728 350 : G.vec_finalizers.safe_push (vNULL);
2729 350 : gcc_assert (G.finalizers.length() == 2);
2730 29750 : for (i = 0; i < NUM_ORDERS; i++)
2731 : {
2732 29400 : page_entry *p;
2733 210494 : for (p = G.pages[i]; p != NULL; p = p->next)
2734 181094 : p->context_depth = G.context_depth;
2735 : }
2736 :
2737 : /* Allocate the appropriate page-table entries for the pages read from
2738 : the PCH file. */
2739 :
2740 29750 : for (i = 0; i < NUM_ORDERS; i++)
2741 : {
2742 29400 : struct page_entry *entry;
2743 29400 : char *pte;
2744 29400 : size_t bytes;
2745 29400 : size_t num_objs;
2746 29400 : size_t j;
2747 :
2748 29400 : if (d.totals[i] == 0)
2749 19740 : continue;
2750 :
2751 9660 : bytes = PAGE_ALIGN (d.totals[i] * OBJECT_SIZE (i));
2752 9660 : num_objs = bytes / OBJECT_SIZE (i);
2753 9660 : entry = XCNEWVAR (struct page_entry, (sizeof (struct page_entry)
2754 : - sizeof (long)
2755 : + BITMAP_SIZE (num_objs + 1)));
2756 9660 : entry->bytes = bytes;
2757 9660 : entry->free_list = find_free_list (bytes);
2758 9660 : entry->page = offs;
2759 9660 : entry->context_depth = 0;
2760 9660 : offs += bytes;
2761 9660 : entry->num_free_objects = 0;
2762 9660 : entry->order = i;
2763 :
2764 9660 : for (j = 0;
2765 193729 : j + HOST_BITS_PER_LONG <= num_objs + 1;
2766 184069 : j += HOST_BITS_PER_LONG)
2767 184069 : entry->in_use_p[j / HOST_BITS_PER_LONG] = -1;
2768 219056 : for (; j < num_objs + 1; j++)
2769 209396 : entry->in_use_p[j / HOST_BITS_PER_LONG]
2770 209396 : |= 1L << (j % HOST_BITS_PER_LONG);
2771 :
2772 253603 : for (pte = entry->page;
2773 263263 : pte < entry->page + entry->bytes;
2774 253603 : pte += G.pagesize)
2775 253603 : set_page_table_entry (pte, entry);
2776 :
2777 9660 : if (G.page_tails[i] != NULL)
2778 8960 : G.page_tails[i]->next = entry;
2779 : else
2780 700 : G.pages[i] = entry;
2781 9660 : G.page_tails[i] = entry;
2782 :
2783 : /* We start off by just adding all the new information to the
2784 : end of the varrays; later, we will move the new information
2785 : to the front of the varrays, as the PCH page tables are at
2786 : context 0. */
2787 9660 : push_by_depth (entry, 0);
2788 : }
2789 :
2790 : /* Now, we update the various data structures that speed page table
2791 : handling. */
2792 350 : count_new_page_tables = G.by_depth_in_use - count_old_page_tables;
2793 :
2794 350 : move_ptes_to_front (count_old_page_tables, count_new_page_tables);
2795 :
2796 : /* Update the statistics. */
2797 350 : G.allocated = G.allocated_last_gc = offs - (char *)addr;
2798 350 : }