Line data Source code
1 : /* Instruction scheduling pass.
2 : Copyright (C) 1992-2026 Free Software Foundation, Inc.
3 : Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
4 : and currently maintained by, Jim Wilson (wilson@cygnus.com)
5 :
6 : This file is part of GCC.
7 :
8 : GCC is free software; you can redistribute it and/or modify it under
9 : the terms of the GNU General Public License as published by the Free
10 : Software Foundation; either version 3, or (at your option) any later
11 : version.
12 :
13 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
14 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
16 : for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with GCC; see the file COPYING3. If not see
20 : <http://www.gnu.org/licenses/>. */
21 :
22 : #include "config.h"
23 : #include "system.h"
24 : #include "coretypes.h"
25 : #include "backend.h"
26 : #include "target.h"
27 : #include "rtl.h"
28 : #include "cfghooks.h"
29 : #include "df.h"
30 : #include "profile.h"
31 : #include "insn-attr.h"
32 : #include "cfgrtl.h"
33 : #include "cfgbuild.h"
34 : #include "sched-int.h"
35 :
36 :
37 : #ifdef INSN_SCHEDULING
38 :
39 : /* The number of insns to be scheduled in total. */
40 : static int rgn_n_insns;
41 :
42 : /* The number of insns scheduled so far. */
43 : static int sched_rgn_n_insns;
44 :
45 : /* Set of blocks, that already have their dependencies calculated. */
46 : static bitmap_head dont_calc_deps;
47 :
48 : /* Last basic block in current ebb. */
49 : static basic_block last_bb;
50 :
51 : /* Implementations of the sched_info functions for region scheduling. */
52 : static void init_ready_list (void);
53 : static void begin_schedule_ready (rtx_insn *);
54 : static bool schedule_more_p (void);
55 : static const char *ebb_print_insn (const rtx_insn *, int);
56 : static int rank (rtx_insn *, rtx_insn *);
57 : static bool ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
58 : static basic_block earliest_block_with_similiar_load (basic_block, rtx);
59 : static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
60 : static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);
61 :
62 : static void ebb_add_remove_insn (rtx_insn *, int);
63 : static void ebb_add_block (basic_block, basic_block);
64 : static basic_block advance_target_bb (basic_block, rtx_insn *);
65 : static void ebb_fix_recovery_cfg (int, int, int);
66 :
67 : /* Allocate memory and store the state of the frontend. Return the allocated
68 : memory. */
69 : static void *
70 0 : save_ebb_state (void)
71 : {
72 0 : int *p = XNEW (int);
73 0 : *p = sched_rgn_n_insns;
74 0 : return p;
75 : }
76 :
77 : /* Restore the state of the frontend from P_, then free it. */
78 : static void
79 0 : restore_ebb_state (void *p_)
80 : {
81 0 : int *p = (int *)p_;
82 0 : sched_rgn_n_insns = *p;
83 0 : free (p_);
84 0 : }
85 :
86 : /* Return true if there are more insns that should be scheduled. */
87 :
88 : static bool
89 2266 : schedule_more_p (void)
90 : {
91 2266 : return sched_rgn_n_insns < rgn_n_insns;
92 : }
93 :
94 : /* Print dependency information about ebb between HEAD and TAIL. */
95 : static void
96 0 : debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
97 : {
98 0 : fprintf (sched_dump,
99 : ";; --------------- forward dependences: ------------ \n");
100 :
101 0 : fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
102 0 : BLOCK_NUM (head), BLOCK_NUM (tail));
103 :
104 0 : debug_dependencies (head, tail);
105 0 : }
106 :
107 : /* Add all insns that are initially ready to the ready list READY. Called
108 : once before scheduling a set of insns. */
109 :
110 : static void
111 174 : init_ready_list (void)
112 : {
113 174 : int n = 0;
114 174 : rtx_insn *prev_head = current_sched_info->prev_head;
115 174 : rtx_insn *next_tail = current_sched_info->next_tail;
116 174 : rtx_insn *insn;
117 :
118 174 : sched_rgn_n_insns = 0;
119 :
120 : /* Print debugging information. */
121 174 : if (sched_verbose >= 5)
122 0 : debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));
123 :
124 : /* Initialize ready list with all 'ready' insns in target block.
125 : Count number of insns in the target block being scheduled. */
126 1605 : for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
127 : {
128 1257 : try_ready (insn);
129 1257 : n++;
130 : }
131 :
132 174 : gcc_assert (n == rgn_n_insns);
133 174 : }
134 :
135 : /* INSN is being scheduled after LAST. Update counters. */
136 : static void
137 1257 : begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
138 : {
139 1257 : sched_rgn_n_insns++;
140 1257 : }
141 :
/* INSN is being moved to its place in the schedule, after LAST.

   Normally this is a no-op, but when a jump that ends the last block
   of the ebb is scheduled before some of the insns that originally
   preceded it, those insns end up after the jump and need a basic
   block of their own.  We create that block here and extend the
   scheduling region to cover it.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case, where we do have partially dead
	 instruction scheduled after last control flow instruction.
	 In this case we can create new basic block.  It is
	 always exactly one basic block last in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      /* The jump must really be the final insn of the last block and
	 not a speculation check (those are handled elsewhere).  */
      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
			   && !IS_SPECULATION_CHECK_P (insn)
			   && BB_HEAD (last_bb) != insn
			   && BB_END (last_bb) == insn);

      {
	/* Sanity-check what follows the jump: a note/label when there
	   is a fallthru successor, a barrier otherwise.  */
	rtx_insn *x = NEXT_INSN (insn);
	if (e)
	  gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
	else
	  gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
	{
	  /* Split the fallthru edge; the new block will receive the
	     insns scheduled after the jump.  */
	  bb = split_edge (e);
	  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
	}
      else
	{
	  /* Create an empty unreachable block after the INSN.  */
	  rtx_insn *next = NEXT_INSN (insn);
	  if (next && BARRIER_P (next))
	    next = NEXT_INSN (next);
	  bb = create_basic_block (next, NULL_RTX, last_bb);
	}

      /* split_edge () creates BB before E->DEST.  Keep in mind, that
	 this operation extends scheduling region till the end of BB.
	 Hence, we need to shift NEXT_TAIL, so haifa-sched.cc won't go out
	 of the scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  This reaches
	 ebb_add_block, which updates last_bb — hence the assert.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}
203 :
/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  /* Shared static buffer — the returned pointer is only valid until
     the next call.  */
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, " %4d", INSN_UID (insn));

  return tmp;
}
222 :
223 : /* Compare priority of two insns. Return a positive number if the second
224 : insn is to be preferred for scheduling, and a negative one if the first
225 : is to be preferred. Zero if they are equally good. */
226 :
227 : static int
228 59942 : rank (rtx_insn *insn1, rtx_insn *insn2)
229 : {
230 59942 : basic_block bb1 = BLOCK_FOR_INSN (insn1);
231 59942 : basic_block bb2 = BLOCK_FOR_INSN (insn2);
232 :
233 59942 : if (bb1->count > bb2->count)
234 : return -1;
235 59942 : if (bb1->count < bb2->count)
236 0 : return 1;
237 : return 0;
238 : }
239 :
240 : /* NEXT is an instruction that depends on INSN (a backward dependence);
241 : return true if we should include this dependence in priority
242 : calculations. */
243 :
244 : static bool
245 4550 : ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
246 : rtx_insn *insn ATTRIBUTE_UNUSED)
247 : {
248 4550 : return true;
249 : }
250 :
251 : /* INSN is a JUMP_INSN. Store the set of registers that
252 : must be considered as used by this jump in USED. */
253 :
254 : void
255 90 : ebb_compute_jump_reg_dependencies (rtx insn, regset used)
256 : {
257 90 : basic_block b = BLOCK_FOR_INSN (insn);
258 90 : edge e;
259 90 : edge_iterator ei;
260 :
261 270 : FOR_EACH_EDGE (e, ei, b->succs)
262 180 : if ((e->flags & EDGE_FALLTHRU) == 0)
263 90 : bitmap_ior_into (used, df_get_live_in (e->dest));
264 90 : }
265 :
/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

/* Common scheduler hooks; copied from haifa_common_sched_info and then
   customized in schedule_ebbs_init.  */
static struct common_sched_info_def ebb_common_sched_info;

/* Dependency-analysis hooks for EBB scheduling.  Only the jump
   register dependency hook is provided; the remaining callbacks are
   unused (NULL).  The trailing integers are flag fields of
   sched_deps_info_def — see sched-int.h for their meaning.  */
static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1, 0, 0
  };

/* Haifa scheduler hooks implementing EBB scheduling; the field order
   follows struct haifa_sched_info in sched-int.h.  */
static struct haifa_sched_info ebb_sched_info =
  {
    init_ready_list,
    NULL,
    schedule_more_p,
    NULL,
    rank,
    ebb_print_insn,
    ebb_contributes_to_priority,
    NULL, /* insn_finishes_block_p */

    NULL, NULL,
    NULL, NULL,
    1, 0,

    ebb_add_remove_insn,
    begin_schedule_ready,
    begin_move_insn,
    advance_target_bb,

    save_ebb_state,
    restore_ebb_state,

    SCHED_EBB
    /* We can create new blocks in begin_schedule_ready ().  */
    | NEW_BBS
  };
306 :
/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and load2 have a def-use dependence upon
   the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   load2 anyhow.

   The function uses list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  /* Scan producers of LOAD_INSN looking for a true (def-use)
     dependence; each such producer is a candidate 'insn1'.  */
  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
	/* Found a DEF-USE dependence (insn1, load_insn).  */
	{
	  sd_iterator_def fore_sd_it;
	  dep_t fore_dep;

	  /* Now scan consumers of insn1 for another true dependence:
	     a sibling load 'insn2' fed by the same definition.  */
	  FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
	    {
	      rtx_insn *insn2 = DEP_CON (fore_dep);
	      basic_block insn2_block = BLOCK_FOR_INSN (insn2);

	      if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
		{
		  /* Skip candidates no earlier than the best found.  */
		  if (earliest_block != NULL
		      && earliest_block->index < insn2_block->index)
		    continue;

		  /* Found a DEF-USE dependence (insn1, insn2).  */
		  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
		    /* insn2 not guaranteed to be a 1 base reg load.  */
		    continue;

		  /* insn2 only qualifies if its block is NOT on the
		     already-processed list (chained via bb->aux).  */
		  for (bb = last_block; bb; bb = (basic_block) bb->aux)
		    if (insn2_block == bb)
		      break;

		  if (!bb)
		    /* insn2 is the similar load.  */
		    earliest_block = insn2_block;
		}
	    }
	}
    }

  return earliest_block;
}
370 :
/* The following function adds dependencies between jumps and risky
   insns in given ebb.  A "risky" insn is one that might trap; adding
   an anti/control dependence on the preceding jump keeps it from
   being speculatively hoisted above the branch that guards it.

   Walks the insns from HEAD to TAIL, chaining the blocks of the jumps
   seen so far through bb->aux (cleared again before returning).  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
	{
	  /* Record this jump's block on the aux-chained list of
	     processed blocks (consumed by
	     earliest_block_with_similiar_load).  */
	  bb = BLOCK_FOR_INSN (insn);
	  bb->aux = last_block;
	  last_block = bb;
	  /* Ensure blocks stay in the same order.  */
	  if (last_jump)
	    add_dependence (insn, last_jump, REG_DEP_ANTI);
	  last_jump = insn;
	}
      else if (INSN_P (insn) && last_jump != NULL_RTX)
	{
	  classification = haifa_classify_insn (insn);
	  prev = last_jump;

	  switch (classification)
	    {
	    case PFREE_CANDIDATE:
	      if (flag_schedule_speculative_load)
		{
		  /* If an earlier block contains a similar load, the
		     dependence may be anchored there instead of on
		     the most recent jump, permitting speculation up
		     to that block.  */
		  bb = earliest_block_with_similiar_load (last_block, insn);
		  if (bb)
		    {
		      bb = (basic_block) bb->aux;
		      if (!bb)
			break;
		      prev = BB_END (bb);
		    }
		}
	      /* Fall through.  */
	    case TRAP_RISKY:
	    case IRISKY:
	    case PRISKY_CANDIDATE:
	      /* ??? We could implement better checking PRISKY_CANDIDATEs
		 analogous to sched-rgn.cc.  */
	      /* We cannot change the mode of the backward
		 dependency because REG_DEP_ANTI has the lowest
		 rank.  */
	      if (! sched_insns_conditions_mutex_p (insn, prev))
		{
		  if ((current_sched_info->flags & DO_SPECULATION)
		      && (spec_info->mask & BEGIN_CONTROL))
		    {
		      /* Control speculation enabled: record a weak
			 BEGIN_CONTROL dependence instead of a hard
			 control dependence.  */
		      dep_def _dep, *dep = &_dep;

		      init_dep (dep, prev, insn, REG_DEP_ANTI);

		      if (current_sched_info->flags & USE_DEPS_LIST)
			{
			  DEP_STATUS (dep) = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
							   MAX_DEP_WEAK);

			}
		      sd_add_or_update_dep (dep, false);
		    }
		  else
		    add_dependence (insn, prev, REG_DEP_CONTROL);
		}

	      break;

	    default:
	      break;
	    }
	}
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}
460 :
/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL_RTX.

   On success, returns the last basic block of the (possibly modified)
   ebb.  Side effects: sets the file-static last_bb and the global
   rgn_n_insns, and updates current_sched_info boundaries.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  class deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
	head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
	tail = PREV_INSN (tail);
      else if (LABEL_P (head))
	head = NEXT_INSN (head);
      else
	break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  /* Nothing to schedule if the region contains no real insns.  */
  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
	targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they always are single block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
	      || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
		  && EDGE_COUNT (last_bb->succs) == 0);
      /* Drop the empty unreachable tail block from the CFG.  */
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}
565 :
/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  Installs the EBB implementations of the scheduler
   hook tables, initializes the Haifa scheduler, and sets up the
   DONT_CALC_DEPS bitmap.  Must be paired with schedule_ebbs_finish.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    /* Start from the generic Haifa hooks, then override the ones the
       EBB pass specializes.  */
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
	    sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, &bitmap_default_obstack);
}
592 :
/* Perform cleanups after scheduling using schedules_ebbs or schedule_ebb.
   Releases the DONT_CALC_DEPS bitmap and tears down the Haifa
   scheduler state initialized by schedule_ebbs_init.  */
void
schedule_ebbs_finish (void)
{
  bitmap_release (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}
606 :
/* The main entry point in this file.  Forms extended basic blocks by
   chaining each block to its likely-taken fallthru successor, then
   schedules every ebb in the function via schedule_ebb.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  /* Choose the branch-probability threshold below which a fallthru
     edge is too cold to extend the ebb across; the feedback-directed
     parameter is used when real profile data was read.  */
  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = param_tracer_min_branch_probability_feedback;
  else
    probability_cutoff = param_tracer_min_branch_probability;
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
	continue;

      /* Grow the ebb: keep absorbing the fallthru successor until a
	 label, a missing or cold fallthru edge, or a block excluded
	 from scheduling stops the chain.  */
      for (;;)
	{
	  edge e;
	  tail = BB_END (bb);
	  if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
	      || LABEL_P (BB_HEAD (bb->next_bb)))
	    break;
	  e = find_fallthru_edge (bb->succs);
	  if (! e)
	    break;
	  if (e->probability.initialized_p ()
	      && e->probability.to_reg_br_prob_base () <= probability_cutoff)
	    break;
	  if (e->dest->flags & BB_DISABLE_SCHEDULE)
	    break;
	  bb = bb->next_bb;
	}

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}
659 :
660 : /* INSN has been added to/removed from current ebb. */
661 : static void
662 0 : ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
663 : {
664 0 : if (!remove_p)
665 0 : rgn_n_insns++;
666 : else
667 0 : rgn_n_insns--;
668 0 : }
669 :
/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERS,
     therefore, they always form single block EBB,
     therefore, we can use rec->index to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    /* BB is a recovery block whose dependencies were already
       computed; mark it so schedule_ebb skips dependence analysis.  */
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    /* BB extends the current ebb and becomes its new last block
       (relied upon by begin_move_insn).  */
    last_bb = bb;
}
682 :
/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.

   With a non-null INSN: return BB itself when INSN belongs to a later
   block (so the target block must not advance past a jump), or 0 to
   keep scheduling into the current target.  With INSN null: advance
   to the next block of the ebb that contains real insns.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
	  && control_flow_insn_p (insn)
	  /* We handle interblock movement of the speculation check
	     or over a speculation check in
	     haifa-sched.cc: move_block_after_check ().  */
	  && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
	  && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
	{
	  /* Assert that we don't move jumps across blocks.  */
	  gcc_assert (!control_flow_insn_p (BB_END (bb))
		      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
	  return bb;
	}
      else
	return 0;
    }
  else
    /* Return next non empty block.  */
    {
      do
	{
	  /* last_bb always has insns, so we cannot run off the ebb.  */
	  gcc_assert (bb != last_bb);

	  bb = bb->next_bb;
	}
      /* A block whose end is its own bb note is empty — skip it.  */
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}
720 :
/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.

   If the jump's new block (JUMP_BB_NEXTI) was the last block of the
   ebb, the block the jump came from (JUMP_BBI) becomes the new
   last_bb.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
		      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}
733 :
734 : #endif /* INSN_SCHEDULING */
|