Line data Source code
1 : /* Instruction scheduling pass. This file contains definitions used
2 : internally in the scheduler.
3 : Copyright (C) 1992-2026 Free Software Foundation, Inc.
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #ifndef GCC_SCHED_INT_H
22 : #define GCC_SCHED_INT_H
23 :
24 : #ifdef INSN_SCHEDULING
25 :
/* Identifier of a scheduler pass: region scheduling, extended-basic-block
   scheduling, swing modulo scheduling, or selective scheduling.  */
enum sched_pass_id_t { SCHED_PASS_UNKNOWN, SCHED_RGN_PASS, SCHED_EBB_PASS,
		       SCHED_SMS_PASS, SCHED_SEL_PASS };
29 :
/* The algorithm used to implement -fsched-pressure.  */
enum sched_pressure_algorithm
{
  /* Register-pressure-sensitive scheduling is disabled.  */
  SCHED_PRESSURE_NONE,
  /* NOTE(review): presumably the "weighted" variant selected by
     -fsched-pressure-algorithm=weighted — confirm in haifa-sched.cc.  */
  SCHED_PRESSURE_WEIGHTED,
  /* NOTE(review): presumably the model-schedule variant selected by
     -fsched-pressure-algorithm=model — confirm in haifa-sched.cc.  */
  SCHED_PRESSURE_MODEL
};
37 :
38 : typedef vec<basic_block> bb_vec_t;
39 : typedef vec<rtx_insn *> insn_vec_t;
40 : typedef vec<rtx_insn *> rtx_vec_t;
41 :
42 : extern void sched_init_bbs (void);
43 :
44 : extern void sched_extend_luids (void);
45 : extern void sched_init_insn_luid (rtx_insn *);
46 : extern void sched_init_luids (const bb_vec_t &);
47 : extern void sched_finish_luids (void);
48 :
49 : extern void sched_extend_target (void);
50 :
51 : extern void haifa_init_h_i_d (const bb_vec_t &);
52 : extern void haifa_finish_h_i_d (void);
53 :
54 : /* Hooks that are common to all the schedulers. */
55 : struct common_sched_info_def
56 : {
57 : /* Called after blocks were rearranged due to movement of jump instruction.
58 : The first parameter - index of basic block, in which jump currently is.
59 : The second parameter - index of basic block, in which jump used
60 : to be.
61 : The third parameter - index of basic block, that follows the second
62 : parameter. */
63 : void (*fix_recovery_cfg) (int, int, int);
64 :
65 : /* Called to notify frontend, that new basic block is being added.
66 : The first parameter - new basic block.
67 : The second parameter - block, after which new basic block is being added,
68 : or the exit block, if recovery block is being added,
69 : or NULL, if standalone block is being added. */
70 : void (*add_block) (basic_block, basic_block);
71 :
72 : /* Estimate number of insns in the basic block. */
73 : int (*estimate_number_of_insns) (basic_block);
74 :
75 : /* Given a non-insn (!INSN_P (x)) return
76 : -1 - if this rtx don't need a luid.
77 : 0 - if it should have the same luid as the previous insn.
78 : 1 - if it needs a separate luid. */
79 : int (*luid_for_non_insn) (rtx);
80 :
81 : /* Scheduler pass identifier. It is preferably used in assertions. */
82 : enum sched_pass_id_t sched_pass_id;
83 : };
84 :
85 : extern struct common_sched_info_def *common_sched_info;
86 :
87 : extern const struct common_sched_info_def haifa_common_sched_info;
88 :
89 : /* Return true if selective scheduling pass is working. */
90 : inline bool
91 376584912 : sel_sched_p (void)
92 : {
93 376584912 : return common_sched_info->sched_pass_id == SCHED_SEL_PASS;
94 : }
95 :
96 : /* Returns maximum priority that an insn was assigned to. */
97 : extern int get_rgn_sched_max_insns_priority (void);
98 :
99 : /* Increases effective priority for INSN by AMOUNT. */
100 : extern void sel_add_to_insn_priority (rtx, int);
101 :
102 : /* True if during selective scheduling we need to emulate some of haifa
103 : scheduler behavior. */
104 : extern int sched_emulate_haifa_p;
105 :
106 : /* Mapping from INSN_UID to INSN_LUID. In the end all other per insn data
107 : structures should be indexed by luid. */
108 : extern vec<int> sched_luids;
109 : #define INSN_LUID(INSN) (sched_luids[INSN_UID (INSN)])
110 : #define LUID_BY_UID(UID) (sched_luids[UID])
111 :
112 : #define SET_INSN_LUID(INSN, LUID) \
113 : (sched_luids[INSN_UID (INSN)] = (LUID))
114 :
115 : /* The highest INSN_LUID. */
116 : extern int sched_max_luid;
117 :
118 : extern int insn_luid (rtx);
119 :
120 : /* This list holds ripped off notes from the current block. These notes will
121 : be attached to the beginning of the block when its scheduling is
122 : finished. */
123 : extern rtx_insn *note_list;
124 :
125 : extern void remove_notes (rtx_insn *, rtx_insn *);
126 : extern rtx_insn *restore_other_notes (rtx_insn *, basic_block);
127 : extern void sched_insns_init (rtx);
128 : extern void sched_insns_finish (void);
129 :
130 : extern void *xrecalloc (void *, size_t, size_t, size_t);
131 :
132 : extern void reemit_notes (rtx_insn *);
133 :
134 : /* Functions in haifa-sched.cc. */
135 : extern int haifa_classify_insn (const_rtx);
136 :
137 : /* Functions in sel-sched-ir.cc. */
138 : extern void sel_find_rgns (void);
139 : extern void sel_mark_hard_insn (rtx);
140 :
141 : extern size_t dfa_state_size;
142 :
143 : extern void advance_state (state_t);
144 :
145 : extern void setup_sched_dump (void);
146 : extern void sched_init (void);
147 : extern void sched_finish (void);
148 :
149 : extern bool sel_insn_is_speculation_check (rtx);
150 :
/* Describe the ready list of the scheduler.
   VEC holds space enough for all insns in the current region.  VECLEN
   says how many exactly.
   FIRST is the index of the element with the highest priority; i.e. the
   last one in the ready list, since elements are ordered by ascending
   priority.
   N_READY determines how many insns are on the ready list.
   N_DEBUG determines how many debug insns are on the ready list.  */
struct ready_list
{
  /* Storage for the ready insns, sized for the whole region.  */
  rtx_insn **vec;
  /* Allocated length of VEC.  */
  int veclen;
  /* Index of the highest-priority (i.e. last) element.  */
  int first;
  /* Number of insns currently on the list.  */
  int n_ready;
  /* Number of debug insns among the N_READY insns.  */
  int n_debug;
};
167 :
168 : extern signed char *ready_try;
169 : extern struct ready_list ready;
170 :
171 : extern int max_issue (struct ready_list *, int, state_t, bool, int *);
172 :
173 : extern void ebb_compute_jump_reg_dependencies (rtx, regset);
174 :
175 : extern edge find_fallthru_edge_from (basic_block);
176 :
177 : extern void (* sched_init_only_bb) (basic_block, basic_block);
178 : extern basic_block (* sched_split_block) (basic_block, rtx);
179 : extern basic_block sched_split_block_1 (basic_block, rtx);
180 : extern basic_block (* sched_create_empty_bb) (basic_block);
181 : extern basic_block sched_create_empty_bb_1 (basic_block);
182 :
183 : extern basic_block sched_create_recovery_block (basic_block *);
184 : extern void sched_create_recovery_edges (basic_block, basic_block,
185 : basic_block);
186 :
187 : /* Pointer to data describing the current DFA state. */
188 : extern state_t curr_state;
189 :
190 : /* Type to represent status of a dependence. */
191 : typedef unsigned int ds_t;
192 : #define BITS_PER_DEP_STATUS HOST_BITS_PER_INT
193 :
194 : /* Type to represent weakness of speculative dependence. */
195 : typedef unsigned int dw_t;
196 :
197 : extern enum reg_note ds_to_dk (ds_t);
198 : extern ds_t dk_to_ds (enum reg_note);
199 :
/* Describe a dependency that can be broken by making a replacement
   in one of the patterns.  LOC is the location, ORIG and NEWVAL the
   two alternative contents, and INSN the instruction that must be
   changed.  */
struct dep_replacement
{
  /* Location inside INSN's pattern where the replacement is made.  */
  rtx *loc;
  /* Original contents of *LOC.  */
  rtx orig;
  /* Replacement contents of *LOC that break the dependency.  */
  rtx newval;
  /* The instruction that must be changed.  */
  rtx_insn *insn;
};
211 :
/* Information about the dependency.  */
struct _dep
{
  /* Producer.  */
  rtx_insn *pro;

  /* Consumer.  */
  rtx_insn *con;

  /* If nonnull, holds a pointer to information about how to break the
     dependency by making a replacement in one of the insns.  There is
     only one such dependency for each insn that must be modified in
     order to break such a dependency.  */
  struct dep_replacement *replace;

  /* Dependency status.  This field holds all dependency types and additional
     information for speculative dependencies.  */
  ds_t status;

  /* Dependency major type.  This field is superseded by STATUS above.
     Though, it is still in place because some targets use it.  */
  ENUM_BITFIELD(reg_note) type:6;

  /* NOTE(review): presumably set when the dependency is not carried by a
     register — confirm against sched-deps.cc before relying on it.  */
  unsigned nonreg:1;
  /* NOTE(review): presumably set when more than one kind of dependency
     exists between PRO and CON — confirm against sched-deps.cc.  */
  unsigned multiple:1;

  /* Cached cost of the dependency.  Make sure to update UNKNOWN_DEP_COST
     when changing the size of this field.  */
  int cost:20;

  /* Padding; keeps the bitfields above totalling 32 bits
     (6 + 1 + 1 + 20 + 4).  */
  unsigned unused:4;
};
244 :
245 : #define UNKNOWN_DEP_COST ((int) ((unsigned int) -1 << 19))
246 :
247 : typedef struct _dep dep_def;
248 : typedef dep_def *dep_t;
249 :
250 : #define DEP_PRO(D) ((D)->pro)
251 : #define DEP_CON(D) ((D)->con)
252 : #define DEP_TYPE(D) ((D)->type)
253 : #define DEP_STATUS(D) ((D)->status)
254 : #define DEP_COST(D) ((D)->cost)
255 : #define DEP_NONREG(D) ((D)->nonreg)
256 : #define DEP_MULTIPLE(D) ((D)->multiple)
257 : #define DEP_REPLACE(D) ((D)->replace)
258 :
259 : /* Functions to work with dep. */
260 :
261 : extern void init_dep_1 (dep_t, rtx_insn *, rtx_insn *, enum reg_note, ds_t);
262 : extern void init_dep (dep_t, rtx_insn *, rtx_insn *, enum reg_note);
263 :
264 : extern void sd_debug_dep (dep_t);
265 :
266 : /* Definition of this struct resides below. */
267 : struct _dep_node;
268 : typedef struct _dep_node *dep_node_t;
269 :
/* A link in the dependency list.  This is essentially an equivalent of a
   single {INSN, DEPS}_LIST rtx.  */
struct _dep_link
{
  /* Dep node with all the data.  */
  dep_node_t node;

  /* Next link in the list.  For the last one it is NULL.  */
  struct _dep_link *next;

  /* Pointer to the next field of the previous link in the list.
     For the first link this points to the deps_list->first.

     With help of this field it is easy to remove and insert links to the
     list.  */
  struct _dep_link **prev_nextp;
};

/* Pointer-to-link shorthand used throughout the scheduler.  */
typedef struct _dep_link *dep_link_t;
288 :
289 : #define DEP_LINK_NODE(N) ((N)->node)
290 : #define DEP_LINK_NEXT(N) ((N)->next)
291 : #define DEP_LINK_PREV_NEXTP(N) ((N)->prev_nextp)
292 :
/* Macros to work with dep_link.  For most use cases only part of the
   dependency information is needed.  These macros conveniently provide
   that piece of information.  */
296 :
297 : #define DEP_LINK_DEP(N) (DEP_NODE_DEP (DEP_LINK_NODE (N)))
298 : #define DEP_LINK_PRO(N) (DEP_PRO (DEP_LINK_DEP (N)))
299 : #define DEP_LINK_CON(N) (DEP_CON (DEP_LINK_DEP (N)))
300 : #define DEP_LINK_TYPE(N) (DEP_TYPE (DEP_LINK_DEP (N)))
301 : #define DEP_LINK_STATUS(N) (DEP_STATUS (DEP_LINK_DEP (N)))
302 :
/* A list of dep_links, with a cached element count.  */
struct _deps_list
{
  /* First element; NULL for an empty list.  */
  dep_link_t first;

  /* Total number of elements in the list.  */
  int n_links;
};

/* Pointer-to-list shorthand.  */
typedef struct _deps_list *deps_list_t;
313 :
314 : #define DEPS_LIST_FIRST(L) ((L)->first)
315 : #define DEPS_LIST_N_LINKS(L) ((L)->n_links)
316 :
317 : /* Suppose we have a dependence Y between insn pro1 and con1, where pro1 has
318 : additional dependents con0 and con2, and con1 is dependent on additional
   insns pro0 and pro2:
320 :
321 : .con0 pro0
322 : . ^ |
323 : . | |
324 : . | |
325 : . X A
326 : . | |
327 : . | |
328 : . | V
329 : .pro1--Y-->con1
330 : . | ^
331 : . | |
332 : . | |
333 : . Z B
334 : . | |
335 : . | |
336 : . V |
337 : .con2 pro2
338 :
339 : This is represented using a "dep_node" for each dependence arc, which are
340 : connected as follows (diagram is centered around Y which is fully shown;
341 : other dep_nodes shown partially):
342 :
343 : . +------------+ +--------------+ +------------+
344 : . : dep_node X : | dep_node Y | : dep_node Z :
345 : . : : | | : :
346 : . : : | | : :
347 : . : forw : | forw | : forw :
348 : . : +--------+ : | +--------+ | : +--------+ :
349 : forw_deps : |dep_link| : | |dep_link| | : |dep_link| :
350 : +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
351 : |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
352 : +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
353 : . ^ ^ : | ^ | : | | ^ | | : | | :
354 : . | | : | | | : | | | | | : | | :
355 : . | +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
356 : . | : | | | : | | | | | : | | | :
357 : . | : | +----+ | : | | +----+ | | : | +----+ | :
358 : . | : | |prev| | : | | |prev| | | : | |prev| | :
359 : . | : | |next| | : | | |next| | | : | |next| | :
360 : . | : | +----+ | : | | +----+ | | : | +----+ | :
361 : . | : | | :<-+ | | | |<-+ : | | :<-+
362 : . | : | +----+ | : | | | +----+ | | | : | +----+ | : |
363 : . | : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
364 : . | : | +----+ | : | | +----+ | | : | +----+ | :
365 : . | : | | : | | | | : | | :
366 : . | : +--------+ : | +--------+ | : +--------+ :
367 : . | : : | | : :
368 : . | : SAME pro1 : | +--------+ | : SAME pro1 :
369 : . | : DIFF con0 : | |dep | | : DIFF con2 :
370 : . | : : | | | | : :
371 : . | | | +----+ | |
372 : .RTX<------------------------+--+-|pro1| | |
373 : .pro1 | | +----+ | |
374 : . | | | |
375 : . | | +----+ | |
376 : .RTX<------------------------+--+-|con1| | |
377 : .con1 | | +----+ | |
378 : . | | | | |
379 : . | | | +----+ | |
380 : . | | | |kind| | |
381 : . | | | +----+ | |
382 : . | : : | | |stat| | | : :
383 : . | : DIFF pro0 : | | +----+ | | : DIFF pro2 :
384 : . | : SAME con1 : | | | | : SAME con1 :
385 : . | : : | +--------+ | : :
386 : . | : : | | : :
387 : . | : back : | back | : back :
388 : . v : +--------+ : | +--------+ | : +--------+ :
389 : back_deps : |dep_link| : | |dep_link| | : |dep_link| :
390 : +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
391 : |first|----->| |next|-+------+->| |next|-+--+----->| |next|-+--->NULL
392 : +-----+ : | +----+ | : | | +----+ | | : | +----+ | :
393 : . ^ : | ^ | : | | ^ | | : | | :
394 : . | : | | | : | | | | | : | | :
395 : . +--<----+--+ +--+---<--+--+--+ +--+--+--<---+--+ | :
396 : . : | | | : | | | | | : | | | :
397 : . : | +----+ | : | | +----+ | | : | +----+ | :
398 : . : | |prev| | : | | |prev| | | : | |prev| | :
399 : . : | |next| | : | | |next| | | : | |next| | :
400 : . : | +----+ | : | | +----+ | | : | +----+ | :
401 : . : | | :<-+ | | | |<-+ : | | :<-+
402 : . : | +----+ | : | | | +----+ | | | : | +----+ | : |
403 : . : | |node|-+----+ | | |node|-+--+--+ : | |node|-+----+
404 : . : | +----+ | : | | +----+ | | : | +----+ | :
405 : . : | | : | | | | : | | :
406 : . : +--------+ : | +--------+ | : +--------+ :
407 : . : : | | : :
408 : . : dep_node A : | dep_node Y | : dep_node B :
409 : . +------------+ +--------------+ +------------+
410 : */
411 :
/* One dependence arc of the graph pictured above: the dependence itself
   (DEP) plus its links into the two per-insn lists it belongs to — the
   consumer's back_deps list (BACK) and the producer's forw_deps list
   (FORW).  */
struct _dep_node
{
  /* Backward link.  */
  struct _dep_link back;

  /* The dep.  */
  struct _dep dep;

  /* Forward link.  */
  struct _dep_link forw;
};
423 :
424 : #define DEP_NODE_BACK(N) (&(N)->back)
425 : #define DEP_NODE_DEP(N) (&(N)->dep)
426 : #define DEP_NODE_FORW(N) (&(N)->forw)
427 :
/* The following enumeration values tell us what dependencies we
   should use to implement the barrier.  We use true-dependencies for
   TRUE_BARRIER and anti-dependencies for MOVE_BARRIER.  */
enum reg_pending_barrier_mode
{
  /* No scheduling barrier is pending.  */
  NOT_A_BARRIER = 0,
  /* Barrier implemented with anti-dependencies.  */
  MOVE_BARRIER,
  /* Barrier implemented with true-dependencies.  */
  TRUE_BARRIER
};
437 :
/* Whether a register movement is associated with a call.  */
enum post_call_group
{
  /* Not part of a post-call group.  */
  not_post_call,
  /* Part of the group of register movements following a call.
     NOTE(review): exact state-machine semantics live in sched-deps.cc —
     confirm there.  */
  post_call,
  /* NOTE(review): presumably the initial state entered right at the
     call itself — confirm against sched-deps.cc.  */
  post_call_initial
};
445 :
/* Insns which affect pseudo-registers.  */
struct deps_reg
{
  /* INSN_LISTs of the pending insns that, respectively, use, set,
     implicitly set, use for control purposes, or clobber this
     register.  */
  rtx_insn_list *uses;
  rtx_insn_list *sets;
  rtx_insn_list *implicit_sets;
  rtx_insn_list *control_uses;
  rtx_insn_list *clobbers;
  /* Cached lengths of the USES and CLOBBERS lists.
     NOTE(review): presumably kept so the lists can be flushed when they
     grow too large — confirm against sched-deps.cc.  */
  int uses_length;
  int clobbers_length;
};
457 :
/* Describe state of dependencies used during sched_analyze phase.  */
class deps_desc
{
public:
  /* The *_insns and *_mems are paired lists.  Each pending memory operation
     will have a pointer to the MEM rtx on one list and a pointer to the
     containing insn on the other list in the same place in the list.  */

  /* We can't use add_dependence like the old code did, because a single insn
     may have multiple memory accesses, and hence needs to be on the list
     once for each memory access.  Add_dependence won't let you add an insn
     to a list more than once.  */

  /* An INSN_LIST containing all insns with pending read operations.  */
  rtx_insn_list *pending_read_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending reads.  The list
     can contain stack pointer instead of memory.  This is a special case (see
     sched-deps.cc::sched_analyze_1).  */
  rtx_expr_list *pending_read_mems;

  /* An INSN_LIST containing all insns with pending write operations.  */
  rtx_insn_list *pending_write_insns;

  /* An EXPR_LIST containing all MEM rtx's which are pending writes.  */
  rtx_expr_list *pending_write_mems;

  /* An INSN_LIST containing all jump insns.  */
  rtx_insn_list *pending_jump_insns;

  /* We must prevent the above lists from ever growing too large since
     the number of dependencies produced is at least O(N*N),
     and execution time is at least O(4*N*N), as a function of the
     length of these pending lists.  */

  /* Indicates the length of the pending_read list.  */
  int pending_read_list_length;

  /* Indicates the length of the pending_write list.  */
  int pending_write_list_length;

  /* Length of the pending memory flush list plus the length of the pending
     jump insn list.  Large functions with no calls may build up extremely
     large lists.  */
  int pending_flush_length;

  /* The last insn upon which all memory references must depend.
     This is an insn which flushed the pending lists, creating a dependency
     between it and all previously pending memory references.  This creates
     a barrier (or a checkpoint) which no memory reference is allowed to cross.

     This includes all non constant CALL_INSNs.  When we do interprocedural
     alias analysis, this restriction can be relaxed.
     This may also be an INSN that writes memory if the pending lists grow
     too large.  */
  rtx_insn_list *last_pending_memory_flush;

  /* A list of the last function calls we have seen.  We use a list to
     represent last function calls from multiple predecessor blocks.
     Used to prevent register lifetimes from expanding unnecessarily.  */
  rtx_insn_list *last_function_call;

  /* A list of the last function calls that may not return normally
     we have seen.  We use a list to represent last function calls from
     multiple predecessor blocks.  Used to prevent moving trapping insns
     across such calls.  */
  rtx_insn_list *last_function_call_may_noreturn;

  /* A list of insns which use a pseudo register that does not already
     cross a call.  We create dependencies between each of those insn
     and the next call insn, to ensure that they won't cross a call after
     scheduling is done.  */
  rtx_insn_list *sched_before_next_call;

  /* Similarly, a list of insns which should not cross a branch.  */
  rtx_insn_list *sched_before_next_jump;

  /* Used to keep post-call pseudo/hard reg movements together with
     the call.  See enum post_call_group above.  */
  enum post_call_group in_post_call_group_p;

  /* The last debug insn we've seen.  */
  rtx_insn *last_debug_insn;

  /* The last insn bearing REG_ARGS_SIZE that we've seen.  */
  rtx_insn *last_args_size;

  /* A list of all prologue insns we have seen without intervening epilogue
     insns, and one of all epilogue insns we have seen without intervening
     prologue insns.  This is used to prevent mixing prologue and epilogue
     insns.  See PR78029.  */
  rtx_insn_list *last_prologue;
  rtx_insn_list *last_epilogue;

  /* Whether the last *logue insn was an epilogue insn or a prologue insn
     instead.  */
  bool last_logue_was_epilogue;

  /* The maximum register number for the following arrays.  Before reload
     this is max_reg_num; after reload it is FIRST_PSEUDO_REGISTER.  */
  int max_reg;

  /* Element N is the next insn that sets (hard or pseudo) register
     N within the current basic block; or zero, if there is no
     such insn.  Needed for new registers which may be introduced
     by splitting insns.  Sized by MAX_REG above.  */
  struct deps_reg *reg_last;

  /* Element N is set for each register that has any nonzero element
     in reg_last[N].{uses,sets,clobbers}.  */
  regset_head reg_last_in_use;

  /* Shows the last value of reg_pending_barrier associated with the insn.  */
  enum reg_pending_barrier_mode last_reg_pending_barrier;

  /* True when this context should be treated as a readonly by
     the analysis.  */
  BOOL_BITFIELD readonly : 1;
};
577 :
578 : typedef class deps_desc *deps_t;
579 :
/* This structure holds some state of the current scheduling pass, and
   contains some function pointers that abstract out some of the non-generic
   functionality from functions such as schedule_block or schedule_insn.
   There is one global variable, current_sched_info, which points to the
   sched_info structure currently in use.  */
struct haifa_sched_info
{
  /* Add all insns that are initially ready to the ready list.  Called once
     before scheduling a set of insns.  */
  void (*init_ready_list) (void);
  /* Called after taking an insn from the ready list.  Returns true if
     this insn can be scheduled, false if we should silently discard it.  */
  bool (*can_schedule_ready_p) (rtx_insn *);
  /* Return true if there are more insns that should be scheduled.  */
  bool (*schedule_more_p) (void);
  /* Called after an insn has all its hard dependencies resolved.
     Adjusts status of instruction (which is passed through second parameter)
     to indicate if instruction should be moved to the ready list or the
     queue, or if it should silently discard it (until next resolved
     dependence).  */
  ds_t (*new_ready) (rtx_insn *, ds_t);
  /* Compare priority of two insns.  Return a positive number if the second
     insn is to be preferred for scheduling, and a negative one if the first
     is to be preferred.  Zero if they are equally good.  */
  int (*rank) (rtx_insn *, rtx_insn *);
  /* Return a string that contains the insn uid and optionally anything else
     necessary to identify this insn in an output.  It's valid to use a
     static buffer for this.  The ALIGNED parameter should cause the string
     to be formatted so that multiple output lines will line up nicely.  */
  const char *(*print_insn) (const rtx_insn *, int);
  /* Return true if an insn should be included in priority
     calculations.  */
  bool (*contributes_to_priority) (rtx_insn *, rtx_insn *);

  /* Return true if scheduling insn (passed as the parameter) will trigger
     finish of scheduling current block.  */
  bool (*insn_finishes_block_p) (rtx_insn *);

  /* The boundaries of the set of insns to be scheduled.  */
  rtx_insn *prev_head, *next_tail;

  /* Filled in after the schedule is finished; the first and last scheduled
     insns.  */
  rtx_insn *head, *tail;

  /* If nonzero, enables an additional sanity check in schedule_block.  */
  unsigned int queue_must_finish_empty:1;

  /* Maximum priority that has been assigned to an insn.  */
  int sched_max_insns_priority;

  /* Hooks to support speculative scheduling.  */

  /* Called to notify frontend that instruction is being added (second
     parameter == 0) or removed (second parameter == 1).  */
  void (*add_remove_insn) (rtx_insn *, int);

  /* Called to notify the frontend that instruction INSN is being
     scheduled.  */
  void (*begin_schedule_ready) (rtx_insn *insn);

  /* Called to notify the frontend that an instruction INSN is about to be
     moved to its correct place in the final schedule.  This is done for all
     insns in order of the schedule.  LAST indicates the last scheduled
     instruction.  */
  void (*begin_move_insn) (rtx_insn *insn, rtx_insn *last);

  /* If the second parameter is not NULL, return a non-NULL value if the
     basic block should be advanced.
     If the second parameter is NULL, return the next basic block in EBB.
     The first parameter is the current basic block in EBB.  */
  basic_block (*advance_target_bb) (basic_block, rtx_insn *);

  /* Allocate memory, store the frontend scheduler state in it, and
     return it.  */
  void *(*save_state) (void);
  /* Restore frontend scheduler state from the argument, and free the
     memory.  */
  void (*restore_state) (void *);

  /* ??? FIXME: should use straight bitfields inside sched_info instead of
     this flag field.  */
  unsigned int flags;
};
664 :
/* This structure holds description of the properties for speculative
   scheduling.  */
struct spec_info_def
{
  /* Holds types of allowed speculations: BEGIN_{DATA|CONTROL},
     BE_IN_{DATA_CONTROL}.  */
  int mask;

  /* A dump file for additional information on speculative scheduling.  */
  FILE *dump;

  /* Minimal cumulative weakness of speculative instruction's
     dependencies, so that insn will be scheduled.  */
  dw_t data_weakness_cutoff;

  /* Minimal usefulness of speculative instruction to be considered for
     scheduling.  */
  int control_weakness_cutoff;

  /* Flags from the enum SPEC_SCHED_FLAGS.  */
  int flags;
};

/* Pointer shorthand for the structure above.  */
typedef struct spec_info_def *spec_info_t;
688 :
689 : extern spec_info_t spec_info;
690 :
691 : extern struct haifa_sched_info *current_sched_info;
692 :
693 : /* Do register pressure sensitive insn scheduling if the flag is set
694 : up. */
695 : extern enum sched_pressure_algorithm sched_pressure;
696 :
697 : /* Map regno -> its pressure class. The map defined only when
698 : SCHED_PRESSURE_P is true. */
699 : extern enum reg_class *sched_regno_pressure_class;
700 :
701 : /* Indexed by INSN_UID, the collection of all data associated with
702 : a single instruction. */
703 :
/* Dependence-analysis data associated with a single instruction.  */
struct _haifa_deps_insn_data
{
  /* The number of incoming edges in the forward dependency graph.
     As scheduling proceeds, counts are decreased.  An insn moves to
     the ready queue when its counter reaches zero.  */
  int dep_count;

  /* Nonzero if instruction has internal dependence
     (e.g. add_dependence was invoked with (insn == elem)).  */
  unsigned int has_internal_dep;

  /* NB: We can't place 'struct _deps_list' here instead of deps_list_t into
     h_i_d because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  Thus
     BACK_DEPS and RESOLVED_BACK_DEPS are allocated on the heap and FORW_DEPS
     list is allocated on the obstack.  */

  /* A list of hard backward dependencies.  The insn is a consumer of all the
     deps mentioned here.  */
  deps_list_t hard_back_deps;

  /* A list of speculative (weak) dependencies.  The insn is a consumer of all
     the deps mentioned here.  */
  deps_list_t spec_back_deps;

  /* A list of insns which depend on the instruction.  Unlike 'back_deps',
     it represents forward dependencies.  */
  deps_list_t forw_deps;

  /* A list of scheduled producers of the instruction.  Links are being moved
     from 'back_deps' to 'resolved_back_deps' while scheduling.  */
  deps_list_t resolved_back_deps;

  /* A list of scheduled consumers of the instruction.  Links are being moved
     from 'forw_deps' to 'resolved_forw_deps' while scheduling to speed up
     the search in 'forw_deps'.  */
  deps_list_t resolved_forw_deps;

  /* If the insn is conditional (either through COND_EXEC, or because
     it is a conditional branch), this records the condition.  NULL
     for insns that haven't been seen yet or don't have a condition;
     const_true_rtx to mark an insn without a condition, or with a
     condition that has been clobbered by a subsequent insn.  */
  rtx cond;

  /* For a conditional insn, a list of insns that could set the condition
     register.  Used when generating control dependencies.  */
  rtx_insn_list *cond_deps;

  /* True if the condition in 'cond' should be reversed to get the actual
     condition.  */
  unsigned int reverse_cond : 1;

  /* Some insns (e.g. call) are not allowed to move across blocks.  */
  unsigned int cant_move : 1;
};
760 :
761 :
/* Bits used for storing values of the fields in the following
   structure.  */
#define INCREASE_BITS 8

/* The structure describes how the corresponding insn increases the
   register pressure for each pressure class.  */
struct reg_pressure_data
{
  /* Pressure increase for given class because of clobber.  */
  unsigned int clobber_increase : INCREASE_BITS;
  /* Increase in register pressure for given class because of register
     sets.  */
  unsigned int set_increase : INCREASE_BITS;
  /* Pressure increase for given class because of unused register
     set.  */
  unsigned int unused_set_increase : INCREASE_BITS;
  /* Pressure change: #sets - #deaths.  Signed, unlike the fields above,
     since deaths can outnumber sets.  */
  int change : INCREASE_BITS;
};
781 :
/* The following structure describes usage of registers by insns.
   Each node is linked into two chains: a cyclic one of all uses of the
   same regno, and a straight one of all uses within the same insn.  */
struct reg_use_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn using the regno.  */
  rtx_insn *insn;
  /* Cyclic list of elements with the same regno.  */
  struct reg_use_data *next_regno_use;
  /* List of elements with the same insn.  */
  struct reg_use_data *next_insn_use;
};
794 :
/* The following structure describes used sets of registers by insns.
   Registers are pseudos whose pressure class is not NO_REGS or hard
   registers available for allocations.  */
struct reg_set_data
{
  /* Regno used in the insn.  */
  int regno;
  /* Insn setting the regno.
     NOTE(review): declared as plain rtx while reg_use_data uses
     rtx_insn * — likely historical; confirm callers before tightening
     the type.  */
  rtx insn;
  /* List of elements with the same insn.  */
  struct reg_set_data *next_insn_set;
};
807 :
/* Status of an entry in the auto-prefetcher model (see
   autopref_multipass_data_ below).  */
enum autopref_multipass_data_status {
  /* Entry is irrelevant for auto-prefetcher.  */
  AUTOPREF_MULTIPASS_DATA_IRRELEVANT = -2,
  /* Entry is uninitialized.  */
  AUTOPREF_MULTIPASS_DATA_UNINITIALIZED = -1,
  /* Entry is relevant for auto-prefetcher and insn can be delayed
     to allow another insn through.  */
  AUTOPREF_MULTIPASS_DATA_NORMAL = 0,
  /* Entry is relevant for auto-prefetcher, but insn should not be
     delayed as that will break scheduling.  */
  AUTOPREF_MULTIPASS_DATA_DONT_DELAY = 1
};
820 :
/* Data for modeling cache auto-prefetcher.  */
struct autopref_multipass_data_
{
  /* Base part of memory address.  */
  rtx base;

  /* Memory offset from the base.  */
  int offset;

  /* Entry status; see enum autopref_multipass_data_status above.  */
  enum autopref_multipass_data_status status;
};
typedef struct autopref_multipass_data_ autopref_multipass_data_def;
typedef autopref_multipass_data_def *autopref_multipass_data_t;
835 :
/* Per-insn bookkeeping for the Haifa scheduler.  Instances live in the
   h_i_d vector below, indexed by INSN_UID through the HID macro.  */
struct _haifa_insn_data
{
  /* We can't place 'struct _deps_list' into h_i_d instead of deps_list_t
     because when h_i_d extends, addresses of the deps_list->first
     change without updating deps_list->first->next->prev_nextp.  */

  /* Logical uid gives the original ordering of the insns.  */
  int luid;

  /* A priority for each insn.  */
  int priority;

  /* The fusion priority for each insn.  */
  int fusion_priority;

  /* The minimum clock tick at which the insn becomes ready.  This is
     used to note timing constraints for the insns in the pending list.  */
  int tick;

  /* For insns that are scheduled at a fixed difference from another,
     this records the tick in which they must be ready.  */
  int exact_tick;

  /* INTER_TICK is used to adjust INSN_TICKs of instructions from the
     subsequent blocks in a region.  */
  int inter_tick;

  /* Used temporarily to estimate an INSN_TICK value for an insn given
     current knowledge.  */
  int tick_estimate;

  /* See comment on QUEUE_INDEX macro in haifa-sched.cc.  */
  int queue_index;

  /* Cached cost of the insn.  NOTE(review): presumably the value of
     insn_sched_cost (declared below); narrow type keeps h_i_d compact --
     confirm.  */
  short cost;

  /* '> 0' if priority is valid,
     '== 0' if priority was not yet computed,
     '< 0' if priority is invalid and should be recomputed.  */
  signed char priority_status;

  /* Set if there's DEF-USE dependence between some speculatively
     moved load insn and this one.  */
  unsigned int fed_by_spec_load : 1;
  unsigned int is_load_insn : 1;
  /* Nonzero if this insn has negative-cost forward dependencies against
     an already scheduled insn.  */
  unsigned int feeds_backtrack_insn : 1;

  /* Nonzero if this insn is a shadow of another, scheduled after a fixed
     delay.  We only emit shadows at the end of a cycle, with no other
     real insns following them.  */
  unsigned int shadow_p : 1;

  /* Used internally in unschedule_insns_until to mark insns that must have
     their TODO_SPEC recomputed.  */
  unsigned int must_recompute_spec : 1;

  /* What speculations are necessary to apply to schedule the instruction.  */
  ds_t todo_spec;

  /* What speculations were already applied.  */
  ds_t done_spec;

  /* What speculations are checked by this instruction.  */
  ds_t check_spec;

  /* Recovery block for speculation checks.  */
  basic_block recovery_block;

  /* Original pattern of the instruction.  */
  rtx orig_pat;

  /* For insns with DEP_CONTROL dependencies, the predicated pattern if it
     was ever successfully constructed.  */
  rtx predicated_pat;

  /* The following array contains info how the insn increases register
     pressure.  There is an element for each cover class of pseudos
     referenced in insns.  */
  struct reg_pressure_data *reg_pressure;
  /* The following array contains maximal reg pressure between last
     scheduled insn and given insn.  There is an element for each
     pressure class of pseudos referenced in insns.  This info updated
     after scheduling each insn for each insn between the two
     mentioned insns.  */
  int *max_reg_pressure;
  /* The following list contains info about used pseudos and hard
     registers available for allocation.  */
  struct reg_use_data *reg_use_list;
  /* The following list contains info about set pseudos and hard
     registers available for allocation.  */
  struct reg_set_data *reg_set_list;
  /* Info about how scheduling the insn changes cost of register
     pressure excess (between source and target).  */
  int reg_pressure_excess_cost_change;
  /* NOTE(review): presumably the insn's index in the SCHED_PRESSURE_MODEL
     schedule (accessed via INSN_MODEL_INDEX) -- confirm in haifa-sched.cc.  */
  int model_index;

  /* Original order of insns in the ready list.  */
  int rfs_debug_orig_order;

  /* The deciding reason for INSN's place in the ready list.  */
  int last_rfs_win;

  /* Two entries for cache auto-prefetcher model: one for mem reads,
     and one for mem writes.  */
  autopref_multipass_data_def autopref_multipass_data[2];
};
944 :
945 : typedef struct _haifa_insn_data haifa_insn_data_def;
946 : typedef haifa_insn_data_def *haifa_insn_data_t;
947 :
948 :
949 : extern vec<haifa_insn_data_def> h_i_d;
950 :
951 : #define HID(INSN) (&h_i_d[INSN_UID (INSN)])
952 :
953 : /* Accessor macros for h_i_d. There are more in haifa-sched.cc and
954 : sched-rgn.cc. */
955 : #define INSN_PRIORITY(INSN) (HID (INSN)->priority)
956 : #define INSN_FUSION_PRIORITY(INSN) (HID (INSN)->fusion_priority)
957 : #define INSN_REG_PRESSURE(INSN) (HID (INSN)->reg_pressure)
958 : #define INSN_MAX_REG_PRESSURE(INSN) (HID (INSN)->max_reg_pressure)
959 : #define INSN_REG_USE_LIST(INSN) (HID (INSN)->reg_use_list)
960 : #define INSN_REG_SET_LIST(INSN) (HID (INSN)->reg_set_list)
961 : #define INSN_REG_PRESSURE_EXCESS_COST_CHANGE(INSN) \
962 : (HID (INSN)->reg_pressure_excess_cost_change)
963 : #define INSN_PRIORITY_STATUS(INSN) (HID (INSN)->priority_status)
964 : #define INSN_MODEL_INDEX(INSN) (HID (INSN)->model_index)
965 : #define INSN_AUTOPREF_MULTIPASS_DATA(INSN) \
966 : (HID (INSN)->autopref_multipass_data)
967 :
968 : typedef struct _haifa_deps_insn_data haifa_deps_insn_data_def;
969 : typedef haifa_deps_insn_data_def *haifa_deps_insn_data_t;
970 :
971 :
972 : extern vec<haifa_deps_insn_data_def> h_d_i_d;
973 :
974 : #define HDID(INSN) (&h_d_i_d[INSN_LUID (INSN)])
975 : #define INSN_DEP_COUNT(INSN) (HDID (INSN)->dep_count)
976 : #define HAS_INTERNAL_DEP(INSN) (HDID (INSN)->has_internal_dep)
977 : #define INSN_FORW_DEPS(INSN) (HDID (INSN)->forw_deps)
978 : #define INSN_RESOLVED_BACK_DEPS(INSN) (HDID (INSN)->resolved_back_deps)
979 : #define INSN_RESOLVED_FORW_DEPS(INSN) (HDID (INSN)->resolved_forw_deps)
980 : #define INSN_HARD_BACK_DEPS(INSN) (HDID (INSN)->hard_back_deps)
981 : #define INSN_SPEC_BACK_DEPS(INSN) (HDID (INSN)->spec_back_deps)
982 : #define INSN_CACHED_COND(INSN) (HDID (INSN)->cond)
983 : #define INSN_REVERSE_COND(INSN) (HDID (INSN)->reverse_cond)
984 : #define INSN_COND_DEPS(INSN) (HDID (INSN)->cond_deps)
985 : #define CANT_MOVE(INSN) (HDID (INSN)->cant_move)
986 : #define CANT_MOVE_BY_LUID(LUID) (h_d_i_d[LUID].cant_move)
987 :
988 :
/* INSN_PRIORITY and INSN_PRIORITY_STATUS are defined with the other
   h_i_d accessors above; redefining them here was redundant (identical
   macro redefinitions are legal but pure noise), so only the derived
   predicate and the speculation accessors are defined below.  */
#define INSN_PRIORITY_KNOWN(INSN) (INSN_PRIORITY_STATUS (INSN) > 0)
#define TODO_SPEC(INSN) (HID (INSN)->todo_spec)
#define DONE_SPEC(INSN) (HID (INSN)->done_spec)
#define CHECK_SPEC(INSN) (HID (INSN)->check_spec)
#define RECOVERY_BLOCK(INSN) (HID (INSN)->recovery_block)
#define ORIG_PAT(INSN) (HID (INSN)->orig_pat)
#define PREDICATED_PAT(INSN) (HID (INSN)->predicated_pat)
998 :
999 : /* INSN is either a simple or a branchy speculation check. */
1000 : #define IS_SPECULATION_CHECK_P(INSN) \
1001 : (sel_sched_p () ? sel_insn_is_speculation_check (INSN) : RECOVERY_BLOCK (INSN) != NULL)
1002 :
1003 : /* INSN is a speculation check that will simply reexecute the speculatively
1004 : scheduled instruction if the speculation fails. */
1005 : #define IS_SPECULATION_SIMPLE_CHECK_P(INSN) \
1006 : (RECOVERY_BLOCK (INSN) == EXIT_BLOCK_PTR_FOR_FN (cfun))
1007 :
1008 : /* INSN is a speculation check that will branch to RECOVERY_BLOCK if the
1009 : speculation fails. Insns in that block will reexecute the speculatively
1010 : scheduled code and then will return immediately after INSN thus preserving
1011 : semantics of the program. */
1012 : #define IS_SPECULATION_BRANCHY_CHECK_P(INSN) \
1013 : (RECOVERY_BLOCK (INSN) != NULL \
1014 : && RECOVERY_BLOCK (INSN) != EXIT_BLOCK_PTR_FOR_FN (cfun))
1015 :
1016 :
1017 : /* Dep status (aka ds_t) of the link encapsulates all information for a given
1018 : dependency, including everything that is needed for speculative scheduling.
1019 :
1020 : The lay-out of a ds_t is as follows:
1021 :
1022 : 1. Integers corresponding to the probability of the dependence to *not*
1023 : exist. This is the probability that overcoming this dependence will
1024 : not be followed by execution of the recovery code. Note that however
1025 : high this probability is, the recovery code should still always be
1026 : generated to preserve semantics of the program.
1027 :
1028 : The probability values can be set or retrieved using the functions
   set_dep_weak () and get_dep_weak () in sched-deps.cc.  The values
1030 : are always in the range [0, MAX_DEP_WEAK].
1031 :
1032 : BEGIN_DATA : BITS_PER_DEP_WEAK
1033 : BE_IN_DATA : BITS_PER_DEP_WEAK
1034 : BEGIN_CONTROL : BITS_PER_DEP_WEAK
1035 : BE_IN_CONTROL : BITS_PER_DEP_WEAK
1036 :
1037 : The basic type of DS_T is a host int. For a 32-bits int, the values
1038 : will each take 6 bits.
1039 :
   2. The type of dependence.  This supersedes the old-style REG_NOTE_KIND
1041 : values. TODO: Use this field instead of DEP_TYPE, or make DEP_TYPE
1042 : extract the dependence type from here.
1043 :
1044 : dep_type : 4 => DEP_{TRUE|OUTPUT|ANTI|CONTROL}
1045 :
1046 : 3. Various flags:
1047 :
1048 : HARD_DEP : 1 => Set if an instruction has a non-speculative
1049 : dependence. This is an instruction property
1050 : so this bit can only appear in the TODO_SPEC
1051 : field of an instruction.
1052 : DEP_POSTPONED : 1 => Like HARD_DEP, but the hard dependence may
1053 : still be broken by adjusting the instruction.
1054 : DEP_CANCELLED : 1 => Set if a dependency has been broken using
1055 : some form of speculation.
1056 : RESERVED : 1 => Reserved for use in the delay slot scheduler.
1057 :
1058 : See also: check_dep_status () in sched-deps.cc . */
1059 :
1060 : /* The number of bits per weakness probability. There are 4 weakness types
1061 : and we need 8 bits for other data in a DS_T. */
1062 : #define BITS_PER_DEP_WEAK ((BITS_PER_DEP_STATUS - 8) / 4)
1063 :
1064 : /* Mask of speculative weakness in dep_status. */
1065 : #define DEP_WEAK_MASK ((1 << BITS_PER_DEP_WEAK) - 1)
1066 :
1067 : /* This constant means that dependence is fake with 99.999...% probability.
1068 : This is the maximum value, that can appear in dep_status.
1069 : Note, that we don't want MAX_DEP_WEAK to be the same as DEP_WEAK_MASK for
1070 : debugging reasons. Though, it can be set to DEP_WEAK_MASK, and, when
1071 : done so, we'll get fast (mul for)/(div by) NO_DEP_WEAK. */
1072 : #define MAX_DEP_WEAK (DEP_WEAK_MASK - 1)
1073 :
1074 : /* This constant means that dependence is 99.999...% real and it is a really
1075 : bad idea to overcome it (though this can be done, preserving program
1076 : semantics). */
1077 : #define MIN_DEP_WEAK 1
1078 :
1079 : /* This constant represents 100% probability.
1080 : E.g. it is used to represent weakness of dependence, that doesn't exist.
1081 : This value never appears in a ds_t, it is only used for computing the
1082 : weakness of a dependence. */
1083 : #define NO_DEP_WEAK (MAX_DEP_WEAK + MIN_DEP_WEAK)
1084 :
1085 : /* Default weakness of speculative dependence. Used when we can't say
1086 : neither bad nor good about the dependence. */
1087 : #define UNCERTAIN_DEP_WEAK (MAX_DEP_WEAK - MAX_DEP_WEAK / 4)
1088 :
/* Bit offsets of the four speculative-weakness fields within a
   dep_status value.  Each field is BITS_PER_DEP_WEAK bits wide and the
   fields are packed consecutively starting at bit 0.  */
enum SPEC_TYPES_OFFSETS {
  BEGIN_DATA_BITS_OFFSET = 0,
  BE_IN_DATA_BITS_OFFSET = BEGIN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BEGIN_CONTROL_BITS_OFFSET = BE_IN_DATA_BITS_OFFSET + BITS_PER_DEP_WEAK,
  BE_IN_CONTROL_BITS_OFFSET = BEGIN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK
};
1096 :
1097 : /* The following defines provide numerous constants used to distinguish
1098 : between different types of speculative dependencies. They are also
1099 : used as masks to clear/preserve the bits corresponding to the type
1100 : of dependency weakness. */
1101 :
1102 : /* Dependence can be overcome with generation of new data speculative
1103 : instruction. */
1104 : #define BEGIN_DATA (((ds_t) DEP_WEAK_MASK) << BEGIN_DATA_BITS_OFFSET)
1105 :
1106 : /* This dependence is to the instruction in the recovery block, that was
1107 : formed to recover after data-speculation failure.
   Thus, this dependence can be overcome by generating a copy of
   this instruction in the recovery block.  */
1110 : #define BE_IN_DATA (((ds_t) DEP_WEAK_MASK) << BE_IN_DATA_BITS_OFFSET)
1111 :
1112 : /* Dependence can be overcome with generation of new control speculative
1113 : instruction. */
1114 : #define BEGIN_CONTROL (((ds_t) DEP_WEAK_MASK) << BEGIN_CONTROL_BITS_OFFSET)
1115 :
1116 : /* This dependence is to the instruction in the recovery block, that was
1117 : formed to recover after control-speculation failure.
1118 : Thus, this dependence can be overcome with generating of the copy of
1119 : this instruction in the recovery block. */
1120 : #define BE_IN_CONTROL (((ds_t) DEP_WEAK_MASK) << BE_IN_CONTROL_BITS_OFFSET)
1121 :
1122 : /* A few convenient combinations. */
1123 : #define BEGIN_SPEC (BEGIN_DATA | BEGIN_CONTROL)
1124 : #define DATA_SPEC (BEGIN_DATA | BE_IN_DATA)
1125 : #define CONTROL_SPEC (BEGIN_CONTROL | BE_IN_CONTROL)
1126 : #define SPECULATIVE (DATA_SPEC | CONTROL_SPEC)
1127 : #define BE_IN_SPEC (BE_IN_DATA | BE_IN_CONTROL)
1128 :
1129 : /* Constants, that are helpful in iterating through dep_status. */
1130 : #define FIRST_SPEC_TYPE BEGIN_DATA
1131 : #define LAST_SPEC_TYPE BE_IN_CONTROL
1132 : #define SPEC_TYPE_SHIFT BITS_PER_DEP_WEAK
1133 :
1134 : /* Dependence on instruction can be of multiple types
   (e.g. true and output).  These fields enhance REG_NOTE_KIND information
1136 : of the dependence. */
1137 : #define DEP_TRUE (((ds_t) 1) << (BE_IN_CONTROL_BITS_OFFSET + BITS_PER_DEP_WEAK))
1138 : #define DEP_OUTPUT (DEP_TRUE << 1)
1139 : #define DEP_ANTI (DEP_OUTPUT << 1)
1140 : #define DEP_CONTROL (DEP_ANTI << 1)
1141 :
1142 : #define DEP_TYPES (DEP_TRUE | DEP_OUTPUT | DEP_ANTI | DEP_CONTROL)
1143 :
1144 : /* Instruction has non-speculative dependence. This bit represents the
1145 : property of an instruction - not the one of a dependence.
1146 : Therefore, it can appear only in the TODO_SPEC field of an instruction. */
1147 : #define HARD_DEP (DEP_CONTROL << 1)
1148 :
1149 : /* Like HARD_DEP, but dependencies can perhaps be broken by modifying
1150 : the instructions. This is used for example to change:
1151 :
1152 : rn++ => rm=[rn + 4]
1153 : rm=[rn] rn++
1154 :
1155 : For instructions that have this bit set, one of the dependencies of
1156 : the instructions will have a non-NULL REPLACE field in its DEP_T.
1157 : Just like HARD_DEP, this bit is only ever set in TODO_SPEC. */
1158 : #define DEP_POSTPONED (HARD_DEP << 1)
1159 :
1160 : /* Set if a dependency is cancelled via speculation. */
1161 : #define DEP_CANCELLED (DEP_POSTPONED << 1)
1162 :
1163 :
/* This represents the results of calling the dependence-modifying
   functions in sched-deps.cc: what, if anything, changed about the
   dependence between a producer and a consumer.  */
enum DEPS_ADJUST_RESULT {
  /* No dependence needed (e.g. producer == consumer).  */
  DEP_NODEP,
  /* Dependence is already present and wasn't modified.  */
  DEP_PRESENT,
  /* Existing dependence was modified to include additional information.  */
  DEP_CHANGED,
  /* New dependence has been created.  */
  DEP_CREATED
};
1176 :
/* Represents the bits that can be set in the flags field of the
   sched_info structure.  */
enum SCHED_FLAGS {
  /* If set, generate links between instruction as DEPS_LIST.
     Otherwise, generate usual INSN_LIST links.  */
  USE_DEPS_LIST = 1,
  /* Perform data or control (or both) speculation.
     Results in generation of data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  */
  DO_SPECULATION = USE_DEPS_LIST << 1,
  /* NOTE(review): the following five flags carry no upstream comments;
     their names suggest they enable backtracking, predication and the
     "don't break dependencies" mode, and mark the region resp. EBB
     scheduler as active -- confirm against haifa-sched.cc before
     relying on these descriptions.  */
  DO_BACKTRACKING = DO_SPECULATION << 1,
  DO_PREDICATION = DO_BACKTRACKING << 1,
  DONT_BREAK_DEPENDENCIES = DO_PREDICATION << 1,
  SCHED_RGN = DONT_BREAK_DEPENDENCIES << 1,
  SCHED_EBB = SCHED_RGN << 1,
  /* Scheduler can possibly create new basic blocks.  Used for assertions.  */
  NEW_BBS = SCHED_EBB << 1,
  SEL_SCHED = NEW_BBS << 1
};
1196 :
/* Flag bits for speculative scheduling.  NOTE(review): presumably stored
   in the spec_info structure (see spec_info below) -- confirm.  */
enum SPEC_SCHED_FLAGS {
  COUNT_SPEC_IN_CRITICAL_PATH = 1,
  SEL_SCHED_SPEC_DONT_CHECK_CONTROL = COUNT_SPEC_IN_CRITICAL_PATH << 1
};
1201 :
1202 : #define NOTE_NOT_BB_P(NOTE) (NOTE_P (NOTE) && (NOTE_KIND (NOTE) \
1203 : != NOTE_INSN_BASIC_BLOCK))
1204 :
1205 : extern FILE *sched_dump;
1206 : extern int sched_verbose;
1207 :
1208 : extern spec_info_t spec_info;
1209 : extern bool haifa_recovery_bb_ever_added_p;
1210 :
1211 : /* Exception Free Loads:
1212 :
1213 : We define five classes of speculative loads: IFREE, IRISKY,
1214 : PFREE, PRISKY, and MFREE.
1215 :
1216 : IFREE loads are loads that are proved to be exception-free, just
1217 : by examining the load insn. Examples for such loads are loads
1218 : from TOC and loads of global data.
1219 :
1220 : IRISKY loads are loads that are proved to be exception-risky,
1221 : just by examining the load insn. Examples for such loads are
1222 : volatile loads and loads from shared memory.
1223 :
1224 : PFREE loads are loads for which we can prove, by examining other
1225 : insns, that they are exception-free. Currently, this class consists
1226 : of loads for which we are able to find a "similar load", either in
1227 : the target block, or, if only one split-block exists, in that split
1228 : block. Load2 is similar to load1 if both have same single base
1229 : register. We identify only part of the similar loads, by finding
1230 : an insn upon which both load1 and load2 have a DEF-USE dependence.
1231 :
1232 : PRISKY loads are loads for which we can prove, by examining other
1233 : insns, that they are exception-risky. Currently we have two proofs for
1234 : such loads. The first proof detects loads that are probably guarded by a
1235 : test on the memory address. This proof is based on the
1236 : backward and forward data dependence information for the region.
1237 : Let load-insn be the examined load.
1238 : Load-insn is PRISKY iff ALL the following hold:
1239 :
1240 : - insn1 is not in the same block as load-insn
1241 : - there is a DEF-USE dependence chain (insn1, ..., load-insn)
1242 : - test-insn is either a compare or a branch, not in the same block
1243 : as load-insn
1244 : - load-insn is reachable from test-insn
1245 : - there is a DEF-USE dependence chain (insn1, ..., test-insn)
1246 :
1247 : This proof might fail when the compare and the load are fed
1248 : by an insn not in the region. To solve this, we will add to this
1249 : group all loads that have no input DEF-USE dependence.
1250 :
1251 : The second proof detects loads that are directly or indirectly
1252 : fed by a speculative load. This proof is affected by the
1253 : scheduling process. We will use the flag fed_by_spec_load.
1254 : Initially, all insns have this flag reset. After a speculative
1255 : motion of an insn, if insn is either a load, or marked as
1256 : fed_by_spec_load, we will also mark as fed_by_spec_load every
1257 : insn1 for which a DEF-USE dependence (insn, insn1) exists. A
1258 : load which is fed_by_spec_load is also PRISKY.
1259 :
1260 : MFREE (maybe-free) loads are all the remaining loads. They may be
1261 : exception-free, but we cannot prove it.
1262 :
1263 : Now, all loads in IFREE and PFREE classes are considered
1264 : exception-free, while all loads in IRISKY and PRISKY classes are
1265 : considered exception-risky. As for loads in the MFREE class,
1266 : these are considered either exception-free or exception-risky,
1267 : depending on whether we are pessimistic or optimistic. We have
1268 : to take the pessimistic approach to assure the safety of
1269 : speculative scheduling, but we can take the optimistic approach
1270 : by invoking the -fsched_spec_load_dangerous option. */
1271 :
/* Trap-risk classification of an insn for speculative motion; the load
   classes are described in the "Exception Free Loads" commentary above.
   Larger values mean riskier.  */
enum INSN_TRAP_CLASS
{
  TRAP_FREE = 0, IFREE = 1, PFREE_CANDIDATE = 2,
  PRISKY_CANDIDATE = 3, IRISKY = 4, TRAP_RISKY = 5
};

/* Pick the riskier (numerically larger) of two trap classes.  */
#define WORST_CLASS(class1, class2) \
((class1 > class2) ? class1 : class2)
1280 :
1281 : #ifndef __GNUC__
1282 : #define __inline
1283 : #endif
1284 :
1285 : #ifndef HAIFA_INLINE
1286 : #define HAIFA_INLINE __inline
1287 : #endif
1288 :
/* Hooks invoked during dependence analysis, plus flags controlling how
   dependencies are generated; accessed through the sched_deps_info
   pointer declared below.  */
struct sched_deps_info_def
{
  /* Called when computing dependencies for a JUMP_INSN.  This function
     should store the set of registers that must be considered as set by
     the jump in the regset.  */
  void (*compute_jump_reg_dependencies) (rtx, regset);

  /* Start analyzing insn.  */
  void (*start_insn) (rtx_insn *);

  /* Finish analyzing insn.  */
  void (*finish_insn) (void);

  /* Start analyzing insn LHS (Left Hand Side).  */
  void (*start_lhs) (rtx);

  /* Finish analyzing insn LHS.  */
  void (*finish_lhs) (void);

  /* Start analyzing insn RHS (Right Hand Side).  */
  void (*start_rhs) (rtx);

  /* Finish analyzing insn RHS.  */
  void (*finish_rhs) (void);

  /* Note set of the register.  */
  void (*note_reg_set) (int);

  /* Note clobber of the register.  */
  void (*note_reg_clobber) (int);

  /* Note use of the register.  */
  void (*note_reg_use) (int);

  /* Note memory dependence of type DS between MEM1 and MEM2 (which is
     in the INSN2).  */
  void (*note_mem_dep) (rtx mem1, rtx mem2, rtx_insn *insn2, ds_t ds);

  /* Note a dependence of type DS from the INSN.  */
  void (*note_dep) (rtx_insn *, ds_t ds);

  /* Nonzero if we should use cselib for better alias analysis.  This
     must be 0 if the dependency information is used after sched_analyze
     has completed, e.g. if we're using it to initialize state for successor
     blocks in region scheduling.  */
  unsigned int use_cselib : 1;

  /* If set, generate links between instruction as DEPS_LIST.
     Otherwise, generate usual INSN_LIST links.  */
  unsigned int use_deps_list : 1;

  /* Generate data and control speculative dependencies.
     Requires USE_DEPS_LIST set.  */
  unsigned int generate_spec_deps : 1;
};
1344 :
1345 : extern struct sched_deps_info_def *sched_deps_info;
1346 :
1347 :
1348 : /* Functions in sched-deps.cc. */
1349 : extern rtx sched_get_reverse_condition_uncached (const rtx_insn *);
1350 : extern bool sched_insns_conditions_mutex_p (const rtx_insn *,
1351 : const rtx_insn *);
1352 : extern bool sched_insn_is_legitimate_for_speculation_p (const rtx_insn *, ds_t);
1353 : extern void add_dependence (rtx_insn *, rtx_insn *, enum reg_note);
1354 : extern void sched_analyze (class deps_desc *, rtx_insn *, rtx_insn *);
1355 : extern void init_deps (class deps_desc *, bool);
1356 : extern void init_deps_reg_last (class deps_desc *);
1357 : extern void free_deps (class deps_desc *);
1358 : extern void init_deps_global (void);
1359 : extern void finish_deps_global (void);
1360 : extern void deps_analyze_insn (class deps_desc *, rtx_insn *);
1361 : extern void remove_from_deps (class deps_desc *, rtx_insn *);
1362 : extern void init_insn_reg_pressure_info (rtx_insn *);
1363 : extern void get_implicit_reg_pending_clobbers (HARD_REG_SET *, rtx_insn *);
1364 :
1365 : extern dw_t get_dep_weak (ds_t, ds_t);
1366 : extern ds_t set_dep_weak (ds_t, ds_t, dw_t);
1367 : extern dw_t estimate_dep_weak (rtx, rtx);
1368 : extern ds_t ds_merge (ds_t, ds_t);
1369 : extern ds_t ds_full_merge (ds_t, ds_t, rtx, rtx);
1370 : extern ds_t ds_max_merge (ds_t, ds_t);
1371 : extern dw_t ds_weak (ds_t);
1372 : extern ds_t ds_get_speculation_types (ds_t);
1373 : extern ds_t ds_get_max_dep_weak (ds_t);
1374 :
1375 : extern void sched_deps_init (bool);
1376 : extern void sched_deps_finish (void);
1377 :
1378 : extern void haifa_note_reg_set (int);
1379 : extern void haifa_note_reg_clobber (int);
1380 : extern void haifa_note_reg_use (int);
1381 :
1382 : extern void maybe_extend_reg_info_p (void);
1383 :
1384 : extern void deps_start_bb (class deps_desc *, rtx_insn *);
1385 : extern enum reg_note ds_to_dt (ds_t);
1386 :
1387 : extern bool deps_pools_are_empty_p (void);
1388 : extern void sched_free_deps (rtx_insn *, rtx_insn *, bool);
1389 : extern void extend_dependency_caches (int, bool);
1390 :
1391 : extern void debug_ds (ds_t);
1392 :
1393 :
1394 : /* Functions in haifa-sched.cc. */
1395 : extern void initialize_live_range_shrinkage (void);
1396 : extern void finish_live_range_shrinkage (void);
1397 : extern void sched_init_region_reg_pressure_info (void);
1398 : extern void free_global_sched_pressure_data (void);
1399 : extern int haifa_classify_insn (const_rtx);
1400 : extern void get_ebb_head_tail (basic_block, basic_block,
1401 : rtx_insn **, rtx_insn **);
1402 : extern bool no_real_insns_p (const rtx_insn *, const rtx_insn *);
1403 :
1404 : extern int insn_sched_cost (rtx_insn *);
1405 : extern int dep_cost_1 (dep_t, dw_t);
1406 : extern int dep_cost (dep_t);
1407 : extern int set_priorities (rtx_insn *, rtx_insn *);
1408 :
1409 : extern void sched_setup_bb_reg_pressure_info (basic_block, rtx_insn *);
1410 : extern bool schedule_block (basic_block *, state_t);
1411 :
1412 : extern int cycle_issued_insns;
1413 : extern int issue_rate;
1414 : extern int dfa_lookahead;
1415 :
1416 : extern int autopref_multipass_dfa_lookahead_guard (rtx_insn *, int);
1417 :
1418 : extern rtx_insn *ready_element (struct ready_list *, int);
1419 : extern rtx_insn **ready_lastpos (struct ready_list *);
1420 :
1421 : extern int try_ready (rtx_insn *);
1422 : extern void sched_extend_ready_list (int);
1423 : extern void sched_finish_ready_list (void);
1424 : extern void sched_change_pattern (rtx, rtx);
1425 : extern int sched_speculate_insn (rtx_insn *, ds_t, rtx *);
1426 : extern void unlink_bb_notes (basic_block, basic_block);
1427 : extern void add_block (basic_block, basic_block);
1428 : extern rtx_note *bb_note (basic_block);
1429 : extern void concat_note_lists (rtx_insn *, rtx_insn **);
1430 : extern rtx_insn *sched_emit_insn (rtx);
1431 : extern rtx_insn *get_ready_element (int);
1432 : extern int number_in_ready (void);
1433 :
1434 : /* Types and functions in sched-ebb.cc. */
1435 :
1436 : extern basic_block schedule_ebb (rtx_insn *, rtx_insn *, bool);
1437 : extern void schedule_ebbs_init (void);
1438 : extern void schedule_ebbs_finish (void);
1439 :
1440 : /* Types and functions in sched-rgn.cc. */
1441 :
/* A region is the main entity for interblock scheduling: insns
   are allowed to move between blocks in the same region, along
   control flow graph edges, in the 'up' direction.  */
struct region
{
  /* Number of extended basic blocks in region.  */
  int rgn_nr_blocks;
  /* cblocks in the region -- actually an index into rgn_bb_table
     (NOTE(review): presumably the index of the region's first block;
     confirm against the RGN_BLOCKS users in sched-rgn.cc).  */
  int rgn_blocks;
  /* Dependencies for this region are already computed.  Basically, indicates,
     that this is a recovery block.  */
  unsigned int dont_calc_deps : 1;
  /* This region has at least one non-trivial ebb.  */
  unsigned int has_real_ebb : 1;
};
1457 :
1458 : extern int nr_regions;
1459 : extern region *rgn_table;
1460 : extern int *rgn_bb_table;
1461 : extern int *block_to_bb;
1462 : extern int *containing_rgn;
1463 :
1464 : /* Often used short-hand in the scheduler. The rest of the compiler uses
1465 : BLOCK_FOR_INSN(INSN) and an indirect reference to get the basic block
1466 : number ("index"). For historical reasons, the scheduler does not. */
1467 : #define BLOCK_NUM(INSN) (BLOCK_FOR_INSN (INSN)->index + 0)
1468 :
1469 : #define RGN_NR_BLOCKS(rgn) (rgn_table[rgn].rgn_nr_blocks)
1470 : #define RGN_BLOCKS(rgn) (rgn_table[rgn].rgn_blocks)
1471 : #define RGN_DONT_CALC_DEPS(rgn) (rgn_table[rgn].dont_calc_deps)
1472 : #define RGN_HAS_REAL_EBB(rgn) (rgn_table[rgn].has_real_ebb)
1473 : #define BLOCK_TO_BB(block) (block_to_bb[block])
1474 : #define CONTAINING_RGN(block) (containing_rgn[block])
1475 :
1476 : /* The mapping from ebb to block. */
1477 : extern int *ebb_head;
1478 : #define BB_TO_BLOCK(ebb) (rgn_bb_table[ebb_head[ebb]])
1479 : #define EBB_FIRST_BB(ebb) BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (ebb))
1480 : #define EBB_LAST_BB(ebb) \
1481 : BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[ebb_head[ebb + 1] - 1])
1482 : #define INSN_BB(INSN) (BLOCK_TO_BB (BLOCK_NUM (INSN)))
1483 :
1484 : extern int current_nr_blocks;
1485 : extern int current_blocks;
1486 : extern int target_bb;
1487 : extern bool sched_no_dce;
1488 :
1489 : extern void set_modulo_params (int, int, int, int);
1490 : extern void record_delay_slot_pair (rtx_insn *, rtx_insn *, int, int);
1491 : extern rtx_insn *real_insn_for_shadow (rtx_insn *);
1492 : extern void discard_delay_pairs_above (int);
1493 : extern void free_delay_pairs (void);
1494 : extern void add_delay_dependencies (rtx_insn *);
1495 : extern bool sched_is_disabled_for_current_region_p (void);
1496 : extern void sched_rgn_init (bool);
1497 : extern void sched_rgn_finish (void);
1498 : extern void rgn_setup_region (int);
1499 : extern void sched_rgn_compute_dependencies (int);
1500 : extern void sched_rgn_local_init (int);
1501 : extern void sched_rgn_local_finish (void);
1502 : extern void sched_rgn_local_free (void);
1503 : extern void extend_regions (void);
1504 : extern void rgn_make_new_region_out_of_new_block (basic_block);
1505 :
1506 : extern void compute_priorities (void);
1507 : extern void increase_insn_priority (rtx_insn *, int);
1508 : extern void debug_rgn_dependencies (int);
1509 : extern void debug_dependencies (rtx_insn *, rtx_insn *);
1510 : extern void dump_rgn_dependencies_dot (FILE *);
1511 : extern void dump_rgn_dependencies_dot (const char *);
1512 :
1513 : extern void free_rgn_deps (void);
1514 : extern bool contributes_to_priority (rtx_insn *, rtx_insn *);
1515 : extern void extend_rgns (int *, int *, sbitmap, int *);
1516 : extern void deps_join (class deps_desc *, class deps_desc *);
1517 :
1518 : extern void rgn_setup_common_sched_info (void);
1519 : extern void rgn_setup_sched_infos (void);
1520 :
1521 : extern void debug_regions (void);
1522 : extern void debug_region (int);
1523 : extern void dump_region_dot (FILE *, int);
1524 : extern void dump_region_dot_file (const char *, int);
1525 :
1526 : extern void haifa_sched_init (void);
1527 : extern void haifa_sched_finish (void);
1528 :
1529 : extern void find_modifiable_mems (rtx_insn *, rtx_insn *);
1530 :
1531 : /* sched-deps.cc interface to walk, add, search, update, resolve, delete
1532 : and debug instruction dependencies. */
1533 :
1534 : /* Constants defining dependences lists. */
1535 :
1536 : /* No list. */
1537 : #define SD_LIST_NONE (0)
1538 :
1539 : /* hard_back_deps. */
1540 : #define SD_LIST_HARD_BACK (1)
1541 :
1542 : /* spec_back_deps. */
1543 : #define SD_LIST_SPEC_BACK (2)
1544 :
1545 : /* forw_deps. */
1546 : #define SD_LIST_FORW (4)
1547 :
1548 : /* resolved_back_deps. */
1549 : #define SD_LIST_RES_BACK (8)
1550 :
1551 : /* resolved_forw_deps. */
1552 : #define SD_LIST_RES_FORW (16)
1553 :
1554 : #define SD_LIST_BACK (SD_LIST_HARD_BACK | SD_LIST_SPEC_BACK)
1555 :
1556 : /* A type to hold above flags. */
1557 : typedef int sd_list_types_def;
1558 :
1559 : extern void sd_next_list (const_rtx, sd_list_types_def *, deps_list_t *, bool *);
1560 :
/* Iterator to walk through, resolve and delete dependencies.  Created
   by sd_iterator_start and advanced/tested by the inline helpers
   below.  */
struct _sd_iterator
{
  /* What lists to walk.  Can be any combination of SD_LIST_* flags.
     Flags are cleared as each list is exhausted.  */
  sd_list_types_def types;

  /* Instruction dependencies lists of which will be walked.  */
  rtx insn;

  /* Pointer to the next field of the previous element.  This is not
     simply a pointer to the next element to allow easy deletion from the
     list.  When a dep is being removed from the list the iterator
     will automatically advance because the value in *linkp will start
     referring to the next element.  */
  dep_link_t *linkp;

  /* True if the current list is a resolved one.  */
  bool resolved_p;
};

typedef struct _sd_iterator sd_iterator_def;
1582 :
1583 : /* ??? We can move some definitions that are used in below inline functions
1584 : out of sched-int.h to sched-deps.cc provided that the below functions will
1585 : become global externals.
1586 : These definitions include:
1587 : * struct _deps_list: opaque pointer is needed at global scope.
1588 : * struct _dep_link: opaque pointer is needed at scope of sd_iterator_def.
1589 : * struct _dep_node: opaque pointer is needed at scope of
     struct _dep_link.  */
1591 :
1592 : /* Return initialized iterator. */
1593 : inline sd_iterator_def
1594 1856981154 : sd_iterator_start (rtx insn, sd_list_types_def types)
1595 : {
1596 : /* Some dep_link a pointer to which will return NULL. */
1597 1856981154 : static dep_link_t null_link = NULL;
1598 :
1599 1856981154 : sd_iterator_def i;
1600 :
1601 1856981154 : i.types = types;
1602 1856981154 : i.insn = insn;
1603 1856981154 : i.linkp = &null_link;
1604 :
1605 : /* Avoid 'uninitialized warning'. */
1606 1856981154 : i.resolved_p = false;
1607 :
1608 1856981154 : return i;
1609 : }
1610 :
1611 : /* Return the current element. */
1612 : inline bool
1613 4575735351 : sd_iterator_cond (sd_iterator_def *it_ptr, dep_t *dep_ptr)
1614 : {
1615 8519632335 : while (true)
1616 : {
1617 6547683843 : dep_link_t link = *it_ptr->linkp;
1618 :
1619 6547683843 : if (link != NULL)
1620 : {
1621 3094777416 : *dep_ptr = DEP_LINK_DEP (link);
1622 3094777416 : return true;
1623 : }
1624 : else
1625 : {
1626 3452906427 : sd_list_types_def types = it_ptr->types;
1627 :
1628 3452906427 : if (types != SD_LIST_NONE)
1629 : /* Switch to next list. */
1630 : {
1631 1971948493 : deps_list_t list;
1632 :
1633 1971948493 : sd_next_list (it_ptr->insn,
1634 : &it_ptr->types, &list, &it_ptr->resolved_p);
1635 :
1636 1971948493 : if (list)
1637 : {
1638 1971948492 : it_ptr->linkp = &DEPS_LIST_FIRST (list);
1639 1971948492 : continue;
1640 : }
1641 : }
1642 :
1643 1480957935 : *dep_ptr = NULL;
1644 1480957935 : return false;
1645 : }
1646 : }
1647 : }
1648 :
1649 : /* Advance iterator. */
1650 : inline void
1651 2294268861 : sd_iterator_next (sd_iterator_def *it_ptr)
1652 : {
1653 1026201577 : it_ptr->linkp = &DEP_LINK_NEXT (*it_ptr->linkp);
1654 2293887404 : }
1655 :
/* Loop wrapper: iterate over all dependencies of INSN in the lists
   selected by LIST_TYPES.  */
1657 : #define FOR_EACH_DEP(INSN, LIST_TYPES, ITER, DEP) \
1658 : for ((ITER) = sd_iterator_start ((INSN), (LIST_TYPES)); \
1659 : sd_iterator_cond (&(ITER), &(DEP)); \
1660 : sd_iterator_next (&(ITER)))
1661 :
1662 : #define IS_DISPATCH_ON 1
1663 : #define IS_CMP 2
1664 : #define DISPATCH_VIOLATION 3
1665 : #define FITS_DISPATCH_WINDOW 4
1666 : #define DISPATCH_INIT 5
1667 : #define ADD_TO_DISPATCH_WINDOW 6
1668 :
1669 : extern int sd_lists_size (const_rtx, sd_list_types_def);
1670 : extern bool sd_lists_empty_p (const_rtx, sd_list_types_def);
1671 : extern void sd_init_insn (rtx_insn *);
1672 : extern void sd_finish_insn (rtx_insn *);
1673 : extern dep_t sd_find_dep_between (rtx, rtx, bool);
1674 : extern void sd_add_dep (dep_t, bool);
1675 : extern enum DEPS_ADJUST_RESULT sd_add_or_update_dep (dep_t, bool);
1676 : extern void sd_resolve_dep (sd_iterator_def);
1677 : extern void sd_unresolve_dep (sd_iterator_def);
1678 : extern void sd_copy_back_deps (rtx_insn *, rtx_insn *, bool);
1679 : extern void sd_delete_dep (sd_iterator_def);
1680 : extern void sd_debug_lists (rtx, sd_list_types_def);
1681 :
1682 : extern int dep_list_size (rtx_insn *, sd_list_types_def);
1683 :
1684 : /* Macros and declarations for scheduling fusion. */
1685 : #define FUSION_MAX_PRIORITY (INT_MAX)
1686 : extern bool sched_fusion;
1687 :
1688 : #endif /* INSN_SCHEDULING */
1689 :
1690 : #endif /* GCC_SCHED_INT_H */
1691 :
|