Line data Source code
1 : /* Instruction scheduling pass. This file computes dependencies between
2 : instructions.
3 : Copyright (C) 1992-2026 Free Software Foundation, Inc.
4 : Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
5 : and currently maintained by, Jim Wilson (wilson@cygnus.com)
6 :
7 : This file is part of GCC.
8 :
9 : GCC is free software; you can redistribute it and/or modify it under
10 : the terms of the GNU General Public License as published by the Free
11 : Software Foundation; either version 3, or (at your option) any later
12 : version.
13 :
14 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
15 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
16 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
17 : for more details.
18 :
19 : You should have received a copy of the GNU General Public License
20 : along with GCC; see the file COPYING3. If not see
21 : <http://www.gnu.org/licenses/>. */
22 :
23 : #include "config.h"
24 : #include "system.h"
25 : #include "coretypes.h"
26 : #include "backend.h"
27 : #include "target.h"
28 : #include "rtl.h"
29 : #include "tree.h"
30 : #include "df.h"
31 : #include "insn-config.h"
32 : #include "regs.h"
33 : #include "memmodel.h"
34 : #include "ira.h"
35 : #include "ira-int.h"
36 : #include "insn-attr.h"
37 : #include "cfgbuild.h"
38 : #include "sched-int.h"
39 : #include "cselib.h"
40 : #include "function-abi.h"
41 :
42 : #ifdef INSN_SCHEDULING
43 :
/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  One element of
   per-insn dependence data per insn; the element type and its
   accessors live in sched-int.h.  */
vec<haifa_deps_insn_data_def>
h_d_i_d = vNULL;
50 :
51 : /* Return the major type present in the DS. */
52 : enum reg_note
53 0 : ds_to_dk (ds_t ds)
54 : {
55 0 : if (ds & DEP_TRUE)
56 : return REG_DEP_TRUE;
57 :
58 0 : if (ds & DEP_OUTPUT)
59 : return REG_DEP_OUTPUT;
60 :
61 0 : if (ds & DEP_CONTROL)
62 : return REG_DEP_CONTROL;
63 :
64 0 : gcc_assert (ds & DEP_ANTI);
65 :
66 : return REG_DEP_ANTI;
67 : }
68 :
69 : /* Return equivalent dep_status. */
70 : ds_t
71 0 : dk_to_ds (enum reg_note dk)
72 : {
73 0 : switch (dk)
74 : {
75 : case REG_DEP_TRUE:
76 : return DEP_TRUE;
77 :
78 0 : case REG_DEP_OUTPUT:
79 0 : return DEP_OUTPUT;
80 :
81 0 : case REG_DEP_CONTROL:
82 0 : return DEP_CONTROL;
83 :
84 0 : default:
85 0 : gcc_assert (dk == REG_DEP_ANTI);
86 : return DEP_ANTI;
87 : }
88 : }
89 :
90 : /* Functions to operate with dependence information container - dep_t. */
91 :
/* Init DEP with the arguments: producer PRO, consumer CON, major
   dependence kind TYPE and full dep_status bits DS.  All auxiliary
   fields are reset to their "nothing known yet" values.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  /* No cost computed yet.  */
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
  dep->unused = 0;
}
106 :
107 : /* Init DEP with the arguments.
108 : While most of the scheduler (including targets) only need the major type
109 : of the dependency, it is convenient to hide full dep_status from them. */
110 : void
111 607135891 : init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
112 : {
113 607135891 : ds_t ds;
114 :
115 607135891 : if ((current_sched_info->flags & USE_DEPS_LIST))
116 0 : ds = dk_to_ds (kind);
117 : else
118 : ds = 0;
119 :
120 607135891 : init_dep_1 (dep, pro, con, kind, ds);
121 607135891 : }
122 :
123 : /* Make a copy of FROM in TO. */
124 : static void
125 216173709 : copy_dep (dep_t to, dep_t from)
126 : {
127 216173709 : memcpy (to, from, sizeof (*to));
128 0 : }
129 :
130 : static void dump_ds (FILE *, ds_t);
131 :
/* Define flags for dump_dep ().  Bit 1 is reserved: dump_dep () treats
   it as a request for DUMP_DEP_ALL, so the individual flags start
   at 2.  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE	\
		      |DUMP_DEP_STATUS)
149 :
150 : /* Dump DEP to DUMP.
151 : FLAGS is a bit mask specifying what information about DEP needs
152 : to be printed.
153 : If FLAGS has the very first bit set, then dump all information about DEP
154 : and propagate this bit into the callee dump functions. */
155 : static void
156 0 : dump_dep (FILE *dump, dep_t dep, int flags)
157 : {
158 0 : if (flags & 1)
159 0 : flags |= DUMP_DEP_ALL;
160 :
161 0 : fprintf (dump, "<");
162 :
163 0 : if (flags & DUMP_DEP_PRO)
164 0 : fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));
165 :
166 0 : if (flags & DUMP_DEP_CON)
167 0 : fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));
168 :
169 0 : if (flags & DUMP_DEP_TYPE)
170 : {
171 0 : char t;
172 0 : enum reg_note type = DEP_TYPE (dep);
173 :
174 0 : switch (type)
175 : {
176 : case REG_DEP_TRUE:
177 : t = 't';
178 : break;
179 :
180 0 : case REG_DEP_OUTPUT:
181 0 : t = 'o';
182 0 : break;
183 :
184 0 : case REG_DEP_CONTROL:
185 0 : t = 'c';
186 0 : break;
187 :
188 0 : case REG_DEP_ANTI:
189 0 : t = 'a';
190 0 : break;
191 :
192 0 : default:
193 0 : gcc_unreachable ();
194 0 : break;
195 : }
196 :
197 0 : fprintf (dump, "%c; ", t);
198 : }
199 :
200 0 : if (flags & DUMP_DEP_STATUS)
201 : {
202 0 : if (current_sched_info->flags & USE_DEPS_LIST)
203 0 : dump_ds (dump, DEP_STATUS (dep));
204 : }
205 :
206 0 : fprintf (dump, ">");
207 0 : }
208 :
/* Default flags for dump_dep (): print producer and consumer UIDs
   only.  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);
211 :
/* Dump all fields of DEP to STDERR.  Debugging entry point, callable
   from a debugger.  */
void
sd_debug_dep (dep_t dep)
{
  /* Flag bit 1 makes dump_dep () print every field (DUMP_DEP_ALL).  */
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}
219 :
220 : /* Determine whether DEP is a dependency link of a non-debug insn on a
221 : debug insn. */
222 :
223 : static inline bool
224 1700207586 : depl_on_debug_p (dep_link_t dep)
225 : {
226 1700207586 : return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
227 398500812 : && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
228 : }
229 :
230 : /* Functions to operate with a single link from the dependencies lists -
231 : dep_link_t. */
232 :
233 : /* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
234 : PREV_NEXT_P. */
235 : static void
236 850103793 : attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
237 : {
238 850103793 : dep_link_t next = *prev_nextp;
239 :
240 850103793 : gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
241 : && DEP_LINK_NEXT (l) == NULL);
242 :
243 : /* Init node being inserted. */
244 850103793 : DEP_LINK_PREV_NEXTP (l) = prev_nextp;
245 850103793 : DEP_LINK_NEXT (l) = next;
246 :
247 : /* Fix next node. */
248 850103793 : if (next != NULL)
249 : {
250 476682792 : gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);
251 :
252 476682792 : DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
253 : }
254 :
255 : /* Fix prev node. */
256 850103793 : *prev_nextp = l;
257 850103793 : }
258 :
259 : /* Add dep_link LINK to deps_list L. */
260 : static void
261 850103793 : add_to_deps_list (dep_link_t link, deps_list_t l)
262 : {
263 850103793 : attach_dep_link (link, &DEPS_LIST_FIRST (l));
264 :
265 : /* Don't count debug deps. */
266 850103793 : if (!depl_on_debug_p (link))
267 830740083 : ++DEPS_LIST_N_LINKS (l);
268 850103793 : }
269 :
270 : /* Detach dep_link L from the list. */
271 : static void
272 850103793 : detach_dep_link (dep_link_t l)
273 : {
274 850103793 : dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
275 850103793 : dep_link_t next = DEP_LINK_NEXT (l);
276 :
277 850103793 : *prev_nextp = next;
278 :
279 0 : if (next != NULL)
280 453179502 : DEP_LINK_PREV_NEXTP (next) = prev_nextp;
281 :
282 850103793 : DEP_LINK_PREV_NEXTP (l) = NULL;
283 850103793 : DEP_LINK_NEXT (l) = NULL;
284 0 : }
285 :
286 : /* Remove link LINK from list LIST. */
287 : static void
288 850103793 : remove_from_deps_list (dep_link_t link, deps_list_t list)
289 : {
290 850103793 : detach_dep_link (link);
291 :
292 : /* Don't count debug deps. */
293 850103793 : if (!depl_on_debug_p (link))
294 830740083 : --DEPS_LIST_N_LINKS (list);
295 850103793 : }
296 :
/* Move link LINK from list FROM to list TO.  The order matters:
   attach_dep_link asserts that the link is detached, so it must be
   removed from FROM before it can be added to TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}
304 :
305 : /* Return true of LINK is not attached to any list. */
306 : static bool
307 432347418 : dep_link_is_detached_p (dep_link_t link)
308 : {
309 432347418 : return DEP_LINK_PREV_NEXTP (link) == NULL;
310 : }
311 :
/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there: incremented by create_dep_node,
   decremented by delete_dep_node, checked by deps_pools_are_empty_p.  */
static int dn_pool_diff = 0;
317 :
318 : /* Create a dep_node. */
319 : static dep_node_t
320 216173709 : create_dep_node (void)
321 : {
322 216173709 : dep_node_t n = dn_pool->allocate ();
323 216173709 : dep_link_t back = DEP_NODE_BACK (n);
324 216173709 : dep_link_t forw = DEP_NODE_FORW (n);
325 :
326 216173709 : DEP_LINK_NODE (back) = n;
327 216173709 : DEP_LINK_NEXT (back) = NULL;
328 216173709 : DEP_LINK_PREV_NEXTP (back) = NULL;
329 :
330 216173709 : DEP_LINK_NODE (forw) = n;
331 216173709 : DEP_LINK_NEXT (forw) = NULL;
332 216173709 : DEP_LINK_PREV_NEXTP (forw) = NULL;
333 :
334 216173709 : ++dn_pool_diff;
335 :
336 216173709 : return n;
337 : }
338 :
339 : /* Delete dep_node N. N must not be connected to any deps_list. */
340 : static void
341 216173709 : delete_dep_node (dep_node_t n)
342 : {
343 216173709 : gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
344 : && dep_link_is_detached_p (DEP_NODE_FORW (n)));
345 :
346 216173709 : XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));
347 :
348 216173709 : --dn_pool_diff;
349 :
350 216173709 : dn_pool->remove (n);
351 216173709 : }
352 :
/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there: incremented by create_deps_list,
   decremented by free_deps_list, checked by deps_pools_are_empty_p.  */
static int dl_pool_diff = 0;
358 :
359 : /* Functions to operate with dependences lists - deps_list_t. */
360 :
361 : /* Return true if list L is empty. */
362 : static bool
363 1335327731 : deps_list_empty_p (deps_list_t l)
364 : {
365 1335327731 : return DEPS_LIST_N_LINKS (l) == 0;
366 : }
367 :
368 : /* Create a new deps_list. */
369 : static deps_list_t
370 543128055 : create_deps_list (void)
371 : {
372 543128055 : deps_list_t l = dl_pool->allocate ();
373 :
374 543128055 : DEPS_LIST_FIRST (l) = NULL;
375 543128055 : DEPS_LIST_N_LINKS (l) = 0;
376 :
377 543128055 : ++dl_pool_diff;
378 543128055 : return l;
379 : }
380 :
381 : /* Free deps_list L. */
382 : static void
383 543128055 : free_deps_list (deps_list_t l)
384 : {
385 543128055 : gcc_assert (deps_list_empty_p (l));
386 :
387 543128055 : --dl_pool_diff;
388 :
389 543128055 : dl_pool->remove (l);
390 543128055 : }
391 :
392 : /* Return true if there is no dep_nodes and deps_lists out there.
393 : After the region is scheduled all the dependency nodes and lists
394 : should [generally] be returned to pool. */
395 : bool
396 11295042 : deps_pools_are_empty_p (void)
397 : {
398 11295042 : return dn_pool_diff == 0 && dl_pool_diff == 0;
399 : }
400 :
401 : /* Remove all elements from L. */
402 : static void
403 108625611 : clear_deps_list (deps_list_t l)
404 : {
405 525267999 : do
406 : {
407 316946805 : dep_link_t link = DEPS_LIST_FIRST (l);
408 :
409 316946805 : if (link == NULL)
410 : break;
411 :
412 208321194 : remove_from_deps_list (link, l);
413 208321194 : }
414 : while (1);
415 108625611 : }
416 :
417 : /* Decide whether a dependency should be treated as a hard or a speculative
418 : dependency. */
419 : static bool
420 760860677 : dep_spec_p (dep_t dep)
421 : {
422 760860677 : if (current_sched_info->flags & DO_SPECULATION)
423 : {
424 0 : if (DEP_STATUS (dep) & SPECULATIVE)
425 : return true;
426 : }
427 760860677 : if (current_sched_info->flags & DO_PREDICATION)
428 : {
429 0 : if (DEP_TYPE (dep) == REG_DEP_CONTROL)
430 : return true;
431 : }
432 760860677 : if (DEP_REPLACE (dep) != NULL)
433 1133121 : return true;
434 : return false;
435 : }
436 :
/* Register sets accumulated for the insn currently being analyzed:
   registers it sets, clobbers, uses, and uses in a branch condition.
   NOTE(review): these appear to be filled by the sched_analyze_*
   routines declared below and flushed into the dependence lists
   afterwards -- confirm in the analysis code.  */
static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
/* Kind of scheduling barrier (if any) the current insn forms.  */
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example,
   insn in its constraint has one register class.  Even if there is
   currently no hard register in the insn, the particular hard
   register will be in the insn after reload pass because the
   constraint requires it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;
451 :
/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  The caches are allocated together: when the true
   dependency cache exists, so do the output, anti and control ones
   (ask_dependency_caches asserts this).  */
static bitmap true_dependency_cache = NULL;
static bitmap output_dependency_cache = NULL;
static bitmap anti_dependency_cache = NULL;
static bitmap control_dependency_cache = NULL;
static bitmap spec_dependency_cache = NULL;
/* Size the caches were allocated for.  NOTE(review): presumably a
   LUID count -- confirm where the caches are allocated.  */
static int cache_size;

/* True if we should mark added dependencies as a non-register deps.  */
static bool mark_as_hard;
474 :
475 : static bool deps_may_trap_p (const_rtx);
476 : static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
477 : static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
478 : enum reg_note, bool);
479 : static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
480 : rtx_insn_list **, int, enum reg_note,
481 : bool);
482 : static void delete_all_dependences (rtx_insn *);
483 : static void chain_to_prev_insn (rtx_insn *);
484 :
485 : static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
486 : static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
487 : static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
488 : static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);
489 :
490 : static bool sched_has_condition_p (const rtx_insn *);
491 : static bool conditions_mutex_p (const_rtx, const_rtx, bool, bool);
492 :
493 : static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
494 : rtx, rtx);
495 : static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);
496 :
497 : static void check_dep (dep_t, bool);
498 :
499 :
500 : /* Return true if a load of the memory reference MEM can cause a trap. */
501 :
502 : static bool
503 16733 : deps_may_trap_p (const_rtx mem)
504 : {
505 16733 : const_rtx addr = XEXP (mem, 0);
506 :
507 16733 : if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
508 : {
509 6 : const_rtx t = get_reg_known_value (REGNO (addr));
510 6 : if (t)
511 16733 : addr = t;
512 : }
513 16733 : return rtx_addr_can_trap_p (addr);
514 : }
515 :
516 :
/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  /* Predicated insn: the condition is explicit in the pattern.  */
  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  /* Otherwise only a simple conditional jump carries a usable
     condition.  */
  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  /* SRC is the IF_THEN_ELSE feeding the PC.  */
  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    /* Fall-through is the "else" arm: the comparison is usable
       as-is.  */
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      /* Fall-through is the "then" arm: the branch is taken when the
	 comparison is false, so the caller must reverse it.  */
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      /* Not every comparison is reversible (e.g. some FP ones).  */
      if (revcode == UNKNOWN)
	return 0;

      if (rev)
	*rev = true;
      return cond;
    }

  return 0;
}
554 :
555 : /* Return the condition under which INSN does not execute (i.e. the
556 : not-taken condition for a conditional branch), or NULL if we cannot
557 : find such a condition. The caller should make a copy of the condition
558 : before using it. */
559 : rtx
560 0 : sched_get_reverse_condition_uncached (const rtx_insn *insn)
561 : {
562 0 : bool rev;
563 0 : rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
564 0 : if (cond == NULL_RTX)
565 : return cond;
566 0 : if (!rev)
567 : {
568 0 : enum rtx_code revcode = reversed_comparison_code (cond, insn);
569 0 : cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
570 : XEXP (cond, 0),
571 : XEXP (cond, 1));
572 : }
573 : return cond;
574 : }
575 :
/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.
   const_true_rtx serves as the cached marker for "no condition".  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  /* LUID 0: no per-insn scheduler data to cache into -- compute
     directly.  */
  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  /* Cached "unconditional" marker.  */
  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  /* Cache hit with a real condition.  */
  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
	*rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  /* First query for this insn: compute and memoize.  */
  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}
610 :
611 : /* True when we can find a condition under which INSN is executed. */
612 : static bool
613 64435291 : sched_has_condition_p (const rtx_insn *insn)
614 : {
615 0 : return !! sched_get_condition_with_rev (insn, NULL);
616 : }
617 :
618 :
619 :
620 : /* Return true if conditions COND1 and COND2 can never be both true. */
621 : static bool
622 0 : conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
623 : {
624 0 : if (COMPARISON_P (cond1)
625 0 : && COMPARISON_P (cond2)
626 0 : && GET_CODE (cond1) ==
627 : (rev1==rev2
628 0 : ? reversed_comparison_code (cond2, NULL)
629 : : GET_CODE (cond2))
630 0 : && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
631 0 : && XEXP (cond1, 1) == XEXP (cond2, 1))
632 : return true;
633 : return false;
634 : }
635 :
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      /* Both insns must be conditional, the conditions must be
	 mutually exclusive, and neither insn may modify the other's
	 condition -- otherwise the insns cannot be reordered.  */
      if (cond1 && cond2
	  && conditions_mutex_p (cond1, cond2, rev1, rev2)
	  /* Make sure first instruction doesn't affect condition of second
	     instruction if switched.  */
	  && !modified_in_p (cond1, insn2)
	  /* Make sure second instruction doesn't affect condition of first
	     instruction if switched.  */
	  && !modified_in_p (cond2, insn1))
	return true;
    }
  return false;
}
662 :
663 :
/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  /* An insn depending on itself cannot be speculated around.  */
  if (HAS_INTERNAL_DEP (insn))
    return false;

  /* Only ordinary insns (not jumps, calls, etc.) are candidates.  */
  if (!NONJUMP_INSN_P (insn))
    return false;

  /* Insns glued to their neighbors must not move independently.  */
  if (SCHED_GROUP_P (insn))
    return false;

  /* Speculation checks themselves are not re-speculated.  */
  if (IS_SPECULATION_CHECK_P (const_cast<struct rtx_insn *> (insn)))
    return false;

  /* Side effects must not be executed speculatively.  */
  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
	/* If instruction might fault, it cannot be speculatively scheduled.
	   For control speculation it's obvious why and for data speculation
	   it's because the insn might get wrong input if speculation
	   wasn't successful.  */
	return false;

      if ((ds & BE_IN_DATA)
	  && sched_has_condition_p (insn))
	/* If this is a predicated instruction, then it cannot be
	   speculatively scheduled.  See PR35659.  */
	return false;
    }

  return true;
}
703 :
/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   Lists are visited in the fixed order: HARD_BACK, SPEC_BACK, FORW,
   RES_BACK, RES_FORW.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
	      deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      /* No list types left.  */
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}
752 :
753 : /* Return the summary size of INSN's lists defined by LIST_TYPES. */
754 : int
755 2610881962 : sd_lists_size (const_rtx insn, sd_list_types_def list_types)
756 : {
757 2610881962 : int size = 0;
758 :
759 5786604923 : while (list_types != SD_LIST_NONE)
760 : {
761 3175722961 : deps_list_t list;
762 3175722961 : bool resolved_p;
763 :
764 3175722961 : sd_next_list (insn, &list_types, &list, &resolved_p);
765 3175722961 : if (list)
766 3175722961 : size += DEPS_LIST_N_LINKS (list);
767 : }
768 :
769 2610881962 : return size;
770 : }
771 :
772 : /* Return true if INSN's lists defined by LIST_TYPES are all empty. */
773 :
774 : bool
775 683515403 : sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
776 : {
777 1027859634 : while (list_types != SD_LIST_NONE)
778 : {
779 792199676 : deps_list_t list;
780 792199676 : bool resolved_p;
781 :
782 792199676 : sd_next_list (insn, &list_types, &list, &resolved_p);
783 792199676 : if (!deps_list_empty_p (list))
784 447855445 : return false;
785 : }
786 :
787 : return true;
788 : }
789 :
/* Initialize data for INSN: allocate all five of its dependence
   lists.  Paired with sd_finish_insn below.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}
802 :
/* Free data for INSN: release the five lists allocated by
   sd_init_insn and clear the pointers.  The lists must be empty --
   free_deps_list asserts this.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}
824 :
/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
			      sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
	if (DEP_PRO (dep) == pro)
	  {
	    found_p = true;
	    break;
	  }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
	if (DEP_CON (dep) == con)
	  {
	    found_p = true;
	    break;
	  }
    }

  if (found_p)
    {
      /* Hand the iterator position back so the caller can manipulate
	 the dep without a second search.  */
      if (sd_it_ptr != NULL)
	*sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}
884 :
885 : /* Find a dependency between producer PRO and consumer CON.
886 : Use dependency [if available] to check if dependency is present at all.
887 : Search through resolved dependency lists if RESOLVED_P is true.
888 : If the dependency or NULL if none found. */
889 : dep_t
890 439912390 : sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
891 : {
892 439912390 : if (true_dependency_cache != NULL)
893 : /* Avoiding the list walk below can cut compile times dramatically
894 : for some code. */
895 : {
896 8184146 : int elem_luid = INSN_LUID (pro);
897 8184146 : int insn_luid = INSN_LUID (con);
898 :
899 8184146 : if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
900 8150359 : && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
901 7946351 : && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
902 15843747 : && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
903 : return NULL;
904 : }
905 :
906 432252789 : return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
907 : }
908 :
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks if producer and consumer is the same insn
   and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
	/* INSN has an internal dependence, which we can't overcome.
	   This flag disqualifies it from speculation -- see
	   sched_insn_is_legitimate_for_speculation_p.  */
	HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}
938 : }
939 :
940 : /* Ask dependency caches what needs to be done for dependence DEP.
941 : Return DEP_CREATED if new dependence should be created and there is no
942 : need to try to find one searching the dependencies lists.
943 : Return DEP_PRESENT if there already is a dependence described by DEP and
944 : hence nothing is to be done.
945 : Return DEP_CHANGED if there already is a dependence, but it should be
946 : updated to incorporate additional information from DEP. */
947 : static enum DEPS_ADJUST_RESULT
948 5486690 : ask_dependency_caches (dep_t dep)
949 : {
950 5486690 : int elem_luid = INSN_LUID (DEP_PRO (dep));
951 5486690 : int insn_luid = INSN_LUID (DEP_CON (dep));
952 :
953 5486690 : gcc_assert (true_dependency_cache != NULL
954 : && output_dependency_cache != NULL
955 : && anti_dependency_cache != NULL
956 : && control_dependency_cache != NULL);
957 :
958 5486690 : if (!(current_sched_info->flags & USE_DEPS_LIST))
959 : {
960 5486690 : enum reg_note present_dep_type;
961 :
962 5486690 : if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
963 : present_dep_type = REG_DEP_TRUE;
964 5244511 : else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
965 : present_dep_type = REG_DEP_OUTPUT;
966 2694523 : else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
967 : present_dep_type = REG_DEP_ANTI;
968 2466109 : else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
969 : present_dep_type = REG_DEP_CONTROL;
970 : else
971 : /* There is no existing dep so it should be created. */
972 : return DEP_CREATED;
973 :
974 2778402 : if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
975 : /* DEP does not add anything to the existing dependence. */
976 2982802 : return DEP_PRESENT;
977 : }
978 : else
979 : {
980 0 : ds_t present_dep_types = 0;
981 :
982 0 : if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
983 0 : present_dep_types |= DEP_TRUE;
984 0 : if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
985 0 : present_dep_types |= DEP_OUTPUT;
986 0 : if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
987 0 : present_dep_types |= DEP_ANTI;
988 0 : if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
989 0 : present_dep_types |= DEP_CONTROL;
990 :
991 0 : if (present_dep_types == 0)
992 : /* There is no existing dep so it should be created. */
993 : return DEP_CREATED;
994 :
995 0 : if (!(current_sched_info->flags & DO_SPECULATION)
996 0 : || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
997 : {
998 0 : if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
999 : == present_dep_types)
1000 : /* DEP does not add anything to the existing dependence. */
1001 : return DEP_PRESENT;
1002 : }
1003 : else
1004 : {
1005 : /* Only true dependencies can be data speculative and
1006 : only anti dependencies can be control speculative. */
1007 0 : gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
1008 : == present_dep_types);
1009 :
1010 : /* if (DEP is SPECULATIVE) then
1011 : ..we should update DEP_STATUS
1012 : else
1013 : ..we should reset existing dep to non-speculative. */
1014 : }
1015 : }
1016 :
1017 : return DEP_CHANGED;
1018 : }
1019 :
/* Set dependency caches according to DEP.  Records the dependence between
   DEP_PRO and DEP_CON in the per-type LUID-indexed bitmaps.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      /* A single dependence type per pair: set exactly one cache bit.  */
      switch (DEP_TYPE (dep))
	{
	case REG_DEP_TRUE:
	  bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_OUTPUT:
	  bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }
  else
    {
      /* With USE_DEPS_LIST the status word may carry several dependence
	 bits; mirror each one into its cache.  */
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
	bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
	bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
	bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
	bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      /* Speculative deps are additionally tracked in their own cache;
	 that cache only exists when DO_SPECULATION is enabled.  */
      if (ds & SPECULATIVE)
	{
	  gcc_assert (current_sched_info->flags & DO_SPECULATION);
	  bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
	}
    }
}
1071 :
/* Type of dependence DEP have changed from OLD_TYPE.  Update dependency
   caches accordingly: clear the bit for the old type, then re-record the
   (possibly new) type via set_dependency_caches.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear corresponding cache entry because type of the link
     may have changed.  Keep them if we use_deps_list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      /* REG_DEP_TRUE is not listed: a true dependence is already the most
	 restrictive type and cannot be upgraded away (update_dep only
	 changes the type to a strictly smaller value).  */
      switch (old_type)
	{
	case REG_DEP_OUTPUT:
	  bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_ANTI:
	  bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
	  break;

	case REG_DEP_CONTROL:
	  bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  set_dependency_caches (dep);
}
1105 :
/* Convert a dependence pointed to by SD_IT to be non-speculative:
   move its backward link from the consumer's speculative list to the
   hard list, drop the SPECULATIVE bits, and update the spec cache.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  /* true_dependency_cache being non-null means all dep caches are live.  */
  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
		      INSN_LUID (elem));
}
1125 :
/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.
   Returns DEP_CHANGED if DEP was modified, DEP_PRESENT otherwise.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
	    sd_iterator_def sd_it ATTRIBUTE_UNUSED,
	    rtx mem1 ATTRIBUTE_UNUSED,
	    rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  /* Merging two deps means the insn pair is related in more than one
     way, so mark the dependence as multiple.  */
  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
	{
	  /* Either existing dep or a dep we're adding or both are
	     speculative.  */
	  if (!(ds & SPECULATIVE)
	      || !(dep_status & SPECULATIVE))
	    /* The new dep can't be speculative.  */
	    new_status &= ~SPECULATIVE;
	  else
	    {
	      /* Both are speculative.  Merge probabilities.  */
	      if (mem1 != NULL)
		{
		  dw_t dw;

		  dw = estimate_dep_weak (mem1, mem2);
		  ds = set_dep_weak (ds, BEGIN_DATA, dw);
		}

	      new_status = ds_merge (dep_status, ds);
	    }
	}

      ds = new_status;

      if (dep_status != ds)
	{
	  DEP_STATUS (dep) = ds;
	  res = DEP_CHANGED;
	}
    }

  /* If the merge above dropped the speculative bits, the dep must be
     moved off the consumer's speculative back-deps list.  */
  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  /* Keep the bitmap caches in sync with the (possibly new) type.  */
  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}
1201 :
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
		     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  /* Self-dependencies must have been filtered out by the caller
     (maybe_add_or_update_dep_1).  */
  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
	      && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  /* Consult the caches first to avoid a list search when possible.  */
  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
	{
	case DEP_PRESENT:
	  dep_t present_dep;
	  sd_iterator_def sd_it;

	  /* The dep adds nothing new, but we still must flag the
	     existing one as multiple.  */
	  present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						      DEP_CON (new_dep),
						      resolved_p, &sd_it);
	  DEP_MULTIPLE (present_dep) = 1;
	  return DEP_PRESENT;

	case DEP_CHANGED:
	  maybe_present_p = true;
	  present_p = true;
	  break;

	case DEP_CREATED:
	  maybe_present_p = false;
	  present_p = false;
	  break;

	default:
	  gcc_unreachable ();
	  break;
	}
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
						  DEP_CON (new_dep),
						  resolved_p, &sd_it);

      if (present_dep != NULL)
	/* We found an existing dependency between ELEM and INSN.  */
	return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
	/* We didn't find a dep, it shouldn't present in the cache.  */
	gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  /* MEM1 non-null means the caller asked for a data-speculative dep:
     record the estimated weakness in the status word.  */
  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
					   estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}
1288 :
1289 : /* Initialize BACK_LIST_PTR with consumer's backward list and
1290 : FORW_LIST_PTR with producer's forward list. If RESOLVED_P is true
1291 : initialize with lists that hold resolved deps. */
1292 : static void
1293 432347418 : get_back_and_forw_lists (dep_t dep, bool resolved_p,
1294 : deps_list_t *back_list_ptr,
1295 : deps_list_t *forw_list_ptr)
1296 : {
1297 432347418 : rtx_insn *con = DEP_CON (dep);
1298 :
1299 432347418 : if (!resolved_p)
1300 : {
1301 224035791 : if (dep_spec_p (dep))
1302 0 : *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
1303 : else
1304 224035791 : *back_list_ptr = INSN_HARD_BACK_DEPS (con);
1305 :
1306 224035791 : *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
1307 : }
1308 : else
1309 : {
1310 208311627 : *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
1311 208311627 : *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
1312 : }
1313 432347418 : }
1314 :
/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.
   A fresh dep_node is allocated and linked onto the consumer's backward
   list and the producer's forward list.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  /* Strip speculative bits unless speculation is enabled and the
     consumer may legitimately be speculated.  */
  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  /* Pick the (resolved or not, spec or hard) lists this dep goes on.  */
  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}
1348 :
/* Add dependence DEP, or update an existing dependence between the same
   producer/consumer pair to incorporate DEP's information.  If RESOLVED_P
   is true, operate on the resolved dependence lists.  This is a
   convenience wrapper around add_or_update_dep_1 for callers without
   memory-speculation data.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}
1357 :
/* Resolve the dependence pointed to by SD_IT: move its links from the
   unresolved lists to the resolved ones.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  /* The backward link may sit on either the speculative or the hard
     list; both drain into the same resolved list.  */
  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
		   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
		 INSN_RESOLVED_FORW_DEPS (pro));
}
1378 :
/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state, moving its links back onto the
   unresolved (spec or hard) lists.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  /* Mirror of sd_resolve_dep: restore to the list matching the dep's
     speculativeness.  */
  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
		   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
		 INSN_FORW_DEPS (pro));
}
1399 :
1400 : /* Make TO depend on all the FROM's producers.
1401 : If RESOLVED_P is true add dependencies to the resolved lists. */
1402 : void
1403 0 : sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
1404 : {
1405 0 : sd_list_types_def list_type;
1406 0 : sd_iterator_def sd_it;
1407 0 : dep_t dep;
1408 :
1409 0 : list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;
1410 :
1411 0 : FOR_EACH_DEP (from, list_type, sd_it, dep)
1412 : {
1413 0 : dep_def _new_dep, *new_dep = &_new_dep;
1414 :
1415 0 : copy_dep (new_dep, dep);
1416 0 : DEP_CON (new_dep) = to;
1417 0 : sd_add_dep (new_dep, resolved_p);
1418 : }
1419 0 : }
1420 :
/* Remove a dependency referred to by SD_IT: unlink it from both the
   consumer's backward and the producer's forward list, clear its cache
   bits, and free the node.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      /* Clear all four type caches; only one bit can be set per pair
	 but clearing all is harmless and avoids dispatching on type.  */
      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
	bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
			   &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}
1455 :
1456 : /* Dump size of the lists. */
1457 : #define DUMP_LISTS_SIZE (2)
1458 :
1459 : /* Dump dependencies of the lists. */
1460 : #define DUMP_LISTS_DEPS (4)
1461 :
1462 : /* Dump all information about the lists. */
1463 : #define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)
1464 :
/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  /* Bit 0 means "dump everything", here and in dump_dep below.  */
  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
	{
	  dump_dep (dump, dep, dump_dep_flags | all);
	  fprintf (dump, " ");
	}
    }
}
1496 :
1497 : /* Dump all information about deps_lists of INSN specified by TYPES
1498 : to STDERR. */
1499 : void
1500 0 : sd_debug_lists (rtx insn, sd_list_types_def types)
1501 : {
1502 0 : dump_lists (stderr, insn, types, 1);
1503 0 : fprintf (stderr, "\n");
1504 0 : }
1505 :
/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  /* Control deps only make sense when the scheduler may predicate.  */
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      /* If PRO is a shadow, analyze the real insn instead.  */
      if (other != NULL_RTX)
	real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
	 the condition register than the one that was present at
	 the jump.  */
      if (cond == NULL_RTX)
	dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
	{
	  HARD_REG_SET uses;
	  CLEAR_HARD_REG_SET (uses);
	  note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
	  if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
	    dep_type = REG_DEP_ANTI;
	}
      /* Still a control dep: also depend on the condition's setters.  */
      if (dep_type == REG_DEP_CONTROL)
	{
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "making DEP_CONTROL for %d\n",
		     INSN_UID (real_pro));
	  add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
			       REG_DEP_TRUE, false);
	}
    }

  add_dependence_1 (con, pro, dep_type);
}
1556 :
1557 : /* A convenience wrapper to operate on an entire list. HARD should be
1558 : true if DEP_NONREG should be set on newly created dependencies. */
1559 :
1560 : static void
1561 3107838576 : add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
1562 : enum reg_note dep_type, bool hard)
1563 : {
1564 3107838576 : mark_as_hard = hard;
1565 3591117270 : for (; list; list = list->next ())
1566 : {
1567 902989916 : if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
1568 483278694 : add_dependence (insn, list->insn (), dep_type);
1569 : }
1570 3107838576 : mark_as_hard = false;
1571 3107838576 : }
1572 :
1573 : /* Similar, but free *LISTP at the same time, when the context
1574 : is not readonly. HARD should be true if DEP_NONREG should be set on
1575 : newly created dependencies. */
1576 :
1577 : static void
1578 1195996900 : add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
1579 : rtx_insn_list **listp,
1580 : int uncond, enum reg_note dep_type, bool hard)
1581 : {
1582 1195996900 : add_dependence_list (insn, *listp, uncond, dep_type, hard);
1583 :
1584 : /* We don't want to short-circuit dependencies involving debug
1585 : insns, because they may cause actual dependencies to be
1586 : disregarded. */
1587 1195996900 : if (deps->readonly || DEBUG_INSN_P (insn))
1588 : return;
1589 :
1590 1195915722 : free_INSN_LIST_list (listp);
1591 : }
1592 :
1593 : /* Remove all occurrences of INSN from LIST. Return the number of
1594 : occurrences removed. */
1595 :
1596 : static int
1597 61518 : remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
1598 : {
1599 61518 : int removed = 0;
1600 :
1601 125480 : while (*listp)
1602 : {
1603 63962 : if ((*listp)->insn () == insn)
1604 : {
1605 12951 : remove_free_INSN_LIST_node (listp);
1606 12951 : removed++;
1607 12951 : continue;
1608 : }
1609 :
1610 51011 : listp = (rtx_insn_list **)&XEXP (*listp, 1);
1611 : }
1612 :
1613 61518 : return removed;
1614 : }
1615 :
1616 : /* Same as above, but process two lists at once. */
1617 : static int
1618 4710 : remove_from_both_dependence_lists (rtx_insn *insn,
1619 : rtx_insn_list **listp,
1620 : rtx_expr_list **exprp)
1621 : {
1622 4710 : int removed = 0;
1623 :
1624 9019 : while (*listp)
1625 : {
1626 4309 : if (XEXP (*listp, 0) == insn)
1627 : {
1628 678 : remove_free_INSN_LIST_node (listp);
1629 678 : remove_free_EXPR_LIST_node (exprp);
1630 678 : removed++;
1631 678 : continue;
1632 : }
1633 :
1634 3631 : listp = (rtx_insn_list **)&XEXP (*listp, 1);
1635 3631 : exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
1636 : }
1637 :
1638 4710 : return removed;
1639 : }
1640 :
1641 : /* Clear all dependencies for an insn. */
1642 : static void
1643 4179276 : delete_all_dependences (rtx_insn *insn)
1644 : {
1645 4179276 : sd_iterator_def sd_it;
1646 4179276 : dep_t dep;
1647 :
1648 : /* The below cycle can be optimized to clear the caches and back_deps
1649 : in one call but that would provoke duplication of code from
1650 : delete_dep (). */
1651 :
1652 4179276 : for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
1653 11911866 : sd_iterator_cond (&sd_it, &dep);)
1654 7732590 : sd_delete_dep (sd_it);
1655 4179276 : }
1656 :
/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  /* Re-attach each of INSN's backward deps to the first insn of the
     group (found by walking back over SCHED_GROUP_P / debug insns),
     unless the producer is inside the group itself.  */
  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
	{
	  i = prev_nonnote_insn (i);

	  /* Producer is within the group: no re-attachment needed.  */
	  if (pro == i)
	    goto next_link;
	} while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
	add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  /* INSN's own deps have been transferred; drop them all ...  */
  delete_all_dependences (insn);

  /* ... and chain INSN to its predecessor with a single anti dep.  */
  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}
1695 :
/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
   The MEM is a memory reference contained within INSN, which we are saving
   so that we can do memory aliasing on it.  READ_P selects between the
   pending-read and pending-write list pair in DEPS.  */

static void
add_insn_mem_dependence (class deps_desc *deps, bool read_p,
			 rtx_insn *insn, rtx mem)
{
  rtx_insn_list **insn_list;
  rtx_insn_list *insn_node;
  rtx_expr_list **mem_list;
  rtx_expr_list *mem_node;

  gcc_assert (!deps->readonly);
  if (read_p)
    {
      insn_list = &deps->pending_read_insns;
      mem_list = &deps->pending_read_mems;
      /* Debug insns are kept on the list but do not count toward the
	 list-length limit.  */
      if (!DEBUG_INSN_P (insn))
	deps->pending_read_list_length++;
    }
  else
    {
      insn_list = &deps->pending_write_insns;
      mem_list = &deps->pending_write_mems;
      deps->pending_write_list_length++;
    }

  insn_node = alloc_INSN_LIST (insn, *insn_list);
  *insn_list = insn_node;

  /* Under cselib, record the address in value form; copy the MEM first
     so the insn's own pattern is left untouched.  */
  if (sched_deps_info->use_cselib && MEM_P (mem))
    {
      mem = shallow_copy_rtx (mem);
      XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
							GET_MODE (mem), insn);
    }
  mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
  *mem_list = mem_node;
}
1747 :
/* Make a dependency between every memory reference on the pending lists
   and INSN, thus flushing the pending lists.  FOR_READ is true if emitting
   dependencies for a read operation, similarly with FOR_WRITE.  */

static void
flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
		     int for_write)
{
  /* A write conflicts with pending reads too; a pure read does not.  */
  if (for_write)
    {
      add_dependence_list_and_free (deps, insn, &deps->pending_read_insns,
				    1, REG_DEP_ANTI, true);
      if (!deps->readonly)
	{
	  free_EXPR_LIST_list (&deps->pending_read_mems);
	  deps->pending_read_list_length = 0;
	}
    }

  add_dependence_list_and_free (deps, insn, &deps->pending_write_insns, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn,
				&deps->last_pending_memory_flush, 1,
				for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
				true);

  add_dependence_list_and_free (deps, insn, &deps->pending_jump_insns, 1,
				REG_DEP_ANTI, true);

  /* add_dependence_list_and_free keeps the lists for debug insns;
     free them explicitly here.  */
  if (DEBUG_INSN_P (insn))
    {
      if (for_write)
	free_INSN_LIST_list (&deps->pending_read_insns);
      free_INSN_LIST_list (&deps->pending_write_insns);
      free_INSN_LIST_list (&deps->last_pending_memory_flush);
      free_INSN_LIST_list (&deps->pending_jump_insns);
    }

  if (!deps->readonly)
    {
      free_EXPR_LIST_list (&deps->pending_write_mems);
      deps->pending_write_list_length = 0;

      /* INSN itself becomes the new memory-flush barrier.  */
      deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
      deps->pending_flush_length = 1;
    }
  mark_as_hard = false;
}
1798 :
1799 : /* Instruction which dependencies we are analyzing. */
1800 : static rtx_insn *cur_insn = NULL;
1801 :
1802 : /* Implement hooks for haifa scheduler. */
1803 :
/* Begin dependence analysis of INSN: remember it in CUR_INSN so the
   haifa_note_* hooks below know which insn is being analyzed.  Must be
   paired with haifa_finish_insn.  */
static void
haifa_start_insn (rtx_insn *insn)
{
  gcc_assert (insn && !cur_insn);

  cur_insn = insn;
}
1811 :
/* Finish dependence analysis of the current insn; clear CUR_INSN.  */
static void
haifa_finish_insn (void)
{
  cur_insn = NULL;
}
1817 :
/* Note that the insn being analyzed sets register REGNO.  */
void
haifa_note_reg_set (int regno)
{
  SET_REGNO_REG_SET (reg_pending_sets, regno);
}
1823 :
/* Note that the insn being analyzed clobbers register REGNO.  */
void
haifa_note_reg_clobber (int regno)
{
  SET_REGNO_REG_SET (reg_pending_clobbers, regno);
}
1829 :
/* Note that the insn being analyzed uses register REGNO.  */
void
haifa_note_reg_use (int regno)
{
  SET_REGNO_REG_SET (reg_pending_uses, regno);
}
1835 :
/* Record a memory dependence of the current insn (CUR_INSN) on
   PENDING_INSN with status DS.  MEM and PENDING_MEM are only kept for
   data-speculative deps, where they are used to estimate the
   dependence's weakness.  */
static void
haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
{
  if (!(ds & SPECULATIVE))
    {
      /* Non-speculative dep: the MEMs are not needed downstream.  */
      mem = NULL_RTX;
      pending_mem = NULL_RTX;
    }
  else
    gcc_assert (ds & BEGIN_DATA);

  {
    dep_def _dep, *dep = &_dep;

    /* Only keep the full status word when deps lists are in use.  */
    init_dep_1 (dep, pending_insn, cur_insn, ds_to_dt (ds),
		current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
    DEP_NONREG (dep) = 1;
    maybe_add_or_update_dep_1 (dep, false, pending_mem, mem);
  }
}
1857 :
1858 : static void
1859 607107026 : haifa_note_dep (rtx_insn *elem, ds_t ds)
1860 : {
1861 607107026 : dep_def _dep;
1862 607107026 : dep_t dep = &_dep;
1863 :
1864 607107026 : init_dep (dep, elem, cur_insn, ds_to_dt (ds));
1865 607107026 : if (mark_as_hard)
1866 302410226 : DEP_NONREG (dep) = 1;
1867 607107026 : maybe_add_or_update_dep_1 (dep, false, NULL_RTX, NULL_RTX);
1868 607107026 : }
1869 :
1870 : static void
1871 80340987 : note_reg_use (int r)
1872 : {
1873 0 : if (sched_deps_info->note_reg_use)
1874 80340987 : sched_deps_info->note_reg_use (r);
1875 0 : }
1876 :
1877 : static void
1878 38516474 : note_reg_set (int r)
1879 : {
1880 0 : if (sched_deps_info->note_reg_set)
1881 38516474 : sched_deps_info->note_reg_set (r);
1882 0 : }
1883 :
1884 : static void
1885 11752664 : note_reg_clobber (int r)
1886 : {
1887 0 : if (sched_deps_info->note_reg_clobber)
1888 11752664 : sched_deps_info->note_reg_clobber (r);
1889 0 : }
1890 :
1891 : static void
1892 32266405 : note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1893 : {
1894 17375188 : if (sched_deps_info->note_mem_dep)
1895 32265083 : sched_deps_info->note_mem_dep (m1, m2, e, ds);
1896 0 : }
1897 :
1898 : static void
1899 607147754 : note_dep (rtx_insn *e, ds_t ds)
1900 : {
1901 0 : if (sched_deps_info->note_dep)
1902 607133730 : sched_deps_info->note_dep (e, ds);
1903 0 : }
1904 :
1905 : /* Return corresponding to DS reg_note. */
1906 : enum reg_note
1907 639400527 : ds_to_dt (ds_t ds)
1908 : {
1909 639400527 : if (ds & DEP_TRUE)
1910 : return REG_DEP_TRUE;
1911 515651046 : else if (ds & DEP_OUTPUT)
1912 : return REG_DEP_OUTPUT;
1913 437726408 : else if (ds & DEP_ANTI)
1914 : return REG_DEP_ANTI;
1915 : else
1916 : {
1917 0 : gcc_assert (ds & DEP_CONTROL);
1918 : return REG_DEP_CONTROL;
1919 : }
1920 : }
1921 :
1922 :
1923 :
1924 : /* Functions for computation of info needed for register pressure
1925 : sensitive insn scheduling. */
1926 :
1927 :
1928 : /* Allocate and return reg_use_data structure for REGNO and INSN. */
1929 : static struct reg_use_data *
1930 2362 : create_insn_reg_use (int regno, rtx_insn *insn)
1931 : {
1932 2362 : struct reg_use_data *use;
1933 :
1934 2362 : use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
1935 2362 : use->regno = regno;
1936 2362 : use->insn = insn;
1937 2362 : use->next_insn_use = INSN_REG_USE_LIST (insn);
1938 2362 : INSN_REG_USE_LIST (insn) = use;
1939 2362 : return use;
1940 : }
1941 :
1942 : /* Allocate reg_set_data structure for REGNO and INSN. */
1943 : static void
1944 2228 : create_insn_reg_set (int regno, rtx insn)
1945 : {
1946 2228 : struct reg_set_data *set;
1947 :
1948 2228 : set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
1949 2228 : set->regno = regno;
1950 2228 : set->insn = insn;
1951 2228 : set->next_insn_set = INSN_REG_SET_LIST (insn);
1952 2228 : INSN_REG_SET_LIST (insn) = set;
1953 2228 : }
1954 :
/* Set up insn register uses for INSN and dependency context DEPS.
   For each register INSN uses that dies at INSN (has a REG_DEAD note or
   is simultaneously set/clobbered), allocate a reg_use_data record and
   link it into a circular list together with records for the earlier
   still-pending uses of the same register from DEPS->reg_last.  */
static void
setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      /* Hard registers that are unavailable for allocation do not
	 contribute to register pressure.  */
      if (i < FIRST_PSEUDO_REGISTER
	  && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
	continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
	  && ! REGNO_REG_SET_P (reg_pending_sets, i)
	  && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
	/* Ignore use which is not dying.  */
	continue;

      /* Start a singleton cycle: the record points at itself.  */
      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
	{
	  use2 = create_insn_reg_use (i, list->insn ());
	  next = use->next_regno_use;
	  use->next_regno_use = use2;
	  use2->next_regno_use = next;
	}
    }
}
1990 :
1991 : /* Register pressure info for the currently processed insn. */
1992 : static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];
1993 :
1994 : /* Return TRUE if INSN has the use structure for REGNO. */
1995 : static bool
1996 2228 : insn_use_p (rtx insn, int regno)
1997 : {
1998 2228 : struct reg_use_data *use;
1999 :
2000 3336 : for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
2001 1232 : if (use->regno == regno)
2002 : return true;
2003 : return false;
2004 : }
2005 :
2006 : /* Update the register pressure info after birth of pseudo register REGNO
2007 : in INSN. Arguments CLOBBER_P and UNUSED_P say correspondingly that
2008 : the register is in clobber or unused after the insn. */
2009 : static void
2010 1618 : mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
2011 : {
2012 1618 : int incr, new_incr;
2013 1618 : enum reg_class cl;
2014 :
2015 1618 : gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2016 1618 : cl = sched_regno_pressure_class[regno];
2017 1618 : if (cl != NO_REGS)
2018 : {
2019 1611 : incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2020 1611 : if (clobber_p)
2021 : {
2022 3 : new_incr = reg_pressure_info[cl].clobber_increase + incr;
2023 3 : reg_pressure_info[cl].clobber_increase = new_incr;
2024 : }
2025 1608 : else if (unused_p)
2026 : {
2027 67 : new_incr = reg_pressure_info[cl].unused_set_increase + incr;
2028 67 : reg_pressure_info[cl].unused_set_increase = new_incr;
2029 : }
2030 : else
2031 : {
2032 1541 : new_incr = reg_pressure_info[cl].set_increase + incr;
2033 1541 : reg_pressure_info[cl].set_increase = new_incr;
2034 1541 : if (! insn_use_p (insn, regno))
2035 1461 : reg_pressure_info[cl].change += incr;
2036 1541 : create_insn_reg_set (regno, insn);
2037 : }
2038 1611 : gcc_assert (new_incr < (1 << INCREASE_BITS));
2039 : }
2040 1618 : }
2041 :
2042 : /* Like mark_insn_pseudo_regno_birth except that NREGS saying how many
2043 : hard registers involved in the birth. */
2044 : static void
2045 1376 : mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
2046 : bool clobber_p, bool unused_p)
2047 : {
2048 1376 : enum reg_class cl;
2049 1376 : int new_incr, last = regno + nregs;
2050 :
2051 2754 : while (regno < last)
2052 : {
2053 1378 : gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2054 1378 : if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2055 : {
2056 749 : cl = sched_regno_pressure_class[regno];
2057 749 : if (cl != NO_REGS)
2058 : {
2059 749 : if (clobber_p)
2060 : {
2061 1 : new_incr = reg_pressure_info[cl].clobber_increase + 1;
2062 1 : reg_pressure_info[cl].clobber_increase = new_incr;
2063 : }
2064 748 : else if (unused_p)
2065 : {
2066 61 : new_incr = reg_pressure_info[cl].unused_set_increase + 1;
2067 61 : reg_pressure_info[cl].unused_set_increase = new_incr;
2068 : }
2069 : else
2070 : {
2071 687 : new_incr = reg_pressure_info[cl].set_increase + 1;
2072 687 : reg_pressure_info[cl].set_increase = new_incr;
2073 687 : if (! insn_use_p (insn, regno))
2074 643 : reg_pressure_info[cl].change += 1;
2075 687 : create_insn_reg_set (regno, insn);
2076 : }
2077 749 : gcc_assert (new_incr < (1 << INCREASE_BITS));
2078 : }
2079 : }
2080 1378 : regno++;
2081 : }
2082 1376 : }
2083 :
2084 : /* Update the register pressure info after birth of pseudo or hard
2085 : register REG in INSN. Arguments CLOBBER_P and UNUSED_P say
2086 : correspondingly that the register is in clobber or unused after the
2087 : insn. */
2088 : static void
2089 3645 : mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
2090 : {
2091 3645 : int regno;
2092 :
2093 3645 : if (GET_CODE (reg) == SUBREG)
2094 0 : reg = SUBREG_REG (reg);
2095 :
2096 3645 : if (! REG_P (reg))
2097 : return;
2098 :
2099 2994 : regno = REGNO (reg);
2100 2994 : if (regno < FIRST_PSEUDO_REGISTER)
2101 1376 : mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
2102 : clobber_p, unused_p);
2103 : else
2104 1618 : mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
2105 : }
2106 :
2107 : /* Update the register pressure info after death of pseudo register
2108 : REGNO. */
2109 : static void
2110 1236 : mark_pseudo_death (int regno)
2111 : {
2112 1236 : int incr;
2113 1236 : enum reg_class cl;
2114 :
2115 1236 : gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
2116 1236 : cl = sched_regno_pressure_class[regno];
2117 1236 : if (cl != NO_REGS)
2118 : {
2119 1232 : incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
2120 1232 : reg_pressure_info[cl].change -= incr;
2121 : }
2122 1236 : }
2123 :
2124 : /* Like mark_pseudo_death except that NREGS saying how many hard
2125 : registers involved in the death. */
2126 : static void
2127 890 : mark_hard_regno_death (int regno, int nregs)
2128 : {
2129 890 : enum reg_class cl;
2130 890 : int last = regno + nregs;
2131 :
2132 1780 : while (regno < last)
2133 : {
2134 890 : gcc_assert (regno < FIRST_PSEUDO_REGISTER);
2135 890 : if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
2136 : {
2137 677 : cl = sched_regno_pressure_class[regno];
2138 677 : if (cl != NO_REGS)
2139 677 : reg_pressure_info[cl].change -= 1;
2140 : }
2141 890 : regno++;
2142 : }
2143 890 : }
2144 :
2145 : /* Update the register pressure info after death of pseudo or hard
2146 : register REG. */
2147 : static void
2148 2126 : mark_reg_death (rtx reg)
2149 : {
2150 2126 : int regno;
2151 :
2152 2126 : if (GET_CODE (reg) == SUBREG)
2153 0 : reg = SUBREG_REG (reg);
2154 :
2155 2126 : if (! REG_P (reg))
2156 : return;
2157 :
2158 2126 : regno = REGNO (reg);
2159 2126 : if (regno < FIRST_PSEUDO_REGISTER)
2160 890 : mark_hard_regno_death (regno, REG_NREGS (reg));
2161 : else
2162 1236 : mark_pseudo_death (regno);
2163 : }
2164 :
2165 : /* Process SETTER of REG. DATA is an insn containing the setter. */
2166 : static void
2167 3645 : mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
2168 : {
2169 3645 : if (setter != NULL_RTX && GET_CODE (setter) != SET)
2170 : return;
2171 3227 : mark_insn_reg_birth
2172 3227 : ((rtx) data, reg, false,
2173 3227 : find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
2174 : }
2175 :
2176 : /* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs. */
2177 : static void
2178 3645 : mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
2179 : {
2180 3645 : if (GET_CODE (setter) == CLOBBER)
2181 418 : mark_insn_reg_birth ((rtx) data, reg, true, false);
2182 3645 : }
2183 :
/* Set up reg pressure info related to INSN: fill in INSN_REG_PRESSURE
   (and, for weighted pressure scheduling, allocate INSN_MAX_REG_PRESSURE)
   from the register births and deaths visible in INSN's pattern and
   notes.  Works through the file-global reg_pressure_info accumulators
   filled in by the mark_* callbacks.  */
void
init_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);

  if (! INSN_P (insn))
    return;

  /* Reset the per-class accumulators before scanning this insn.  */
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  /* Scan clobbers first, then real sets, over the insn's stores.  */
  note_stores (insn, mark_insn_reg_clobber, insn);

  note_stores (insn, mark_insn_reg_store, insn);

  /* Auto-increment addresses set a register too; note_stores does not
     see them, so pick them up from the REG_INC notes.  */
  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC)
	mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);

  /* Account deaths recorded as REG_DEAD notes.  */
  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  /* Copy the accumulated per-class data into the insn's own array,
     indexed by pressure-class position rather than class number.  */
  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
						    * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
	= reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
	= reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
2237 :
2238 :
2239 :
2240 :
2241 : /* Internal variable for sched_analyze_[12] () functions.
2242 : If it is nonzero, this means that sched_analyze_[12] looks
2243 : at the most toplevel SET. */
2244 : static bool can_start_lhs_rhs_p;
2245 :
/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.
   Only the selective scheduler creates new pseudos during scheduling
   (see the caller in sched_analyze_reg), hence the assert below.  */
static void
extend_deps_reg_info (class deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.
     NOTE(review): this branch is unreachable whenever the assert above
     is active, since it requires reload_completed to be true; it only
     matters for builds with assertion checking disabled.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  /* Grow reg_last, zero-filling the newly exposed tail entries.  */
  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
                                   max_regno);
      memset (&deps->reg_last[deps->max_reg],
	      0, (max_regno - deps->max_reg)
	      * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}
2273 :
2274 : /* Extends REG_INFO_P if needed. */
2275 : void
2276 130358026 : maybe_extend_reg_info_p (void)
2277 : {
2278 : /* Extend REG_INFO_P, if needed. */
2279 130358026 : if ((unsigned int)max_regno - 1 >= reg_info_p_size)
2280 : {
2281 15 : size_t new_reg_info_p_size = max_regno + 128;
2282 :
2283 15 : gcc_assert (!reload_completed && sel_sched_p ());
2284 :
2285 15 : reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
2286 : new_reg_info_p_size,
2287 : reg_info_p_size,
2288 : sizeof (*reg_info_p));
2289 15 : reg_info_p_size = new_reg_info_p_size;
2290 : }
2291 130358026 : }
2292 :
/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.
   Dispatches to note_reg_set / note_reg_use / note_reg_clobber, which
   record the reference in the reg_pending_* sets for the caller.  */

static void
sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
		   enum rtx_code ref, rtx_insn *insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs (regno, mode);
      if (ref == SET)
	{
	  while (--i >= 0)
	    note_reg_set (regno + i);
	}
      else if (ref == USE)
	{
	  while (--i >= 0)
	    note_reg_use (regno + i);
	}
      else
	{
	  /* Any other REF code (CLOBBER, PRE_DEC, ...) counts as a
	     clobber here.  */
	  while (--i >= 0)
	    note_reg_clobber (regno + i);
	}
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
	note_reg_set (regno);
      else if (ref == USE)
	note_reg_use (regno);
      else
	note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
	 by that during reloading.  We need only add dependencies for
	the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
	{
	  rtx t = get_reg_known_value (regno);
	  if (MEM_P (t))
	    sched_analyze_2 (deps, XEXP (t, 0), insn);
	}

      /* Don't let it cross a call after scheduling if it doesn't
	 already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
	{
	  if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
	    deps->sched_before_next_call
	      = alloc_INSN_LIST (insn, deps->sched_before_next_call);
	  else
	    add_dependence_list (insn, deps->last_function_call, 1,
				 REG_DEP_ANTI, false);
	}
    }
}
2371 :
/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.
   CAN_START_LHS_RHS_P communicates (to the sel-sched hooks start_lhs /
   start_rhs) whether X is the toplevel SET of the insn; it is captured
   into CSLR_P and cleared before recursing.  */

static void
sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  /* A PARALLEL destination (e.g. for a multi-register return value) is
     treated as a CLOBBER of each contained destination.  */
  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
	if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
	  sched_analyze_1 (deps,
			   gen_rtx_CLOBBER (VOIDmode,
					    XEXP (XVECEXP (dest, 0, i), 0)),
			   insn);

      if (cslr_p && sched_deps_info->finish_lhs)
	sched_deps_info->finish_lhs ();

      if (code == SET)
	{
	  can_start_lhs_rhs_p = cslr_p;

	  sched_analyze_2 (deps, SET_SRC (x), insn);

	  can_start_lhs_rhs_p = false;
	}

      return;
    }

  /* Strip wrappers around the real destination, analyzing the parts
     that constitute reads along the way.  */
  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
	 || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
	  || GET_CODE (dest) == ZERO_EXTRACT
	  || read_modify_subreg_p (dest))
	{
	  /* These both read and modify the result.  We must handle
	     them as writes to get proper dependencies for following
	     instructions.  We must handle them as reads to get proper
	     dependencies from this to previous instructions.
	     Thus we need to call sched_analyze_2.  */

	  sched_analyze_2 (deps, XEXP (dest, 0), insn);
	}
      if (GET_CODE (dest) == ZERO_EXTRACT)
	{
	  /* The second and third arguments are values read by this insn.  */
	  sched_analyze_2 (deps, XEXP (dest, 1), insn);
	  sched_analyze_2 (deps, XEXP (dest, 2), insn);
	}
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	{
	  /* Avoid analyzing the same register twice.  */
	  if (regno != FIRST_STACK_REG)
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

	  add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
			       FIRST_STACK_REG);
	}
#endif
      if (!deps->readonly && regno == STACK_POINTER_REGNUM)
	{
	  /* Please see PR114115.  We have insn modifying memory on the stack
	     and not addressed by stack pointer and we have insn reserving the
	     stack space.  If we move the insn modifying memory before insn
	     reserving the stack space, we can change memory out of the red
	     zone.  Even worse, some optimizations (e.g. peephole) can add
	     insns using temporary stack slots before insn reserving the stack
	     space but after the insn modifying memory.  This will corrupt the
	     modified memory.  Therefore we treat insn changing the stack as
	     reading unknown memory.  This will create anti-dependence.  We
	     don't need to treat the insn as writing memory because GCC by
	     itself does not generate code reading undefined stack memory.  */
	  if ((deps->pending_read_list_length + deps->pending_write_list_length)
	      >= param_max_pending_list_length
	      && !DEBUG_INSN_P (insn))
	    flush_pending_lists (deps, insn, true, true);
	  add_insn_mem_dependence (deps, true, insn, dest);
	}
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
	{
	  machine_mode address_mode = get_address_mode (dest);

	  t = shallow_copy_rtx (dest);
	  cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
				   GET_MODE (t), insn);
	  XEXP (t, 0)
	    = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						insn);
	}
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
	  && ((deps->pending_read_list_length + deps->pending_write_list_length)
	      >= param_max_pending_list_length))
	{
	  /* Flush all pending reads and writes to prevent the pending lists
	     from getting any larger.  Insn scheduling runs too slowly when
	     these lists get long.  When compiling GCC with itself,
	     this flush occurs 8 times for sparc, and 10 times for m88k using
	     the default value of 32.  */
	  flush_pending_lists (deps, insn, false, true);
	}
      else
	{
	  rtx_insn_list *pending;
	  rtx_expr_list *pending_mem;

	  /* This write anti-depends on every pending read it may
	     conflict with.  */
	  pending = deps->pending_read_insns;
	  pending_mem = deps->pending_read_mems;
	  while (pending)
	    {
	      rtx mem = pending_mem->element ();
	      if (REG_P (mem)
		  || (anti_dependence (mem, t)
		      && ! sched_insns_conditions_mutex_p (insn, pending->insn ())))
		note_mem_dep (t, mem, pending->insn (), DEP_ANTI);

	      pending = pending->next ();
	      pending_mem = pending_mem->next ();
	    }

	  /* ...and output-depends on every pending write it may
	     conflict with.  */
	  pending = deps->pending_write_insns;
	  pending_mem = deps->pending_write_mems;
	  while (pending)
	    {
	      if (output_dependence (pending_mem->element (), t)
		  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		note_mem_dep (t, pending_mem->element (),
			      pending->insn (),
			      DEP_OUTPUT);

	      pending = pending->next ();
	      pending_mem = pending_mem-> next ();
	    }

	  add_dependence_list (insn, deps->last_pending_memory_flush, 1,
			       REG_DEP_ANTI, true);
	  add_dependence_list (insn, deps->pending_jump_insns, 1,
			       REG_DEP_CONTROL, true);

	  if (!deps->readonly)
	    add_insn_mem_dependence (deps, false, insn, dest);
	}
      /* The address of the store is a read.  */
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
2568 :
/* Analyze the uses of memory and registers in rtx X in INSN, recording
   register uses via sched_analyze_reg and memory reads against the
   pending read/write lists in DEPS.  Recurses over the whole rtx.
   CAN_START_LHS_RHS_P flags the toplevel SET_SRC for the sel-sched
   start_rhs / finish_rhs hooks.  */
static void
sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (x);
  if (x == 0)
    return;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    case REG:
      {
	int regno = REGNO (x);
	machine_mode mode = GET_MODE (x);

	sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
	/* Treat all reads of a stack register as modifying the TOS.  */
	if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
	  {
	    /* Avoid analyzing the same register twice.  */
	    if (regno != FIRST_STACK_REG)
	      sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
	    sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
	  }
#endif

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }

    case MEM:
      {
	if (DEBUG_INSN_P (insn) && sched_deps_info->use_cselib)
	  {
	    machine_mode address_mode = get_address_mode (x);

	    cselib_lookup_from_insn (XEXP (x, 0), address_mode, 1,
				     GET_MODE (x), insn);
	  }
	else if (!DEBUG_INSN_P (insn))
	  {
	    /* Reading memory.  */
	    rtx_insn_list *u;
	    rtx_insn_list *pending;
	    rtx_expr_list *pending_mem;
	    rtx t = x;

	    if (sched_deps_info->use_cselib)
	      {
		machine_mode address_mode = get_address_mode (t);

		t = shallow_copy_rtx (t);
		cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
					 GET_MODE (t), insn);
		XEXP (t, 0)
		  = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
						      insn);
	      }

	    t = canon_rtx (t);

	    /* A read only conflicts with a pending read through
	       read_dependence (e.g. aliasing barriers).  */
	    pending = deps->pending_read_insns;
	    pending_mem = deps->pending_read_mems;
	    while (pending)
	      {
		rtx mem = pending_mem->element ();
		if (MEM_P (mem) && read_dependence (mem, t)
		    && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
		  note_mem_dep (t, mem, pending->insn (), DEP_ANTI);

		pending = pending->next ();
		pending_mem = pending_mem->next ();
	      }

	    /* A read truly depends on every pending write it may
	       overlap; under spec scheduling that dep is speculative.  */
	    pending = deps->pending_write_insns;
	    pending_mem = deps->pending_write_mems;
	    while (pending)
	      {
		if (true_dependence (pending_mem->element (), VOIDmode, t)
		    && ! sched_insns_conditions_mutex_p (insn,
							 pending->insn ()))
		  note_mem_dep (t, pending_mem->element (),
				pending->insn (),
				sched_deps_info->generate_spec_deps
				? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

		pending = pending->next ();
		pending_mem = pending_mem->next ();
	      }

	    for (u = deps->last_pending_memory_flush; u; u = u->next ())
	      add_dependence (insn, u->insn (), REG_DEP_ANTI);

	    /* A possibly-trapping load must not be hoisted above a
	       pending jump.  */
	    for (u = deps->pending_jump_insns; u; u = u->next ())
	      if (deps_may_trap_p (x))
		{
		  if ((sched_deps_info->generate_spec_deps)
		      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
		    {
		      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
					      MAX_DEP_WEAK);

		      note_dep (u->insn (), ds);
		    }
		  else
		    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
		}
	  }

	/* Always add these dependencies to pending_reads, since
	   this insn may be followed by a write.  */
	if (!deps->readonly)
	  {
	    if ((deps->pending_read_list_length
		 + deps->pending_write_list_length)
		>= param_max_pending_list_length
		&& !DEBUG_INSN_P (insn))
	      flush_pending_lists (deps, insn, true, true);
	    add_insn_mem_dependence (deps, true, insn, x);
	  }

	sched_analyze_2 (deps, XEXP (x, 0), insn);

	if (cslr_p && sched_deps_info->finish_rhs)
	  sched_deps_info->finish_rhs ();

	return;
      }

    /* Force pending stores to memory in case a trap handler needs them.
       Also force pending loads from memory; loads and stores can segfault
       and the signal handler won't be triggered if the trap insn was moved
       above load or store insn.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, true);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
	reg_pending_barrier = TRUE_BARRIER;
      /* Prefetch insn contains addresses only.  So if the prefetch
	 address has no registers, there will be no dependencies on
	 the prefetch insn.  This is wrong with result code
	 correctness point of view as such prefetch can be moved below
	 a jump insn which usually generates MOVE_BARRIER preventing
	 to move insns containing registers or memories through the
	 barrier.  It is also wrong with generated code performance
	 point of view as prefetch withouth dependecies will have a
	 tendency to be issued later instead of earlier.  It is hard
	 to generate accurate dependencies for prefetch insns as
	 prefetch has only the start address but it is better to have
	 something than nothing.  */
      if (!deps->readonly)
	{
	  rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
	  if (sched_deps_info->use_cselib)
	    cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
	  add_insn_mem_dependence (deps, true, insn, x);
	}
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */

    case ASM_OPERANDS:
    case ASM_INPUT:
      {
	/* Traditional and volatile asm instructions must be considered to use
	   and clobber all hard registers, all pseudo-registers and all of
	   memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

	   Consider for instance a volatile asm that changes the fpu rounding
	   mode.  An insn should not be moved across this even if it only uses
	   pseudo-regs because it might give an incorrectly rounded result.  */
	if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
	    && !DEBUG_INSN_P (insn))
	  reg_pending_barrier = TRUE_BARRIER;

	/* For all ASM_OPERANDS, we must traverse the vector of input operands.
	   We cannot just fall through here since then we would be confused
	   by the ASM_INPUT rtx inside ASM_OPERANDS, which do not indicate
	   traditional asms unlike their normal usage.  */

	if (code == ASM_OPERANDS)
	  {
	    for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
	      sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

	    if (cslr_p && sched_deps_info->finish_rhs)
	      sched_deps_info->finish_rhs ();

	    return;
	  }
	break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as writes
	 to get proper dependencies for following instructions.  We must handle
	 them as reads to get proper dependencies from this to previous
	 instructions.  Thus we need to pass them to both sched_analyze_1
	 and sched_analyze_2.  We must call sched_analyze_2 first in order
	 to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
	sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
	for (j = 0; j < XVECLEN (x, i); j++)
	  sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}
2840 :
2841 : /* Try to group two fusible insns together to prevent scheduler
2842 : from scheduling them apart. */
2843 :
2844 : static void
2845 108429525 : sched_macro_fuse_insns (rtx_insn *insn)
2846 : {
2847 108429525 : rtx_insn *prev;
2848 : /* No target hook would return true for debug insn as any of the
2849 : hook operand, and with very large sequences of only debug insns
2850 : where on each we call sched_macro_fuse_insns it has quadratic
2851 : compile time complexity. */
2852 108429525 : if (DEBUG_INSN_P (insn))
2853 : return;
2854 59936414 : prev = prev_nonnote_nondebug_insn_bb (insn);
2855 59936414 : if (!prev)
2856 : return;
2857 :
2858 49668144 : if (any_condjump_p (insn))
2859 : {
2860 4725639 : unsigned int condreg1, condreg2;
2861 4725639 : rtx cc_reg_1;
2862 4725639 : if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
2863 : {
2864 4725639 : cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
2865 4725639 : if (reg_referenced_p (cc_reg_1, PATTERN (insn))
2866 4725639 : && modified_in_p (cc_reg_1, prev))
2867 : {
2868 4605932 : if (targetm.sched.macro_fusion_pair_p (prev, insn))
2869 4179775 : SCHED_GROUP_P (insn) = 1;
2870 4605932 : return;
2871 : }
2872 : }
2873 : }
2874 :
2875 45062212 : if (single_set (insn) && single_set (prev))
2876 : {
2877 37953457 : if (targetm.sched.macro_fusion_pair_p (prev, insn))
2878 13 : SCHED_GROUP_P (insn) = 1;
2879 : }
2880 : }
2881 :
2882 : /* Get the implicit reg pending clobbers for INSN and save them in TEMP. */
2883 : void
2884 27251 : get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
2885 : {
2886 27251 : extract_insn (insn);
2887 27251 : preprocess_constraints (insn);
2888 27251 : alternative_mask preferred = get_preferred_alternatives (insn);
2889 27251 : ira_implicitly_set_insn_hard_regs (temp, preferred);
2890 27251 : *temp &= ~ira_no_alloc_regs;
2891 27251 : }
2892 :
2893 : /* Analyze an INSN with pattern X to find all dependencies. */
2894 : static void
2895 108664605 : sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
2896 : {
2897 108664605 : RTX_CODE code = GET_CODE (x);
2898 108664605 : rtx link;
2899 108664605 : unsigned i;
2900 108664605 : reg_set_iterator rsi;
2901 :
2902 108664605 : if (! reload_completed)
2903 : {
2904 25564 : HARD_REG_SET temp;
2905 25564 : get_implicit_reg_pending_clobbers (&temp, insn);
2906 51128 : implicit_reg_pending_clobbers |= temp;
2907 : }
2908 :
2909 217329210 : can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
2910 108664605 : && code == SET);
2911 :
2912 : /* Group compare and branch insns for macro-fusion. */
2913 108664605 : if (!deps->readonly
2914 108634557 : && targetm.sched.macro_fusion_p
2915 217299162 : && targetm.sched.macro_fusion_p ())
2916 108429525 : sched_macro_fuse_insns (insn);
2917 :
2918 108664605 : if (may_trap_p (x))
2919 : /* Avoid moving trapping instructions across function calls that might
2920 : not always return. */
2921 8762047 : add_dependence_list (insn, deps->last_function_call_may_noreturn,
2922 : 1, REG_DEP_ANTI, true);
2923 :
2924 : /* We must avoid creating a situation in which two successors of the
2925 : current block have different unwind info after scheduling. If at any
2926 : point the two paths re-join this leads to incorrect unwind info. */
2927 : /* ??? There are certain situations involving a forced frame pointer in
2928 : which, with extra effort, we could fix up the unwind info at a later
2929 : CFG join. However, it seems better to notice these cases earlier
2930 : during prologue generation and avoid marking the frame pointer setup
2931 : as frame-related at all. */
2932 108664605 : if (RTX_FRAME_RELATED_P (insn))
2933 : {
2934 : /* Make sure prologue insn is scheduled before next jump. */
2935 3667675 : deps->sched_before_next_jump
2936 3667675 : = alloc_INSN_LIST (insn, deps->sched_before_next_jump);
2937 :
2938 : /* Make sure epilogue insn is scheduled after preceding jumps. */
2939 3667675 : add_dependence_list (insn, deps->last_pending_memory_flush, 1,
2940 : REG_DEP_ANTI, true);
2941 3667675 : add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
2942 : true);
2943 : }
2944 :
2945 108664605 : if (code == COND_EXEC)
2946 : {
2947 0 : sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);
2948 :
2949 : /* ??? Should be recording conditions so we reduce the number of
2950 : false dependencies. */
2951 0 : x = COND_EXEC_CODE (x);
2952 0 : code = GET_CODE (x);
2953 : }
2954 108664605 : if (code == SET || code == CLOBBER)
2955 : {
2956 47656374 : sched_analyze_1 (deps, x, insn);
2957 :
2958 : /* Bare clobber insns are used for letting life analysis, reg-stack
2959 : and others know that a value is dead. Depend on the last call
2960 : instruction so that reg-stack won't get confused. */
2961 47656374 : if (code == CLOBBER)
2962 81791 : add_dependence_list (insn, deps->last_function_call, 1,
2963 : REG_DEP_OUTPUT, true);
2964 : }
2965 61008231 : else if (code == PARALLEL)
2966 : {
2967 24798884 : for (i = XVECLEN (x, 0); i--;)
2968 : {
2969 17053215 : rtx sub = XVECEXP (x, 0, i);
2970 17053215 : code = GET_CODE (sub);
2971 :
2972 17053215 : if (code == COND_EXEC)
2973 : {
2974 0 : sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
2975 0 : sub = COND_EXEC_CODE (sub);
2976 0 : code = GET_CODE (sub);
2977 : }
2978 17053215 : else if (code == SET || code == CLOBBER)
2979 16579051 : sched_analyze_1 (deps, sub, insn);
2980 : else
2981 474164 : sched_analyze_2 (deps, sub, insn);
2982 : }
2983 : }
2984 : else
2985 53262562 : sched_analyze_2 (deps, x, insn);
2986 :
2987 : /* Mark registers CLOBBERED or used by called function. */
2988 108664605 : if (CALL_P (insn))
2989 : {
2990 13375131 : for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
2991 : {
2992 8909066 : if (GET_CODE (XEXP (link, 0)) == CLOBBER)
2993 422915 : sched_analyze_1 (deps, XEXP (link, 0), insn);
2994 8486151 : else if (GET_CODE (XEXP (link, 0)) != SET)
2995 8421129 : sched_analyze_2 (deps, XEXP (link, 0), insn);
2996 : }
2997 : /* Don't schedule anything after a tail call, tail call needs
2998 : to use at least all call-saved registers. */
2999 4466065 : if (SIBLING_CALL_P (insn))
3000 128288 : reg_pending_barrier = TRUE_BARRIER;
3001 4337777 : else if (find_reg_note (insn, REG_SETJMP, NULL))
3002 717 : reg_pending_barrier = MOVE_BARRIER;
3003 : }
3004 :
3005 108664605 : if (JUMP_P (insn))
3006 : {
3007 7693777 : rtx_insn *next = next_nonnote_nondebug_insn (insn);
3008 : /* ??? For tablejumps, the barrier may appear not immediately after
3009 : the jump, but after a label and a jump_table_data insn. */
3010 8735742 : if (next && LABEL_P (next) && NEXT_INSN (next)
3011 8735757 : && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
3012 998 : next = NEXT_INSN (NEXT_INSN (next));
3013 7693777 : if (next && BARRIER_P (next))
3014 2850459 : reg_pending_barrier = MOVE_BARRIER;
3015 : else
3016 : {
3017 4843318 : rtx_insn_list *pending;
3018 4843318 : rtx_expr_list *pending_mem;
3019 :
3020 4843318 : if (sched_deps_info->compute_jump_reg_dependencies)
3021 : {
3022 4841689 : (*sched_deps_info->compute_jump_reg_dependencies)
3023 4841689 : (insn, reg_pending_control_uses);
3024 :
3025 : /* Make latency of jump equal to 0 by using anti-dependence. */
3026 4841850 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3027 : {
3028 161 : struct deps_reg *reg_last = &deps->reg_last[i];
3029 161 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
3030 : false);
3031 161 : add_dependence_list (insn, reg_last->implicit_sets,
3032 : 0, REG_DEP_ANTI, false);
3033 161 : add_dependence_list (insn, reg_last->clobbers, 0,
3034 : REG_DEP_ANTI, false);
3035 : }
3036 : }
3037 :
3038 : /* All memory writes and volatile reads must happen before the
3039 : jump. Non-volatile reads must happen before the jump iff
3040 : the result is needed by the above register used mask. */
3041 :
3042 4843318 : pending = deps->pending_write_insns;
3043 4843318 : pending_mem = deps->pending_write_mems;
3044 7268720 : while (pending)
3045 : {
3046 2425402 : if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3047 2425402 : add_dependence (insn, pending->insn (), REG_DEP_OUTPUT);
3048 2425402 : pending = pending->next ();
3049 2425402 : pending_mem = pending_mem->next ();
3050 : }
3051 :
3052 4843318 : pending = deps->pending_read_insns;
3053 4843318 : pending_mem = deps->pending_read_mems;
3054 11684068 : while (pending)
3055 : {
3056 6840750 : rtx mem = pending_mem->element ();
3057 6090106 : if (MEM_P (mem) && MEM_VOLATILE_P (mem)
3058 7163929 : && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
3059 323179 : add_dependence (insn, pending->insn (), REG_DEP_OUTPUT);
3060 6840750 : pending = pending->next ();
3061 6840750 : pending_mem = pending_mem->next ();
3062 : }
3063 :
3064 4843318 : add_dependence_list (insn, deps->last_pending_memory_flush, 1,
3065 : REG_DEP_ANTI, true);
3066 4843318 : add_dependence_list (insn, deps->pending_jump_insns, 1,
3067 : REG_DEP_ANTI, true);
3068 : }
3069 : }
3070 :
3071 : /* If this instruction can throw an exception, then moving it changes
3072 : where block boundaries fall. This is mighty confusing elsewhere.
3073 : Therefore, prevent such an instruction from being moved. Same for
3074 : non-jump instructions that define block boundaries.
3075 : ??? Unclear whether this is still necessary in EBB mode. If not,
3076 : add_branch_dependences should be adjusted for RGN mode instead. */
3077 12159842 : if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
3078 120341401 : || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
3079 595669 : reg_pending_barrier = MOVE_BARRIER;
3080 :
3081 108664605 : if (sched_pressure != SCHED_PRESSURE_NONE)
3082 : {
3083 4315 : setup_insn_reg_uses (deps, insn);
3084 4315 : init_insn_reg_pressure_info (insn);
3085 : }
3086 :
3087 : /* Add register dependencies for insn. */
3088 108664605 : if (DEBUG_INSN_P (insn))
3089 : {
3090 48493947 : rtx_insn *prev = deps->last_debug_insn;
3091 48493947 : rtx_insn_list *u;
3092 :
3093 48493947 : if (!deps->readonly)
3094 48493838 : deps->last_debug_insn = insn;
3095 :
3096 48493947 : if (prev)
3097 44971727 : add_dependence (insn, prev, REG_DEP_ANTI);
3098 :
3099 48493947 : add_dependence_list (insn, deps->last_function_call, 1,
3100 : REG_DEP_ANTI, false);
3101 :
3102 48493947 : if (!sel_sched_p ())
3103 57423463 : for (u = deps->last_pending_memory_flush; u; u = u->next ())
3104 8929733 : add_dependence (insn, u->insn (), REG_DEP_ANTI);
3105 :
3106 58688363 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3107 : {
3108 10194416 : struct deps_reg *reg_last = &deps->reg_last[i];
3109 10194416 : add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
3110 : /* There's no point in making REG_DEP_CONTROL dependencies for
3111 : debug insns. */
3112 10194416 : add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
3113 : false);
3114 :
3115 10194416 : if (!deps->readonly)
3116 10194416 : reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3117 : }
3118 48493947 : CLEAR_REG_SET (reg_pending_uses);
3119 :
3120 : /* Quite often, a debug insn will refer to stuff in the
3121 : previous instruction, but the reason we want this
3122 : dependency here is to make sure the scheduler doesn't
3123 : gratuitously move a debug insn ahead. This could dirty
3124 : DF flags and cause additional analysis that wouldn't have
3125 : occurred in compilation without debug insns, and such
3126 : additional analysis can modify the generated code. */
3127 48493947 : prev = PREV_INSN (insn);
3128 :
3129 48493947 : if (prev && NONDEBUG_INSN_P (prev))
3130 3851060 : add_dependence (insn, prev, REG_DEP_ANTI);
3131 : }
3132 : else
3133 : {
3134 60170658 : regset_head set_or_clobbered;
3135 :
3136 128532237 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
3137 : {
3138 68361579 : struct deps_reg *reg_last = &deps->reg_last[i];
3139 68361579 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3140 68361579 : add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
3141 : false);
3142 68361579 : add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3143 : false);
3144 :
3145 68361579 : if (!deps->readonly)
3146 : {
3147 68347938 : reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3148 68347938 : reg_last->uses_length++;
3149 : }
3150 : }
3151 :
3152 5595871194 : for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3153 5535700536 : if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
3154 : {
3155 14624205 : struct deps_reg *reg_last = &deps->reg_last[i];
3156 14624205 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
3157 14624205 : add_dependence_list (insn, reg_last->implicit_sets, 0,
3158 : REG_DEP_ANTI, false);
3159 14624205 : add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
3160 : false);
3161 :
3162 14624205 : if (!deps->readonly)
3163 : {
3164 14623032 : reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
3165 14623032 : reg_last->uses_length++;
3166 : }
3167 : }
3168 :
3169 60170658 : if (targetm.sched.exposed_pipeline)
3170 : {
3171 0 : INIT_REG_SET (&set_or_clobbered);
3172 0 : bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
3173 : reg_pending_sets);
3174 0 : EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
3175 : {
3176 0 : struct deps_reg *reg_last = &deps->reg_last[i];
3177 0 : rtx list;
3178 0 : for (list = reg_last->uses; list; list = XEXP (list, 1))
3179 : {
3180 0 : rtx other = XEXP (list, 0);
3181 0 : if (INSN_CACHED_COND (other) != const_true_rtx
3182 0 : && refers_to_regno_p (i, INSN_CACHED_COND (other)))
3183 0 : INSN_CACHED_COND (other) = const_true_rtx;
3184 : }
3185 : }
3186 : }
3187 :
3188 : /* If the current insn is conditional, we can't free any
3189 : of the lists. */
3190 60170658 : if (sched_has_condition_p (insn))
3191 : {
3192 4842897 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3193 : {
3194 0 : struct deps_reg *reg_last = &deps->reg_last[i];
3195 0 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3196 : false);
3197 0 : add_dependence_list (insn, reg_last->implicit_sets, 0,
3198 : REG_DEP_ANTI, false);
3199 0 : add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3200 : false);
3201 0 : add_dependence_list (insn, reg_last->control_uses, 0,
3202 : REG_DEP_CONTROL, false);
3203 :
3204 0 : if (!deps->readonly)
3205 : {
3206 0 : reg_last->clobbers
3207 0 : = alloc_INSN_LIST (insn, reg_last->clobbers);
3208 0 : reg_last->clobbers_length++;
3209 : }
3210 : }
3211 4842897 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3212 : {
3213 0 : struct deps_reg *reg_last = &deps->reg_last[i];
3214 0 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3215 : false);
3216 0 : add_dependence_list (insn, reg_last->implicit_sets, 0,
3217 : REG_DEP_ANTI, false);
3218 0 : add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
3219 : false);
3220 0 : add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3221 : false);
3222 0 : add_dependence_list (insn, reg_last->control_uses, 0,
3223 : REG_DEP_CONTROL, false);
3224 :
3225 0 : if (!deps->readonly)
3226 0 : reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3227 : }
3228 : }
3229 : else
3230 : {
3231 430292149 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3232 : {
3233 374964388 : struct deps_reg *reg_last = &deps->reg_last[i];
3234 374964388 : if (reg_last->uses_length >= param_max_pending_list_length
3235 374961773 : || reg_last->clobbers_length >= param_max_pending_list_length)
3236 : {
3237 330050 : add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
3238 : REG_DEP_OUTPUT, false);
3239 330050 : add_dependence_list_and_free (deps, insn,
3240 : ®_last->implicit_sets, 0,
3241 : REG_DEP_ANTI, false);
3242 330050 : add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
3243 : REG_DEP_ANTI, false);
3244 330050 : add_dependence_list_and_free (deps, insn,
3245 : ®_last->control_uses, 0,
3246 : REG_DEP_ANTI, false);
3247 330050 : add_dependence_list_and_free (deps, insn,
3248 : ®_last->clobbers, 0,
3249 : REG_DEP_OUTPUT, false);
3250 :
3251 330050 : if (!deps->readonly)
3252 : {
3253 330049 : reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3254 330049 : reg_last->clobbers_length = 0;
3255 330049 : reg_last->uses_length = 0;
3256 : }
3257 : }
3258 : else
3259 : {
3260 374634338 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
3261 : false);
3262 374634338 : add_dependence_list (insn, reg_last->implicit_sets, 0,
3263 : REG_DEP_ANTI, false);
3264 374634338 : add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3265 : false);
3266 374634338 : add_dependence_list (insn, reg_last->control_uses, 0,
3267 : REG_DEP_CONTROL, false);
3268 : }
3269 :
3270 374964388 : if (!deps->readonly)
3271 : {
3272 374935007 : reg_last->clobbers_length++;
3273 374935007 : reg_last->clobbers
3274 374935007 : = alloc_INSN_LIST (insn, reg_last->clobbers);
3275 : }
3276 : }
3277 93620961 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3278 : {
3279 38293200 : struct deps_reg *reg_last = &deps->reg_last[i];
3280 :
3281 38293200 : add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
3282 : REG_DEP_OUTPUT, false);
3283 38293200 : add_dependence_list_and_free (deps, insn,
3284 : ®_last->implicit_sets,
3285 : 0, REG_DEP_ANTI, false);
3286 38293200 : add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
3287 : REG_DEP_OUTPUT, false);
3288 38293200 : add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
3289 : REG_DEP_ANTI, false);
3290 38293200 : add_dependence_list (insn, reg_last->control_uses, 0,
3291 : REG_DEP_CONTROL, false);
3292 :
3293 38293200 : if (!deps->readonly)
3294 : {
3295 38284337 : reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3296 38284337 : reg_last->uses_length = 0;
3297 38284337 : reg_last->clobbers_length = 0;
3298 : }
3299 : }
3300 : }
3301 60170658 : if (!deps->readonly)
3302 : {
3303 60140880 : EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3304 : {
3305 161 : struct deps_reg *reg_last = &deps->reg_last[i];
3306 161 : reg_last->control_uses
3307 161 : = alloc_INSN_LIST (insn, reg_last->control_uses);
3308 : }
3309 : }
3310 : }
3311 :
3312 10105808265 : for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
3313 9997143660 : if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
3314 : {
3315 1709 : struct deps_reg *reg_last = &deps->reg_last[i];
3316 1709 : add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
3317 1709 : add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
3318 1709 : add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
3319 1709 : add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
3320 : false);
3321 :
3322 1709 : if (!deps->readonly)
3323 853 : reg_last->implicit_sets
3324 853 : = alloc_INSN_LIST (insn, reg_last->implicit_sets);
3325 : }
3326 :
3327 108664605 : if (!deps->readonly)
3328 : {
3329 108634557 : IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
3330 108634557 : IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
3331 108634557 : IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
3332 108634557 : IOR_REG_SET_HRS (&deps->reg_last_in_use,
3333 : implicit_reg_pending_uses
3334 : | implicit_reg_pending_clobbers);
3335 :
3336 : /* Set up the pending barrier found. */
3337 108634557 : deps->last_reg_pending_barrier = reg_pending_barrier;
3338 : }
3339 :
3340 108664605 : CLEAR_REG_SET (reg_pending_uses);
3341 108664605 : CLEAR_REG_SET (reg_pending_clobbers);
3342 108664605 : CLEAR_REG_SET (reg_pending_sets);
3343 108664605 : CLEAR_REG_SET (reg_pending_control_uses);
3344 434658420 : CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
3345 108664605 : CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
3346 :
3347 : /* Add dependencies if a scheduling barrier was found. */
3348 108664605 : if (reg_pending_barrier)
3349 : {
3350 : /* In the case of barrier the most added dependencies are not
3351 : real, so we use anti-dependence here. */
3352 4264633 : if (sched_has_condition_p (insn))
3353 : {
3354 0 : EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3355 : {
3356 0 : struct deps_reg *reg_last = &deps->reg_last[i];
3357 0 : add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
3358 : true);
3359 0 : add_dependence_list (insn, reg_last->sets, 0,
3360 0 : reg_pending_barrier == TRUE_BARRIER
3361 : ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3362 0 : add_dependence_list (insn, reg_last->implicit_sets, 0,
3363 : REG_DEP_ANTI, true);
3364 0 : add_dependence_list (insn, reg_last->clobbers, 0,
3365 0 : reg_pending_barrier == TRUE_BARRIER
3366 : ? REG_DEP_TRUE : REG_DEP_ANTI, true);
3367 : }
3368 : }
3369 : else
3370 : {
3371 202583600 : EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
3372 : {
3373 198318967 : struct deps_reg *reg_last = &deps->reg_last[i];
3374 198318967 : add_dependence_list_and_free (deps, insn, ®_last->uses, 0,
3375 : REG_DEP_ANTI, true);
3376 198318967 : add_dependence_list_and_free (deps, insn,
3377 : ®_last->control_uses, 0,
3378 : REG_DEP_CONTROL, true);
3379 198318967 : add_dependence_list_and_free (deps, insn, ®_last->sets, 0,
3380 198318967 : reg_pending_barrier == TRUE_BARRIER
3381 : ? REG_DEP_TRUE : REG_DEP_ANTI,
3382 : true);
3383 198318967 : add_dependence_list_and_free (deps, insn,
3384 : ®_last->implicit_sets, 0,
3385 : REG_DEP_ANTI, true);
3386 198318967 : add_dependence_list_and_free (deps, insn, ®_last->clobbers, 0,
3387 198318967 : reg_pending_barrier == TRUE_BARRIER
3388 : ? REG_DEP_TRUE : REG_DEP_ANTI,
3389 : true);
3390 :
3391 198318967 : if (!deps->readonly)
3392 : {
3393 198311147 : reg_last->uses_length = 0;
3394 198311147 : reg_last->clobbers_length = 0;
3395 : }
3396 : }
3397 : }
3398 :
3399 4264633 : if (!deps->readonly)
3400 396625075 : for (i = 0; i < (unsigned)deps->max_reg; i++)
3401 : {
3402 392361112 : struct deps_reg *reg_last = &deps->reg_last[i];
3403 392361112 : reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3404 392361112 : SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
3405 : }
3406 :
3407 : /* Don't flush pending lists on speculative checks for
3408 : selective scheduling. */
3409 4264633 : if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
3410 4264633 : flush_pending_lists (deps, insn, true, true);
3411 :
3412 4264633 : reg_pending_barrier = NOT_A_BARRIER;
3413 : }
3414 :
3415 : /* If a post-call group is still open, see if it should remain so.
3416 : This insn must be a simple move of a hard reg to a pseudo or
3417 : vice-versa.
3418 :
3419 : We must avoid moving these insns for correctness on targets
3420 : with small register classes, and for special registers like
3421 : PIC_OFFSET_TABLE_REGNUM. For simplicity, extend this to all
3422 : hard regs for all targets. */
3423 :
3424 108664605 : if (deps->in_post_call_group_p)
3425 : {
3426 1174 : rtx tmp, set = single_set (insn);
3427 1174 : int src_regno, dest_regno;
3428 :
3429 1174 : if (set == NULL)
3430 : {
3431 395 : if (DEBUG_INSN_P (insn))
3432 : /* We don't want to mark debug insns as part of the same
3433 : sched group. We know they really aren't, but if we use
3434 : debug insns to tell that a call group is over, we'll
3435 : get different code if debug insns are not there and
3436 : instructions that follow seem like they should be part
3437 : of the call group.
3438 :
3439 : Also, if we did, chain_to_prev_insn would move the
3440 : deps of the debug insn to the call insn, modifying
3441 : non-debug post-dependency counts of the debug insn
3442 : dependencies and otherwise messing with the scheduling
3443 : order.
3444 :
3445 : Instead, let such debug insns be scheduled freely, but
3446 : keep the call group open in case there are insns that
3447 : should be part of it afterwards. Since we grant debug
3448 : insns higher priority than even sched group insns, it
3449 : will all turn out all right. */
3450 332 : goto debug_dont_end_call_group;
3451 : else
3452 63 : goto end_call_group;
3453 : }
3454 :
3455 779 : tmp = SET_DEST (set);
3456 779 : if (GET_CODE (tmp) == SUBREG)
3457 0 : tmp = SUBREG_REG (tmp);
3458 779 : if (REG_P (tmp))
3459 716 : dest_regno = REGNO (tmp);
3460 : else
3461 63 : goto end_call_group;
3462 :
3463 716 : tmp = SET_SRC (set);
3464 716 : if (GET_CODE (tmp) == SUBREG)
3465 23 : tmp = SUBREG_REG (tmp);
3466 716 : if ((GET_CODE (tmp) == PLUS
3467 716 : || GET_CODE (tmp) == MINUS)
3468 89 : && REG_P (XEXP (tmp, 0))
3469 79 : && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
3470 738 : && dest_regno == STACK_POINTER_REGNUM)
3471 : src_regno = STACK_POINTER_REGNUM;
3472 694 : else if (REG_P (tmp))
3473 284 : src_regno = REGNO (tmp);
3474 : else
3475 410 : goto end_call_group;
3476 :
3477 306 : if (src_regno < FIRST_PSEUDO_REGISTER
3478 306 : || dest_regno < FIRST_PSEUDO_REGISTER)
3479 : {
3480 250 : if (!deps->readonly
3481 209 : && deps->in_post_call_group_p == post_call_initial)
3482 0 : deps->in_post_call_group_p = post_call;
3483 :
3484 250 : if (!sel_sched_p () || sched_emulate_haifa_p)
3485 : {
3486 196 : SCHED_GROUP_P (insn) = 1;
3487 196 : CANT_MOVE (insn) = 1;
3488 : }
3489 : }
3490 : else
3491 : {
3492 56 : end_call_group:
3493 592 : if (!deps->readonly)
3494 428 : deps->in_post_call_group_p = not_post_call;
3495 : }
3496 : }
3497 :
3498 108663431 : debug_dont_end_call_group:
3499 108664605 : if ((current_sched_info->flags & DO_SPECULATION)
3500 108664605 : && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
3501 : /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
3502 : be speculated. */
3503 : {
3504 0 : if (sel_sched_p ())
3505 0 : sel_mark_hard_insn (insn);
3506 : else
3507 : {
3508 0 : sd_iterator_def sd_it;
3509 0 : dep_t dep;
3510 :
3511 0 : for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
3512 0 : sd_iterator_cond (&sd_it, &dep);)
3513 0 : change_spec_dep_to_hard (sd_it);
3514 : }
3515 : }
3516 :
3517 : /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
3518 : honor their original ordering. */
3519 108664605 : if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
3520 : {
3521 4000292 : if (deps->last_args_size)
3522 2732094 : add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
3523 4000292 : if (!deps->readonly)
3524 4000131 : deps->last_args_size = insn;
3525 : }
3526 :
3527 : /* We must not mix prologue and epilogue insns. See PR78029. */
3528 108664605 : if (prologue_contains (insn))
3529 : {
3530 3220263 : add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI, true);
3531 3220263 : if (!deps->readonly)
3532 : {
3533 3219812 : if (deps->last_logue_was_epilogue)
3534 2 : free_INSN_LIST_list (&deps->last_prologue);
3535 3219812 : deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
3536 3219812 : deps->last_logue_was_epilogue = false;
3537 : }
3538 : }
3539 :
3540 108664605 : if (epilogue_contains (insn))
3541 : {
3542 3122000 : add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI, true);
3543 3122000 : if (!deps->readonly)
3544 : {
3545 3121491 : if (!deps->last_logue_was_epilogue)
3546 1150580 : free_INSN_LIST_list (&deps->last_epilogue);
3547 3121491 : deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
3548 3121491 : deps->last_logue_was_epilogue = true;
3549 : }
3550 : }
3551 108664605 : }
3552 :
3553 : /* Return TRUE if INSN might not always return normally (e.g. call exit,
3554 : longjmp, loop forever, ...). */
3555 : /* FIXME: Why can't this function just use flags_from_decl_or_type and
3556 : test for ECF_NORETURN? */
3557 : static bool
3558 4465572 : call_may_noreturn_p (rtx_insn *insn)
3559 : {
3560 4465572 : rtx call;
3561 :
3562 : /* const or pure calls that aren't looping will always return. */
3563 8788641 : if (RTL_CONST_OR_PURE_CALL_P (insn)
3564 4739650 : && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
3565 : return false;
3566 :
3567 4080842 : call = get_call_rtx_from (insn);
3568 4080842 : if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
3569 : {
3570 3905415 : rtx symbol = XEXP (XEXP (call, 0), 0);
3571 3905415 : if (SYMBOL_REF_DECL (symbol)
3572 3905415 : && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
3573 : {
3574 3667260 : if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
3575 3667260 : == BUILT_IN_NORMAL)
3576 527733 : switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
3577 : {
3578 : case BUILT_IN_BCMP:
3579 : case BUILT_IN_BCOPY:
3580 : case BUILT_IN_BZERO:
3581 : case BUILT_IN_INDEX:
3582 : case BUILT_IN_MEMCHR:
3583 : case BUILT_IN_MEMCMP:
3584 : case BUILT_IN_MEMCPY:
3585 : case BUILT_IN_MEMMOVE:
3586 : case BUILT_IN_MEMPCPY:
3587 : case BUILT_IN_MEMSET:
3588 : case BUILT_IN_RINDEX:
3589 : case BUILT_IN_STPCPY:
3590 : case BUILT_IN_STPNCPY:
3591 : case BUILT_IN_STRCAT:
3592 : case BUILT_IN_STRCHR:
3593 : case BUILT_IN_STRCMP:
3594 : case BUILT_IN_STRCPY:
3595 : case BUILT_IN_STRCSPN:
3596 : case BUILT_IN_STRLEN:
3597 : case BUILT_IN_STRNCAT:
3598 : case BUILT_IN_STRNCMP:
3599 : case BUILT_IN_STRNCPY:
3600 : case BUILT_IN_STRPBRK:
3601 : case BUILT_IN_STRRCHR:
3602 : case BUILT_IN_STRSPN:
3603 : case BUILT_IN_STRSTR:
3604 : /* Assume certain string/memory builtins always return. */
3605 : return false;
3606 : default:
3607 : break;
3608 : }
3609 : }
3610 : }
3611 :
3612 : /* For all other calls assume that they might not always return. */
3613 : return true;
3614 : }
3615 :
3616 : /* Return true if INSN should be made dependent on the previous instruction
3617 : group, and if all INSN's dependencies should be moved to the first
3618 : instruction of that group. */
3619 :
3620 : static bool
3621 55704593 : chain_to_prev_insn_p (rtx_insn *insn)
3622 : {
3623 : /* INSN forms a group with the previous instruction. */
3624 55704593 : if (SCHED_GROUP_P (insn))
3625 : return true;
3626 :
3627 : /* If the previous instruction clobbers a register R and this one sets
3628 : part of R, the clobber was added specifically to help us track the
3629 : liveness of R. There's no point scheduling the clobber and leaving
3630 : INSN behind, especially if we move the clobber to another block. */
3631 51523864 : rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
3632 51523864 : if (prev
3633 50571073 : && INSN_P (prev)
3634 45749886 : && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
3635 93034086 : && GET_CODE (PATTERN (prev)) == CLOBBER)
3636 : {
3637 81697 : rtx x = XEXP (PATTERN (prev), 0);
3638 81697 : if (set_of (x, insn))
3639 : return true;
3640 : }
3641 :
3642 : return false;
3643 : }
3644 :
/* Analyze INSN with DEPS as a context, recording the backward dependencies
   INSN has on earlier insns tracked in DEPS.  Jumps additionally become
   memory-flush barriers; calls conservatively clobber/use hard registers
   and flush pending memory references.  */
void
deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
{
  /* Notify the active scheduler (Haifa or selective) that analysis of
     INSN begins.  */
  if (sched_deps_info->start_insn)
    sched_deps_info->start_insn (insn);

  /* Record the condition for this insn.  */
  if (NONDEBUG_INSN_P (insn))
    {
      rtx t;
      sched_get_condition_with_rev (insn, NULL);
      t = INSN_CACHED_COND (insn);
      INSN_COND_DEPS (insn) = NULL;
      /* When predication is enabled after reload and the condition is a
	 simple REG-vs-CONSTANT comparison, remember every insn that last
	 wrote any hard register making up the condition.  */
      if (reload_completed
	  && (current_sched_info->flags & DO_PREDICATION)
	  && COMPARISON_P (t)
	  && REG_P (XEXP (t, 0))
	  && CONSTANT_P (XEXP (t, 1)))
	{
	  unsigned int regno;
	  int nregs;
	  rtx_insn_list *cond_deps = NULL;
	  t = XEXP (t, 0);
	  regno = REGNO (t);
	  nregs = REG_NREGS (t);
	  while (nregs-- > 0)
	    {
	      struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
	      cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
	      cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
	    }
	  INSN_COND_DEPS (insn) = cond_deps;
	}
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
	 a scheduling barrier for memory references.  */
      if (!deps->readonly
	  && !(sel_sched_p ()
	       && sel_insn_is_speculation_check (insn)))
	{
	  /* Keep the list a reasonable size.  */
	  if (deps->pending_flush_length++ >= param_max_pending_list_length)
	    flush_pending_lists (deps, insn, true, true);
	  else
	    deps->pending_jump_insns
	      = alloc_INSN_LIST (insn, deps->pending_jump_insns);
	}

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_jump, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (!reload_completed)
	{
	  /* Scheduling across calls may increase register pressure by extending
	     live ranges of pseudos over the call.  Worse, in presence of setjmp
	     it may incorrectly move up an assignment over a longjmp.  */
	  reg_pending_barrier = MOVE_BARRIER;
	}
      else if (find_reg_note (insn, REG_SETJMP, NULL))
	{
	  /* This is setjmp.  Assume that all registers, not just
	     hard registers, may be clobbered by this call.  */
	  reg_pending_barrier = MOVE_BARRIER;
	}
      else
	{
	  /* Classify every hard register by what the callee's ABI may do
	     to it; order of the else-if chain matters (first match wins).  */
	  function_abi callee_abi = insn_callee_abi (insn);
	  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
	    /* A call may read and modify global register variables.  */
	    if (global_regs[i])
	      {
		SET_REGNO_REG_SET (reg_pending_sets, i);
		SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	      }
	    /* Other call-clobbered hard regs may be clobbered.
	       Since we only have a choice between 'might be clobbered'
	       and 'definitely not clobbered', we must include all
	       partly call-clobbered registers here.  */
	    else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
	      SET_REGNO_REG_SET (reg_pending_clobbers, i);
	    /* We don't know what set of fixed registers might be used
	       by the function, but it is certain that the stack pointer
	       is among them, but be conservative.  */
	    else if (fixed_regs[i])
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	    /* The frame pointer is normally not used by the function
	       itself, but by the debugger.  */
	    /* ??? MIPS o32 is an exception.  It uses the frame pointer
	       in the macro expansion of jal but does not represent this
	       fact in the call_insn rtl.  */
	    else if (i == FRAME_POINTER_REGNUM
		     || (i == HARD_FRAME_POINTER_REGNUM
			 && (! reload_completed || frame_pointer_needed)))
	      SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
	}

      /* For each insn which shouldn't cross a call, add a dependence
	 between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
				    &deps->sched_before_next_call, 1,
				    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL would be in a sched group, then this will violate
	 convention that sched group insns have dependencies only on the
	 previous instruction.

	 Of course one can say: "Hey!  What about head of the sched group?"
	 And I will answer: "Basic principles (one dep per insn) are always
	 the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
	 all pending reads and writes, and start new dependencies starting
	 from here.  But only flush writes for constant calls (which may
	 be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
	{
	  /* Remember the last function call for limiting lifetimes.  */
	  free_INSN_LIST_list (&deps->last_function_call);
	  deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

	  if (call_may_noreturn_p (insn))
	    {
	      /* Remember the last function call that might not always return
		 normally for limiting moves of trapping insns.  */
	      free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
	      deps->last_function_call_may_noreturn
		= alloc_INSN_LIST (insn, NULL_RTX);
	    }

	  /* Before reload, begin a post-call group, so as to keep the
	     lifetimes of hard registers correct.  */
	  if (! reload_completed)
	    deps->in_post_call_group_p = post_call;
	}
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  /* Notify the scheduler that analysis of INSN is complete.  */
  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fixup the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}
3817 :
3818 : /* Initialize DEPS for the new block beginning with HEAD. */
3819 : void
3820 10331646 : deps_start_bb (class deps_desc *deps, rtx_insn *head)
3821 : {
3822 10331646 : gcc_assert (!deps->readonly);
3823 :
3824 : /* Before reload, if the previous block ended in a call, show that
3825 : we are inside a post-call group, so as to keep the lifetimes of
3826 : hard registers correct. */
3827 10331646 : if (! reload_completed && !LABEL_P (head))
3828 : {
3829 1233 : rtx_insn *insn = prev_nonnote_nondebug_insn (head);
3830 :
3831 1233 : if (insn && CALL_P (insn))
3832 5 : deps->in_post_call_group_p = post_call_initial;
3833 : }
3834 10331646 : }
3835 :
3836 : /* Analyze every insn between HEAD and TAIL inclusive, creating backward
3837 : dependencies for each insn. */
3838 : void
3839 10331646 : sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
3840 : {
3841 10331646 : rtx_insn *insn;
3842 :
3843 10331646 : if (sched_deps_info->use_cselib)
3844 174 : cselib_init (CSELIB_RECORD_MEMORY);
3845 :
3846 10331646 : deps_start_bb (deps, head);
3847 :
3848 114466431 : for (insn = head;; insn = NEXT_INSN (insn))
3849 : {
3850 114466431 : if (INSN_P (insn))
3851 : {
3852 : /* And initialize deps_lists. */
3853 108625611 : sd_init_insn (insn);
3854 : /* Clean up SCHED_GROUP_P which may be set by last
3855 : scheduler pass. */
3856 108625611 : if (SCHED_GROUP_P (insn))
3857 4161539 : SCHED_GROUP_P (insn) = 0;
3858 : }
3859 :
3860 114466431 : deps_analyze_insn (deps, insn);
3861 :
3862 114466431 : if (insn == tail)
3863 : {
3864 10331646 : if (sched_deps_info->use_cselib)
3865 174 : cselib_finish ();
3866 10331646 : return;
3867 : }
3868 104134785 : }
3869 : }
3870 :
/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  /* Walk either the resolved or the unresolved backward list.  */
  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  /* NOTE: the loop body does not advance SD_IT explicitly; removing the
     current link from the list leaves the iterator pointing at the next
     element, so sd_iterator_cond alone drives the iteration.  */
  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      /* Unlink the dep from its backward list and release the node; the
	 forward side was already cleared by sched_free_deps's first pass.  */
      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}
3898 :
3899 : /* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
3900 : deps_lists. */
3901 : void
3902 10314387 : sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
3903 : {
3904 10314387 : rtx_insn *insn;
3905 10314387 : rtx_insn *next_tail = NEXT_INSN (tail);
3906 :
3907 : /* We make two passes since some insns may be scheduled before their
3908 : dependencies are resolved. */
3909 130099372 : for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3910 109470598 : if (INSN_P (insn) && INSN_LUID (insn) > 0)
3911 : {
3912 : /* Clear forward deps and leave the dep_nodes to the
3913 : corresponding back_deps list. */
3914 108625611 : if (resolved_p)
3915 108621132 : clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
3916 : else
3917 4479 : clear_deps_list (INSN_FORW_DEPS (insn));
3918 : }
3919 119784985 : for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
3920 109470598 : if (INSN_P (insn) && INSN_LUID (insn) > 0)
3921 : {
3922 : /* Clear resolved back deps together with its dep_nodes. */
3923 108625611 : delete_dep_nodes_in_back_deps (insn, resolved_p);
3924 :
3925 108625611 : sd_finish_insn (insn);
3926 : }
3927 10314387 : }
3928 :
/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate reg_last array
   of class deps_desc immediately.  */

void
init_deps (class deps_desc *deps, bool lazy_reg_last)
{
  /* After reload only hard registers exist; before it, track pseudos too.  */
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  /* Start with no pending memory references, flushes, or special insns.  */
  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_prologue = 0;
  deps->last_epilogue = 0;
  deps->last_logue_was_epilogue = false;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}
3967 :
3968 : /* Init only reg_last field of DEPS, which was not allocated before as
3969 : we inited DEPS lazily. */
3970 : void
3971 3380 : init_deps_reg_last (class deps_desc *deps)
3972 : {
3973 3380 : gcc_assert (deps && deps->max_reg > 0);
3974 3380 : gcc_assert (deps->reg_last == NULL);
3975 :
3976 3380 : deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
3977 3380 : }
3978 :
3979 :
/* Free insn lists found in DEPS.  Safe to call on an already-freed
   context (detected via max_reg == 0).  */

void
free_deps (class deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Free the per-register dependence lists, visiting only registers that
     were ever recorded.  Without the EXECUTE_IF_SET, this loop is executed
     max_reg * nr_regions times.  For a testcase with 42000 regs and 8000
     small basic blocks, this loop accounted for nearly 60% (84 sec) of
     the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
	free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
	free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
	free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
	free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  free (NULL) is a no-op.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}
4028 :
/* Remove INSN from dependence contexts DEPS, dropping it from every
   pending list and per-register list it may appear on.  */
void
remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
					       &deps->pending_read_mems);
  /* Debug insns are not counted in pending_read_list_length.  */
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
					       &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

  /* Clearing a bit while EXECUTE_IF_SET_IN_REG_SET iterates is unsafe,
     so remember a now-empty register in TO_CLEAR and clear it on the
     next iteration (or after the loop).  */
  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
	{
	  CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
	  to_clear = -1U;
	}
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
	remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
	remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
	remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
	remove_from_dependence_list (insn, &reg_last->clobbers);
      /* If no list for this register remains non-empty, schedule its bit
	 in reg_last_in_use for clearing.  */
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
	  && !reg_last->clobbers)
	to_clear = i;
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
				   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}
4082 :
4083 : /* Init deps data vector. */
4084 : static void
4085 969267 : init_deps_data_vector (void)
4086 : {
4087 969267 : int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
4088 1934921 : if (reserve > 0 && ! h_d_i_d.space (reserve))
4089 965654 : h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true);
4090 969267 : }
4091 :
4092 : /* If it is profitable to use them, initialize or extend (depending on
4093 : GLOBAL_P) dependency data. */
4094 : void
4095 969267 : sched_deps_init (bool global_p)
4096 : {
4097 : /* Average number of insns in the basic block.
4098 : '+ 1' is used to make it nonzero. */
4099 969267 : int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;
4100 :
4101 969267 : init_deps_data_vector ();
4102 :
4103 : /* We use another caching mechanism for selective scheduling, so
4104 : we don't use this one. */
4105 969267 : if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
4106 : {
4107 : /* ?!? We could save some memory by computing a per-region luid mapping
4108 : which could reduce both the number of vectors in the cache and the
4109 : size of each vector. Instead we just avoid the cache entirely unless
4110 : the average number of instructions in a basic block is very high. See
4111 : the comment before the declaration of true_dependency_cache for
4112 : what we consider "very high". */
4113 234 : cache_size = 0;
4114 234 : extend_dependency_caches (sched_max_luid, true);
4115 : }
4116 :
4117 969267 : if (global_p)
4118 : {
4119 964480 : dl_pool = new object_allocator<_deps_list> ("deps_list");
4120 : /* Allocate lists for one block at a time. */
4121 964480 : dn_pool = new object_allocator<_dep_node> ("dep_node");
4122 : /* Allocate nodes for one block at a time. */
4123 : }
4124 969267 : }
4125 :
4126 :
/* Create or extend (depending on CREATE_P) dependency caches to
   size N.  The caches are per-luid bitmaps, one per dependence kind.  */
void
extend_dependency_caches (int n, bool create_p)
{
  /* Only extend when creating fresh caches or when they already exist.  */
  if (create_p || true_dependency_cache)
    {
      /* LUID is the new total number of cache entries.  */
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
					  luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
					    output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
					  luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head, control_dependency_cache,
					     luid);

      /* The speculation cache exists only when the pass speculates.  */
      if (current_sched_info->flags & DO_SPECULATION)
	spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
					    luid);

      /* Initialize only the newly added entries.  */
      for (i = cache_size; i < luid; i++)
	{
	  bitmap_initialize (&true_dependency_cache[i], 0);
	  bitmap_initialize (&output_dependency_cache[i], 0);
	  bitmap_initialize (&anti_dependency_cache[i], 0);
	  bitmap_initialize (&control_dependency_cache[i], 0);

	  if (current_sched_info->flags & DO_SPECULATION)
	    bitmap_initialize (&spec_dependency_cache[i], 0);
	}
      cache_size = luid;
    }
}
4162 :
4163 : /* Finalize dependency information for the whole function. */
4164 : void
4165 964480 : sched_deps_finish (void)
4166 : {
4167 964480 : gcc_assert (deps_pools_are_empty_p ());
4168 1928960 : delete dn_pool;
4169 1928960 : delete dl_pool;
4170 964480 : dn_pool = NULL;
4171 964480 : dl_pool = NULL;
4172 :
4173 964480 : h_d_i_d.release ();
4174 964480 : cache_size = 0;
4175 :
4176 964480 : if (true_dependency_cache)
4177 : {
4178 : int i;
4179 :
4180 234 : for (i = 0; i < cache_size; i++)
4181 : {
4182 : bitmap_clear (&true_dependency_cache[i]);
4183 : bitmap_clear (&output_dependency_cache[i]);
4184 : bitmap_clear (&anti_dependency_cache[i]);
4185 : bitmap_clear (&control_dependency_cache[i]);
4186 :
4187 : if (sched_deps_info->generate_spec_deps)
4188 : bitmap_clear (&spec_dependency_cache[i]);
4189 : }
4190 234 : free (true_dependency_cache);
4191 234 : true_dependency_cache = NULL;
4192 234 : free (output_dependency_cache);
4193 234 : output_dependency_cache = NULL;
4194 234 : free (anti_dependency_cache);
4195 234 : anti_dependency_cache = NULL;
4196 234 : free (control_dependency_cache);
4197 234 : control_dependency_cache = NULL;
4198 :
4199 234 : if (sched_deps_info->generate_spec_deps)
4200 : {
4201 0 : free (spec_dependency_cache);
4202 0 : spec_dependency_cache = NULL;
4203 : }
4204 :
4205 : }
4206 964480 : }
4207 :
/* Initialize some global variables needed by the dependency analysis
   code.  Allocates the reg_pending_* sets and installs the Haifa
   callbacks unless pure selective scheduling is active.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  /* The Haifa hooks are used both by the Haifa scheduler proper and by
     selective scheduling when it emulates Haifa.  */
  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}
4235 :
/* Free everything used by the dependency analysis code.  Counterpart of
   init_deps_global: releases the four reg_pending_* register sets.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}
4246 :
4247 : /* Estimate the weakness of dependence between MEM1 and MEM2. */
4248 : dw_t
4249 518 : estimate_dep_weak (rtx mem1, rtx mem2)
4250 : {
4251 518 : if (mem1 == mem2)
4252 : /* MEMs are the same - don't speculate. */
4253 : return MIN_DEP_WEAK;
4254 :
4255 518 : rtx r1 = XEXP (mem1, 0);
4256 518 : rtx r2 = XEXP (mem2, 0);
4257 :
4258 518 : if (sched_deps_info->use_cselib)
4259 : {
4260 : /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
4261 : dangling at this point, since we never preserve them. Instead we
4262 : canonicalize manually to get stable VALUEs out of hashing. */
4263 0 : if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
4264 0 : r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
4265 0 : if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
4266 0 : r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
4267 : }
4268 :
4269 518 : if (r1 == r2
4270 518 : || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
4271 : /* Again, MEMs are the same. */
4272 : return MIN_DEP_WEAK;
4273 497 : else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
4274 : /* Different addressing modes - reason to be more speculative,
4275 : than usual. */
4276 : return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
4277 : else
4278 : /* We can't say anything about the dependence. */
4279 453 : return UNCERTAIN_DEP_WEAK;
4280 : }
4281 :
4282 : /* Add or update backward dependence between INSN and ELEM with type DEP_TYPE.
4283 : This function can handle same INSN and ELEM (INSN == ELEM).
4284 : It is a convenience wrapper. */
4285 : static void
4286 607147754 : add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
4287 : {
4288 607147754 : ds_t ds;
4289 607147754 : bool internal;
4290 :
4291 607147754 : if (dep_type == REG_DEP_TRUE)
4292 : ds = DEP_TRUE;
4293 : else if (dep_type == REG_DEP_OUTPUT)
4294 : ds = DEP_OUTPUT;
4295 : else if (dep_type == REG_DEP_CONTROL)
4296 : ds = DEP_CONTROL;
4297 : else
4298 : {
4299 0 : gcc_assert (dep_type == REG_DEP_ANTI);
4300 : ds = DEP_ANTI;
4301 : }
4302 :
4303 : /* When add_dependence is called from inside sched-deps.cc, we expect
4304 : cur_insn to be non-null. */
4305 607147754 : internal = cur_insn != NULL;
4306 607147754 : if (internal)
4307 551631698 : gcc_assert (insn == cur_insn);
4308 : else
4309 55516056 : cur_insn = insn;
4310 :
4311 607147754 : note_dep (elem, ds);
4312 607147754 : if (!internal)
4313 55516056 : cur_insn = NULL;
4314 607147754 : }
4315 :
4316 : /* Return weakness of speculative type TYPE in the dep_status DS,
4317 : without checking to prevent ICEs on malformed input. */
4318 : static dw_t
4319 0 : get_dep_weak_1 (ds_t ds, ds_t type)
4320 : {
4321 0 : ds = ds & type;
4322 :
4323 0 : switch (type)
4324 : {
4325 : case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
4326 0 : case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
4327 0 : case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
4328 0 : case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
4329 0 : default: gcc_unreachable ();
4330 : }
4331 :
4332 0 : return (dw_t) ds;
4333 : }
4334 :
4335 : /* Return weakness of speculative type TYPE in the dep_status DS. */
4336 : dw_t
4337 0 : get_dep_weak (ds_t ds, ds_t type)
4338 : {
4339 0 : dw_t dw = get_dep_weak_1 (ds, type);
4340 :
4341 0 : gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4342 0 : return dw;
4343 : }
4344 :
4345 : /* Return the dep_status, which has the same parameters as DS, except for
4346 : speculative type TYPE, that will have weakness DW. */
4347 : ds_t
4348 0 : set_dep_weak (ds_t ds, ds_t type, dw_t dw)
4349 : {
4350 0 : gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
4351 :
4352 0 : ds &= ~type;
4353 0 : switch (type)
4354 : {
4355 0 : case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
4356 0 : case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
4357 0 : case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
4358 0 : case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
4359 0 : default: gcc_unreachable ();
4360 : }
4361 0 : return ds;
4362 : }
4363 :
/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  /* Start from the union of the plain dependence-type bits.  */
  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  /* Walk every speculative type from FIRST_SPEC_TYPE to LAST_SPEC_TYPE.  */
  t = FIRST_SPEC_TYPE;
  do
    {
      /* A type present on only one side is copied verbatim; when present
	 on both, the two weaknesses are combined.  */
      if ((ds1 & t) && !(ds2 & t))
	ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
	ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
	{
	  dw_t dw1 = get_dep_weak (ds1, t);
	  dw_t dw2 = get_dep_weak (ds2, t);
	  ds_t dw;

	  if (!max_p)
	    {
	      /* Multiply the probabilities (scaled by MAX_DEP_WEAK) and
		 clamp from below so the result stays a valid weakness.  */
	      dw = ((ds_t) dw1) * ((ds_t) dw2);
	      dw /= MAX_DEP_WEAK;
	      if (dw < MIN_DEP_WEAK)
		dw = MIN_DEP_WEAK;
	    }
	  else
	    {
	      if (dw1 >= dw2)
		dw = dw1;
	      else
		dw = dw2;
	    }

	  ds = set_dep_weak (ds, t, (dw_t) dw);
	}

      if (t == LAST_SPEC_TYPE)
	break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}
4416 :
4417 : /* Return the join of two dep_statuses DS1 and DS2.
4418 : This function assumes that both DS1 and DS2 contain speculative bits. */
4419 : ds_t
4420 0 : ds_merge (ds_t ds1, ds_t ds2)
4421 : {
4422 0 : return ds_merge_1 (ds1, ds2, false);
4423 : }
4424 :
/* Return the join of two dep_statuses DS and DS2.  MEM1/MEM2, when
   non-null, are the memory references whose estimated dependence
   weakness refines the BEGIN_DATA component of DS.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      /* A nonzero status without speculative bits marks a hard
	 dependence; joining with one makes the result non-speculative.  */
      if ((ds && !(ds & SPECULATIVE))
	  || (ds2 && !(ds2 & SPECULATIVE)))
	/* Then this dep can't be speculative.  */
	new_status &= ~SPECULATIVE;
      else
	{
	  /* Both are speculative.  Merging probabilities.  */
	  if (mem1)
	    {
	      dw_t dw;

	      dw = estimate_dep_weak (mem1, mem2);
	      ds = set_dep_weak (ds, BEGIN_DATA, dw);
	    }

	  if (!ds)
	    new_status = ds2;
	  else if (!ds2)
	    new_status = ds;
	  else
	    new_status = ds_merge (ds2, ds);
	}
    }

  return new_status;
}
4459 :
4460 : /* Return the join of DS1 and DS2. Use maximum instead of multiplying
4461 : probabilities. */
4462 : ds_t
4463 2175 : ds_max_merge (ds_t ds1, ds_t ds2)
4464 : {
4465 2175 : if (ds1 == 0 && ds2 == 0)
4466 : return 0;
4467 :
4468 0 : if (ds1 == 0 && ds2 != 0)
4469 : return ds2;
4470 :
4471 0 : if (ds1 != 0 && ds2 == 0)
4472 : return ds1;
4473 :
4474 0 : return ds_merge_1 (ds1, ds2, true);
4475 : }
4476 :
/* Return the probability of speculation success for the speculation
   status DS, as the product of the weaknesses of every speculative
   type present (scaled to the MIN_DEP_WEAK..MAX_DEP_WEAK range).  */
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  /* Multiply together the weaknesses of all speculative types in DS.  */
  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
	{
	  res *= (ds_t) get_dep_weak (ds, dt);
	  n++;
	}

      if (dt == LAST_SPEC_TYPE)
	break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  /* DS must contain at least one speculative type.  */
  gcc_assert (n);
  /* Rescale the product back: each extra factor was MAX_DEP_WEAK-scaled.  */
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}
4511 :
4512 : /* Return a dep status that contains all speculation types of DS. */
4513 : ds_t
4514 5155 : ds_get_speculation_types (ds_t ds)
4515 : {
4516 5155 : if (ds & BEGIN_DATA)
4517 0 : ds |= BEGIN_DATA;
4518 5155 : if (ds & BE_IN_DATA)
4519 0 : ds |= BE_IN_DATA;
4520 5155 : if (ds & BEGIN_CONTROL)
4521 0 : ds |= BEGIN_CONTROL;
4522 5155 : if (ds & BE_IN_CONTROL)
4523 0 : ds |= BE_IN_CONTROL;
4524 :
4525 5155 : return ds & SPECULATIVE;
4526 : }
4527 :
/* Return a dep status that contains maximal weakness for each speculation
   type present in DS (i.e. saturate every present weakness field).  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}
4544 :
/* Dump information about the dependence status S to stream F, as a
   brace-enclosed list of flag names and per-type weaknesses.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  /* Speculative components with their weaknesses.  */
  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  /* Plain dependence-type bits.  */
  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}
4574 :
4575 : DEBUG_FUNCTION void
4576 0 : debug_ds (ds_t s)
4577 : {
4578 0 : dump_ds (stderr, s);
4579 0 : fprintf (stderr, "\n");
4580 0 : }
4581 :
/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  /* A dependence may never be a self-loop.  */
  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  /* Without USE_DEPS_LIST the status field is unused and must stay zero.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
                && (ds & DEP_CONTROL)
                && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.
             get_dep_weak itself asserts on an out-of-range value; its
             result is deliberately discarded here.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}
4666 :
4667 : /* The following code discovers opportunities to switch a memory reference
4668 : and an increment by modifying the address. We ensure that this is done
4669 : only for dependencies that are only used to show a single register
4670 : dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
4671 : instruction involved is subject to only one dep that can cause a pattern
4672 : change.
4673 :
4674 : When we discover a suitable dependency, we fill in the dep_replacement
4675 : structure to show how to modify the memory reference. */
4676 :
/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  /* The insn performing the register increment.  */
  rtx_insn *inc_insn;
  /* The insn containing the memory reference to be modified.  */
  rtx_insn *mem_insn;

  /* Location of the MEM rtx inside MEM_INSN's pattern; this is where the
     replacement address will be installed.  */
  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from mem_reg0
     if the increment occurs before the memory address.  */
  rtx inc_input;
};
4700 :
4701 : /* Verify that the memory location described in MII can be replaced with
4702 : one using NEW_ADDR. Return the new memory reference or NULL_RTX. The
4703 : insn remains unchanged by this function. */
4704 :
4705 : static rtx
4706 1133122 : attempt_change (struct mem_inc_info *mii, rtx new_addr)
4707 : {
4708 1133122 : rtx mem = *mii->mem_loc;
4709 1133122 : rtx new_mem;
4710 :
4711 1133122 : if (!targetm.new_address_profitable_p (mem, mii->mem_insn, new_addr))
4712 : return NULL_RTX;
4713 :
4714 : /* Jump through a lot of hoops to keep the attributes up to date. We
4715 : do not want to call one of the change address variants that take
4716 : an offset even though we know the offset in many cases. These
4717 : assume you are changing where the address is pointing by the
4718 : offset. */
4719 1133122 : new_mem = replace_equiv_address_nv (mem, new_addr);
4720 1133122 : if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
4721 : {
4722 1 : if (sched_verbose >= 5)
4723 0 : fprintf (sched_dump, "validation failure\n");
4724 1 : return NULL_RTX;
4725 : }
4726 :
4727 : /* Put back the old one. */
4728 1133121 : validate_change (mii->mem_insn, mii->mem_loc, mem, 0);
4729 :
4730 1133121 : return new_mem;
4731 : }
4732 :
/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */
4738 :
static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Do not allow breaking data dependencies for insns that are marked
     with REG_STACK_CHECK.  */
  if (find_reg_note (insn, REG_STACK_CHECK, NULL))
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  /* Note that MII fields are filled in as we validate; on a false return
     some fields may hold data for a rejected candidate, which the caller
     must not rely on.  */
  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  /* The increment must write the register our memory address uses.  */
  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      /* Increment after the memory access: negate the constant so the
	 combined address below compensates for it, and require the
	 increment to be of the form "r = r + c".  */
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
	return false;
    }

  /* For stack-pointer increments, only allow the direction that frees
     stack space (NOTE(review): presumably so the transformed access never
     lands in deallocated stack — confirm against the original change).  */
  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
	return mii->inc_constant > 0;
      else
	return mii->inc_constant < 0;
    }
  return true;
}
4795 :
/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.
   If successful, this function will create additional dependencies between
   - mii->inc_insn's producers and mii->mem_insn as a consumer (if backwards)
   - mii->inc_insn's consumers and mii->mem_insn as a producer (if !backwards).
*/

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;
  /* Scan producers of MEM_INSN when looking backwards, consumers when
     looking forwards.  */
  sd_list_types_def mem_deps = backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW;
  int n_mem_deps = dep_list_size (mii->mem_insn, mem_deps);

  sd_it = sd_iterator_start (mii->mem_insn, mem_deps);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand;
      int n_inc_deps;

      /* Only consider dependencies that represent exactly one register
	 dependence (see the file comment above struct mem_inc_info).  */
      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
	goto next;

      if (backwards)
	{
	  inc_cand = pro;
	  n_inc_deps = dep_list_size (inc_cand, SD_LIST_BACK);
	}
      else
	{
	  inc_cand = con;
	  n_inc_deps = dep_list_size (inc_cand, SD_LIST_FORW);
	}

      /* In the FOR_EACH_DEP loop below we will create additional n_inc_deps
	 for mem_insn.  This by itself is not a problem, since each mem_insn
	 will have only a few inc_insns associated with it.  However, if
	 we consider that a single inc_insn may have a lot of mem_insns, AND,
	 on top of that, a few other inc_insns associated with it --
	 those _other inc_insns_ will get (n_mem_deps * number of MEM insns)
	 dependencies created for them.  This may cause an exponential
	 growth of memory usage and scheduling time.
	 See PR96388 for details.
	 We [heuristically] use n_inc_deps as a proxy for the number of MEM
	 insns, and drop opportunities for breaking modifiable_mem dependencies
	 when dependency lists grow beyond reasonable size.  */
      if (n_mem_deps * n_inc_deps
	  >= param_max_pending_list_length * param_max_pending_list_length)
	goto next;

      if (parse_add_or_inc (mii, inc_cand, backwards))
	{
	  struct dep_replacement *desc;
	  df_ref def;
	  rtx newaddr, newmem;

	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
		     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

	  /* Need to assure that none of the operands of the inc
	     instruction are assigned to by the mem insn.  */
	  FOR_EACH_INSN_DEF (def, mii->mem_insn)
	    if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
		|| reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
	      {
		if (sched_verbose >= 5)
		  fprintf (sched_dump,
			   "inc conflicts with store failure.\n");
		goto next;
	      }

	  /* Build the replacement address: inc_input [+ index] + combined
	     constant offset.  */
	  newaddr = mii->inc_input;
	  if (mii->mem_index != NULL_RTX)
	    newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
				    mii->mem_index);
	  newaddr = plus_constant (GET_MODE (newaddr), newaddr,
				   mii->mem_constant + mii->inc_constant);
	  newmem = attempt_change (mii, newaddr);
	  if (newmem == NULL_RTX)
	    goto next;
	  if (sched_verbose >= 5)
	    fprintf (sched_dump, "successful address replacement\n");
	  /* Record how to rewrite the MEM, and demote the dependence from
	     the hard-deps list to the speculative list of the consumer.  */
	  desc = XCNEW (struct dep_replacement);
	  DEP_REPLACE (dep) = desc;
	  desc->loc = mii->mem_loc;
	  desc->newval = newmem;
	  desc->orig = *desc->loc;
	  desc->insn = mii->mem_insn;
	  move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
			 INSN_SPEC_BACK_DEPS (con));

	  /* Make sure that n_inc_deps above is consistent with dependencies
	     we create.  */
	  gcc_assert (mii->inc_insn == inc_cand);

	  /* Note SD_IT is reused here; we return immediately afterwards, so
	     the outer iteration is never resumed.  */
	  if (backwards)
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
		add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
				  REG_DEP_TRUE);
	    }
	  else
	    {
	      FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
		add_dependence_1 (DEP_CON (dep), mii->mem_insn,
				  REG_DEP_ANTI);
	    }
	  return true;
	}
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}
4917 :
/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      /* Decompose the address into base register (reg0), optional index,
	 and constant offset, recording the pieces in MII.  */
      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
	{
	  mii->mem_constant = INTVAL (XEXP (reg0, 1));
	  reg0 = XEXP (reg0, 0);
	}
      if (GET_CODE (reg0) == PLUS)
	{
	  mii->mem_index = XEXP (reg0, 1);
	  reg0 = XEXP (reg0, 0);
	}
      if (REG_P (reg0))
	{
	  df_ref use;
	  int occurrences = 0;

	  /* Make sure this reg appears only once in this insn.  Can't use
	     count_occurrences since that only works for pseudos.  */
	  FOR_EACH_INSN_USE (use, mii->mem_insn)
	    if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
	      if (++occurrences > 1)
		{
		  if (sched_verbose >= 5)
		    fprintf (sched_dump, "mem count failure\n");
		  return false;
		}

	  /* Try to pair this MEM with an increment, first looking at
	     producers, then at consumers.  */
	  mii->mem_reg0 = reg0;
	  return find_inc (mii, true) || find_inc (mii, false);
	}
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
	 that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  Recurse into every rtx ('e') and rtx
     vector ('E') operand; stop at the first successful replacement.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem (mii, &XEXP (x, i)))
	    return true;
	}
      else if (fmt[i] == 'E')
	{
	  int j;
	  for (j = XVECLEN (x, i) - 1; j >= 0; j--)
	    if (find_mem (mii, &XVECEXP (x, i, j)))
	      return true;
	}
    }
  return false;
}
4998 :
4999 :
5000 : /* Examine the instructions between HEAD and TAIL and try to find
5001 : dependencies that can be broken by modifying one of the patterns. */
5002 :
5003 : void
5004 10313337 : find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
5005 : {
5006 10313337 : rtx_insn *insn, *next_tail = NEXT_INSN (tail);
5007 10313337 : int success_in_block = 0;
5008 :
5009 118934461 : for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
5010 : {
5011 108621124 : struct mem_inc_info mii;
5012 :
5013 108621124 : if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
5014 52159923 : continue;
5015 :
5016 56461201 : mii.mem_insn = insn;
5017 56461201 : if (find_mem (&mii, &PATTERN (insn)))
5018 1133121 : success_in_block++;
5019 : }
5020 10313337 : if (success_in_block && sched_verbose >= 5)
5021 0 : fprintf (sched_dump, "%d candidates for address modification found.\n",
5022 : success_in_block);
5023 10313337 : }
5024 :
5025 : #endif /* INSN_SCHEDULING */
|