Line data Source code
1 : /* SSA Jump Threading
2 : Copyright (C) 2005-2026 Free Software Foundation, Inc.
3 : Contributed by Jeff Law <law@redhat.com>
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify
8 : it under the terms of the GNU General Public License as published by
9 : the Free Software Foundation; either version 3, or (at your option)
10 : any later version.
11 :
12 : GCC is distributed in the hope that it will be useful,
13 : but WITHOUT ANY WARRANTY; without even the implied warranty of
14 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 : GNU General Public License for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "tree.h"
26 : #include "gimple.h"
27 : #include "predict.h"
28 : #include "ssa.h"
29 : #include "fold-const.h"
30 : #include "cfgloop.h"
31 : #include "gimple-iterator.h"
32 : #include "tree-cfg.h"
33 : #include "tree-ssa-threadupdate.h"
34 : #include "tree-ssa-scopedtables.h"
35 : #include "tree-ssa-threadedge.h"
36 : #include "gimple-fold.h"
37 : #include "cfganal.h"
38 : #include "alloc-pool.h"
39 : #include "vr-values.h"
40 : #include "gimple-range.h"
41 : #include "gimple-range-path.h"
42 :
/* To avoid code explosion due to jump threading, we limit the
   number of statements we are going to copy.  This variable
   holds the number of statements currently seen that we'll have
   to copy as part of the jump threading process.  */
static int stmt_count;

/* Array to record value-handles per SSA_NAME, indexed by
   SSA_NAME_VERSION.  Grown on demand by set_ssa_name_value.  */
vec<tree> ssa_name_values;
51 :
52 : /* Set the value for the SSA name NAME to VALUE. */
53 :
54 : void
55 78335370 : set_ssa_name_value (tree name, tree value)
56 : {
57 78335370 : if (SSA_NAME_VERSION (name) >= ssa_name_values.length ())
58 3668470 : ssa_name_values.safe_grow_cleared (SSA_NAME_VERSION (name) + 1, true);
59 78335370 : if (value && TREE_OVERFLOW_P (value))
60 15 : value = drop_tree_overflow (value);
61 78335370 : ssa_name_values[SSA_NAME_VERSION (name)] = value;
62 78335370 : }
63 :
/* Construct a jump threader that consults SIMPLIFIER for pass-specific
   statement simplification and records path state in STATE.  */

jump_threader::jump_threader (jt_simplifier *simplifier, jt_state *state)
{
  /* Initialize the per SSA_NAME value-handles array.  Exactly one
     jump_threader may be live at a time, since the array is global.  */
  gcc_assert (!ssa_name_values.exists ());
  ssa_name_values.create (num_ssa_names);

  /* Scratch GIMPLE_COND reused by simplify_control_stmt_condition_1 so
     we do not allocate a fresh statement per candidate condition.  */
  dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node,
				  integer_zero_node, NULL, NULL);

  m_registry = new fwd_jt_path_registry ();
  m_simplifier = simplifier;
  m_state = state;
}
77 :
/* Tear down the threader: release the global value-handles array and
   the resources acquired in the constructor.  */

jump_threader::~jump_threader (void)
{
  ssa_name_values.release ();
  /* dummy_cond was GC-allocated by gimple_build_cond; free it
     explicitly.  */
  ggc_free (dummy_cond);
  delete m_registry;
}
84 :
/* Forward to the path registry: drop any queued jump-thread paths
   that include edge E.  */

void
jump_threader::remove_jump_threads_including (edge_def *e)
{
  m_registry->remove_jump_threads_including (e);
}
90 :
/* Forward to the path registry: realize all recorded jump-thread
   paths in the CFG.  MAY_PEEL_LOOP_HEADERS is passed through
   unchanged.  Returns the registry's result (presumably whether any
   threading was performed -- see tree-ssa-threadupdate).  */

bool
jump_threader::thread_through_all_blocks (bool may_peel_loop_headers)
{
  return m_registry->thread_through_all_blocks (may_peel_loop_headers);
}
96 :
/* Return TRUE if basic block BB contains at least one PHI node.  */

static inline bool
has_phis_p (basic_block bb)
{
  return !gsi_end_p (gsi_start_phis (bb));
}
102 :
/* Return TRUE for a block with PHIs but no statements.
   Debug statements do not count (gsi_start_nondebug_bb skips them).  */

static bool
empty_block_with_phis_p (basic_block bb)
{
  return gsi_end_p (gsi_start_nondebug_bb (bb)) && has_phis_p (bb);
}
110 :
111 : /* Return TRUE if we may be able to thread an incoming edge into
112 : BB to an outgoing edge from BB. Return FALSE otherwise. */
113 :
114 : static bool
115 29518544 : potentially_threadable_block (basic_block bb)
116 : {
117 29518544 : gimple_stmt_iterator gsi;
118 :
119 : /* Special case. We can get blocks that are forwarders, but are
120 : not optimized away because they forward from outside a loop
121 : to the loop header. We want to thread through them as we can
122 : sometimes thread to the loop exit, which is obviously profitable.
123 : The interesting case here is when the block has PHIs. */
124 29518544 : if (empty_block_with_phis_p (bb))
125 : return true;
126 :
127 : /* If BB has a single successor or a single predecessor, then
128 : there is no threading opportunity. */
129 29192392 : if (single_succ_p (bb) || single_pred_p (bb))
130 : return false;
131 :
132 : /* If BB does not end with a conditional, switch or computed goto,
133 : then there is no threading opportunity. */
134 8697793 : gsi = gsi_last_bb (bb);
135 8697793 : if (gsi_end_p (gsi)
136 8691146 : || ! gsi_stmt (gsi)
137 8697793 : || (gimple_code (gsi_stmt (gsi)) != GIMPLE_COND
138 : && gimple_code (gsi_stmt (gsi)) != GIMPLE_GOTO
139 : && gimple_code (gsi_stmt (gsi)) != GIMPLE_SWITCH))
140 : return false;
141 :
142 : return true;
143 : }
144 :
/* Record temporary equivalences created by PHIs at the target of the
   edge E: each PHI result is made temporarily equivalent to the PHI
   argument associated with E.

   If a PHI which prevents threading is encountered, then return FALSE
   indicating we should not thread this edge, else return TRUE.  */

bool
jump_threader::record_temporary_equivalences_from_phis (edge e)
{
  gphi_iterator gsi;

  /* Each PHI creates a temporary equivalence, record them.
     These are context sensitive equivalences and will be removed
     later.  */
  for (gsi = gsi_start_phis (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gphi *phi = gsi.phi ();
      tree src = PHI_ARG_DEF_FROM_EDGE (phi, e);
      tree dst = gimple_phi_result (phi);

      /* If the desired argument is not the same as this PHI's result
	 and it is set by a PHI in E->dest, then we cannot thread
	 through E->dest.  */
      if (src != dst
	  && TREE_CODE (src) == SSA_NAME
	  && gimple_code (SSA_NAME_DEF_STMT (src)) == GIMPLE_PHI
	  && gimple_bb (SSA_NAME_DEF_STMT (src)) == e->dest)
	return false;

      /* We consider any non-virtual PHI as a statement since it
	 could result in a constant assignment or copy operation.  */
      if (!virtual_operand_p (dst))
	stmt_count++;

      m_state->register_equiv (dst, src, /*update_range=*/true);
    }
  return true;
}
183 :
184 : /* Valueize hook for gimple_fold_stmt_to_constant_1. */
185 :
186 : static tree
187 30430440 : threadedge_valueize (tree t)
188 : {
189 30430440 : if (TREE_CODE (t) == SSA_NAME)
190 : {
191 27652895 : tree tem = SSA_NAME_VALUE (t);
192 26302810 : if (tem)
193 7798324 : return tem;
194 : }
195 : return t;
196 : }
197 :
/* Try to simplify each statement in E->dest, ultimately leading to
   a simplification of the COND_EXPR at the end of E->dest.

   Record unwind information for temporary equivalences onto STACK.

   Uses M_SIMPLIFIER to further simplify statements using pass specific
   information.

   We might consider marking just those statements which ultimately
   feed the COND_EXPR.  It's not clear if the overhead of bookkeeping
   would be recovered by trying to simplify fewer statements.

   If we are able to simplify a statement into the form
   SSA_NAME = (SSA_NAME | gimple invariant), then we can record
   a context sensitive equivalence which may help us simplify
   later statements in E->dest.

   Returns the last statement walked, or NULL when the block must not
   be threaded through (volatile asm, unique internal call,
   __builtin_constant_p, or too many statements to copy).  */

gimple *
jump_threader::record_temporary_equivalences_from_stmts_at_dest (edge e)
{
  gimple *stmt = NULL;
  gimple_stmt_iterator gsi;
  int max_stmt_count;

  max_stmt_count = param_max_jump_thread_duplication_stmts;

  /* Walk through each statement in the block recording equivalences
     we discover.  Note any equivalences we discover are context
     sensitive (ie, are dependent on traversing E) and must be unwound
     when we're finished processing E.  */
  for (gsi = gsi_start_bb (e->dest); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);

      /* Ignore empty statements and labels.  */
      if (gimple_code (stmt) == GIMPLE_NOP
	  || gimple_code (stmt) == GIMPLE_LABEL
	  || is_gimple_debug (stmt))
	continue;

      /* If the statement has volatile operands, then we assume we
	 cannot thread through this block.  This is overly
	 conservative in some ways.  */
      if (gimple_code (stmt) == GIMPLE_ASM
	  && gimple_asm_volatile_p (as_a <gasm *> (stmt)))
	return NULL;

      /* If the statement is a unique builtin, we cannot thread
	 through here.  */
      if (gimple_code (stmt) == GIMPLE_CALL
	  && gimple_call_internal_p (stmt)
	  && gimple_call_internal_unique_p (stmt))
	return NULL;

      /* We cannot thread through __builtin_constant_p, because an
	 expression that is constant on two threading paths may become
	 non-constant (i.e.: phi) when they merge.  */
      if (gimple_call_builtin_p (stmt, BUILT_IN_CONSTANT_P))
	return NULL;

      /* If duplicating this block is going to cause too much code
	 expansion, then do not thread through this block.  */
      stmt_count++;
      if (stmt_count > max_stmt_count)
	{
	  /* If any of the stmts in the PATH's dests are going to be
	     killed due to threading, grow the max count
	     accordingly.  Only do this the first time the limit is
	     crossed (when max_stmt_count still equals the param).  */
	  if (max_stmt_count
	      == param_max_jump_thread_duplication_stmts)
	    {
	      max_stmt_count += estimate_threading_killed_stmts (e->dest);
	      if (dump_file)
		fprintf (dump_file, "threading bb %i up to %i stmts\n",
			 e->dest->index, max_stmt_count);
	    }
	  /* If we're still past the limit, we're done.  */
	  if (stmt_count > max_stmt_count)
	    return NULL;
	}

      m_state->record_ranges_from_stmt (stmt, true);

      /* If this is not a statement that sets an SSA_NAME to a new
	 value, then do not try to simplify this statement as it will
	 not simplify in any way that is helpful for jump threading.  */
      if ((gimple_code (stmt) != GIMPLE_ASSIGN
	   || TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	  && (gimple_code (stmt) != GIMPLE_CALL
	      || gimple_call_lhs (stmt) == NULL_TREE
	      || TREE_CODE (gimple_call_lhs (stmt)) != SSA_NAME))
	continue;

      /* The result of __builtin_object_size depends on all the arguments
	 of a phi node.  Temporarily using only one edge produces invalid
	 results.  For example

	 if (x < 6)
	   goto l;
	 else
	   goto l;

	 l:
	 r = PHI <&w[2].a[1](2), &a.a[6](3)>
	 __builtin_object_size (r, 0)

	 The result of __builtin_object_size is defined to be the maximum of
	 remaining bytes.  If we use only one edge on the phi, the result will
	 change to be the remaining bytes for the corresponding phi argument.

	 Similarly for __builtin_constant_p:

	 r = PHI <1(2), 2(3)>
	 __builtin_constant_p (r)

	 Both PHI arguments are constant, but x ? 1 : 2 is still not
	 constant.  */

      if (is_gimple_call (stmt))
	{
	  tree fndecl = gimple_call_fndecl (stmt);
	  if (fndecl
	      && fndecl_built_in_p (fndecl, BUILT_IN_NORMAL)
	      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_OBJECT_SIZE
		  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_CONSTANT_P))
	    continue;
	}

      m_state->register_equivs_stmt (stmt, e->src, m_simplifier);
    }
  return stmt;
}
330 :
/* Simplify the control statement at the end of the block E->dest.

   Uses M_SIMPLIFIER (via simplify_control_stmt_condition_1) to further
   simplify a condition using pass specific information.

   Return the simplified condition or NULL if simplification could
   not be performed.  When simplifying a GIMPLE_SWITCH, we may return
   the CASE_LABEL_EXPR that will be taken.  */

tree
jump_threader::simplify_control_stmt_condition (edge e, gimple *stmt)
{
  tree cond, cached_lhs;
  enum gimple_code code = gimple_code (stmt);

  /* For comparisons, we have to update both operands, then try
     to simplify the comparison.  */
  if (code == GIMPLE_COND)
    {
      tree op0, op1;
      enum tree_code cond_code;

      op0 = gimple_cond_lhs (stmt);
      op1 = gimple_cond_rhs (stmt);
      cond_code = gimple_cond_code (stmt);

      /* Get the current value of both operands.  The chase through
	 SSA_NAME_VALUE is bounded to two hops so a cyclic value chain
	 cannot loop us forever.  */
      if (TREE_CODE (op0) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op0) == SSA_NAME
		  && SSA_NAME_VALUE (op0))
		op0 = SSA_NAME_VALUE (op0);
	      else
		break;
	    }
	}

      if (TREE_CODE (op1) == SSA_NAME)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (op1) == SSA_NAME
		  && SSA_NAME_VALUE (op1))
		op1 = SSA_NAME_VALUE (op1);
	      else
		break;
	    }
	}

      const unsigned recursion_limit = 4;

      cached_lhs
	= simplify_control_stmt_condition_1 (e, stmt, op0, cond_code, op1,
					     recursion_limit);

      /* If we were testing an integer/pointer against a constant,
	 then we can trace the value of the SSA_NAME.  If a value is
	 found, then the condition will collapse to a constant.

	 Return the SSA_NAME we want to trace back rather than the full
	 expression and give the threader a chance to find its value.  */
      if (cached_lhs == NULL)
	{
	  /* Recover the original operands.  They may have been simplified
	     using context sensitive equivalences.  Those context sensitive
	     equivalences may not be valid on paths.  */
	  tree op0 = gimple_cond_lhs (stmt);
	  tree op1 = gimple_cond_rhs (stmt);

	  if ((INTEGRAL_TYPE_P (TREE_TYPE (op0))
	       || POINTER_TYPE_P (TREE_TYPE (op0)))
	      && TREE_CODE (op0) == SSA_NAME
	      && TREE_CODE (op1) == INTEGER_CST)
	    return op0;
	}

      return cached_lhs;
    }

  /* Not a GIMPLE_COND: extract the value being switched/jumped on.  */
  if (code == GIMPLE_SWITCH)
    cond = gimple_switch_index (as_a <gswitch *> (stmt));
  else if (code == GIMPLE_GOTO)
    cond = gimple_goto_dest (stmt);
  else
    gcc_unreachable ();

  /* We can have conditionals which just test the state of a variable
     rather than use a relational operator.  These are simpler to handle.  */
  if (TREE_CODE (cond) == SSA_NAME)
    {
      tree original_lhs = cond;
      cached_lhs = cond;

      /* Get the variable's current value from the equivalence chains.

	 It is possible to get loops in the SSA_NAME_VALUE chains
	 (consider threading the backedge of a loop where we have
	 a loop invariant SSA_NAME used in the condition), hence the
	 bounded two-hop walk.  */
      if (cached_lhs)
	{
	  for (int i = 0; i < 2; i++)
	    {
	      if (TREE_CODE (cached_lhs) == SSA_NAME
		  && SSA_NAME_VALUE (cached_lhs))
		cached_lhs = SSA_NAME_VALUE (cached_lhs);
	      else
		break;
	    }
	}

      /* If we haven't simplified to an invariant yet, then use the
	 pass specific callback to try and simplify it further.  */
      if (cached_lhs && ! is_gimple_min_invariant (cached_lhs))
	{
	  if (code == GIMPLE_SWITCH)
	    {
	      /* Replace the index operand of the GIMPLE_SWITCH with any LHS
		 we found before handing off to VRP.  If simplification is
		 possible, the simplified value will be a CASE_LABEL_EXPR of
		 the label that is proven to be taken.  */
	      gswitch *dummy_switch = as_a<gswitch *> (gimple_copy (stmt));
	      gimple_switch_set_index (dummy_switch, cached_lhs);
	      cached_lhs = m_simplifier->simplify (dummy_switch, stmt, e->src,
						   m_state);
	      ggc_free (dummy_switch);
	    }
	  else
	    cached_lhs = m_simplifier->simplify (stmt, stmt, e->src, m_state);
	}

      /* We couldn't find an invariant.  But, callers of this
	 function may be able to do something useful with the
	 unmodified destination.  */
      if (!cached_lhs)
	cached_lhs = original_lhs;
    }
  else
    cached_lhs = NULL;

  return cached_lhs;
}
474 :
/* Recursive helper for simplify_control_stmt_condition.

   Try to simplify the comparison OP0 COND_CODE OP1 arising from the
   control statement STMT reached via edge E.  LIMIT bounds the
   recursion depth; at zero we give up and return NULL_TREE.

   Returns the simplified condition (an invariant, or whatever the
   pass-specific simplifier produced), or NULL_TREE.  */

tree
jump_threader::simplify_control_stmt_condition_1
  (edge e,
   gimple *stmt,
   tree op0,
   enum tree_code cond_code,
   tree op1,
   unsigned limit)
{
  if (limit == 0)
    return NULL_TREE;

  /* We may need to canonicalize the comparison.  For
     example, op0 might be a constant while op1 is an
     SSA_NAME.  Failure to canonicalize will cause us to
     miss threading opportunities.  */
  if (tree_swap_operands_p (op0, op1))
    {
      cond_code = swap_tree_comparison (cond_code);
      std::swap (op0, op1);
    }

  /* Load the scratch condition (allocated once in the constructor)
     with the canonicalized comparison.  */
  gimple_cond_set_code (dummy_cond, cond_code);
  gimple_cond_set_lhs (dummy_cond, op0);
  gimple_cond_set_rhs (dummy_cond, op1);

  /* We absolutely do not care about any type conversions
     we only care about a zero/nonzero value.  */
  fold_defer_overflow_warnings ();

  tree res = fold_binary (cond_code, boolean_type_node, op0, op1);
  if (res)
    while (CONVERT_EXPR_P (res))
      res = TREE_OPERAND (res, 0);

  /* Only emit deferred overflow warnings when the fold actually
     produced an invariant we will act on.  */
  fold_undefer_overflow_warnings ((res && is_gimple_min_invariant (res)),
				  stmt, WARN_STRICT_OVERFLOW_CONDITIONAL);

  /* If we have not simplified the condition down to an invariant,
     then use the pass specific callback to simplify the condition.  */
  if (!res
      || !is_gimple_min_invariant (res))
    res = m_simplifier->simplify (dummy_cond, stmt, e->src, m_state);

  return res;
}
523 :
/* Copy debug stmts from DEST's chain of single predecessors up to
   SRC, so that we don't lose the bindings as PHI nodes are introduced
   when DEST gains new predecessors.

   The set of variables already bound at the top of DEST is collected
   first (in a small vector, upgraded to a hash set past a threshold)
   so that binds from the predecessors are only copied for variables
   not already re-bound in DEST.  */
void
propagate_threaded_block_debug_into (basic_block dest, basic_block src)
{
  if (!MAY_HAVE_DEBUG_BIND_STMTS)
    return;

  /* The walk below only follows a chain of single predecessors.  */
  if (!single_pred_p (dest))
    return;

  gcc_checking_assert (dest != src);

  gimple_stmt_iterator gsi = gsi_after_labels (dest);
  int i = 0;
  const int alloc_count = 16; // ?? Should this be a PARAM?

  /* Estimate the number of debug vars overridden in the beginning of
     DEST, to tell how many we're going to need to begin with.  */
  for (gimple_stmt_iterator si = gsi;
       i * 4 <= alloc_count * 3 && !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;
      if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      i++;
    }

  auto_vec<tree, alloc_count> fewvars;
  hash_set<tree> *vars = NULL;

  /* If we're already starting with 3/4 of alloc_count, go for a
     hash_set, otherwise start with an unordered stack-allocated
     VEC.  */
  if (i * 4 > alloc_count * 3)
    vars = new hash_set<tree>;

  /* Now go through the initial debug stmts in DEST again, this time
     actually inserting in VARS or FEWVARS.  Don't bother checking for
     duplicates in FEWVARS.  */
  for (gimple_stmt_iterator si = gsi; !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      if (!is_gimple_debug (stmt))
	break;

      tree var;

      if (gimple_debug_bind_p (stmt))
	var = gimple_debug_bind_get_var (stmt);
      else if (gimple_debug_source_bind_p (stmt))
	var = gimple_debug_source_bind_get_var (stmt);
      else if (gimple_debug_nonbind_marker_p (stmt))
	continue;
      else
	gcc_unreachable ();

      if (vars)
	vars->add (var);
      else
	fewvars.quick_push (var);
    }

  basic_block bb = dest;

  do
    {
      bb = single_pred (bb);
      /* Walk each predecessor backwards so the latest bind for a
	 variable is the one that gets copied.  */
      for (gimple_stmt_iterator si = gsi_last_bb (bb);
	   !gsi_end_p (si); gsi_prev (&si))
	{
	  gimple *stmt = gsi_stmt (si);
	  if (!is_gimple_debug (stmt))
	    continue;

	  tree var;

	  if (gimple_debug_bind_p (stmt))
	    var = gimple_debug_bind_get_var (stmt);
	  else if (gimple_debug_source_bind_p (stmt))
	    var = gimple_debug_source_bind_get_var (stmt);
	  else if (gimple_debug_nonbind_marker_p (stmt))
	    continue;
	  else
	    gcc_unreachable ();

	  /* Discard debug bind overlaps.  Unlike stmts from src,
	     copied into a new block that will precede BB, debug bind
	     stmts in bypassed BBs may actually be discarded if
	     they're overwritten by subsequent debug bind stmts.  We
	     want to copy binds for all modified variables, so that we
	     retain a bind to the shared def if there is one, or to a
	     newly introduced PHI node if there is one.  Our bind will
	     end up reset if the value is dead, but that implies the
	     variable couldn't have survived, so it's fine.  We are
	     not actually running the code that performed the binds at
	     this point, we're just adding binds so that they survive
	     the new confluence, so markers should not be copied.  */
	  if (vars && vars->add (var))
	    continue;
	  else if (!vars)
	    {
	      int i = fewvars.length ();
	      while (i--)
		if (fewvars[i] == var)
		  break;
	      if (i >= 0)
		continue;
	      else if (fewvars.length () < (unsigned) alloc_count)
		fewvars.quick_push (var);
	      else
		{
		  /* The small vector overflowed; upgrade to a hash
		     set and keep going.  */
		  vars = new hash_set<tree>;
		  for (i = 0; i < alloc_count; i++)
		    vars->add (fewvars[i]);
		  fewvars.release ();
		  vars->add (var);
		}
	    }

	  stmt = gimple_copy (stmt);
	  /* ??? Should we drop the location of the copy to denote
	     they're artificial bindings?  */
	  gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
	}
    }
  while (bb != src && single_pred_p (bb));

  if (vars)
    delete vars;
  else if (fewvars.exists ())
    fewvars.release ();
}
660 :
/* See if TAKEN_EDGE->dest is a threadable block with no side effects (ie, it
   need not be duplicated as part of the CFG/SSA updating process).

   If it is threadable, add it to PATH and VISITED and recurse, ultimately
   returning TRUE from the toplevel call.  Otherwise do nothing and
   return false.

   LIMIT caps the total number of blocks examined; it is decremented
   on every call (through the reference) and the walk stops once it
   reaches zero.  */

bool
jump_threader::thread_around_empty_blocks (vec<jump_thread_edge *> *path,
					   edge taken_edge,
					   bitmap visited, unsigned &limit)
{
  basic_block bb = taken_edge->dest;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  tree cond;

  if (limit == 0)
    return false;
  --limit;

  /* The key property of these blocks is that they need not be duplicated
     when threading.  Thus they cannot have visible side effects such
     as PHI nodes.  */
  if (has_phis_p (bb))
    return false;

  /* Skip over DEBUG statements at the start of the block.  */
  gsi = gsi_start_nondebug_bb (bb);

  /* If the block has no statements, but does have a single successor, then
     it's just a forwarding block and we can thread through it trivially.

     However, note that just threading through empty blocks with single
     successors is not inherently profitable.  For the jump thread to
     be profitable, we must avoid a runtime conditional.

     By taking the return value from the recursive call, we get the
     desired effect of returning TRUE when we found a profitable jump
     threading opportunity and FALSE otherwise.

     This is particularly important when this routine is called after
     processing a joiner block.  Returning TRUE too aggressively in
     that case results in pointless duplication of the joiner block.  */
  if (gsi_end_p (gsi))
    {
      if (single_succ_p (bb))
	{
	  taken_edge = single_succ_edge (bb);

	  /* Never follow back edges; that would thread into a loop.  */
	  if ((taken_edge->flags & EDGE_DFS_BACK) != 0)
	    return false;

	  if (!bitmap_bit_p (visited, taken_edge->dest->index))
	    {
	      m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
	      m_state->append_path (taken_edge->dest);
	      bitmap_set_bit (visited, taken_edge->dest->index);
	      return thread_around_empty_blocks (path, taken_edge, visited,
						 limit);
	    }
	}

      /* We have a block with no statements, but multiple successors?  */
      return false;
    }

  /* The only real statements this block can have are a control
     flow altering statement.  Anything else stops the thread.  */
  stmt = gsi_stmt (gsi);
  if (gimple_code (stmt) != GIMPLE_COND
      && gimple_code (stmt) != GIMPLE_GOTO
      && gimple_code (stmt) != GIMPLE_SWITCH)
    return false;

  /* Extract and simplify the condition.  */
  cond = simplify_control_stmt_condition (taken_edge, stmt);

  /* If the condition can be statically computed and we have not already
     visited the destination edge, then add the taken edge to our thread
     path.  */
  if (cond != NULL_TREE
      && (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR))
    {
      /* A CASE_LABEL_EXPR comes from simplifying a GIMPLE_SWITCH;
	 translate it to the edge reaching that case's label.  */
      if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	taken_edge = find_edge (bb, label_to_block (cfun, CASE_LABEL (cond)));
      else
	taken_edge = find_taken_edge (bb, cond);

      if (!taken_edge
	  || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	return false;

      if (bitmap_bit_p (visited, taken_edge->dest->index))
	return false;
      bitmap_set_bit (visited, taken_edge->dest->index);

      m_registry->push_edge (path, taken_edge, EDGE_NO_COPY_SRC_BLOCK);
      m_state->append_path (taken_edge->dest);

      thread_around_empty_blocks (path, taken_edge, visited, limit);
      return true;
    }

  return false;
}
768 :
/* We are exiting E->src, see if E->dest ends with a conditional
   jump which has a known value when reached via E.

   E->dest can have arbitrary side effects which, if threading is
   successful, will be maintained.

   Special care is necessary if E is a back edge in the CFG as we
   may have already recorded equivalences for E->dest into our
   various tables, including the result of the conditional at
   the end of E->dest.  Threading opportunities are severely
   limited in that case to avoid short-circuiting the loop
   incorrectly.

   LIMIT caps the number of blocks examined; it is decremented on
   entry and the search gives up once it reaches zero.

   Positive return value is success.  Zero return value is failure, but
   the block can still be duplicated as a joiner in a jump thread path,
   negative indicates the block should not be duplicated and thus is not
   suitable for a joiner in a jump threading path.  */

int
jump_threader::thread_through_normal_block (vec<jump_thread_edge *> *path,
					    edge e, bitmap visited,
					    unsigned &limit)
{
  if (limit == 0)
    return 0;
  limit--;

  m_state->register_equivs_edge (e);

  /* PHIs create temporary equivalences.
     Note that if we found a PHI that made the block non-threadable, then
     we need to bubble that up to our caller in the same manner we do
     when we prematurely stop processing statements below.  */
  if (!record_temporary_equivalences_from_phis (e))
    return -1;

  /* Now walk each statement recording any context sensitive
     temporary equivalences we can detect.  */
  gimple *stmt = record_temporary_equivalences_from_stmts_at_dest (e);

  /* There's two reasons STMT might be null, and distinguishing
     between them is important.

     First the block may not have had any statements.  For example, it
     might have some PHIs and unconditionally transfer control elsewhere.
     Such blocks are suitable for jump threading, particularly as a
     joiner block.

     The second reason would be if we did not process all the statements
     in the block (because there were too many to make duplicating the
     block profitable.   If we did not look at all the statements, then
     we may not have invalidated everything needing invalidation.  Thus
     we must signal to our caller that this block is not suitable for
     use as a joiner in a threading path.  */
  if (!stmt)
    {
      /* First case.  The statement simply doesn't have any instructions, but
	 does have PHIs.  */
      if (empty_block_with_phis_p (e->dest))
	return 0;

      /* Second case.  */
      return -1;
    }

  /* If we stopped at a COND_EXPR or SWITCH_EXPR, see if we know which arm
     will be taken.  */
  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_GOTO
      || gimple_code (stmt) == GIMPLE_SWITCH)
    {
      tree cond;

      /* Extract and simplify the condition.  */
      cond = simplify_control_stmt_condition (e, stmt);

      if (!cond)
	return 0;

      if (is_gimple_min_invariant (cond)
	  || TREE_CODE (cond) == CASE_LABEL_EXPR)
	{
	  edge taken_edge;
	  if (TREE_CODE (cond) == CASE_LABEL_EXPR)
	    taken_edge = find_edge (e->dest,
				    label_to_block (cfun, CASE_LABEL (cond)));
	  else
	    taken_edge = find_taken_edge (e->dest, cond);

	  basic_block dest = (taken_edge ? taken_edge->dest : NULL);

	  /* DEST could be NULL for a computed jump to an absolute
	     address.  */
	  if (dest == NULL
	      || dest == e->dest
	      || (taken_edge->flags & EDGE_DFS_BACK) != 0
	      || bitmap_bit_p (visited, dest->index))
	    return 0;

	  /* Only push the EDGE_START_JUMP_THREAD marker if this is
	     first edge on the path.  */
	  if (path->length () == 0)
	    m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);

	  m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_BLOCK);
	  m_state->append_path (taken_edge->dest);

	  /* See if we can thread through DEST as well, this helps capture
	     secondary effects of threading without having to re-run DOM or
	     VRP.

	     We don't want to thread back to a block we have already
	     visited.  This may be overly conservative.  */
	  bitmap_set_bit (visited, dest->index);
	  bitmap_set_bit (visited, e->dest->index);
	  thread_around_empty_blocks (path, taken_edge, visited, limit);
	  return 1;
	}
    }
  return 0;
}
890 :
/* There are basic blocks that look like:
892 : <P0>
893 : p0 = a CMP b ; or p0 = (INT) (a CMP b)
894 : goto <X>;
895 :
896 : <P1>
897 : p1 = c CMP d
898 : goto <X>;
899 :
900 : <X>
901 : # phi = PHI <p0 (P0), p1 (P1)>
902 : if (phi != 0) goto <Y>; else goto <Z>;
903 :
   Then, edge (P0,X) or (P1,X) could be marked as EDGE_START_JUMP_THREAD,
   and edges (X,Y) and (X,Z) as EDGE_COPY_SRC_JOINER_BLOCK.

   Return true if E is (P0,X) or (P1,X).  */
908 :
bool
edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (edge e)
{
  /* See if E->dest contains exactly one statement and that statement
     is a gcond.  */
  gcond *gs;
  if (!(gs = safe_dyn_cast<gcond *> (last_and_only_stmt (e->dest))))
    return false;

  /* See if gcond's condition is "(phi !=/== 0/1)" in the basic block.  */
  tree cond = gimple_cond_lhs (gs);
  enum tree_code code = gimple_cond_code (gs);
  tree rhs = gimple_cond_rhs (gs);
  if (TREE_CODE (cond) != SSA_NAME
      || (code != NE_EXPR && code != EQ_EXPR)
      || (!integer_onep (rhs) && !integer_zerop (rhs)))
    return false;
  /* The tested SSA name must be defined by a PHI in E->dest itself,
     so that the value arriving over E selects one particular arg.  */
  gphi *phi = dyn_cast <gphi *> (SSA_NAME_DEF_STMT (cond));
  if (phi == NULL || gimple_bb (phi) != e->dest)
    return false;

  /* Check if phi's incoming value from E is a CMP.  The single-use
     check ensures the comparison feeds nothing but this PHI arg.  */
  gassign *def;
  tree value = PHI_ARG_DEF_FROM_EDGE (phi, e);
  if (TREE_CODE (value) != SSA_NAME
      || !has_single_use (value)
      || !(def = dyn_cast <gassign *> (SSA_NAME_DEF_STMT (value))))
    return false;

  /* Or if it is (INT) (a CMP b): look through one conversion to find
     the underlying comparison.  */
  if (CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def)))
    {
      value = gimple_assign_rhs1 (def);
      if (TREE_CODE (value) != SSA_NAME
	  || !has_single_use (value)
	  || !(def = dyn_cast<gassign *> (SSA_NAME_DEF_STMT (value))))
	return false;
    }

  /* Finally, the feeding statement must really be a comparison.  */
  if (TREE_CODE_CLASS (gimple_assign_rhs_code (def)) != tcc_comparison)
    return false;

  return true;
}
952 :
953 : /* We are exiting E->src, see if E->dest ends with a conditional jump
954 : which has a known value when reached via E. If so, thread the
955 : edge. */
956 :
void
jump_threader::thread_across_edge (edge e)
{
  auto_bitmap visited;

  m_state->push (e);

  /* Reset the shared statement counter for this threading attempt.  */
  stmt_count = 0;

  vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
  /* Seed VISITED with the blocks on either side of E so we never
     thread back into them.  */
  bitmap_set_bit (visited, e->src->index);
  bitmap_set_bit (visited, e->dest->index);

  /* Limit search space.  */
  unsigned limit = param_max_jump_thread_paths;

  /* THREADED is 1 if a path was found and registered below, 0 if
     nothing was found, and -1 if E->dest was too big to copy.  */
  int threaded = 0;
  if ((e->flags & EDGE_DFS_BACK) == 0)
    threaded = thread_through_normal_block (path, e, visited, limit);

  if (threaded > 0)
    {
      propagate_threaded_block_debug_into (path->last ()->e->dest,
					   e->dest);
      m_registry->register_jump_thread (path);
      m_state->pop ();
      return;
    }

  gcc_checking_assert (path->length () == 0);
  path->release ();

  if (threaded < 0)
    {
      /* The target block was deemed too big to duplicate.  Just quit
	 now rather than trying to use the block as a joiner in a jump
	 threading path.

	 This prevents unnecessary code growth, but more importantly if we
	 do not look at all the statements in the block, then we may have
	 missed some invalidations if we had traversed a backedge!  */
      m_state->pop ();
      return;
    }

  /* We were unable to determine what out edge from E->dest is taken.  However,
     we might still be able to thread through successors of E->dest.  This
     often occurs when E->dest is a joiner block which then fans back out
     based on redundant tests.

     If so, we'll copy E->dest and redirect the appropriate predecessor to
     the copy.  Within the copy of E->dest, we'll thread one or more edges
     to points deeper in the CFG.

     This is a stopgap until we have a more structured approach to path
     isolation.  */
  {
    edge taken_edge;
    edge_iterator ei;
    bool found;

    /* If E->dest has abnormal outgoing edges, then there's no guarantee
       we can safely redirect any of the edges.  Just punt those cases.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      if (taken_edge->flags & EDGE_COMPLEX)
	{
	  m_state->pop ();
	  return;
	}

    /* Look at each successor of E->dest to see if we can thread through it.  */
    FOR_EACH_EDGE (taken_edge, ei, e->dest->succs)
      {
	if ((e->flags & EDGE_DFS_BACK) != 0
	    || (taken_edge->flags & EDGE_DFS_BACK) != 0)
	  continue;

	m_state->push (taken_edge);

	/* Avoid threading to any block we have already visited.  */
	bitmap_clear (visited);
	bitmap_set_bit (visited, e->src->index);
	bitmap_set_bit (visited, e->dest->index);
	bitmap_set_bit (visited, taken_edge->dest->index);

	/* Each candidate successor gets its own path starting with E
	   (the joiner entry) followed by TAKEN_EDGE.  */
	vec<jump_thread_edge *> *path = m_registry->allocate_thread_path ();
	m_registry->push_edge (path, e, EDGE_START_JUMP_THREAD);
	m_registry->push_edge (path, taken_edge, EDGE_COPY_SRC_JOINER_BLOCK);

	found = thread_around_empty_blocks (path, taken_edge, visited, limit);

	if (!found)
	  found = thread_through_normal_block (path,
					       path->last ()->e, visited,
					       limit) > 0;

	/* If we were able to thread through a successor of E->dest, then
	   record the jump threading opportunity.  */
	if (found
	    || edge_forwards_cmp_to_conditional_jump_through_empty_bb_p (e))
	  {
	    if (taken_edge->dest != path->last ()->e->dest)
	      propagate_threaded_block_debug_into (path->last ()->e->dest,
						   taken_edge->dest);
	    m_registry->register_jump_thread (path);
	  }
	else
	  path->release ();

	m_state->pop ();
      }
  }

  m_state->pop ();
}
1072 :
1073 : /* Return TRUE if BB has a single successor to a block with multiple
1074 : incoming and outgoing edges. */
1075 :
1076 : bool
1077 23460143 : single_succ_to_potentially_threadable_block (basic_block bb)
1078 : {
1079 23460143 : int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);
1080 23460143 : return (single_succ_p (bb)
1081 11595700 : && (single_succ_edge (bb)->flags & flags) == 0
1082 34562978 : && potentially_threadable_block (single_succ (bb)));
1083 : }
1084 :
1085 : /* Examine the outgoing edges from BB and conditionally
1086 : try to thread them. */
1087 :
void
jump_threader::thread_outgoing_edges (basic_block bb)
{
  int flags = (EDGE_IGNORE | EDGE_COMPLEX | EDGE_ABNORMAL);

  /* Jump threading may be disabled on the command line.  */
  if (!flag_thread_jumps)
    return;

  /* If we have an outgoing edge to a block with multiple incoming and
     outgoing edges, then we may be able to thread the edge, i.e., we
     may be able to statically determine which of the outgoing edges
     will be traversed when the incoming edge from BB is traversed.  */
  if (single_succ_to_potentially_threadable_block (bb))
    thread_across_edge (single_succ_edge (bb));
  else if (safe_is_a <gcond *> (*gsi_last_bb (bb))
	   && EDGE_COUNT (bb->succs) == 2
	   && (EDGE_SUCC (bb, 0)->flags & flags) == 0
	   && (EDGE_SUCC (bb, 1)->flags & flags) == 0)
    {
      edge true_edge, false_edge;

      extract_true_false_edges_from_block (bb, &true_edge, &false_edge);

      /* Only try to thread the edge if it reaches a target block with
	 more than one predecessor and more than one successor.  */
      if (potentially_threadable_block (true_edge->dest))
	thread_across_edge (true_edge);

      /* Similarly for the ELSE arm.  */
      if (potentially_threadable_block (false_edge->dest))
	thread_across_edge (false_edge);
    }
}
1121 :
// Marker to keep track of the start of the current path.  It is
// interleaved with real blocks in M_BLOCKS; the value (basic_block) -1
// can never be a valid basic_block pointer, so it is unambiguous.
const basic_block jt_state::BB_MARKER = (basic_block) -1;
1124 :
1125 : // Record that E is being crossed.
1126 :
1127 : void
1128 16849372 : jt_state::push (edge e)
1129 : {
1130 16849372 : m_blocks.safe_push (BB_MARKER);
1131 16849372 : if (m_blocks.length () == 1)
1132 7083926 : m_blocks.safe_push (e->src);
1133 16849372 : m_blocks.safe_push (e->dest);
1134 16849372 : }
1135 :
1136 : // Pop to the last pushed state.
1137 :
1138 : void
1139 16849372 : jt_state::pop ()
1140 : {
1141 16849372 : if (!m_blocks.is_empty ())
1142 : {
1143 42490796 : while (m_blocks.last () != BB_MARKER)
1144 25641424 : m_blocks.pop ();
1145 : // Pop marker.
1146 16849372 : m_blocks.pop ();
1147 : }
1148 16849372 : }
1149 :
1150 : // Add BB to the list of blocks seen.
1151 :
1152 : void
1153 1708126 : jt_state::append_path (basic_block bb)
1154 : {
1155 1708126 : gcc_checking_assert (!m_blocks.is_empty ());
1156 1708126 : m_blocks.safe_push (bb);
1157 1708126 : }
1158 :
1159 : void
1160 0 : jt_state::dump (FILE *out)
1161 : {
1162 0 : if (!m_blocks.is_empty ())
1163 : {
1164 0 : auto_vec<basic_block> path;
1165 0 : get_path (path);
1166 0 : dump_ranger (out, path);
1167 0 : }
1168 0 : }
1169 :
void
jt_state::debug ()
{
  /* Temporarily direct dump output to stderr with full details,
     restored automatically when SAVE goes out of scope.  */
  push_dump_file save (stderr, TDF_DETAILS);
  dump (stderr);
}
1176 :
1177 : // Convert the current path in jt_state into a path suitable for the
1178 : // path solver. Return the resulting path in PATH.
1179 :
1180 : void
1181 8681083 : jt_state::get_path (vec<basic_block> &path)
1182 : {
1183 8681083 : path.truncate (0);
1184 :
1185 51401732 : for (int i = (int) m_blocks.length () - 1; i >= 0; --i)
1186 : {
1187 34039566 : basic_block bb = m_blocks[i];
1188 :
1189 34039566 : if (bb != BB_MARKER)
1190 21505860 : path.safe_push (bb);
1191 : }
1192 8681083 : }
1193 :
1194 : // Record an equivalence from DST to SRC. If UPDATE_RANGE is TRUE,
1195 : // update the value range associated with DST.
1196 :
void
jt_state::register_equiv (tree dest ATTRIBUTE_UNUSED,
			  tree src ATTRIBUTE_UNUSED,
			  bool update_range ATTRIBUTE_UNUSED)
{
  /* Intentionally empty: no equivalences are recorded by default.  */
}
1203 :
1204 : // Record any ranges calculated in STMT. If TEMPORARY is TRUE, then
1205 : // this is a temporary equivalence and should be recorded into the
1206 : // unwind table, instead of the global table.
1207 :
void
jt_state::record_ranges_from_stmt (gimple *,
				   bool temporary ATTRIBUTE_UNUSED)
{
  /* Intentionally empty: no ranges are recorded by default.  */
}
1213 :
1214 : // Record any equivalences created by traversing E.
1215 :
void
jt_state::register_equivs_edge (edge)
{
  /* Intentionally empty: no edge equivalences are recorded by default.  */
}
1220 :
// Try to simplify STMT (an assignment in block BB) with SIMPLIFIER,
// and record a context sensitive equivalence for its LHS when the
// statement reduces to an SSA name or a constant.

void
jt_state::register_equivs_stmt (gimple *stmt, basic_block bb,
				jt_simplifier *simplifier)
{
  /* At this point we have a statement which assigns an RHS to an
     SSA_VAR on the LHS.  We want to try and simplify this statement
     to expose more context sensitive equivalences which in turn may
     allow us to simplify the condition at the end of the loop.

     Handle simple copy operations.  */
  tree cached_lhs = NULL;
  if (gimple_assign_single_p (stmt)
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    cached_lhs = gimple_assign_rhs1 (stmt);
  else
    {
      /* A statement that is not a trivial copy.
	 Try to fold the new expression.  Inserting the
	 expression into the hash table is unlikely to help.  */
      /* ??? The DOM callback below can be changed to setting
	 the mprts_hook around the call to thread_across_edge,
	 avoiding the use substitution.  */
      cached_lhs = gimple_fold_stmt_to_constant_1 (stmt,
						   threadedge_valueize);
      /* Folding alone failed (or produced something that is neither an
	 SSA name nor a constant); try again after copy propagation.  */
      if (NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES) != 0
	  && (!cached_lhs
	      || (TREE_CODE (cached_lhs) != SSA_NAME
		  && !is_gimple_min_invariant (cached_lhs))))
	{
	  /* We're going to temporarily copy propagate the operands
	     and see if that allows us to simplify this statement.  */
	  tree *copy;
	  ssa_op_iter iter;
	  use_operand_p use_p;
	  unsigned int num, i = 0;

	  num = NUM_SSA_OPERANDS (stmt, SSA_OP_ALL_USES);
	  copy = XALLOCAVEC (tree, num);

	  /* Make a copy of the uses & vuses into USES_COPY, then cprop into
	     the operands.  */
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
	    {
	      tree tmp = NULL;
	      tree use = USE_FROM_PTR (use_p);

	      copy[i++] = use;
	      if (TREE_CODE (use) == SSA_NAME)
		tmp = SSA_NAME_VALUE (use);
	      if (tmp)
		SET_USE (use_p, tmp);
	    }

	  /* Do not pass state to avoid calling the ranger with the
	     temporarily altered IL.  */
	  cached_lhs = simplifier->simplify (stmt, stmt, bb, /*state=*/NULL);

	  /* Restore the statement's original uses/defs.  */
	  i = 0;
	  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
	    SET_USE (use_p, copy[i++]);
	}
    }

  /* Record the context sensitive equivalence if we were able
     to simplify this statement.  */
  if (cached_lhs
      && (TREE_CODE (cached_lhs) == SSA_NAME
	  || is_gimple_min_invariant (cached_lhs)))
    register_equiv (gimple_get_lhs (stmt), cached_lhs,
		    /*update_range=*/false);
}
1293 :
1294 : // Hybrid threader implementation.
1295 :
1296 2083233 : hybrid_jt_simplifier::hybrid_jt_simplifier (gimple_ranger *r,
1297 2083233 : path_range_query *q)
1298 : {
1299 2083233 : m_ranger = r;
1300 2083233 : m_query = q;
1301 2083233 : }
1302 :
// Simplify STMT by querying the path oracle over the current path.
// Returns a constant, SSA name, or case label if the statement's
// outcome is known along the path, otherwise NULL.

tree
hybrid_jt_simplifier::simplify (gimple *stmt, gimple *, basic_block,
				jt_state *state)
{
  auto_bitmap dependencies;
  auto_vec<basic_block> path;

  /* Prime the path solver with the current candidate path and the
     SSA names whose ranges matter on exit, before any range query.  */
  state->get_path (path);
  compute_exit_dependencies (dependencies, path, stmt);
  m_query->reset_path (path, dependencies);

  if (gimple_code (stmt) == GIMPLE_COND
      || gimple_code (stmt) == GIMPLE_ASSIGN)
    {
      value_range r (gimple_range_type (stmt));
      tree ret;
      /* Only a singleton range simplifies the statement to one value.  */
      if (m_query->range_of_stmt (r, stmt) && r.singleton_p (&ret))
	return ret;
    }
  else if (gimple_code (stmt) == GIMPLE_SWITCH)
    {
      int_range_max r;
      gswitch *switch_stmt = dyn_cast <gswitch *> (stmt);
      tree index = gimple_switch_index (switch_stmt);
      /* A known index range may select a unique case label.  */
      if (m_query->range_of_expr (r, index, stmt))
	return find_case_label_range (switch_stmt, &r);
    }
  return NULL;
}
1332 :
1333 : // Calculate the set of exit dependencies for a path and statement to
1334 : // be simplified. This is different than the
1335 : // compute_exit_dependencies in the path solver because the forward
1336 : // threader asks questions about statements not necessarily in the
1337 : // path. Using the default compute_exit_dependencies in the path
1338 : // solver gets noticeably less threads.
1339 :
1340 : void
1341 8681083 : hybrid_jt_simplifier::compute_exit_dependencies (bitmap dependencies,
1342 : const vec<basic_block> &path,
1343 : gimple *stmt)
1344 : {
1345 : // Start with the imports to the final conditional.
1346 8681083 : bitmap_copy (dependencies, m_ranger->gori_ssa ()->imports (path[0]));
1347 :
1348 : // Add any other interesting operands we may have missed.
1349 8681083 : if (gimple_bb (stmt) != path[0])
1350 : {
1351 43259620 : for (unsigned i = 0; i < gimple_num_ops (stmt); ++i)
1352 : {
1353 34607696 : tree op = gimple_op (stmt, i);
1354 34607696 : if (op
1355 17303848 : && TREE_CODE (op) == SSA_NAME
1356 45429474 : && value_range::supports_type_p (TREE_TYPE (op)))
1357 10816948 : bitmap_set_bit (dependencies, SSA_NAME_VERSION (op));
1358 : }
1359 : }
1360 8681083 : }
|