Line data Source code
1 : /* RTL simplification functions for GNU compiler.
2 : Copyright (C) 1987-2026 Free Software Foundation, Inc.
3 :
4 : This file is part of GCC.
5 :
6 : GCC is free software; you can redistribute it and/or modify it under
7 : the terms of the GNU General Public License as published by the Free
8 : Software Foundation; either version 3, or (at your option) any later
9 : version.
10 :
11 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
12 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
13 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 : for more details.
15 :
16 : You should have received a copy of the GNU General Public License
17 : along with GCC; see the file COPYING3. If not see
18 : <http://www.gnu.org/licenses/>. */
19 :
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "backend.h"
25 : #include "target.h"
26 : #include "rtl.h"
27 : #include "tree.h"
28 : #include "predict.h"
29 : #include "memmodel.h"
30 : #include "optabs.h"
31 : #include "emit-rtl.h"
32 : #include "recog.h"
33 : #include "diagnostic-core.h"
34 : #include "varasm.h"
35 : #include "flags.h"
36 : #include "selftest.h"
37 : #include "selftest-rtl.h"
38 : #include "rtx-vector-builder.h"
39 : #include "rtlanal.h"
40 :
41 : /* Simplification and canonicalization of RTL. */
42 :
43 : /* Much code operates on (low, high) pairs; the low value is an
44 : unsigned wide int, the high value a signed wide int. We
45 : occasionally need to sign extend from low to high as if low were a
46 : signed wide int. */
47 : #define HWI_SIGN_EXTEND(low) \
48 : ((((HOST_WIDE_INT) low) < 0) ? HOST_WIDE_INT_M1 : HOST_WIDE_INT_0)
49 :
50 : static bool plus_minus_operand_p (const_rtx);
51 :
52 : /* Negate I, which satisfies poly_int_rtx_p. MODE is the mode of I. */
53 :
54 : static rtx
55 8985451 : neg_poly_int_rtx (machine_mode mode, const_rtx i)
56 : {
57 8985451 : return immed_wide_int_const (-wi::to_poly_wide (i, mode), mode);
58 : }
59 :
60 : /* Test whether expression, X, is an immediate constant that represents
61 : the most significant bit of machine mode MODE. */
62 :
63 : bool
64 6261487 : mode_signbit_p (machine_mode mode, const_rtx x)
65 : {
66 6261487 : unsigned HOST_WIDE_INT val;
67 6261487 : unsigned int width;
68 6261487 : scalar_int_mode int_mode;
69 :
70 6261487 : if (!is_int_mode (mode, &int_mode))
71 : return false;
72 :
73 6261479 : width = GET_MODE_PRECISION (int_mode);
74 6261479 : if (width == 0)
75 : return false;
76 :
77 6261479 : if (width <= HOST_BITS_PER_WIDE_INT
78 6259949 : && CONST_INT_P (x))
79 6110913 : val = INTVAL (x);
80 : #if TARGET_SUPPORTS_WIDE_INT
81 150566 : else if (CONST_WIDE_INT_P (x))
82 : {
83 474 : unsigned int i;
84 474 : unsigned int elts = CONST_WIDE_INT_NUNITS (x);
85 474 : if (elts != (width + HOST_BITS_PER_WIDE_INT - 1) / HOST_BITS_PER_WIDE_INT)
86 : return false;
87 888 : for (i = 0; i < elts - 1; i++)
88 474 : if (CONST_WIDE_INT_ELT (x, i) != 0)
89 : return false;
90 414 : val = CONST_WIDE_INT_ELT (x, elts - 1);
91 414 : width %= HOST_BITS_PER_WIDE_INT;
92 414 : if (width == 0)
93 : width = HOST_BITS_PER_WIDE_INT;
94 : }
95 : #else
96 : else if (width <= HOST_BITS_PER_DOUBLE_INT
97 : && CONST_DOUBLE_AS_INT_P (x)
98 : && CONST_DOUBLE_LOW (x) == 0)
99 : {
100 : val = CONST_DOUBLE_HIGH (x);
101 : width -= HOST_BITS_PER_WIDE_INT;
102 : }
103 : #endif
104 : else
105 : /* X is not an integer constant. */
106 : return false;
107 :
108 6110913 : if (width < HOST_BITS_PER_WIDE_INT)
109 5525288 : val &= (HOST_WIDE_INT_1U << width) - 1;
110 6111327 : return val == (HOST_WIDE_INT_1U << (width - 1));
111 : }
112 :
113 : /* Test whether VAL is equal to the most significant bit of mode MODE
114 : (after masking with the mode mask of MODE). Returns false if the
115 : precision of MODE is too large to handle. */
116 :
117 : bool
118 3613710 : val_signbit_p (machine_mode mode, unsigned HOST_WIDE_INT val)
119 : {
120 3613710 : unsigned int width;
121 3613710 : scalar_int_mode int_mode;
122 :
123 3613710 : if (!is_int_mode (mode, &int_mode))
124 : return false;
125 :
126 3613674 : width = GET_MODE_PRECISION (int_mode);
127 3613674 : if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
128 : return false;
129 :
130 3609046 : val &= GET_MODE_MASK (int_mode);
131 3609046 : return val == (HOST_WIDE_INT_1U << (width - 1));
132 : }
133 :
134 : /* Test whether the most significant bit of mode MODE is set in VAL.
135 : Returns false if the precision of MODE is too large to handle. */
136 : bool
137 2739873 : val_signbit_known_set_p (machine_mode mode, unsigned HOST_WIDE_INT val)
138 : {
139 2739873 : unsigned int width;
140 :
141 2739873 : scalar_int_mode int_mode;
142 2739873 : if (!is_int_mode (mode, &int_mode))
143 : return false;
144 :
145 2706521 : width = GET_MODE_PRECISION (int_mode);
146 2706521 : if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
147 : return false;
148 :
149 2706521 : val &= HOST_WIDE_INT_1U << (width - 1);
150 2706521 : return val != 0;
151 : }
152 :
153 : /* Test whether the most significant bit of mode MODE is clear in VAL.
154 : Returns false if the precision of MODE is too large to handle. */
155 : bool
156 7718168 : val_signbit_known_clear_p (machine_mode mode, unsigned HOST_WIDE_INT val)
157 : {
158 7718168 : unsigned int width;
159 :
160 7718168 : scalar_int_mode int_mode;
161 7718168 : if (!is_int_mode (mode, &int_mode))
162 : return false;
163 :
164 7398085 : width = GET_MODE_PRECISION (int_mode);
165 7398085 : if (width == 0 || width > HOST_BITS_PER_WIDE_INT)
166 : return false;
167 :
168 7286957 : val &= HOST_WIDE_INT_1U << (width - 1);
169 7286957 : return val == 0;
170 : }
171 :
172 : /* Make a binary operation by properly ordering the operands and
173 : seeing if the expression folds. */
174 :
175 : rtx
176 114969858 : simplify_context::simplify_gen_binary (rtx_code code, machine_mode mode,
177 : rtx op0, rtx op1)
178 : {
179 114969858 : rtx tem;
180 :
181 : /* If this simplifies, do it. */
182 114969858 : tem = simplify_binary_operation (code, mode, op0, op1);
183 114969858 : if (tem)
184 : return tem;
185 :
186 : /* Put complex operands first and constants second if commutative. */
187 72727189 : if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
188 72727189 : && swap_commutative_operands_p (op0, op1))
189 : std::swap (op0, op1);
190 :
191 72727189 : return gen_rtx_fmt_ee (code, mode, op0, op1);
192 : }
193 :
194 : /* If X is a MEM referencing the constant pool, return the real value.
195 : Otherwise return X. */
196 : rtx
197 2768193024 : avoid_constant_pool_reference (rtx x)
198 : {
199 2768193024 : rtx c, tmp, addr;
200 2768193024 : machine_mode cmode;
201 2768193024 : poly_int64 offset = 0;
202 :
203 2768193024 : switch (GET_CODE (x))
204 : {
205 260228183 : case MEM:
206 260228183 : break;
207 :
208 914908 : case FLOAT_EXTEND:
209 : /* Handle float extensions of constant pool references. */
210 914908 : tmp = XEXP (x, 0);
211 914908 : c = avoid_constant_pool_reference (tmp);
212 914908 : if (c != tmp && CONST_DOUBLE_AS_FLOAT_P (c))
213 122427 : return const_double_from_real_value (*CONST_DOUBLE_REAL_VALUE (c),
214 122427 : GET_MODE (x));
215 : return x;
216 :
217 : default:
218 : return x;
219 : }
220 :
221 260228183 : if (GET_MODE (x) == BLKmode)
222 : return x;
223 :
224 256196663 : addr = XEXP (x, 0);
225 :
226 : /* Call target hook to avoid the effects of -fpic etc.... */
227 256196663 : addr = targetm.delegitimize_address (addr);
228 :
229 : /* Split the address into a base and integer offset. */
230 256196663 : addr = strip_offset (addr, &offset);
231 :
232 256196663 : if (GET_CODE (addr) == LO_SUM)
233 0 : addr = XEXP (addr, 1);
234 :
235 : /* If this is a constant pool reference, we can turn it into its
236 : constant and hope that simplifications happen. */
237 256196663 : if (GET_CODE (addr) == SYMBOL_REF
238 256196663 : && CONSTANT_POOL_ADDRESS_P (addr))
239 : {
240 5347511 : c = get_pool_constant (addr);
241 5347511 : cmode = get_pool_mode (addr);
242 :
243 : /* If we're accessing the constant in a different mode than it was
244 : originally stored, attempt to fix that up via subreg simplifications.
245 : If that fails we have no choice but to return the original memory. */
246 5347511 : if (known_eq (offset, 0) && cmode == GET_MODE (x))
247 : return c;
248 23220 : else if (known_in_range_p (offset, 0, GET_MODE_SIZE (cmode)))
249 : {
250 11610 : rtx tem = simplify_subreg (GET_MODE (x), c, cmode, offset);
251 11610 : if (tem && CONSTANT_P (tem))
252 : return tem;
253 : }
254 : }
255 :
256 : return x;
257 : }
258 :
259 : /* Simplify a MEM based on its attributes. This is the default
260 : delegitimize_address target hook, and it's recommended that every
261 : overrider call it. */
262 :
263 : rtx
264 3522626935 : delegitimize_mem_from_attrs (rtx x)
265 : {
266 : /* MEMs without MEM_OFFSETs may have been offset, so we can't just
267 : use their base addresses as equivalent. */
268 3522626935 : if (MEM_P (x)
269 61951805 : && MEM_EXPR (x)
270 3560628075 : && MEM_OFFSET_KNOWN_P (x))
271 : {
272 35139134 : tree decl = MEM_EXPR (x);
273 35139134 : machine_mode mode = GET_MODE (x);
274 35139134 : poly_int64 offset = 0;
275 :
276 35139134 : switch (TREE_CODE (decl))
277 : {
278 : default:
279 : decl = NULL;
280 : break;
281 :
282 : case VAR_DECL:
283 : break;
284 :
285 10101863 : case ARRAY_REF:
286 10101863 : case ARRAY_RANGE_REF:
287 10101863 : case COMPONENT_REF:
288 10101863 : case BIT_FIELD_REF:
289 10101863 : case REALPART_EXPR:
290 10101863 : case IMAGPART_EXPR:
291 10101863 : case VIEW_CONVERT_EXPR:
292 10101863 : {
293 10101863 : poly_int64 bitsize, bitpos, bytepos, toffset_val = 0;
294 10101863 : tree toffset;
295 10101863 : int unsignedp, reversep, volatilep = 0;
296 :
297 10101863 : decl
298 10101863 : = get_inner_reference (decl, &bitsize, &bitpos, &toffset, &mode,
299 : &unsignedp, &reversep, &volatilep);
300 20203726 : if (maybe_ne (bitsize, GET_MODE_BITSIZE (mode))
301 10382770 : || !multiple_p (bitpos, BITS_PER_UNIT, &bytepos)
302 19764573 : || (toffset && !poly_int_tree_p (toffset, &toffset_val)))
303 : decl = NULL;
304 : else
305 9381803 : offset += bytepos + toffset_val;
306 10101863 : break;
307 : }
308 : }
309 :
310 720060 : if (decl
311 20714916 : && mode == GET_MODE (x)
312 20444378 : && VAR_P (decl)
313 13269191 : && (TREE_STATIC (decl)
314 12039163 : || DECL_THREAD_LOCAL_P (decl))
315 1266139 : && DECL_RTL_SET_P (decl)
316 10647442 : && MEM_P (DECL_RTL (decl)))
317 : {
318 1265639 : rtx newx;
319 :
320 1265639 : offset += MEM_OFFSET (x);
321 :
322 1265639 : newx = DECL_RTL (decl);
323 :
324 1265639 : if (MEM_P (newx))
325 : {
326 1265639 : rtx n = XEXP (newx, 0), o = XEXP (x, 0);
327 1265639 : poly_int64 n_offset, o_offset;
328 :
329 : /* Avoid creating a new MEM needlessly if we already had
330 : the same address. We do if there's no OFFSET and the
331 : old address X is identical to NEWX, or if X is of the
332 : form (plus NEWX OFFSET), or the NEWX is of the form
333 : (plus Y (const_int Z)) and X is that with the offset
334 : added: (plus Y (const_int Z+OFFSET)). */
335 1265639 : n = strip_offset (n, &n_offset);
336 1265639 : o = strip_offset (o, &o_offset);
337 2503620 : if (!(known_eq (o_offset, n_offset + offset)
338 1237981 : && rtx_equal_p (o, n)))
339 211478 : x = adjust_address_nv (newx, mode, offset);
340 : }
341 0 : else if (GET_MODE (x) == GET_MODE (newx)
342 0 : && known_eq (offset, 0))
343 : x = newx;
344 : }
345 : }
346 :
347 3522626935 : return x;
348 : }
349 :
350 : /* Make a unary operation by first seeing if it folds and otherwise making
351 : the specified operation. */
352 :
353 : rtx
354 5253201 : simplify_context::simplify_gen_unary (rtx_code code, machine_mode mode, rtx op,
355 : machine_mode op_mode)
356 : {
357 5253201 : rtx tem;
358 :
359 : /* If this simplifies, use it. */
360 5253201 : if ((tem = simplify_unary_operation (code, mode, op, op_mode)) != 0)
361 : return tem;
362 :
363 2012616 : return gen_rtx_fmt_e (code, mode, op);
364 : }
365 :
366 : /* Likewise for ternary operations. */
367 :
368 : rtx
369 2239573 : simplify_context::simplify_gen_ternary (rtx_code code, machine_mode mode,
370 : machine_mode op0_mode,
371 : rtx op0, rtx op1, rtx op2)
372 : {
373 2239573 : rtx tem;
374 :
375 : /* If this simplifies, use it. */
376 2239573 : if ((tem = simplify_ternary_operation (code, mode, op0_mode,
377 : op0, op1, op2)) != 0)
378 : return tem;
379 :
380 1994958 : return gen_rtx_fmt_eee (code, mode, op0, op1, op2);
381 : }
382 :
383 : /* Likewise, for relational operations.
384 : CMP_MODE specifies mode comparison is done in. */
385 :
386 : rtx
387 21760286 : simplify_context::simplify_gen_relational (rtx_code code, machine_mode mode,
388 : machine_mode cmp_mode,
389 : rtx op0, rtx op1)
390 : {
391 21760286 : rtx tem;
392 :
393 21760286 : if ((tem = simplify_relational_operation (code, mode, cmp_mode,
394 : op0, op1)) != 0)
395 : return tem;
396 :
397 19361631 : return gen_rtx_fmt_ee (code, mode, op0, op1);
398 : }
399 :
400 : /* If FN is NULL, replace all occurrences of OLD_RTX in X with copy_rtx (DATA)
401 : and simplify the result. If FN is non-NULL, call this callback on each
402 : X, if it returns non-NULL, replace X with its return value and simplify the
403 : result. */
404 :
405 : rtx
406 483535094 : simplify_replace_fn_rtx (rtx x, const_rtx old_rtx,
407 : rtx (*fn) (rtx, const_rtx, void *), void *data)
408 : {
409 483535094 : enum rtx_code code = GET_CODE (x);
410 483535094 : machine_mode mode = GET_MODE (x);
411 483535094 : machine_mode op_mode;
412 483535094 : const char *fmt;
413 483535094 : rtx op0, op1, op2, newx, op;
414 483535094 : rtvec vec, newvec;
415 483535094 : int i, j;
416 :
417 483535094 : if (UNLIKELY (fn != NULL))
418 : {
419 419940060 : newx = fn (x, old_rtx, data);
420 419940060 : if (newx)
421 : return newx;
422 : }
423 63595034 : else if (rtx_equal_p (x, old_rtx))
424 5222532 : return copy_rtx ((rtx) data);
425 :
426 379835207 : switch (GET_RTX_CLASS (code))
427 : {
428 2045561 : case RTX_UNARY:
429 2045561 : op0 = XEXP (x, 0);
430 2045561 : op_mode = GET_MODE (op0);
431 2045561 : op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
432 2045561 : if (op0 == XEXP (x, 0))
433 : return x;
434 662658 : return simplify_gen_unary (code, mode, op0, op_mode);
435 :
436 74800025 : case RTX_BIN_ARITH:
437 74800025 : case RTX_COMM_ARITH:
438 74800025 : op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
439 74800025 : op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
440 74800025 : if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
441 : return x;
442 22899534 : return simplify_gen_binary (code, mode, op0, op1);
443 :
444 9881373 : case RTX_COMPARE:
445 9881373 : case RTX_COMM_COMPARE:
446 9881373 : op0 = XEXP (x, 0);
447 9881373 : op1 = XEXP (x, 1);
448 9881373 : op_mode = GET_MODE (op0) != VOIDmode ? GET_MODE (op0) : GET_MODE (op1);
449 9881373 : op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
450 9881373 : op1 = simplify_replace_fn_rtx (op1, old_rtx, fn, data);
451 9881373 : if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
452 : return x;
453 2355047 : return simplify_gen_relational (code, mode, op_mode, op0, op1);
454 :
455 5764788 : case RTX_TERNARY:
456 5764788 : case RTX_BITFIELD_OPS:
457 5764788 : op0 = XEXP (x, 0);
458 5764788 : op_mode = GET_MODE (op0);
459 5764788 : op0 = simplify_replace_fn_rtx (op0, old_rtx, fn, data);
460 5764788 : op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
461 5764788 : op2 = simplify_replace_fn_rtx (XEXP (x, 2), old_rtx, fn, data);
462 5764788 : if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1) && op2 == XEXP (x, 2))
463 : return x;
464 1659135 : if (op_mode == VOIDmode)
465 1638281 : op_mode = GET_MODE (op0);
466 1659135 : return simplify_gen_ternary (code, mode, op_mode, op0, op1, op2);
467 :
468 83004728 : case RTX_EXTRA:
469 83004728 : if (code == SUBREG)
470 : {
471 684924 : op0 = simplify_replace_fn_rtx (SUBREG_REG (x), old_rtx, fn, data);
472 684924 : if (op0 == SUBREG_REG (x))
473 : return x;
474 137884 : op0 = simplify_gen_subreg (GET_MODE (x), op0,
475 68942 : GET_MODE (SUBREG_REG (x)),
476 68942 : SUBREG_BYTE (x));
477 68942 : return op0 ? op0 : x;
478 : }
479 : break;
480 :
481 61257647 : case RTX_OBJ:
482 61257647 : if (code == MEM)
483 : {
484 10798887 : op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
485 10798887 : if (op0 == XEXP (x, 0))
486 : return x;
487 152885 : return replace_equiv_address_nv (x, op0);
488 : }
489 50458760 : else if (code == LO_SUM)
490 : {
491 0 : op0 = simplify_replace_fn_rtx (XEXP (x, 0), old_rtx, fn, data);
492 0 : op1 = simplify_replace_fn_rtx (XEXP (x, 1), old_rtx, fn, data);
493 :
494 : /* (lo_sum (high x) y) -> y where x and y have the same base. */
495 0 : if (GET_CODE (op0) == HIGH)
496 : {
497 0 : rtx base0, base1, offset0, offset1;
498 0 : split_const (XEXP (op0, 0), &base0, &offset0);
499 0 : split_const (op1, &base1, &offset1);
500 0 : if (rtx_equal_p (base0, base1))
501 0 : return op1;
502 : }
503 :
504 0 : if (op0 == XEXP (x, 0) && op1 == XEXP (x, 1))
505 : return x;
506 0 : return gen_rtx_LO_SUM (mode, op0, op1);
507 : }
508 : break;
509 :
510 : default:
511 : break;
512 : }
513 :
514 275859649 : newx = x;
515 275859649 : fmt = GET_RTX_FORMAT (code);
516 595416501 : for (i = 0; fmt[i]; i++)
517 319556852 : switch (fmt[i])
518 : {
519 3062199 : case 'E':
520 3062199 : vec = XVEC (x, i);
521 3062199 : newvec = XVEC (newx, i);
522 12738776 : for (j = 0; j < GET_NUM_ELEM (vec); j++)
523 : {
524 9676577 : op = simplify_replace_fn_rtx (RTVEC_ELT (vec, j),
525 : old_rtx, fn, data);
526 9676577 : if (op != RTVEC_ELT (vec, j))
527 : {
528 343817 : if (newvec == vec)
529 : {
530 333805 : newvec = shallow_copy_rtvec (vec);
531 333805 : if (x == newx)
532 333805 : newx = shallow_copy_rtx (x);
533 333805 : XVEC (newx, i) = newvec;
534 : }
535 343817 : RTVEC_ELT (newvec, j) = op;
536 : }
537 : }
538 : break;
539 :
540 75171873 : case 'e':
541 75171873 : if (XEXP (x, i))
542 : {
543 75171873 : op = simplify_replace_fn_rtx (XEXP (x, i), old_rtx, fn, data);
544 75171873 : if (op != XEXP (x, i))
545 : {
546 4215175 : if (x == newx)
547 4211896 : newx = shallow_copy_rtx (x);
548 4215175 : XEXP (newx, i) = op;
549 : }
550 : }
551 : break;
552 : }
553 : return newx;
554 : }
555 :
556 : /* Replace all occurrences of OLD_RTX in X with NEW_RTX and try to simplify the
557 : resulting RTX. Return a new RTX which is as simplified as possible. */
558 :
559 : rtx
560 13069384 : simplify_replace_rtx (rtx x, const_rtx old_rtx, rtx new_rtx)
561 : {
562 13069384 : return simplify_replace_fn_rtx (x, old_rtx, 0, new_rtx);
563 : }
564 :
565 : /* Try to simplify a MODE truncation of OP, which has OP_MODE.
566 : Only handle cases where the truncated value is inherently an rvalue.
567 :
568 : RTL provides two ways of truncating a value:
569 :
570 : 1. a lowpart subreg. This form is only a truncation when both
571 : the outer and inner modes (here MODE and OP_MODE respectively)
572 : are scalar integers, and only then when the subreg is used as
573 : an rvalue.
574 :
575 : It is only valid to form such truncating subregs if the
576 : truncation requires no action by the target. The onus for
577 : proving this is on the creator of the subreg -- e.g. the
578 : caller to simplify_subreg or simplify_gen_subreg -- and typically
579 : involves either TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode.
580 :
581 : 2. a TRUNCATE. This form handles both scalar and compound integers.
582 :
583 : The first form is preferred where valid. However, the TRUNCATE
584 : handling in simplify_unary_operation turns the second form into the
585 : first form when TRULY_NOOP_TRUNCATION_MODES_P or truncated_to_mode allow,
586 : so it is generally safe to form rvalue truncations using:
587 :
588 : simplify_gen_unary (TRUNCATE, ...)
589 :
590 : and leave simplify_unary_operation to work out which representation
591 : should be used.
592 :
593 : Because of the proof requirements on (1), simplify_truncation must
594 : also use simplify_gen_unary (TRUNCATE, ...) to truncate parts of OP,
595 : regardless of whether the outer truncation came from a SUBREG or a
596 : TRUNCATE. For example, if the caller has proven that an SImode
597 : truncation of:
598 :
599 : (and:DI X Y)
600 :
601 : is a no-op and can be represented as a subreg, it does not follow
602 : that SImode truncations of X and Y are also no-ops. On a target
603 : like 64-bit MIPS that requires SImode values to be stored in
604 : sign-extended form, an SImode truncation of:
605 :
606 : (and:DI (reg:DI X) (const_int 63))
607 :
608 : is trivially a no-op because only the lower 6 bits can be set.
609 : However, X is still an arbitrary 64-bit number and so we cannot
610 : assume that truncating it too is a no-op. */
611 :
612 : rtx
613 18023560 : simplify_context::simplify_truncation (machine_mode mode, rtx op,
614 : machine_mode op_mode)
615 : {
616 18023560 : unsigned int precision = GET_MODE_UNIT_PRECISION (mode);
617 18023560 : unsigned int op_precision = GET_MODE_UNIT_PRECISION (op_mode);
618 18023560 : scalar_int_mode int_mode, int_op_mode, subreg_mode;
619 :
620 18023560 : gcc_assert (precision <= op_precision);
621 :
622 : /* Optimize truncations of zero and sign extended values. */
623 18023560 : if (GET_CODE (op) == ZERO_EXTEND
624 18023560 : || GET_CODE (op) == SIGN_EXTEND)
625 : {
626 : /* There are three possibilities. If MODE is the same as the
627 : origmode, we can omit both the extension and the subreg.
628 : If MODE is not larger than the origmode, we can apply the
629 : truncation without the extension. Finally, if the outermode
630 : is larger than the origmode, we can just extend to the appropriate
631 : mode. */
632 277992 : machine_mode origmode = GET_MODE (XEXP (op, 0));
633 277992 : if (mode == origmode)
634 : return XEXP (op, 0);
635 19246 : else if (precision <= GET_MODE_UNIT_PRECISION (origmode))
636 6653 : return simplify_gen_unary (TRUNCATE, mode,
637 6653 : XEXP (op, 0), origmode);
638 : else
639 2970 : return simplify_gen_unary (GET_CODE (op), mode,
640 2970 : XEXP (op, 0), origmode);
641 : }
642 :
643 : /* If the machine can perform operations in the truncated mode, distribute
644 : the truncation, i.e. simplify (truncate:QI (op:SI (x:SI) (y:SI))) into
645 : (op:QI (truncate:QI (x:SI)) (truncate:QI (y:SI))). */
646 17745568 : if (1
647 : && (!WORD_REGISTER_OPERATIONS || precision >= BITS_PER_WORD)
648 : && (GET_CODE (op) == PLUS
649 : || GET_CODE (op) == MINUS
650 17745568 : || GET_CODE (op) == MULT))
651 : {
652 764682 : rtx op0 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0), op_mode);
653 764682 : if (op0)
654 : {
655 764682 : rtx op1 = simplify_gen_unary (TRUNCATE, mode, XEXP (op, 1), op_mode);
656 764682 : if (op1)
657 764682 : return simplify_gen_binary (GET_CODE (op), mode, op0, op1);
658 : }
659 : }
660 :
661 : /* Simplify (truncate:QI (lshiftrt:SI (sign_extend:SI (x:QI)) C)) into
662 : to (ashiftrt:QI (x:QI) C), where C is a suitable small constant and
663 : the outer subreg is effectively a truncation to the original mode. */
664 16980886 : if ((GET_CODE (op) == LSHIFTRT
665 16980886 : || GET_CODE (op) == ASHIFTRT)
666 : /* Ensure that OP_MODE is at least twice as wide as MODE
667 : to avoid the possibility that an outer LSHIFTRT shifts by more
668 : than the sign extension's sign_bit_copies and introduces zeros
669 : into the high bits of the result. */
670 1612532 : && 2 * precision <= op_precision
671 1612532 : && CONST_INT_P (XEXP (op, 1))
672 1514999 : && GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
673 26 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
674 23 : && UINTVAL (XEXP (op, 1)) < precision)
675 19 : return simplify_gen_binary (ASHIFTRT, mode,
676 19 : XEXP (XEXP (op, 0), 0), XEXP (op, 1));
677 :
678 : /* Likewise (truncate:QI (lshiftrt:SI (zero_extend:SI (x:QI)) C)) into
679 : to (lshiftrt:QI (x:QI) C), where C is a suitable small constant and
680 : the outer subreg is effectively a truncation to the original mode. */
681 16980867 : if ((GET_CODE (op) == LSHIFTRT
682 : || GET_CODE (op) == ASHIFTRT)
683 1612513 : && CONST_INT_P (XEXP (op, 1))
684 1514980 : && GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
685 677 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
686 677 : && UINTVAL (XEXP (op, 1)) < precision)
687 667 : return simplify_gen_binary (LSHIFTRT, mode,
688 667 : XEXP (XEXP (op, 0), 0), XEXP (op, 1));
689 :
690 : /* Likewise (truncate:QI (ashift:SI (zero_extend:SI (x:QI)) C)) into
691 : to (ashift:QI (x:QI) C), where C is a suitable small constant and
692 : the outer subreg is effectively a truncation to the original mode. */
693 16980200 : if (GET_CODE (op) == ASHIFT
694 448921 : && CONST_INT_P (XEXP (op, 1))
695 389196 : && (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
696 389196 : || GET_CODE (XEXP (op, 0)) == SIGN_EXTEND)
697 555 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode
698 542 : && UINTVAL (XEXP (op, 1)) < precision)
699 534 : return simplify_gen_binary (ASHIFT, mode,
700 534 : XEXP (XEXP (op, 0), 0), XEXP (op, 1));
701 :
702 : /* Likewise (truncate:QI (and:SI (lshiftrt:SI (x:SI) C) C2)) into
703 : (and:QI (lshiftrt:QI (truncate:QI (x:SI)) C) C2) for suitable C
704 : and C2. */
705 16979666 : if (GET_CODE (op) == AND
706 644206 : && (GET_CODE (XEXP (op, 0)) == LSHIFTRT
707 644206 : || GET_CODE (XEXP (op, 0)) == ASHIFTRT)
708 30709 : && CONST_INT_P (XEXP (XEXP (op, 0), 1))
709 30604 : && CONST_INT_P (XEXP (op, 1)))
710 : {
711 30604 : rtx op0 = (XEXP (XEXP (op, 0), 0));
712 30604 : rtx shift_op = XEXP (XEXP (op, 0), 1);
713 30604 : rtx mask_op = XEXP (op, 1);
714 30604 : unsigned HOST_WIDE_INT shift = UINTVAL (shift_op);
715 30604 : unsigned HOST_WIDE_INT mask = UINTVAL (mask_op);
716 :
717 30604 : if (shift < precision
718 : /* If doing this transform works for an X with all bits set,
719 : it works for any X. */
720 17517 : && ((GET_MODE_MASK (mode) >> shift) & mask)
721 17517 : == ((GET_MODE_MASK (op_mode) >> shift) & mask)
722 3063 : && (op0 = simplify_gen_unary (TRUNCATE, mode, op0, op_mode))
723 33667 : && (op0 = simplify_gen_binary (LSHIFTRT, mode, op0, shift_op)))
724 : {
725 3063 : mask_op = GEN_INT (trunc_int_for_mode (mask, mode));
726 3063 : return simplify_gen_binary (AND, mode, op0, mask_op);
727 : }
728 : }
729 :
730 : /* Turn (truncate:M1 (*_extract:M2 (reg:M2) (len) (pos))) into
731 : (*_extract:M1 (truncate:M1 (reg:M2)) (len) (pos')) if possible without
732 : changing len. */
733 16976603 : if ((GET_CODE (op) == ZERO_EXTRACT || GET_CODE (op) == SIGN_EXTRACT)
734 377335 : && REG_P (XEXP (op, 0))
735 256822 : && GET_MODE (XEXP (op, 0)) == GET_MODE (op)
736 255921 : && CONST_INT_P (XEXP (op, 1))
737 255921 : && CONST_INT_P (XEXP (op, 2)))
738 : {
739 223029 : rtx op0 = XEXP (op, 0);
740 223029 : unsigned HOST_WIDE_INT len = UINTVAL (XEXP (op, 1));
741 223029 : unsigned HOST_WIDE_INT pos = UINTVAL (XEXP (op, 2));
742 223029 : if (BITS_BIG_ENDIAN && pos >= op_precision - precision)
743 : {
744 : op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
745 : if (op0)
746 : {
747 : pos -= op_precision - precision;
748 : return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
749 : XEXP (op, 1), GEN_INT (pos));
750 : }
751 : }
752 223029 : else if (!BITS_BIG_ENDIAN && precision >= len + pos)
753 : {
754 7691 : op0 = simplify_gen_unary (TRUNCATE, mode, op0, GET_MODE (op0));
755 7691 : if (op0)
756 7691 : return simplify_gen_ternary (GET_CODE (op), mode, mode, op0,
757 7691 : XEXP (op, 1), XEXP (op, 2));
758 : }
759 : }
760 :
761 : /* Recognize a word extraction from a multi-word subreg. */
762 16968912 : if ((GET_CODE (op) == LSHIFTRT
763 16968912 : || GET_CODE (op) == ASHIFTRT)
764 1611846 : && SCALAR_INT_MODE_P (mode)
765 1608832 : && SCALAR_INT_MODE_P (op_mode)
766 1743255 : && precision >= BITS_PER_WORD
767 64519 : && 2 * precision <= op_precision
768 64519 : && CONST_INT_P (XEXP (op, 1))
769 56383 : && (INTVAL (XEXP (op, 1)) & (precision - 1)) == 0
770 1865 : && UINTVAL (XEXP (op, 1)) < op_precision)
771 : {
772 1865 : poly_int64 byte = subreg_lowpart_offset (mode, op_mode);
773 1865 : int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
774 1865 : return simplify_gen_subreg (mode, XEXP (op, 0), op_mode,
775 : (WORDS_BIG_ENDIAN
776 1865 : ? byte - shifted_bytes
777 1865 : : byte + shifted_bytes));
778 : }
779 :
780 : /* If we have a TRUNCATE of a right shift of MEM, make a new MEM
781 : and try replacing the TRUNCATE and shift with it. Don't do this
782 : if the MEM has a mode-dependent address. */
783 16967047 : if ((GET_CODE (op) == LSHIFTRT
784 : || GET_CODE (op) == ASHIFTRT)
785 1606967 : && is_a <scalar_int_mode> (mode, &int_mode)
786 18572778 : && is_a <scalar_int_mode> (op_mode, &int_op_mode)
787 1606967 : && MEM_P (XEXP (op, 0))
788 11328 : && CONST_INT_P (XEXP (op, 1))
789 21088 : && INTVAL (XEXP (op, 1)) % GET_MODE_BITSIZE (int_mode) == 0
790 1279 : && INTVAL (XEXP (op, 1)) > 0
791 2558 : && INTVAL (XEXP (op, 1)) < GET_MODE_BITSIZE (int_op_mode)
792 1279 : && ! mode_dependent_address_p (XEXP (XEXP (op, 0), 0),
793 1279 : MEM_ADDR_SPACE (XEXP (op, 0)))
794 1279 : && ! MEM_VOLATILE_P (XEXP (op, 0))
795 16967047 : && (GET_MODE_SIZE (int_mode) >= UNITS_PER_WORD
796 : || WORDS_BIG_ENDIAN == BYTES_BIG_ENDIAN))
797 : {
798 1236 : poly_int64 byte = subreg_lowpart_offset (int_mode, int_op_mode);
799 1236 : int shifted_bytes = INTVAL (XEXP (op, 1)) / BITS_PER_UNIT;
800 1236 : return adjust_address_nv (XEXP (op, 0), int_mode,
801 : (WORDS_BIG_ENDIAN
802 : ? byte - shifted_bytes
803 : : byte + shifted_bytes));
804 : }
805 :
806 : /* (truncate:SI (OP:DI ({sign,zero}_extend:DI foo:SI))) is
807 : (OP:SI foo:SI) if OP is NEG or ABS. */
808 16965811 : if ((GET_CODE (op) == ABS
809 16965811 : || GET_CODE (op) == NEG)
810 23039 : && (GET_CODE (XEXP (op, 0)) == SIGN_EXTEND
811 23039 : || GET_CODE (XEXP (op, 0)) == ZERO_EXTEND)
812 18 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
813 2 : return simplify_gen_unary (GET_CODE (op), mode,
814 2 : XEXP (XEXP (op, 0), 0), mode);
815 :
816 : /* Simplifications of (truncate:A (subreg:B X 0)). */
817 16965809 : if (GET_CODE (op) == SUBREG
818 16965986 : && is_a <scalar_int_mode> (mode, &int_mode)
819 27623 : && SCALAR_INT_MODE_P (op_mode)
820 27623 : && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &subreg_mode)
821 16993292 : && subreg_lowpart_p (op))
822 : {
823 : /* (truncate:A (subreg:B (truncate:C X) 0)) is (truncate:A X). */
824 27480 : if (GET_CODE (SUBREG_REG (op)) == TRUNCATE)
825 : {
826 0 : rtx inner = XEXP (SUBREG_REG (op), 0);
827 0 : if (GET_MODE_PRECISION (int_mode)
828 0 : <= GET_MODE_PRECISION (subreg_mode))
829 0 : return simplify_gen_unary (TRUNCATE, int_mode, inner,
830 0 : GET_MODE (inner));
831 : else
832 : /* If subreg above is paradoxical and C is narrower
833 : than A, return (subreg:A (truncate:C X) 0). */
834 0 : return simplify_gen_subreg (int_mode, SUBREG_REG (op),
835 : subreg_mode, 0);
836 : }
837 :
838 : /* Simplifications of (truncate:A (subreg:B X:C 0)) with
839 : paradoxical subregs (B is wider than C). */
840 27480 : if (is_a <scalar_int_mode> (op_mode, &int_op_mode))
841 : {
842 27480 : unsigned int int_op_prec = GET_MODE_PRECISION (int_op_mode);
843 27480 : unsigned int subreg_prec = GET_MODE_PRECISION (subreg_mode);
844 27480 : if (int_op_prec > subreg_prec)
845 : {
846 1428 : if (int_mode == subreg_mode)
847 : return SUBREG_REG (op);
848 61 : if (GET_MODE_PRECISION (int_mode) < subreg_prec)
849 27 : return simplify_gen_unary (TRUNCATE, int_mode,
850 27 : SUBREG_REG (op), subreg_mode);
851 : }
852 : /* Simplification of (truncate:A (subreg:B X:C 0)) where
853 : A is narrower than B and B is narrower than C. */
854 26052 : else if (int_op_prec < subreg_prec
855 26052 : && GET_MODE_PRECISION (int_mode) < int_op_prec)
856 26052 : return simplify_gen_unary (TRUNCATE, int_mode,
857 26052 : SUBREG_REG (op), subreg_mode);
858 : }
859 : }
860 :
861 : /* (truncate:A (truncate:B X)) is (truncate:A X). */
862 16938363 : if (GET_CODE (op) == TRUNCATE)
863 0 : return simplify_gen_unary (TRUNCATE, mode, XEXP (op, 0),
864 0 : GET_MODE (XEXP (op, 0)));
865 :
866 : /* (truncate:A (ior X C)) is (const_int -1) if C is equal to that already,
867 : in mode A. */
868 16938363 : if (GET_CODE (op) == IOR
869 34600 : && SCALAR_INT_MODE_P (mode)
870 34600 : && SCALAR_INT_MODE_P (op_mode)
871 34600 : && CONST_INT_P (XEXP (op, 1))
872 16947082 : && trunc_int_for_mode (INTVAL (XEXP (op, 1)), mode) == -1)
873 42 : return constm1_rtx;
874 :
875 : return NULL_RTX;
876 : }
877 :
878 : /* Try to simplify a unary operation CODE whose output mode is to be
879 : MODE with input operand OP whose mode was originally OP_MODE.
880 : Return zero if no simplification can be made. */
881 : rtx
882 27339163 : simplify_context::simplify_unary_operation (rtx_code code, machine_mode mode,
883 : rtx op, machine_mode op_mode)
884 : {
885 27339163 : rtx trueop, tem;
886 :
887 27339163 : trueop = avoid_constant_pool_reference (op);
888 :
889 27339163 : tem = simplify_const_unary_operation (code, mode, trueop, op_mode);
890 27339163 : if (tem)
891 : return tem;
892 :
893 22306044 : return simplify_unary_operation_1 (code, mode, op);
894 : }
895 :
896 : /* Return true if FLOAT or UNSIGNED_FLOAT operation OP is known
897 : to be exact. */
898 :
899 : static bool
900 2702 : exact_int_to_float_conversion_p (const_rtx op)
901 : {
902 2702 : machine_mode op0_mode = GET_MODE (XEXP (op, 0));
903 : /* Constants can reach here with -frounding-math, if they do then
904 : the conversion isn't exact. */
905 2702 : if (op0_mode == VOIDmode)
906 : return false;
907 5402 : int out_bits = significand_size (GET_MODE_INNER (GET_MODE (op)));
908 2701 : int in_prec = GET_MODE_UNIT_PRECISION (op0_mode);
909 2701 : int in_bits = in_prec;
910 2701 : if (HWI_COMPUTABLE_MODE_P (op0_mode))
911 : {
912 2611 : unsigned HOST_WIDE_INT nonzero = nonzero_bits (XEXP (op, 0), op0_mode);
913 2611 : if (GET_CODE (op) == FLOAT)
914 2487 : in_bits -= num_sign_bit_copies (XEXP (op, 0), op0_mode);
915 124 : else if (GET_CODE (op) == UNSIGNED_FLOAT)
916 124 : in_bits = wi::min_precision (wi::uhwi (nonzero, in_prec), UNSIGNED);
917 : else
918 0 : gcc_unreachable ();
919 2611 : in_bits -= wi::ctz (wi::uhwi (nonzero, in_prec));
920 : }
921 2701 : return in_bits <= out_bits;
922 : }
923 :
924 : /* Perform some simplifications we can do even if the operands
925 : aren't constant. */
926 : rtx
927 22306044 : simplify_context::simplify_unary_operation_1 (rtx_code code, machine_mode mode,
928 : rtx op)
929 : {
930 22306044 : enum rtx_code reversed;
931 22306044 : rtx temp, elt, base, step;
932 22306044 : scalar_int_mode inner, int_mode, op_mode, op0_mode;
933 :
934 22306044 : switch (code)
935 : {
936 1793650 : case NOT:
937 : /* (not (not X)) == X. */
938 1793650 : if (GET_CODE (op) == NOT)
939 3325 : return XEXP (op, 0);
940 :
941 : /* (not (eq X Y)) == (ne X Y), etc. if BImode or the result of the
942 : comparison is all ones. */
943 1790325 : if (COMPARISON_P (op)
944 12201 : && (mode == BImode || STORE_FLAG_VALUE == -1)
945 1790325 : && ((reversed = reversed_comparison_code (op, NULL)) != UNKNOWN))
946 0 : return simplify_gen_relational (reversed, mode, VOIDmode,
947 0 : XEXP (op, 0), XEXP (op, 1));
948 :
949 : /* (not (plus X -1)) can become (neg X). */
950 1790325 : if (GET_CODE (op) == PLUS
951 291359 : && XEXP (op, 1) == constm1_rtx)
952 7126 : return simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
953 :
954 : /* Similarly, (not (neg X)) is (plus X -1). Only do this for
955 : modes that have CONSTM1_RTX, i.e. MODE_INT, MODE_PARTIAL_INT
956 : and MODE_VECTOR_INT. */
957 1783199 : if (GET_CODE (op) == NEG && CONSTM1_RTX (mode))
958 70916 : return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
959 70916 : CONSTM1_RTX (mode));
960 :
961 : /* (not (xor X C)) for C constant is (xor X D) with D = ~C. */
962 1712283 : if (GET_CODE (op) == XOR
963 15831 : && CONST_INT_P (XEXP (op, 1))
964 1716106 : && (temp = simplify_unary_operation (NOT, mode,
965 : XEXP (op, 1), mode)) != 0)
966 3823 : return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
967 :
968 : /* (not (plus X C)) for signbit C is (xor X D) with D = ~C. */
969 1708460 : if (GET_CODE (op) == PLUS
970 284233 : && CONST_INT_P (XEXP (op, 1))
971 166952 : && mode_signbit_p (mode, XEXP (op, 1))
972 1712371 : && (temp = simplify_unary_operation (NOT, mode,
973 : XEXP (op, 1), mode)) != 0)
974 3911 : return simplify_gen_binary (XOR, mode, XEXP (op, 0), temp);
975 :
976 :
977 : /* (not (ashift 1 X)) is (rotate ~1 X). We used to do this for
978 : operands other than 1, but that is not valid. We could do a
979 : similar simplification for (not (lshiftrt C X)) where C is
980 : just the sign bit, but this doesn't seem common enough to
981 : bother with. */
982 1704549 : if (GET_CODE (op) == ASHIFT
983 41567 : && XEXP (op, 0) == const1_rtx)
984 : {
985 1043 : temp = simplify_gen_unary (NOT, mode, const1_rtx, mode);
986 1043 : return simplify_gen_binary (ROTATE, mode, temp, XEXP (op, 1));
987 : }
988 :
989 : /* (not (ashiftrt foo C)) where C is the number of bits in FOO
990 : minus 1 is (ge foo (const_int 0)) if STORE_FLAG_VALUE is -1,
991 : so we can perform the above simplification. */
992 1703506 : if (STORE_FLAG_VALUE == -1
993 : && is_a <scalar_int_mode> (mode, &int_mode)
994 : && GET_CODE (op) == ASHIFTRT
995 : && CONST_INT_P (XEXP (op, 1))
996 : && INTVAL (XEXP (op, 1)) == GET_MODE_PRECISION (int_mode) - 1)
997 : return simplify_gen_relational (GE, int_mode, VOIDmode,
998 : XEXP (op, 0), const0_rtx);
999 :
1000 :
1001 1703506 : if (partial_subreg_p (op)
1002 69515 : && subreg_lowpart_p (op)
1003 69205 : && GET_CODE (SUBREG_REG (op)) == ASHIFT
1004 94501 : && XEXP (SUBREG_REG (op), 0) == const1_rtx)
1005 : {
1006 163 : machine_mode inner_mode = GET_MODE (SUBREG_REG (op));
1007 163 : rtx x;
1008 :
1009 163 : x = gen_rtx_ROTATE (inner_mode,
1010 : simplify_gen_unary (NOT, inner_mode, const1_rtx,
1011 : inner_mode),
1012 : XEXP (SUBREG_REG (op), 1));
1013 163 : temp = rtl_hooks.gen_lowpart_no_emit (mode, x);
1014 163 : if (temp)
1015 : return temp;
1016 : }
1017 :
1018 : /* Apply De Morgan's laws to reduce number of patterns for machines
1019 : with negating logical insns (and-not, nand, etc.). If result has
1020 : only one NOT, put it first, since that is how the patterns are
1021 : coded. */
1022 1703343 : if (GET_CODE (op) == IOR || GET_CODE (op) == AND)
1023 : {
1024 12122 : rtx in1 = XEXP (op, 0), in2 = XEXP (op, 1);
1025 12122 : machine_mode op_mode;
1026 :
1027 12122 : op_mode = GET_MODE (in1);
1028 12122 : in1 = simplify_gen_unary (NOT, op_mode, in1, op_mode);
1029 :
1030 12122 : op_mode = GET_MODE (in2);
1031 12122 : if (op_mode == VOIDmode)
1032 5343 : op_mode = mode;
1033 12122 : in2 = simplify_gen_unary (NOT, op_mode, in2, op_mode);
1034 :
1035 12122 : if (GET_CODE (in2) == NOT && GET_CODE (in1) != NOT)
1036 : std::swap (in1, in2);
1037 :
1038 24244 : return gen_rtx_fmt_ee (GET_CODE (op) == IOR ? AND : IOR,
1039 : mode, in1, in2);
1040 : }
1041 :
1042 : /* (not (bswap x)) -> (bswap (not x)). */
1043 1691221 : if (GET_CODE (op) == BSWAP || GET_CODE (op) == BITREVERSE)
1044 : {
1045 0 : rtx x = simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1046 0 : return simplify_gen_unary (GET_CODE (op), mode, x, mode);
1047 : }
1048 : break;
1049 :
1050 1698298 : case NEG:
1051 : /* (neg (neg X)) == X. */
1052 1698298 : if (GET_CODE (op) == NEG)
1053 6372 : return XEXP (op, 0);
1054 :
1055 : /* (neg (x ? (neg y) : y)) == !x ? (neg y) : y.
1056 : If comparison is not reversible use
1057 : x ? y : (neg y). */
1058 1691926 : if (GET_CODE (op) == IF_THEN_ELSE)
1059 : {
1060 3171 : rtx cond = XEXP (op, 0);
1061 3171 : rtx true_rtx = XEXP (op, 1);
1062 3171 : rtx false_rtx = XEXP (op, 2);
1063 :
1064 3171 : if ((GET_CODE (true_rtx) == NEG
1065 0 : && rtx_equal_p (XEXP (true_rtx, 0), false_rtx))
1066 3171 : || (GET_CODE (false_rtx) == NEG
1067 0 : && rtx_equal_p (XEXP (false_rtx, 0), true_rtx)))
1068 : {
1069 0 : if (reversed_comparison_code (cond, NULL) != UNKNOWN)
1070 0 : temp = reversed_comparison (cond, mode);
1071 : else
1072 : {
1073 : temp = cond;
1074 : std::swap (true_rtx, false_rtx);
1075 : }
1076 0 : return simplify_gen_ternary (IF_THEN_ELSE, mode,
1077 0 : mode, temp, true_rtx, false_rtx);
1078 : }
1079 : }
1080 :
1081 : /* (neg (plus X 1)) can become (not X). */
1082 1691926 : if (GET_CODE (op) == PLUS
1083 138584 : && XEXP (op, 1) == const1_rtx)
1084 54133 : return simplify_gen_unary (NOT, mode, XEXP (op, 0), mode);
1085 :
1086 : /* Similarly, (neg (not X)) is (plus X 1). */
1087 1637793 : if (GET_CODE (op) == NOT)
1088 522 : return simplify_gen_binary (PLUS, mode, XEXP (op, 0),
1089 522 : CONST1_RTX (mode));
1090 :
1091 : /* (neg (minus X Y)) can become (minus Y X). This transformation
1092 : isn't safe for modes with signed zeros, since if X and Y are
1093 : both +0, (minus Y X) is the same as (minus X Y). If the
1094 : rounding mode is towards +infinity (or -infinity) then the two
1095 : expressions will be rounded differently. */
1096 1637271 : if (GET_CODE (op) == MINUS
1097 23765 : && !HONOR_SIGNED_ZEROS (mode)
1098 1659652 : && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1099 22381 : return simplify_gen_binary (MINUS, mode, XEXP (op, 1), XEXP (op, 0));
1100 :
1101 1614890 : if (GET_CODE (op) == PLUS
1102 84451 : && !HONOR_SIGNED_ZEROS (mode)
1103 1698917 : && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1104 : {
1105 : /* (neg (plus A C)) is simplified to (minus -C A). */
1106 84027 : if (CONST_SCALAR_INT_P (XEXP (op, 1))
1107 4902 : || CONST_DOUBLE_AS_FLOAT_P (XEXP (op, 1)))
1108 : {
1109 79125 : temp = simplify_unary_operation (NEG, mode, XEXP (op, 1), mode);
1110 79125 : if (temp)
1111 79125 : return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 0));
1112 : }
1113 :
1114 : /* (neg (plus A B)) is canonicalized to (minus (neg A) B). */
1115 4902 : temp = simplify_gen_unary (NEG, mode, XEXP (op, 0), mode);
1116 4902 : return simplify_gen_binary (MINUS, mode, temp, XEXP (op, 1));
1117 : }
1118 :
1119 : /* (neg (mult A B)) becomes (mult A (neg B)).
1120 : This works even for floating-point values. */
1121 1530863 : if (GET_CODE (op) == MULT
1122 1530863 : && !HONOR_SIGN_DEPENDENT_ROUNDING (mode))
1123 : {
1124 20209 : temp = simplify_gen_unary (NEG, mode, XEXP (op, 1), mode);
1125 20209 : return simplify_gen_binary (MULT, mode, XEXP (op, 0), temp);
1126 : }
1127 :
1128 : /* NEG commutes with ASHIFT since it is multiplication. Only do
1129 : this if we can then eliminate the NEG (e.g., if the operand
1130 : is a constant). */
1131 1510654 : if (GET_CODE (op) == ASHIFT)
1132 : {
1133 53318 : temp = simplify_unary_operation (NEG, mode, XEXP (op, 0), mode);
1134 53318 : if (temp)
1135 12850 : return simplify_gen_binary (ASHIFT, mode, temp, XEXP (op, 1));
1136 : }
1137 :
1138 : /* (neg (ashiftrt X C)) can be replaced by (lshiftrt X C) when
1139 : C is equal to the width of MODE minus 1. */
1140 1497804 : if (GET_CODE (op) == ASHIFTRT
1141 27551 : && CONST_INT_P (XEXP (op, 1))
1142 1552810 : && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1143 420 : return simplify_gen_binary (LSHIFTRT, mode,
1144 420 : XEXP (op, 0), XEXP (op, 1));
1145 :
1146 : /* (neg (lshiftrt X C)) can be replaced by (ashiftrt X C) when
1147 : C is equal to the width of MODE minus 1. */
1148 1497384 : if (GET_CODE (op) == LSHIFTRT
1149 7709 : && CONST_INT_P (XEXP (op, 1))
1150 1512634 : && INTVAL (XEXP (op, 1)) == GET_MODE_UNIT_PRECISION (mode) - 1)
1151 2350 : return simplify_gen_binary (ASHIFTRT, mode,
1152 2350 : XEXP (op, 0), XEXP (op, 1));
1153 :
1154 : /* (neg (xor A 1)) is (plus A -1) if A is known to be either 0 or 1. */
1155 1495034 : if (GET_CODE (op) == XOR
1156 17765 : && XEXP (op, 1) == const1_rtx
1157 1495146 : && nonzero_bits (XEXP (op, 0), mode) == 1)
1158 33 : return plus_constant (mode, XEXP (op, 0), -1);
1159 :
1160 : /* (neg (lt x 0)) is (ashiftrt X C) if STORE_FLAG_VALUE is 1. */
1161 : /* (neg (lt x 0)) is (lshiftrt X C) if STORE_FLAG_VALUE is -1. */
1162 1495001 : if (GET_CODE (op) == LT
1163 2526 : && XEXP (op, 1) == const0_rtx
1164 1496733 : && is_a <scalar_int_mode> (GET_MODE (XEXP (op, 0)), &inner))
1165 : {
1166 339 : int_mode = as_a <scalar_int_mode> (mode);
1167 339 : int isize = GET_MODE_PRECISION (inner);
1168 339 : if (STORE_FLAG_VALUE == 1)
1169 : {
1170 339 : temp = simplify_gen_binary (ASHIFTRT, inner, XEXP (op, 0),
1171 : gen_int_shift_amount (inner,
1172 339 : isize - 1));
1173 339 : if (int_mode == inner)
1174 : return temp;
1175 174 : if (GET_MODE_PRECISION (int_mode) > isize)
1176 109 : return simplify_gen_unary (SIGN_EXTEND, int_mode, temp, inner);
1177 65 : return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1178 : }
1179 : else if (STORE_FLAG_VALUE == -1)
1180 : {
1181 : temp = simplify_gen_binary (LSHIFTRT, inner, XEXP (op, 0),
1182 : gen_int_shift_amount (inner,
1183 : isize - 1));
1184 : if (int_mode == inner)
1185 : return temp;
1186 : if (GET_MODE_PRECISION (int_mode) > isize)
1187 : return simplify_gen_unary (ZERO_EXTEND, int_mode, temp, inner);
1188 : return simplify_gen_unary (TRUNCATE, int_mode, temp, inner);
1189 : }
1190 : }
1191 :
1192 1494662 : if (vec_series_p (op, &base, &step))
1193 : {
1194 : /* Only create a new series if we can simplify both parts. In other
1195 : cases this isn't really a simplification, and it's not necessarily
1196 : a win to replace a vector operation with a scalar operation. */
1197 276 : scalar_mode inner_mode = GET_MODE_INNER (mode);
1198 276 : base = simplify_unary_operation (NEG, inner_mode, base, inner_mode);
1199 276 : if (base)
1200 : {
1201 276 : step = simplify_unary_operation (NEG, inner_mode,
1202 : step, inner_mode);
1203 276 : if (step)
1204 276 : return gen_vec_series (mode, base, step);
1205 : }
1206 : }
1207 : break;
1208 :
1209 1255953 : case TRUNCATE:
1210 : /* Don't optimize (lshiftrt (mult ...)) as it would interfere
1211 : with the umulXi3_highpart patterns. */
1212 1255953 : if (GET_CODE (op) == LSHIFTRT
1213 18263 : && GET_CODE (XEXP (op, 0)) == MULT)
1214 : break;
1215 :
1216 1248680 : if (GET_MODE_CLASS (mode) == MODE_PARTIAL_INT)
1217 : {
1218 12 : if (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1219 : {
1220 12 : temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1221 12 : if (temp)
1222 : return temp;
1223 : }
1224 : /* We can't handle truncation to a partial integer mode here
1225 : because we don't know the real bitsize of the partial
1226 : integer mode. */
1227 : break;
1228 : }
1229 :
1230 1248668 : if (GET_MODE (op) != VOIDmode)
1231 : {
1232 1248668 : temp = simplify_truncation (mode, op, GET_MODE (op));
1233 1248668 : if (temp)
1234 : return temp;
1235 : }
1236 :
1237 : /* If we know that the value is already truncated, we can
1238 : replace the TRUNCATE with a SUBREG. */
1239 1127239 : if (known_eq (GET_MODE_NUNITS (mode), 1)
1240 1127239 : && (TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op))
1241 0 : || truncated_to_mode (mode, op)))
1242 : {
1243 1115797 : temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1244 1115797 : if (temp)
1245 : return temp;
1246 : }
1247 :
1248 : /* A truncate of a comparison can be replaced with a subreg if
1249 : STORE_FLAG_VALUE permits. This is like the previous test,
1250 : but it works even if the comparison is done in a mode larger
1251 : than HOST_BITS_PER_WIDE_INT. */
1252 11640 : if (HWI_COMPUTABLE_MODE_P (mode)
1253 198 : && COMPARISON_P (op)
1254 0 : && (STORE_FLAG_VALUE & ~GET_MODE_MASK (mode)) == 0
1255 11640 : && TRULY_NOOP_TRUNCATION_MODES_P (mode, GET_MODE (op)))
1256 : {
1257 0 : temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1258 0 : if (temp)
1259 : return temp;
1260 : }
1261 :
1262 : /* A truncate of a memory is just loading the low part of the memory
1263 : if we are not changing the meaning of the address. */
1264 11640 : if (GET_CODE (op) == MEM
1265 321 : && !VECTOR_MODE_P (mode)
1266 196 : && !MEM_VOLATILE_P (op)
1267 11830 : && !mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op)))
1268 : {
1269 190 : temp = rtl_hooks.gen_lowpart_no_emit (mode, op);
1270 190 : if (temp)
1271 : return temp;
1272 : }
1273 :
1274 : /* Check for useless truncation. */
1275 11640 : if (GET_MODE (op) == mode)
1276 : return op;
1277 : break;
1278 :
1279 172525 : case FLOAT_TRUNCATE:
1280 : /* Check for useless truncation. */
1281 172525 : if (GET_MODE (op) == mode)
1282 : return op;
1283 :
1284 172525 : if (DECIMAL_FLOAT_MODE_P (mode))
1285 : break;
1286 :
1287 : /* (float_truncate:SF (float_extend:DF foo:SF)) = foo:SF. */
1288 172371 : if (GET_CODE (op) == FLOAT_EXTEND
1289 5 : && GET_MODE (XEXP (op, 0)) == mode)
1290 : return XEXP (op, 0);
1291 :
1292 : /* (float_truncate:SF (float_truncate:DF foo:XF))
1293 : = (float_truncate:SF foo:XF).
1294 : This may eliminate double rounding, so it is unsafe.
1295 :
1296 : (float_truncate:SF (float_extend:XF foo:DF))
1297 : = (float_truncate:SF foo:DF).
1298 :
1299 : (float_truncate:DF (float_extend:XF foo:SF))
1300 : = (float_extend:DF foo:SF). */
1301 172369 : if ((GET_CODE (op) == FLOAT_TRUNCATE
1302 145 : && flag_unsafe_math_optimizations)
1303 172365 : || GET_CODE (op) == FLOAT_EXTEND)
1304 14 : return simplify_gen_unary (GET_MODE_UNIT_SIZE (GET_MODE (XEXP (op, 0)))
1305 7 : > GET_MODE_UNIT_SIZE (mode)
1306 : ? FLOAT_TRUNCATE : FLOAT_EXTEND,
1307 : mode,
1308 14 : XEXP (op, 0), GET_MODE (XEXP (op, 0)));
1309 :
1310 : /* (float_truncate (float x)) is (float x) */
1311 172362 : if ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1312 172362 : && (flag_unsafe_math_optimizations
1313 1419 : || exact_int_to_float_conversion_p (op)))
1314 1418 : return simplify_gen_unary (GET_CODE (op), mode,
1315 : XEXP (op, 0),
1316 1418 : GET_MODE (XEXP (op, 0)));
1317 :
1318 : /* (float_truncate:SF (OP:DF (float_extend:DF foo:sf))) is
1319 : (OP:SF foo:SF) if OP is NEG or ABS. */
1320 170944 : if ((GET_CODE (op) == ABS
1321 170944 : || GET_CODE (op) == NEG)
1322 209 : && GET_CODE (XEXP (op, 0)) == FLOAT_EXTEND
1323 28 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == mode)
1324 28 : return simplify_gen_unary (GET_CODE (op), mode,
1325 28 : XEXP (XEXP (op, 0), 0), mode);
1326 :
1327 : /* (float_truncate:SF (subreg:DF (float_truncate:SF X) 0))
1328 : is (float_truncate:SF x). */
1329 170916 : if (GET_CODE (op) == SUBREG
1330 318 : && subreg_lowpart_p (op)
1331 171231 : && GET_CODE (SUBREG_REG (op)) == FLOAT_TRUNCATE)
1332 : return SUBREG_REG (op);
1333 : break;
1334 :
1335 592537 : case FLOAT_EXTEND:
1336 : /* Check for useless extension. */
1337 592537 : if (GET_MODE (op) == mode)
1338 : return op;
1339 :
1340 592537 : if (DECIMAL_FLOAT_MODE_P (mode))
1341 : break;
1342 :
1343 : /* (float_extend (float_extend x)) is (float_extend x)
1344 :
1345 : (float_extend (float x)) is (float x) assuming that double
1346 : rounding can't happen.
1347 : */
1348 592434 : if (GET_CODE (op) == FLOAT_EXTEND
1349 592434 : || ((GET_CODE (op) == FLOAT || GET_CODE (op) == UNSIGNED_FLOAT)
1350 1283 : && exact_int_to_float_conversion_p (op)))
1351 563 : return simplify_gen_unary (GET_CODE (op), mode,
1352 : XEXP (op, 0),
1353 563 : GET_MODE (XEXP (op, 0)));
1354 :
1355 : break;
1356 :
1357 275791 : case ABS:
1358 : /* (abs (neg <foo>)) -> (abs <foo>) */
1359 275791 : if (GET_CODE (op) == NEG)
1360 30 : return simplify_gen_unary (ABS, mode, XEXP (op, 0),
1361 30 : GET_MODE (XEXP (op, 0)));
1362 :
1363 : /* If the mode of the operand is VOIDmode (i.e. if it is ASM_OPERANDS),
1364 : do nothing. */
1365 275761 : if (GET_MODE (op) == VOIDmode)
1366 : break;
1367 :
1368 : /* If operand is something known to be positive, ignore the ABS. */
1369 275761 : if (val_signbit_known_clear_p (GET_MODE (op),
1370 : nonzero_bits (op, GET_MODE (op))))
1371 : return op;
1372 :
1373 : /* Using nonzero_bits doesn't (currently) work for modes wider than
1374 : HOST_WIDE_INT, so the following transformations help simplify
1375 : ABS for TImode and wider. */
1376 275517 : switch (GET_CODE (op))
1377 : {
1378 : case ABS:
1379 : case CLRSB:
1380 : case FFS:
1381 : case PARITY:
1382 : case POPCOUNT:
1383 : case SS_ABS:
1384 : return op;
1385 :
1386 0 : case LSHIFTRT:
1387 0 : if (CONST_INT_P (XEXP (op, 1))
1388 0 : && INTVAL (XEXP (op, 1)) > 0
1389 275517 : && is_a <scalar_int_mode> (mode, &int_mode)
1390 0 : && INTVAL (XEXP (op, 1)) < GET_MODE_PRECISION (int_mode))
1391 : return op;
1392 : break;
1393 :
1394 : default:
1395 : break;
1396 : }
1397 :
1398 : /* If operand is known to be only -1 or 0, convert ABS to NEG. */
1399 275517 : if (is_a <scalar_int_mode> (mode, &int_mode)
1400 36621 : && (num_sign_bit_copies (op, int_mode)
1401 36621 : == GET_MODE_PRECISION (int_mode)))
1402 74 : return gen_rtx_NEG (int_mode, op);
1403 :
1404 : break;
1405 :
1406 0 : case FFS:
1407 : /* (ffs (*_extend <X>)) = (*_extend (ffs <X>)). */
1408 0 : if (GET_CODE (op) == SIGN_EXTEND
1409 0 : || GET_CODE (op) == ZERO_EXTEND)
1410 : {
1411 0 : temp = simplify_gen_unary (FFS, GET_MODE (XEXP (op, 0)),
1412 0 : XEXP (op, 0), GET_MODE (XEXP (op, 0)));
1413 0 : return simplify_gen_unary (GET_CODE (op), mode, temp,
1414 0 : GET_MODE (temp));
1415 : }
1416 : break;
1417 :
1418 3443 : case POPCOUNT:
1419 3443 : switch (GET_CODE (op))
1420 : {
1421 0 : case BSWAP:
1422 0 : case BITREVERSE:
1423 : /* (popcount (bswap <X>)) = (popcount <X>). */
1424 0 : return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1425 0 : GET_MODE (XEXP (op, 0)));
1426 :
1427 44 : case ZERO_EXTEND:
1428 : /* (popcount (zero_extend <X>)) = (zero_extend (popcount <X>)). */
1429 88 : temp = simplify_gen_unary (POPCOUNT, GET_MODE (XEXP (op, 0)),
1430 44 : XEXP (op, 0), GET_MODE (XEXP (op, 0)));
1431 44 : return simplify_gen_unary (ZERO_EXTEND, mode, temp,
1432 44 : GET_MODE (temp));
1433 :
1434 0 : case ROTATE:
1435 0 : case ROTATERT:
1436 : /* Rotations don't affect popcount. */
1437 0 : if (!side_effects_p (XEXP (op, 1)))
1438 0 : return simplify_gen_unary (POPCOUNT, mode, XEXP (op, 0),
1439 0 : GET_MODE (XEXP (op, 0)));
1440 : break;
1441 :
1442 : default:
1443 : break;
1444 : }
1445 : break;
1446 :
1447 0 : case PARITY:
1448 0 : switch (GET_CODE (op))
1449 : {
1450 0 : case NOT:
1451 0 : case BSWAP:
1452 0 : case BITREVERSE:
1453 0 : return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1454 0 : GET_MODE (XEXP (op, 0)));
1455 :
1456 0 : case ZERO_EXTEND:
1457 0 : case SIGN_EXTEND:
1458 0 : temp = simplify_gen_unary (PARITY, GET_MODE (XEXP (op, 0)),
1459 0 : XEXP (op, 0), GET_MODE (XEXP (op, 0)));
1460 0 : return simplify_gen_unary (GET_CODE (op), mode, temp,
1461 0 : GET_MODE (temp));
1462 :
1463 0 : case ROTATE:
1464 0 : case ROTATERT:
1465 : /* Rotations don't affect parity. */
1466 0 : if (!side_effects_p (XEXP (op, 1)))
1467 0 : return simplify_gen_unary (PARITY, mode, XEXP (op, 0),
1468 0 : GET_MODE (XEXP (op, 0)));
1469 : break;
1470 :
1471 : case PARITY:
1472 : /* (parity (parity x)) -> parity (x). */
1473 : return op;
1474 :
1475 : default:
1476 : break;
1477 : }
1478 : break;
1479 :
1480 31170 : case BSWAP:
1481 : /* (bswap (bswap x)) -> x. */
1482 31170 : if (GET_CODE (op) == BSWAP)
1483 184 : return XEXP (op, 0);
1484 : break;
1485 :
1486 0 : case BITREVERSE:
1487 : /* (bitreverse (bitreverse x)) -> x. */
1488 0 : if (GET_CODE (op) == BITREVERSE)
1489 0 : return XEXP (op, 0);
1490 : break;
1491 :
1492 897484 : case FLOAT:
1493 : /* (float (sign_extend <X>)) = (float <X>). */
1494 897484 : if (GET_CODE (op) == SIGN_EXTEND)
1495 9255 : return simplify_gen_unary (FLOAT, mode, XEXP (op, 0),
1496 9255 : GET_MODE (XEXP (op, 0)));
1497 : break;
1498 :
1499 3211123 : case SIGN_EXTEND:
1500 : /* Check for useless extension. */
1501 3211123 : if (GET_MODE (op) == mode)
1502 : return op;
1503 :
1504 : /* (sign_extend (truncate (minus (label_ref L1) (label_ref L2))))
1505 : becomes just the MINUS if its mode is MODE. This allows
1506 : folding switch statements on machines using casesi (such as
1507 : the VAX). */
1508 3211083 : if (GET_CODE (op) == TRUNCATE
1509 62 : && GET_MODE (XEXP (op, 0)) == mode
1510 62 : && GET_CODE (XEXP (op, 0)) == MINUS
1511 0 : && GET_CODE (XEXP (XEXP (op, 0), 0)) == LABEL_REF
1512 0 : && GET_CODE (XEXP (XEXP (op, 0), 1)) == LABEL_REF)
1513 : return XEXP (op, 0);
1514 :
1515 : /* Extending a widening multiplication should be canonicalized to
1516 : a wider widening multiplication. */
1517 3211083 : if (GET_CODE (op) == MULT)
1518 : {
1519 66580 : rtx lhs = XEXP (op, 0);
1520 66580 : rtx rhs = XEXP (op, 1);
1521 66580 : enum rtx_code lcode = GET_CODE (lhs);
1522 66580 : enum rtx_code rcode = GET_CODE (rhs);
1523 :
1524 : /* Widening multiplies usually extend both operands, but sometimes
1525 : they use a shift to extract a portion of a register. */
1526 66580 : if ((lcode == SIGN_EXTEND
1527 66431 : || (lcode == ASHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1528 881 : && (rcode == SIGN_EXTEND
1529 837 : || (rcode == ASHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1530 : {
1531 165 : machine_mode lmode = GET_MODE (lhs);
1532 165 : machine_mode rmode = GET_MODE (rhs);
1533 165 : int bits;
1534 :
1535 165 : if (lcode == ASHIFTRT)
1536 : /* Number of bits not shifted off the end. */
1537 125 : bits = (GET_MODE_UNIT_PRECISION (lmode)
1538 125 : - INTVAL (XEXP (lhs, 1)));
1539 : else /* lcode == SIGN_EXTEND */
1540 : /* Size of inner mode. */
1541 80 : bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1542 :
1543 165 : if (rcode == ASHIFTRT)
1544 121 : bits += (GET_MODE_UNIT_PRECISION (rmode)
1545 121 : - INTVAL (XEXP (rhs, 1)));
1546 : else /* rcode == SIGN_EXTEND */
1547 88 : bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1548 :
1549 : /* We can only widen multiplies if the result is mathematiclly
1550 : equivalent. I.e. if overflow was impossible. */
1551 330 : if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1552 108 : return simplify_gen_binary
1553 108 : (MULT, mode,
1554 : simplify_gen_unary (SIGN_EXTEND, mode, lhs, lmode),
1555 108 : simplify_gen_unary (SIGN_EXTEND, mode, rhs, rmode));
1556 : }
1557 : }
1558 :
1559 : /* Check for a sign extension of a subreg of a promoted
1560 : variable, where the promotion is sign-extended, and the
1561 : target mode is the same as the variable's promotion. */
1562 3210975 : if (GET_CODE (op) == SUBREG
1563 159962 : && SUBREG_PROMOTED_VAR_P (op)
1564 3216577 : && SUBREG_PROMOTED_SIGNED_P (op))
1565 : {
1566 0 : rtx subreg = SUBREG_REG (op);
1567 0 : machine_mode subreg_mode = GET_MODE (subreg);
1568 0 : if (!paradoxical_subreg_p (mode, subreg_mode))
1569 : {
1570 0 : temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1571 0 : if (temp)
1572 : {
1573 : /* Preserve SUBREG_PROMOTED_VAR_P. */
1574 0 : if (partial_subreg_p (temp))
1575 : {
1576 0 : SUBREG_PROMOTED_VAR_P (temp) = 1;
1577 0 : SUBREG_PROMOTED_SET (temp, SRP_SIGNED);
1578 : }
1579 0 : return temp;
1580 : }
1581 : }
1582 : else
1583 : /* Sign-extending a sign-extended subreg. */
1584 0 : return simplify_gen_unary (SIGN_EXTEND, mode,
1585 0 : subreg, subreg_mode);
1586 : }
1587 :
1588 : /* (sign_extend:M (sign_extend:N <X>)) is (sign_extend:M <X>).
1589 : (sign_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1590 3210975 : if (GET_CODE (op) == SIGN_EXTEND || GET_CODE (op) == ZERO_EXTEND)
1591 : {
1592 19542 : gcc_assert (GET_MODE_UNIT_PRECISION (mode)
1593 : > GET_MODE_UNIT_PRECISION (GET_MODE (op)));
1594 6514 : return simplify_gen_unary (GET_CODE (op), mode, XEXP (op, 0),
1595 6514 : GET_MODE (XEXP (op, 0)));
1596 : }
1597 :
1598 : /* (sign_extend:M (ashiftrt:N (ashift <X> (const_int I)) (const_int I)))
1599 : is (sign_extend:M (subreg:O <X>)) if there is mode with
1600 : GET_MODE_BITSIZE (N) - I bits.
1601 : (sign_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1602 : is similarly (zero_extend:M (subreg:O <X>)). */
1603 3204461 : if ((GET_CODE (op) == ASHIFTRT || GET_CODE (op) == LSHIFTRT)
1604 88823 : && GET_CODE (XEXP (op, 0)) == ASHIFT
1605 3206955 : && is_a <scalar_int_mode> (mode, &int_mode)
1606 5062 : && CONST_INT_P (XEXP (op, 1))
1607 5062 : && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1608 3209399 : && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1609 4938 : GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1610 : {
1611 4938 : scalar_int_mode tmode;
1612 4938 : gcc_assert (GET_MODE_PRECISION (int_mode)
1613 : > GET_MODE_PRECISION (op_mode));
1614 4938 : if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1615 7308 : - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1616 : {
1617 2568 : rtx inner =
1618 2568 : rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1619 2568 : if (inner)
1620 2568 : return simplify_gen_unary (GET_CODE (op) == ASHIFTRT
1621 : ? SIGN_EXTEND : ZERO_EXTEND,
1622 2568 : int_mode, inner, tmode);
1623 : }
1624 : }
1625 :
1626 : /* (sign_extend:M (lshiftrt:N <X> (const_int I))) is better as
1627 : (zero_extend:M (lshiftrt:N <X> (const_int I))) if I is not 0. */
1628 3201893 : if (GET_CODE (op) == LSHIFTRT
1629 201 : && CONST_INT_P (XEXP (op, 1))
1630 201 : && XEXP (op, 1) != const0_rtx)
1631 201 : return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1632 :
1633 : /* (sign_extend:M (truncate:N (lshiftrt:O <X> (const_int I)))) where
1634 : I is GET_MODE_PRECISION(O) - GET_MODE_PRECISION(N), simplifies to
1635 : (ashiftrt:M <X> (const_int I)) if modes M and O are the same, and
1636 : (truncate:M (ashiftrt:O <X> (const_int I))) if M is narrower than
1637 : O, and (sign_extend:M (ashiftrt:O <X> (const_int I))) if M is
1638 : wider than O. */
1639 3201692 : if (GET_CODE (op) == TRUNCATE
1640 62 : && GET_CODE (XEXP (op, 0)) == LSHIFTRT
1641 0 : && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
1642 : {
1643 0 : scalar_int_mode m_mode, n_mode, o_mode;
1644 0 : rtx old_shift = XEXP (op, 0);
1645 0 : if (is_a <scalar_int_mode> (mode, &m_mode)
1646 0 : && is_a <scalar_int_mode> (GET_MODE (op), &n_mode)
1647 0 : && is_a <scalar_int_mode> (GET_MODE (old_shift), &o_mode)
1648 0 : && GET_MODE_PRECISION (o_mode) - GET_MODE_PRECISION (n_mode)
1649 0 : == INTVAL (XEXP (old_shift, 1)))
1650 : {
1651 0 : rtx new_shift = simplify_gen_binary (ASHIFTRT,
1652 : GET_MODE (old_shift),
1653 : XEXP (old_shift, 0),
1654 : XEXP (old_shift, 1));
1655 0 : if (GET_MODE_PRECISION (m_mode) > GET_MODE_PRECISION (o_mode))
1656 0 : return simplify_gen_unary (SIGN_EXTEND, mode, new_shift,
1657 0 : GET_MODE (new_shift));
1658 0 : if (mode != GET_MODE (new_shift))
1659 0 : return simplify_gen_unary (TRUNCATE, mode, new_shift,
1660 0 : GET_MODE (new_shift));
1661 : return new_shift;
1662 : }
1663 : }
1664 :
1665 : /* We can canonicalize SIGN_EXTEND (op) as ZERO_EXTEND (op) when
1666 : we know the sign bit of OP must be clear. */
1667 3201692 : if (val_signbit_known_clear_p (GET_MODE (op),
1668 3201692 : nonzero_bits (op, GET_MODE (op))))
1669 47606 : return simplify_gen_unary (ZERO_EXTEND, mode, op, GET_MODE (op));
1670 :
1671 : /* (sign_extend:DI (subreg:SI (ctz:DI ...))) is (ctz:DI ...). */
1672 3154086 : if (GET_CODE (op) == SUBREG
1673 159359 : && subreg_lowpart_p (op)
1674 159216 : && GET_MODE (SUBREG_REG (op)) == mode
1675 3288016 : && is_a <scalar_int_mode> (mode, &int_mode)
1676 140708 : && is_a <scalar_int_mode> (GET_MODE (op), &op_mode)
1677 140708 : && GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_WIDE_INT
1678 138901 : && GET_MODE_PRECISION (op_mode) < GET_MODE_PRECISION (int_mode)
1679 3292987 : && (nonzero_bits (SUBREG_REG (op), mode)
1680 138901 : & ~(GET_MODE_MASK (op_mode) >> 1)) == 0)
1681 6778 : return SUBREG_REG (op);
1682 :
1683 : #if defined(POINTERS_EXTEND_UNSIGNED)
1684 : /* As we do not know which address space the pointer is referring to,
1685 : we can do this only if the target does not support different pointer
1686 : or address modes depending on the address space. */
1687 3147308 : if (target_default_pointer_address_modes_p ()
1688 : && ! POINTERS_EXTEND_UNSIGNED
1689 : && mode == Pmode && GET_MODE (op) == ptr_mode
1690 : && (CONSTANT_P (op)
1691 : || (GET_CODE (op) == SUBREG
1692 : && REG_P (SUBREG_REG (op))
1693 : && REG_POINTER (SUBREG_REG (op))
1694 : && GET_MODE (SUBREG_REG (op)) == Pmode))
1695 : && !targetm.have_ptr_extend ())
1696 : {
1697 : temp
1698 : = convert_memory_address_addr_space_1 (Pmode, op,
1699 : ADDR_SPACE_GENERIC, false,
1700 : true);
1701 : if (temp)
1702 : return temp;
1703 : }
1704 : #endif
1705 : break;
1706 :
1707 11478483 : case ZERO_EXTEND:
1708 : /* Check for useless extension. */
1709 11478483 : if (GET_MODE (op) == mode)
1710 : return op;
1711 :
1712 : /* (zero_extend:SI (and:QI X (const))) -> (and:SI (lowpart:SI X) const)
1713 : where const does not sign bit set. */
1714 11478443 : if (GET_CODE (op) == AND
1715 116995 : && CONST_INT_P (XEXP (op, 1))
1716 94635 : && INTVAL (XEXP (op, 1)) > 0)
1717 : {
1718 90188 : rtx tem = rtl_hooks.gen_lowpart_no_emit (mode, XEXP (op, 0));
1719 90188 : if (tem)
1720 74688 : return simplify_gen_binary (AND, mode, tem, XEXP (op, 1));
1721 : }
1722 :
1723 : /* Check for a zero extension of a subreg of a promoted
1724 : variable, where the promotion is zero-extended, and the
1725 : target mode is the same as the variable's promotion. */
1726 11403755 : if (GET_CODE (op) == SUBREG
1727 1523948 : && SUBREG_PROMOTED_VAR_P (op)
1728 11404212 : && SUBREG_PROMOTED_UNSIGNED_P (op))
1729 : {
1730 457 : rtx subreg = SUBREG_REG (op);
1731 457 : machine_mode subreg_mode = GET_MODE (subreg);
1732 457 : if (!paradoxical_subreg_p (mode, subreg_mode))
1733 : {
1734 299 : temp = rtl_hooks.gen_lowpart_no_emit (mode, subreg);
1735 299 : if (temp)
1736 : {
1737 : /* Preserve SUBREG_PROMOTED_VAR_P. */
1738 299 : if (partial_subreg_p (temp))
1739 : {
1740 129 : SUBREG_PROMOTED_VAR_P (temp) = 1;
1741 129 : SUBREG_PROMOTED_SET (temp, SRP_UNSIGNED);
1742 : }
1743 299 : return temp;
1744 : }
1745 : }
1746 : else
1747 : /* Zero-extending a zero-extended subreg. */
1748 158 : return simplify_gen_unary (ZERO_EXTEND, mode,
1749 158 : subreg, subreg_mode);
1750 : }
1751 :
1752 : /* Extending a widening multiplication should be canonicalized to
1753 : a wider widening multiplication. */
1754 11403298 : if (GET_CODE (op) == MULT)
1755 : {
1756 168650 : rtx lhs = XEXP (op, 0);
1757 168650 : rtx rhs = XEXP (op, 1);
1758 168650 : enum rtx_code lcode = GET_CODE (lhs);
1759 168650 : enum rtx_code rcode = GET_CODE (rhs);
1760 :
1761 : /* Widening multiplies usually extend both operands, but sometimes
1762 : they use a shift to extract a portion of a register. */
1763 168650 : if ((lcode == ZERO_EXTEND
1764 167878 : || (lcode == LSHIFTRT && CONST_INT_P (XEXP (lhs, 1))))
1765 862 : && (rcode == ZERO_EXTEND
1766 750 : || (rcode == LSHIFTRT && CONST_INT_P (XEXP (rhs, 1)))))
1767 : {
1768 112 : machine_mode lmode = GET_MODE (lhs);
1769 112 : machine_mode rmode = GET_MODE (rhs);
1770 112 : int bits;
1771 :
1772 112 : if (lcode == LSHIFTRT)
1773 : /* Number of bits not shifted off the end. */
1774 0 : bits = (GET_MODE_UNIT_PRECISION (lmode)
1775 0 : - INTVAL (XEXP (lhs, 1)));
1776 : else /* lcode == ZERO_EXTEND */
1777 : /* Size of inner mode. */
1778 224 : bits = GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (lhs, 0)));
1779 :
1780 112 : if (rcode == LSHIFTRT)
1781 0 : bits += (GET_MODE_UNIT_PRECISION (rmode)
1782 0 : - INTVAL (XEXP (rhs, 1)));
1783 : else /* rcode == ZERO_EXTEND */
1784 224 : bits += GET_MODE_UNIT_PRECISION (GET_MODE (XEXP (rhs, 0)));
1785 :
1786 : /* We can only widen multiplies if the result is mathematiclly
1787 : equivalent. I.e. if overflow was impossible. */
1788 224 : if (bits <= GET_MODE_UNIT_PRECISION (GET_MODE (op)))
1789 112 : return simplify_gen_binary
1790 112 : (MULT, mode,
1791 : simplify_gen_unary (ZERO_EXTEND, mode, lhs, lmode),
1792 112 : simplify_gen_unary (ZERO_EXTEND, mode, rhs, rmode));
1793 : }
1794 : }
1795 :
1796 : /* (zero_extend:M (zero_extend:N <X>)) is (zero_extend:M <X>). */
1797 11403186 : if (GET_CODE (op) == ZERO_EXTEND)
1798 22288 : return simplify_gen_unary (ZERO_EXTEND, mode, XEXP (op, 0),
1799 22288 : GET_MODE (XEXP (op, 0)));
1800 :
1801 : /* (zero_extend:M (lshiftrt:N (ashift <X> (const_int I)) (const_int I)))
1802 : is (zero_extend:M (subreg:O <X>)) if there is mode with
1803 : GET_MODE_PRECISION (N) - I bits. */
1804 11380898 : if (GET_CODE (op) == LSHIFTRT
1805 76975 : && GET_CODE (XEXP (op, 0)) == ASHIFT
1806 11380921 : && is_a <scalar_int_mode> (mode, &int_mode)
1807 23 : && CONST_INT_P (XEXP (op, 1))
1808 18 : && XEXP (XEXP (op, 0), 1) == XEXP (op, 1)
1809 11380898 : && (op_mode = as_a <scalar_int_mode> (GET_MODE (op)),
1810 0 : GET_MODE_PRECISION (op_mode) > INTVAL (XEXP (op, 1))))
1811 : {
1812 0 : scalar_int_mode tmode;
1813 0 : if (int_mode_for_size (GET_MODE_PRECISION (op_mode)
1814 0 : - INTVAL (XEXP (op, 1)), 1).exists (&tmode))
1815 : {
1816 0 : rtx inner =
1817 0 : rtl_hooks.gen_lowpart_no_emit (tmode, XEXP (XEXP (op, 0), 0));
1818 0 : if (inner)
1819 0 : return simplify_gen_unary (ZERO_EXTEND, int_mode,
1820 0 : inner, tmode);
1821 : }
1822 : }
1823 :
1824 : /* (zero_extend:M (subreg:N <X:O>)) is <X:O> (for M == O) or
1825 : (zero_extend:M <X:O>), if X doesn't have any non-zero bits outside
1826 : of mode N. E.g.
1827 : (zero_extend:SI (subreg:QI (and:SI (reg:SI) (const_int 63)) 0)) is
1828 : (and:SI (reg:SI) (const_int 63)). */
1829 11380898 : if (partial_subreg_p (op)
1830 12864796 : && is_a <scalar_int_mode> (mode, &int_mode)
1831 1504861 : && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op)), &op0_mode)
1832 1504291 : && GET_MODE_PRECISION (op0_mode) <= HOST_BITS_PER_WIDE_INT
1833 1126298 : && GET_MODE_PRECISION (int_mode) >= GET_MODE_PRECISION (op0_mode)
1834 1105165 : && subreg_lowpart_p (op)
1835 2298918 : && (nonzero_bits (SUBREG_REG (op), op0_mode)
1836 775427 : & ~GET_MODE_MASK (GET_MODE (op))) == 0)
1837 : {
1838 20963 : if (GET_MODE_PRECISION (int_mode) == GET_MODE_PRECISION (op0_mode))
1839 13881 : return SUBREG_REG (op);
1840 7082 : return simplify_gen_unary (ZERO_EXTEND, int_mode, SUBREG_REG (op),
1841 7082 : op0_mode);
1842 : }
1843 :
1844 : /* (zero_extend:DI (subreg:SI (ctz:DI ...))) is (ctz:DI ...). */
1845 11359935 : if (GET_CODE (op) == SUBREG
1846 1502528 : && subreg_lowpart_p (op)
1847 859463 : && GET_MODE (SUBREG_REG (op)) == mode
1848 12130425 : && is_a <scalar_int_mode> (mode, &int_mode)
1849 770490 : && is_a <scalar_int_mode> (GET_MODE (op), &op_mode)
1850 770490 : && GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_WIDE_INT
1851 706640 : && GET_MODE_PRECISION (op_mode) < GET_MODE_PRECISION (int_mode)
1852 12066575 : && (nonzero_bits (SUBREG_REG (op), mode)
1853 706640 : & ~GET_MODE_MASK (op_mode)) == 0)
1854 0 : return SUBREG_REG (op);
1855 :
1856 : /* Trying to optimize:
1857 : (zero_extend:M (subreg:N (not:M (X:M)))) ->
1858 : (xor:M (zero_extend:M (subreg:N (X:M)), mask))
1859 : where the mask is GET_MODE_MASK (N).
1860 : For the cases when X:M doesn't have any non-zero bits
1861 : outside of mode N, (zero_extend:M (subreg:N (X:M))
1862 : will be simplified to just (X:M)
1863 : and whole optimization will be -> (xor:M (X:M, mask)). */
1864 11359935 : if (partial_subreg_p (op)
1865 1483898 : && GET_CODE (XEXP (op, 0)) == NOT
1866 1447 : && GET_MODE (XEXP (op, 0)) == mode
1867 1426 : && subreg_lowpart_p (op)
1868 11360339 : && HWI_COMPUTABLE_MODE_P (mode)
1869 411 : && is_a <scalar_int_mode> (GET_MODE (op), &op_mode)
1870 1502939 : && (nonzero_bits (XEXP (XEXP (op, 0), 0), mode)
1871 411 : & ~GET_MODE_MASK (op_mode)) == 0)
1872 : {
1873 7 : unsigned HOST_WIDE_INT mask = GET_MODE_MASK (op_mode);
1874 14 : return simplify_gen_binary (XOR, mode,
1875 7 : XEXP (XEXP (op, 0), 0),
1876 7 : gen_int_mode (mask, mode));
1877 : }
1878 :
1879 : #if defined(POINTERS_EXTEND_UNSIGNED)
1880 : /* As we do not know which address space the pointer is referring to,
1881 : we can do this only if the target does not support different pointer
1882 : or address modes depending on the address space. */
1883 11359928 : if (target_default_pointer_address_modes_p ()
1884 : && POINTERS_EXTEND_UNSIGNED > 0
1885 12694957 : && mode == Pmode && GET_MODE (op) == ptr_mode
1886 687 : && (CONSTANT_P (op)
1887 666 : || (GET_CODE (op) == SUBREG
1888 0 : && REG_P (SUBREG_REG (op))
1889 0 : && REG_POINTER (SUBREG_REG (op))
1890 0 : && GET_MODE (SUBREG_REG (op)) == Pmode))
1891 11359949 : && !targetm.have_ptr_extend ())
1892 : {
1893 21 : temp
1894 21 : = convert_memory_address_addr_space_1 (Pmode, op,
1895 : ADDR_SPACE_GENERIC, false,
1896 : true);
1897 21 : if (temp)
1898 : return temp;
1899 : }
1900 : #endif
1901 : break;
1902 :
1903 : default:
1904 : break;
1905 : }
1906 :
1907 19050720 : if (VECTOR_MODE_P (mode)
1908 1517703 : && vec_duplicate_p (op, &elt)
1909 20574200 : && code != VEC_DUPLICATE)
1910 : {
1911 5773 : if (code == SIGN_EXTEND || code == ZERO_EXTEND)
1912 : /* Enforce a canonical order of VEC_DUPLICATE wrt other unary
1913 : operations by promoting VEC_DUPLICATE to the root of the expression
1914 : (as far as possible). */
1915 4695 : temp = simplify_gen_unary (code, GET_MODE_INNER (mode),
1916 9390 : elt, GET_MODE_INNER (GET_MODE (op)));
1917 : else
1918 : /* Try applying the operator to ELT and see if that simplifies.
1919 : We can duplicate the result if so.
1920 :
1921 : The reason we traditionally haven't used simplify_gen_unary
1922 : for these codes is that it didn't necessarily seem to be a
1923 : win to convert things like:
1924 :
1925 : (neg:V (vec_duplicate:V (reg:S R)))
1926 :
1927 : to:
1928 :
1929 : (vec_duplicate:V (neg:S (reg:S R)))
1930 :
1931 : The first might be done entirely in vector registers while the
1932 : second might need a move between register files.
1933 :
1934 : However, there also cases where promoting the vec_duplicate is
1935 : more efficient, and there is definite value in having a canonical
1936 : form when matching instruction patterns. We should consider
1937 : extending the simplify_gen_unary code above to more cases. */
1938 1078 : temp = simplify_unary_operation (code, GET_MODE_INNER (mode),
1939 2156 : elt, GET_MODE_INNER (GET_MODE (op)));
1940 5773 : if (temp)
1941 5281 : return gen_vec_duplicate (mode, temp);
1942 : }
1943 :
1944 : return 0;
1945 : }
1946 :
1947 : /* Try to compute the value of a unary operation CODE whose output mode is to
1948 : be MODE with input operand OP whose mode was originally OP_MODE.
1949 : Return zero if the value cannot be computed. */
1950 : rtx
1951 27340050 : simplify_const_unary_operation (enum rtx_code code, machine_mode mode,
1952 : rtx op, machine_mode op_mode)
1953 : {
1954 27340050 : scalar_int_mode result_mode;
1955 :
1956 27340050 : if (code == VEC_DUPLICATE)
1957 : {
1958 1626840 : gcc_assert (VECTOR_MODE_P (mode));
1959 1626840 : if (GET_MODE (op) != VOIDmode)
1960 : {
1961 545109 : if (!VECTOR_MODE_P (GET_MODE (op)))
1962 1077042 : gcc_assert (GET_MODE_INNER (mode) == GET_MODE (op));
1963 : else
1964 19764 : gcc_assert (GET_MODE_INNER (mode) == GET_MODE_INNER
1965 : (GET_MODE (op)));
1966 : }
1967 1626840 : if (CONST_SCALAR_INT_P (op) || CONST_DOUBLE_AS_FLOAT_P (op))
1968 1119553 : return gen_const_vec_duplicate (mode, op);
1969 507287 : if (GET_CODE (op) == CONST_VECTOR
1970 507287 : && (CONST_VECTOR_DUPLICATE_P (op)
1971 : || CONST_VECTOR_NUNITS (op).is_constant ()))
1972 : {
1973 755 : unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op)
1974 755 : ? CONST_VECTOR_NPATTERNS (op)
1975 1509 : : CONST_VECTOR_NUNITS (op).to_constant ());
1976 2265 : gcc_assert (multiple_p (GET_MODE_NUNITS (mode), npatterns));
1977 755 : rtx_vector_builder builder (mode, npatterns, 1);
1978 3130 : for (unsigned i = 0; i < npatterns; i++)
1979 2375 : builder.quick_push (CONST_VECTOR_ELT (op, i));
1980 755 : return builder.build ();
1981 755 : }
1982 : }
1983 :
1984 24979844 : if (VECTOR_MODE_P (mode)
1985 1557938 : && GET_CODE (op) == CONST_VECTOR
1986 26319720 : && known_eq (GET_MODE_NUNITS (mode), CONST_VECTOR_NUNITS (op)))
1987 : {
1988 33326 : gcc_assert (GET_MODE (op) == op_mode);
1989 :
1990 33326 : rtx_vector_builder builder;
1991 33326 : if (!builder.new_unary_operation (mode, op, false))
1992 : return 0;
1993 :
1994 33326 : unsigned int count = builder.encoded_nelts ();
1995 147675 : for (unsigned int i = 0; i < count; i++)
1996 : {
1997 229736 : rtx x = simplify_unary_operation (code, GET_MODE_INNER (mode),
1998 : CONST_VECTOR_ELT (op, i),
1999 229736 : GET_MODE_INNER (op_mode));
2000 114868 : if (!x || !valid_for_const_vector_p (mode, x))
2001 519 : return 0;
2002 114349 : builder.quick_push (x);
2003 : }
2004 32807 : return builder.build ();
2005 33326 : }
2006 :
2007 : /* The order of these tests is critical so that, for example, we don't
2008 : check the wrong mode (input vs. output) for a conversion operation,
2009 : such as FIX. At some point, this should be simplified. */
2010 :
2011 26186416 : if (code == FLOAT && CONST_SCALAR_INT_P (op))
2012 : {
2013 6916 : REAL_VALUE_TYPE d;
2014 :
2015 6916 : if (op_mode == VOIDmode)
2016 : {
2017 : /* CONST_INT have VOIDmode as the mode. We assume that all
2018 : the bits of the constant are significant, though, this is
2019 : a dangerous assumption as many times CONST_INTs are
2020 : created and used with garbage in the bits outside of the
2021 : precision of the implied mode of the const_int. */
2022 64 : op_mode = MAX_MODE_INT;
2023 : }
2024 :
2025 6916 : real_from_integer (&d, mode, rtx_mode_t (op, op_mode), SIGNED);
2026 :
2027 : /* Avoid the folding if flag_signaling_nans is on and
2028 : operand is a signaling NaN. */
2029 6916 : if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2030 : return 0;
2031 :
2032 6916 : d = real_value_truncate (mode, d);
2033 :
2034 : /* Avoid the folding if flag_rounding_math is on and the
2035 : conversion is not exact. */
2036 6916 : if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2037 : {
2038 1011 : bool fail = false;
2039 1011 : wide_int w = real_to_integer (&d, &fail,
2040 : GET_MODE_PRECISION
2041 1011 : (as_a <scalar_int_mode> (op_mode)));
2042 2022 : if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
2043 905 : return 0;
2044 1011 : }
2045 :
2046 6011 : return const_double_from_real_value (d, mode);
2047 : }
2048 26179500 : else if (code == UNSIGNED_FLOAT && CONST_SCALAR_INT_P (op))
2049 : {
2050 2138 : REAL_VALUE_TYPE d;
2051 :
2052 2138 : if (op_mode == VOIDmode)
2053 : {
2054 : /* CONST_INT have VOIDmode as the mode. We assume that all
2055 : the bits of the constant are significant, though, this is
2056 : a dangerous assumption as many times CONST_INTs are
2057 : created and used with garbage in the bits outside of the
2058 : precision of the implied mode of the const_int. */
2059 8 : op_mode = MAX_MODE_INT;
2060 : }
2061 :
2062 2138 : real_from_integer (&d, mode, rtx_mode_t (op, op_mode), UNSIGNED);
2063 :
2064 : /* Avoid the folding if flag_signaling_nans is on and
2065 : operand is a signaling NaN. */
2066 2138 : if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2067 : return 0;
2068 :
2069 2138 : d = real_value_truncate (mode, d);
2070 :
2071 : /* Avoid the folding if flag_rounding_math is on and the
2072 : conversion is not exact. */
2073 2138 : if (HONOR_SIGN_DEPENDENT_ROUNDING (mode))
2074 : {
2075 16 : bool fail = false;
2076 16 : wide_int w = real_to_integer (&d, &fail,
2077 : GET_MODE_PRECISION
2078 16 : (as_a <scalar_int_mode> (op_mode)));
2079 28 : if (fail || wi::ne_p (w, wide_int (rtx_mode_t (op, op_mode))))
2080 16 : return 0;
2081 16 : }
2082 :
2083 2122 : return const_double_from_real_value (d, mode);
2084 : }
2085 :
2086 26177362 : if (CONST_SCALAR_INT_P (op) && is_a <scalar_int_mode> (mode, &result_mode))
2087 : {
2088 3455031 : unsigned int width = GET_MODE_PRECISION (result_mode);
2089 3455031 : if (width > MAX_BITSIZE_MODE_ANY_INT)
2090 : return 0;
2091 :
2092 3455031 : wide_int result;
2093 3455031 : scalar_int_mode imode = (op_mode == VOIDmode
2094 3455031 : ? result_mode
2095 3454812 : : as_a <scalar_int_mode> (op_mode));
2096 3455031 : rtx_mode_t op0 = rtx_mode_t (op, imode);
2097 3455031 : int int_value;
2098 :
2099 : #if TARGET_SUPPORTS_WIDE_INT == 0
2100 : /* This assert keeps the simplification from producing a result
2101 : that cannot be represented in a CONST_DOUBLE but a lot of
2102 : upstream callers expect that this function never fails to
2103 : simplify something and so you if you added this to the test
2104 : above the code would die later anyway. If this assert
2105 : happens, you just need to make the port support wide int. */
2106 : gcc_assert (width <= HOST_BITS_PER_DOUBLE_INT);
2107 : #endif
2108 :
2109 3455031 : switch (code)
2110 : {
2111 180178 : case NOT:
2112 180178 : result = wi::bit_not (op0);
2113 180178 : break;
2114 :
2115 1949885 : case NEG:
2116 1949885 : result = wi::neg (op0);
2117 1949885 : break;
2118 :
2119 7324 : case ABS:
2120 7324 : result = wi::abs (op0);
2121 7324 : break;
2122 :
2123 0 : case FFS:
2124 0 : result = wi::shwi (wi::ffs (op0), result_mode);
2125 0 : break;
2126 :
2127 168 : case CLZ:
2128 168 : if (wi::ne_p (op0, 0))
2129 38 : int_value = wi::clz (op0);
2130 260 : else if (! CLZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2131 : return NULL_RTX;
2132 38 : result = wi::shwi (int_value, result_mode);
2133 38 : break;
2134 :
2135 0 : case CLRSB:
2136 0 : result = wi::shwi (wi::clrsb (op0), result_mode);
2137 0 : break;
2138 :
2139 0 : case CTZ:
2140 0 : if (wi::ne_p (op0, 0))
2141 0 : int_value = wi::ctz (op0);
2142 0 : else if (! CTZ_DEFINED_VALUE_AT_ZERO (imode, int_value))
2143 : return NULL_RTX;
2144 0 : result = wi::shwi (int_value, result_mode);
2145 0 : break;
2146 :
2147 160 : case POPCOUNT:
2148 160 : result = wi::shwi (wi::popcount (op0), result_mode);
2149 160 : break;
2150 :
2151 0 : case PARITY:
2152 0 : result = wi::shwi (wi::parity (op0), result_mode);
2153 0 : break;
2154 :
2155 2017 : case BSWAP:
2156 2017 : result = wi::bswap (op0);
2157 2017 : break;
2158 :
2159 0 : case BITREVERSE:
2160 0 : result = wi::bitreverse (op0);
2161 0 : break;
2162 :
2163 1137856 : case TRUNCATE:
2164 1137856 : case ZERO_EXTEND:
2165 1137856 : result = wide_int::from (op0, width, UNSIGNED);
2166 1137856 : break;
2167 :
2168 14342 : case US_TRUNCATE:
2169 14342 : case SS_TRUNCATE:
2170 14342 : {
2171 14342 : signop sgn = code == US_TRUNCATE ? UNSIGNED : SIGNED;
2172 14342 : wide_int nmax
2173 14342 : = wide_int::from (wi::max_value (width, sgn),
2174 28684 : GET_MODE_PRECISION (imode), sgn);
2175 14342 : wide_int nmin
2176 14342 : = wide_int::from (wi::min_value (width, sgn),
2177 28684 : GET_MODE_PRECISION (imode), sgn);
2178 14342 : result = wi::min (wi::max (op0, nmin, sgn), nmax, sgn);
2179 14342 : result = wide_int::from (result, width, sgn);
2180 14342 : break;
2181 14342 : }
2182 163101 : case SIGN_EXTEND:
2183 163101 : result = wide_int::from (op0, width, SIGNED);
2184 163101 : break;
2185 :
2186 0 : case SS_NEG:
2187 0 : if (wi::only_sign_bit_p (op0))
2188 0 : result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2189 : else
2190 0 : result = wi::neg (op0);
2191 : break;
2192 :
2193 0 : case SS_ABS:
2194 0 : if (wi::only_sign_bit_p (op0))
2195 0 : result = wi::max_value (GET_MODE_PRECISION (imode), SIGNED);
2196 : else
2197 0 : result = wi::abs (op0);
2198 : break;
2199 :
2200 : case SQRT:
2201 : default:
2202 : return 0;
2203 : }
2204 :
2205 3454901 : return immed_wide_int_const (result, result_mode);
2206 3455031 : }
2207 :
2208 22722331 : else if (CONST_DOUBLE_AS_FLOAT_P (op)
2209 420620 : && SCALAR_FLOAT_MODE_P (mode)
2210 418592 : && SCALAR_FLOAT_MODE_P (GET_MODE (op)))
2211 : {
2212 418592 : REAL_VALUE_TYPE d = *CONST_DOUBLE_REAL_VALUE (op);
2213 418592 : switch (code)
2214 : {
2215 : case SQRT:
2216 : return 0;
2217 350 : case ABS:
2218 350 : d = real_value_abs (&d);
2219 350 : break;
2220 15687 : case NEG:
2221 15687 : d = real_value_negate (&d);
2222 15687 : break;
2223 2286 : case FLOAT_TRUNCATE:
2224 : /* Don't perform the operation if flag_signaling_nans is on
2225 : and the operand is a signaling NaN. */
2226 2286 : if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2227 : return NULL_RTX;
2228 : /* Or if flag_rounding_math is on and the truncation is not
2229 : exact. */
2230 2286 : if (HONOR_SIGN_DEPENDENT_ROUNDING (mode)
2231 2286 : && !exact_real_truncate (mode, &d))
2232 231 : return NULL_RTX;
2233 2055 : d = real_value_truncate (mode, d);
2234 2055 : break;
2235 393736 : case FLOAT_EXTEND:
2236 : /* Don't perform the operation if flag_signaling_nans is on
2237 : and the operand is a signaling NaN. */
2238 393736 : if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2239 : return NULL_RTX;
2240 : /* All this does is change the mode, unless changing
2241 : mode class. */
2242 393734 : if (GET_MODE_CLASS (mode) != GET_MODE_CLASS (GET_MODE (op)))
2243 0 : real_convert (&d, mode, &d);
2244 : break;
2245 0 : case FIX:
2246 : /* Don't perform the operation if flag_signaling_nans is on
2247 : and the operand is a signaling NaN. */
2248 0 : if (HONOR_SNANS (mode) && REAL_VALUE_ISSIGNALING_NAN (d))
2249 : return NULL_RTX;
2250 0 : real_arithmetic (&d, FIX_TRUNC_EXPR, &d, NULL);
2251 0 : break;
2252 5928 : case NOT:
2253 5928 : {
2254 5928 : long tmp[4];
2255 5928 : int i;
2256 :
2257 5928 : real_to_target (tmp, &d, GET_MODE (op));
2258 29640 : for (i = 0; i < 4; i++)
2259 23712 : tmp[i] = ~tmp[i];
2260 5928 : real_from_target (&d, tmp, mode);
2261 5928 : break;
2262 : }
2263 0 : default:
2264 0 : gcc_unreachable ();
2265 : }
2266 417754 : return const_double_from_real_value (d, mode);
2267 : }
2268 2028 : else if (CONST_DOUBLE_AS_FLOAT_P (op)
2269 2028 : && SCALAR_FLOAT_MODE_P (GET_MODE (op))
2270 22305767 : && is_int_mode (mode, &result_mode))
2271 : {
2272 2028 : unsigned int width = GET_MODE_PRECISION (result_mode);
2273 2028 : if (width > MAX_BITSIZE_MODE_ANY_INT)
2274 : return 0;
2275 :
2276 : /* Although the overflow semantics of RTL's FIX and UNSIGNED_FIX
2277 : operators are intentionally left unspecified (to ease implementation
2278 : by target backends), for consistency, this routine implements the
2279 : same semantics for constant folding as used by the middle-end. */
2280 :
2281 : /* This was formerly used only for non-IEEE float.
2282 : eggert@twinsun.com says it is safe for IEEE also. */
2283 2028 : REAL_VALUE_TYPE t;
2284 2028 : const REAL_VALUE_TYPE *x = CONST_DOUBLE_REAL_VALUE (op);
2285 2028 : wide_int wmax, wmin;
2286 : /* This is part of the abi to real_to_integer, but we check
2287 : things before making this call. */
2288 2028 : bool fail;
2289 :
2290 2028 : switch (code)
2291 : {
2292 2020 : case FIX:
2293 : /* According to IEEE standard, for conversions from floating point to
2294 : integer. When a NaN or infinite operand cannot be represented in
2295 : the destination format and this cannot otherwise be indicated, the
2296 : invalid operation exception shall be signaled. When a numeric
2297 : operand would convert to an integer outside the range of the
2298 : destination format, the invalid operation exception shall be
2299 : signaled if this situation cannot otherwise be indicated. */
2300 2020 : if (REAL_VALUE_ISNAN (*x))
2301 955 : return flag_trapping_math ? NULL_RTX : const0_rtx;
2302 :
2303 1065 : if (REAL_VALUE_ISINF (*x) && flag_trapping_math)
2304 : return NULL_RTX;
2305 :
2306 : /* Test against the signed upper bound. */
2307 105 : wmax = wi::max_value (width, SIGNED);
2308 105 : real_from_integer (&t, VOIDmode, wmax, SIGNED);
2309 105 : if (real_less (&t, x))
2310 3 : return (flag_trapping_math
2311 3 : ? NULL_RTX : immed_wide_int_const (wmax, mode));
2312 :
2313 : /* Test against the signed lower bound. */
2314 102 : wmin = wi::min_value (width, SIGNED);
2315 102 : real_from_integer (&t, VOIDmode, wmin, SIGNED);
2316 102 : if (real_less (x, &t))
2317 8 : return immed_wide_int_const (wmin, mode);
2318 :
2319 94 : return immed_wide_int_const (real_to_integer (x, &fail, width),
2320 : mode);
2321 :
2322 8 : case UNSIGNED_FIX:
2323 8 : if (REAL_VALUE_ISNAN (*x) || REAL_VALUE_NEGATIVE (*x))
2324 6 : return flag_trapping_math ? NULL_RTX : const0_rtx;
2325 :
2326 2 : if (REAL_VALUE_ISINF (*x) && flag_trapping_math)
2327 : return NULL_RTX;
2328 :
2329 : /* Test against the unsigned upper bound. */
2330 0 : wmax = wi::max_value (width, UNSIGNED);
2331 0 : real_from_integer (&t, VOIDmode, wmax, UNSIGNED);
2332 0 : if (real_less (&t, x))
2333 0 : return (flag_trapping_math
2334 0 : ? NULL_RTX : immed_wide_int_const (wmax, mode));
2335 :
2336 0 : return immed_wide_int_const (real_to_integer (x, &fail, width),
2337 : mode);
2338 :
2339 0 : default:
2340 0 : gcc_unreachable ();
2341 : }
2342 2028 : }
2343 :
2344 : /* Handle polynomial integers. */
2345 : else if (CONST_POLY_INT_P (op))
2346 : {
2347 : poly_wide_int result;
2348 : switch (code)
2349 : {
2350 : case NEG:
2351 : result = -const_poly_int_value (op);
2352 : break;
2353 :
2354 : case NOT:
2355 : result = ~const_poly_int_value (op);
2356 : break;
2357 :
2358 : default:
2359 : return NULL_RTX;
2360 : }
2361 : return immed_wide_int_const (result, mode);
2362 : }
2363 :
2364 : return NULL_RTX;
2365 : }
2366 :
2367 : /* Subroutine of simplify_binary_operation to simplify a binary operation
2368 : CODE that can commute with byte swapping, with result mode MODE and
2369 : operating on OP0 and OP1. CODE is currently one of AND, IOR or XOR.
2370 : Return zero if no simplification or canonicalization is possible. */
2371 :
2372 : rtx
2373 38046819 : simplify_context::simplify_byte_swapping_operation (rtx_code code,
2374 : machine_mode mode,
2375 : rtx op0, rtx op1)
2376 : {
2377 38046819 : rtx tem;
2378 :
2379 : /* (op (bswap x) C1)) -> (bswap (op x C2)) with C2 swapped. */
2380 38046819 : if (GET_CODE (op0) == BSWAP && CONST_SCALAR_INT_P (op1))
2381 : {
2382 506 : tem = simplify_gen_binary (code, mode, XEXP (op0, 0),
2383 : simplify_gen_unary (BSWAP, mode, op1, mode));
2384 506 : return simplify_gen_unary (BSWAP, mode, tem, mode);
2385 : }
2386 :
2387 : /* (op (bswap x) (bswap y)) -> (bswap (op x y)). */
2388 38046313 : if (GET_CODE (op0) == BSWAP && GET_CODE (op1) == BSWAP)
2389 : {
2390 0 : tem = simplify_gen_binary (code, mode, XEXP (op0, 0), XEXP (op1, 0));
2391 0 : return simplify_gen_unary (BSWAP, mode, tem, mode);
2392 : }
2393 :
2394 : return NULL_RTX;
2395 : }
2396 :
2397 : /* Subroutine of simplify_binary_operation to simplify a commutative,
2398 : associative binary operation CODE with result mode MODE, operating
2399 : on OP0 and OP1. CODE is currently one of PLUS, MULT, AND, IOR, XOR,
2400 : SMIN, SMAX, UMIN or UMAX. Return zero if no simplification or
2401 : canonicalization is possible. */
2402 :
2403 : rtx
2404 47971345 : simplify_context::simplify_associative_operation (rtx_code code,
2405 : machine_mode mode,
2406 : rtx op0, rtx op1)
2407 : {
2408 47971345 : rtx tem;
2409 :
2410 : /* Normally expressions simplified by simplify-rtx.cc are combined
2411 : at most from a few machine instructions and therefore the
2412 : expressions should be fairly small. During var-tracking
2413 : we can see arbitrarily large expressions though and reassociating
2414 : those can be quadratic, so punt after encountering max_assoc_count
2415 : simplify_associative_operation calls during outermost simplify_*
2416 : call. */
2417 47971345 : if (++assoc_count >= max_assoc_count)
2418 : return NULL_RTX;
2419 :
2420 : /* Linearize the operator to the left. */
2421 47966895 : if (GET_CODE (op1) == code)
2422 : {
2423 : /* "(a op b) op (c op d)" becomes "((a op b) op c) op d)". */
2424 21500 : if (GET_CODE (op0) == code)
2425 : {
2426 5084 : tem = simplify_gen_binary (code, mode, op0, XEXP (op1, 0));
2427 5084 : return simplify_gen_binary (code, mode, tem, XEXP (op1, 1));
2428 : }
2429 :
2430 : /* "a op (b op c)" becomes "(b op c) op a". */
2431 16416 : if (! swap_commutative_operands_p (op1, op0))
2432 16416 : return simplify_gen_binary (code, mode, op1, op0);
2433 :
2434 : std::swap (op0, op1);
2435 : }
2436 :
2437 47945395 : if (GET_CODE (op0) == code)
2438 : {
2439 : /* Canonicalize "(x op c) op y" as "(x op y) op c". */
2440 1420318 : if (swap_commutative_operands_p (XEXP (op0, 1), op1))
2441 : {
2442 291639 : tem = simplify_gen_binary (code, mode, XEXP (op0, 0), op1);
2443 291639 : return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2444 : }
2445 :
2446 : /* Attempt to simplify "(a op b) op c" as "a op (b op c)". */
2447 1128679 : tem = simplify_binary_operation (code, mode, XEXP (op0, 1), op1);
2448 1128679 : if (tem != 0)
2449 83697 : return simplify_gen_binary (code, mode, XEXP (op0, 0), tem);
2450 :
2451 : /* Attempt to simplify "(a op b) op c" as "(a op c) op b". */
2452 1044982 : tem = simplify_binary_operation (code, mode, XEXP (op0, 0), op1);
2453 1044982 : if (tem != 0)
2454 35226 : return simplify_gen_binary (code, mode, tem, XEXP (op0, 1));
2455 : }
2456 :
2457 : return 0;
2458 : }
2459 :
2460 : /* If COMPARISON can be treated as an unsigned comparison, return a mask
2461 : that represents it (8 if it includes <, 4 if it includes > and 2
2462 : if it includes ==). Return 0 otherwise. */
2463 : static int
2464 18866 : unsigned_comparison_to_mask (rtx_code comparison)
2465 : {
2466 0 : switch (comparison)
2467 : {
2468 : case LTU:
2469 : return 8;
2470 : case GTU:
2471 : return 4;
2472 : case EQ:
2473 : return 2;
2474 :
2475 : case LEU:
2476 : return 10;
2477 : case GEU:
2478 : return 6;
2479 :
2480 : case NE:
2481 : return 12;
2482 :
2483 : default:
2484 : return 0;
2485 : }
2486 : }
2487 :
2488 : /* Reverse the mapping in unsigned_comparison_to_mask, going from masks
2489 : to comparisons. */
2490 : static rtx_code
2491 6596 : mask_to_unsigned_comparison (int mask)
2492 : {
2493 6596 : switch (mask)
2494 : {
2495 : case 8:
2496 : return LTU;
2497 160 : case 4:
2498 160 : return GTU;
2499 2590 : case 2:
2500 2590 : return EQ;
2501 :
2502 160 : case 10:
2503 160 : return LEU;
2504 160 : case 6:
2505 160 : return GEU;
2506 :
2507 3366 : case 12:
2508 3366 : return NE;
2509 :
2510 0 : default:
2511 0 : gcc_unreachable ();
2512 : }
2513 : }
2514 :
2515 : /* Return a mask describing the COMPARISON. */
2516 : static int
2517 2666 : comparison_to_mask (enum rtx_code comparison)
2518 : {
2519 2666 : switch (comparison)
2520 : {
2521 : case LT:
2522 : return 8;
2523 472 : case GT:
2524 472 : return 4;
2525 419 : case EQ:
2526 419 : return 2;
2527 19 : case UNORDERED:
2528 19 : return 1;
2529 :
2530 0 : case LTGT:
2531 0 : return 12;
2532 441 : case LE:
2533 441 : return 10;
2534 441 : case GE:
2535 441 : return 6;
2536 0 : case UNLT:
2537 0 : return 9;
2538 0 : case UNGT:
2539 0 : return 5;
2540 0 : case UNEQ:
2541 0 : return 3;
2542 :
2543 0 : case ORDERED:
2544 0 : return 14;
2545 400 : case NE:
2546 400 : return 13;
2547 0 : case UNLE:
2548 0 : return 11;
2549 0 : case UNGE:
2550 0 : return 7;
2551 :
2552 0 : default:
2553 0 : gcc_unreachable ();
2554 : }
2555 : }
2556 :
2557 : /* Return a comparison corresponding to the MASK. */
2558 : static enum rtx_code
2559 1014 : mask_to_comparison (int mask)
2560 : {
2561 1014 : switch (mask)
2562 : {
2563 : case 8:
2564 : return LT;
2565 : case 4:
2566 : return GT;
2567 : case 2:
2568 : return EQ;
2569 : case 1:
2570 : return UNORDERED;
2571 :
2572 : case 12:
2573 : return LTGT;
2574 : case 10:
2575 : return LE;
2576 : case 6:
2577 : return GE;
2578 : case 9:
2579 : return UNLT;
2580 : case 5:
2581 : return UNGT;
2582 : case 3:
2583 : return UNEQ;
2584 :
2585 : case 14:
2586 : return ORDERED;
2587 : case 13:
2588 : return NE;
2589 : case 11:
2590 : return UNLE;
2591 : case 7:
2592 : return UNGE;
2593 :
2594 0 : default:
2595 0 : gcc_unreachable ();
2596 : }
2597 : }
2598 :
2599 : /* Canonicalize RES, a scalar const0_rtx/const_true_rtx to the right
2600 : false/true value of comparison with MODE where comparison operands
2601 : have CMP_MODE. */
2602 :
2603 : static rtx
2604 879207 : relational_result (machine_mode mode, machine_mode cmp_mode, rtx res)
2605 : {
2606 879207 : if (SCALAR_FLOAT_MODE_P (mode))
2607 : {
2608 190 : if (res == const0_rtx)
2609 186 : return CONST0_RTX (mode);
2610 : #ifdef FLOAT_STORE_FLAG_VALUE
2611 : REAL_VALUE_TYPE val = FLOAT_STORE_FLAG_VALUE (mode);
2612 : return const_double_from_real_value (val, mode);
2613 : #else
2614 : return NULL_RTX;
2615 : #endif
2616 : }
2617 879017 : if (VECTOR_MODE_P (mode))
2618 : {
2619 369 : if (res == const0_rtx)
2620 63 : return CONST0_RTX (mode);
2621 : #ifdef VECTOR_STORE_FLAG_VALUE
2622 306 : rtx val = VECTOR_STORE_FLAG_VALUE (mode);
2623 306 : if (val == NULL_RTX)
2624 : return NULL_RTX;
2625 306 : if (val == const1_rtx)
2626 0 : return CONST1_RTX (mode);
2627 :
2628 306 : return gen_const_vec_duplicate (mode, val);
2629 : #else
2630 : return NULL_RTX;
2631 : #endif
2632 : }
2633 : /* For vector comparison with scalar int result, it is unknown
2634 : if the target means here a comparison into an integral bitmask,
2635 : or comparison where all comparisons true mean const_true_rtx
2636 : whole result, or where any comparisons true mean const_true_rtx
2637 : whole result. For const0_rtx all the cases are the same. */
2638 878648 : if (VECTOR_MODE_P (cmp_mode)
2639 0 : && SCALAR_INT_MODE_P (mode)
2640 0 : && res == const_true_rtx)
2641 0 : return NULL_RTX;
2642 :
2643 : return res;
2644 : }
2645 :
2646 : /* Simplify a logical operation CODE with result mode MODE, operating on OP0
2647 : and OP1, in the case where both are relational operations. Assume that
2648 : OP0 is inverted if INVERT0_P is true.
2649 :
2650 : Return 0 if no such simplification is possible. */
2651 : rtx
2652 14083490 : simplify_context::simplify_logical_relational_operation (rtx_code code,
2653 : machine_mode mode,
2654 : rtx op0, rtx op1,
2655 : bool invert0_p)
2656 : {
2657 14083490 : if (!(COMPARISON_P (op0) && COMPARISON_P (op1)))
2658 : return 0;
2659 :
2660 21389 : if (!(rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2661 9859 : && rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))))
2662 2097 : return 0;
2663 :
2664 9433 : if (side_effects_p (op0))
2665 : return 0;
2666 :
2667 9433 : enum rtx_code code0 = GET_CODE (op0);
2668 9433 : enum rtx_code code1 = GET_CODE (op1);
2669 9433 : machine_mode cmp_mode = GET_MODE (XEXP (op0, 0));
2670 9433 : if (cmp_mode == VOIDmode)
2671 0 : cmp_mode = GET_MODE (XEXP (op0, 1));
2672 :
2673 : /* Assume at first that the comparisons are on integers, and that the
2674 : operands are therefore ordered. */
2675 9433 : int all = 14;
2676 9433 : int mask0 = unsigned_comparison_to_mask (code0);
2677 9433 : int mask1 = unsigned_comparison_to_mask (code1);
2678 18866 : bool unsigned_p = (IN_RANGE (mask0 & 12, 4, 8)
2679 9433 : || IN_RANGE (mask1 & 12, 4, 8));
2680 1333 : if (unsigned_p)
2681 : {
2682 : /* We only reach here when comparing integers. Reject mixtures of signed
2683 : and unsigned comparisons. */
2684 8100 : if (mask0 == 0 || mask1 == 0)
2685 : return 0;
2686 : }
2687 : else
2688 : {
2689 : /* See whether the operands might be unordered. Assume that all
2690 : results are possible for CC modes, and punt later if we don't get an
2691 : always-true or always-false answer. */
2692 1333 : if (GET_MODE_CLASS (cmp_mode) == MODE_CC || HONOR_NANS (cmp_mode))
2693 : all = 15;
2694 1333 : mask0 = comparison_to_mask (code0) & all;
2695 1333 : mask1 = comparison_to_mask (code1) & all;
2696 : }
2697 :
2698 8153 : if (invert0_p)
2699 4658 : mask0 = mask0 ^ all;
2700 :
2701 8153 : int mask;
2702 8153 : if (code == AND)
2703 960 : mask = mask0 & mask1;
2704 7193 : else if (code == IOR)
2705 948 : mask = mask0 | mask1;
2706 6245 : else if (code == XOR)
2707 6245 : mask = mask0 ^ mask1;
2708 : else
2709 : return 0;
2710 :
2711 8153 : if (mask == all)
2712 232 : return relational_result (mode, GET_MODE (op0), const_true_rtx);
2713 :
2714 7921 : if (mask == 0)
2715 232 : return relational_result (mode, GET_MODE (op0), const0_rtx);
2716 :
2717 7689 : if (unsigned_p)
2718 6596 : code = mask_to_unsigned_comparison (mask);
2719 : else
2720 : {
2721 1093 : if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
2722 : return 0;
2723 :
2724 1014 : code = mask_to_comparison (mask);
2725 : /* LTGT and NE are arithmetically equivalent for ordered operands,
2726 : with NE being the canonical choice. */
2727 1014 : if (code == LTGT && all == 14)
2728 184 : code = NE;
2729 : }
2730 :
2731 7610 : op0 = XEXP (op1, 0);
2732 7610 : op1 = XEXP (op1, 1);
2733 :
2734 7610 : return simplify_gen_relational (code, mode, VOIDmode, op0, op1);
2735 : }
2736 :
2737 : /* Simplify a binary operation CODE with result mode MODE, operating on OP0
2738 : and OP1. Return 0 if no simplification is possible.
2739 :
2740 : Don't use this for relational operations such as EQ or LT.
2741 : Use simplify_relational_operation instead. */
2742 : rtx
2743 473557750 : simplify_context::simplify_binary_operation (rtx_code code, machine_mode mode,
2744 : rtx op0, rtx op1)
2745 : {
2746 473557750 : rtx trueop0, trueop1;
2747 473557750 : rtx tem;
2748 :
2749 : /* Relational operations don't work here. We must know the mode
2750 : of the operands in order to do the comparison correctly.
2751 : Assuming a full word can give incorrect results.
2752 : Consider comparing 128 with -128 in QImode. */
2753 473557750 : gcc_assert (GET_RTX_CLASS (code) != RTX_COMPARE);
2754 473557750 : gcc_assert (GET_RTX_CLASS (code) != RTX_COMM_COMPARE);
2755 :
2756 : /* Make sure the constant is second. */
2757 473557750 : if (GET_RTX_CLASS (code) == RTX_COMM_ARITH
2758 473557750 : && swap_commutative_operands_p (op0, op1))
2759 : std::swap (op0, op1);
2760 :
2761 473557750 : trueop0 = avoid_constant_pool_reference (op0);
2762 473557750 : trueop1 = avoid_constant_pool_reference (op1);
2763 :
2764 473557750 : tem = simplify_const_binary_operation (code, mode, trueop0, trueop1);
2765 473557750 : if (tem)
2766 : return tem;
2767 443809010 : tem = simplify_binary_operation_1 (code, mode, op0, op1, trueop0, trueop1);
2768 :
2769 443809010 : if (tem)
2770 : return tem;
2771 :
2772 : /* If the above steps did not result in a simplification and op0 or op1
2773 : were constant pool references, use the referenced constants directly. */
2774 381595839 : if (trueop0 != op0 || trueop1 != op1)
2775 580691 : return simplify_gen_binary (code, mode, trueop0, trueop1);
2776 :
2777 : return NULL_RTX;
2778 : }
2779 :
2780 : /* Subroutine of simplify_binary_operation_1 that looks for cases in
2781 : which OP0 and OP1 are both vector series or vector duplicates
2782 : (which are really just series with a step of 0). If so, try to
2783 : form a new series by applying CODE to the bases and to the steps.
2784 : Return null if no simplification is possible.
2785 :
2786 : MODE is the mode of the operation and is known to be a vector
2787 : integer mode. */
2788 :
2789 : rtx
2790 2376350 : simplify_context::simplify_binary_operation_series (rtx_code code,
2791 : machine_mode mode,
2792 : rtx op0, rtx op1)
2793 : {
2794 2376350 : rtx base0, step0;
2795 2376350 : if (vec_duplicate_p (op0, &base0))
2796 65851 : step0 = const0_rtx;
2797 2310499 : else if (!vec_series_p (op0, &base0, &step0))
2798 : return NULL_RTX;
2799 :
2800 66410 : rtx base1, step1;
2801 66410 : if (vec_duplicate_p (op1, &base1))
2802 412 : step1 = const0_rtx;
2803 65998 : else if (!vec_series_p (op1, &base1, &step1))
2804 : return NULL_RTX;
2805 :
2806 : /* Only create a new series if we can simplify both parts. In other
2807 : cases this isn't really a simplification, and it's not necessarily
2808 : a win to replace a vector operation with a scalar operation. */
2809 3088 : scalar_mode inner_mode = GET_MODE_INNER (mode);
2810 3088 : rtx new_base = simplify_binary_operation (code, inner_mode, base0, base1);
2811 3088 : if (!new_base)
2812 : return NULL_RTX;
2813 :
2814 2786 : rtx new_step = simplify_binary_operation (code, inner_mode, step0, step1);
2815 2786 : if (!new_step)
2816 : return NULL_RTX;
2817 :
2818 2786 : return gen_vec_series (mode, new_base, new_step);
2819 : }
2820 :
2821 : /* Subroutine of simplify_binary_operation_1. Un-distribute a binary
2822 : operation CODE with result mode MODE, operating on OP0 and OP1.
2823 : e.g. simplify (xor (and A C) (and (B C)) to (and (xor (A B) C).
2824 : Returns NULL_RTX if no simplification is possible. */
2825 :
2826 : rtx
2827 1425188 : simplify_context::simplify_distributive_operation (rtx_code code,
2828 : machine_mode mode,
2829 : rtx op0, rtx op1)
2830 : {
2831 1425188 : enum rtx_code op = GET_CODE (op0);
2832 1425188 : gcc_assert (GET_CODE (op1) == op);
2833 :
2834 1425188 : if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 1))
2835 1425188 : && ! side_effects_p (XEXP (op0, 1)))
2836 337592 : return simplify_gen_binary (op, mode,
2837 : simplify_gen_binary (code, mode,
2838 : XEXP (op0, 0),
2839 : XEXP (op1, 0)),
2840 337592 : XEXP (op0, 1));
2841 :
2842 1087596 : if (GET_RTX_CLASS (op) == RTX_COMM_ARITH)
2843 : {
2844 1065005 : if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
2845 1065005 : && ! side_effects_p (XEXP (op0, 0)))
2846 493315 : return simplify_gen_binary (op, mode,
2847 : simplify_gen_binary (code, mode,
2848 : XEXP (op0, 1),
2849 : XEXP (op1, 1)),
2850 493315 : XEXP (op0, 0));
2851 571690 : if (rtx_equal_p (XEXP (op0, 0), XEXP (op1, 1))
2852 571690 : && ! side_effects_p (XEXP (op0, 0)))
2853 55 : return simplify_gen_binary (op, mode,
2854 : simplify_gen_binary (code, mode,
2855 : XEXP (op0, 1),
2856 : XEXP (op1, 0)),
2857 55 : XEXP (op0, 0));
2858 571635 : if (rtx_equal_p (XEXP (op0, 1), XEXP (op1, 0))
2859 571635 : && ! side_effects_p (XEXP (op0, 1)))
2860 284582 : return simplify_gen_binary (op, mode,
2861 : simplify_gen_binary (code, mode,
2862 : XEXP (op0, 0),
2863 : XEXP (op1, 1)),
2864 284582 : XEXP (op0, 1));
2865 : }
2866 :
2867 : return NULL_RTX;
2868 : }
2869 :
2870 : /* Return TRUE if a rotate in mode MODE with a constant count in OP1
2871 : should be reversed.
2872 :
2873 : If the rotate should not be reversed, return FALSE.
2874 :
2875 : LEFT indicates if this is a rotate left or a rotate right. */
2876 :
2877 : bool
2878 145022 : reverse_rotate_by_imm_p (machine_mode mode, unsigned int left, rtx op1)
2879 : {
2880 145022 : if (!CONST_INT_P (op1))
2881 : return false;
2882 :
2883 : /* Some targets may only be able to rotate by a constant
2884 : in one direction. So we need to query the optab interface
2885 : to see what is possible. */
2886 113128 : optab binoptab = left ? rotl_optab : rotr_optab;
2887 47233 : optab re_binoptab = left ? rotr_optab : rotl_optab;
2888 113128 : enum insn_code icode = optab_handler (binoptab, mode);
2889 113128 : enum insn_code re_icode = optab_handler (re_binoptab, mode);
2890 :
2891 : /* If the target can not support the reversed optab, then there
2892 : is nothing to do. */
2893 113128 : if (re_icode == CODE_FOR_nothing)
2894 : return false;
2895 :
2896 : /* If the target does not support the requested rotate-by-immediate,
2897 : then we want to try reversing the rotate. We also want to try
2898 : reversing to minimize the count. */
2899 110670 : if ((icode == CODE_FOR_nothing)
2900 110670 : || (!insn_operand_matches (icode, 2, op1))
2901 553350 : || (IN_RANGE (INTVAL (op1),
2902 : GET_MODE_UNIT_PRECISION (mode) / 2 + left,
2903 : GET_MODE_UNIT_PRECISION (mode) - 1)))
2904 14682 : return (insn_operand_matches (re_icode, 2, op1));
2905 : return false;
2906 : }
2907 :
2908 : /* Analyse argument X to see if it represents an (ASHIFT X Y) operation
2909 : and return the expression to be shifted in SHIFT_OPND and the shift amount
2910 : in SHIFT_AMNT. This is primarily used to group handling of ASHIFT (X, CST)
2911 : and (PLUS (X, X)) in one place. If the expression is not equivalent to an
2912 : ASHIFT then return FALSE and set SHIFT_OPND and SHIFT_AMNT to NULL. */
2913 :
2914 : static bool
2915 523219082 : extract_ashift_operands_p (rtx x, rtx *shift_opnd, rtx *shift_amnt)
2916 : {
2917 523219082 : if (GET_CODE (x) == ASHIFT)
2918 : {
2919 13787301 : *shift_opnd = XEXP (x, 0);
2920 13787301 : *shift_amnt = XEXP (x, 1);
2921 13787301 : return true;
2922 : }
2923 509431781 : if (GET_CODE (x) == PLUS && rtx_equal_p (XEXP (x, 0), XEXP (x, 1)))
2924 : {
2925 13281 : *shift_opnd = XEXP (x, 0);
2926 13281 : *shift_amnt = CONST1_RTX (GET_MODE (x));
2927 13281 : return true;
2928 : }
2929 509418500 : *shift_opnd = NULL_RTX;
2930 509418500 : *shift_amnt = NULL_RTX;
2931 509418500 : return false;
2932 : }
2933 :
2934 : /* OP0 and OP1 are combined under an operation of mode MODE that can
2935 : potentially result in a ROTATE expression. Analyze the OP0 and OP1
2936 : and return the resulting ROTATE expression if so. Return NULL otherwise.
2937 : This is used in detecting the patterns (X << C1) [+,|,^] (X >> C2) where
2938 : C1 + C2 == GET_MODE_UNIT_PRECISION (mode).
2939 : (X << C1) and (C >> C2) would be OP0 and OP1. */
2940 :
2941 : static rtx
2942 263719425 : simplify_rotate_op (rtx op0, rtx op1, machine_mode mode)
2943 : {
2944 : /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
2945 : mode size to (rotate A CX). */
2946 :
2947 263719425 : rtx opleft = op0;
2948 263719425 : rtx opright = op1;
2949 263719425 : rtx ashift_opnd, ashift_amnt;
2950 : /* In some cases the ASHIFT is not a direct ASHIFT. Look deeper and extract
2951 : the relevant operands here. */
2952 263719425 : bool ashift_op_p
2953 263719425 : = extract_ashift_operands_p (op1, &ashift_opnd, &ashift_amnt);
2954 :
2955 263719425 : if (ashift_op_p
2956 262081949 : || GET_CODE (op1) == SUBREG)
2957 : {
2958 : opleft = op1;
2959 : opright = op0;
2960 : }
2961 : else
2962 : {
2963 259499657 : opright = op1;
2964 259499657 : opleft = op0;
2965 259499657 : ashift_op_p
2966 259499657 : = extract_ashift_operands_p (opleft, &ashift_opnd, &ashift_amnt);
2967 : }
2968 :
2969 13800582 : if (ashift_op_p && GET_CODE (opright) == LSHIFTRT
2970 262124895 : && rtx_equal_p (ashift_opnd, XEXP (opright, 0)))
2971 : {
2972 9709 : rtx leftcst = unwrap_const_vec_duplicate (ashift_amnt);
2973 9709 : rtx rightcst = unwrap_const_vec_duplicate (XEXP (opright, 1));
2974 :
2975 5891 : if (CONST_INT_P (leftcst) && CONST_INT_P (rightcst)
2976 15600 : && (INTVAL (leftcst) + INTVAL (rightcst)
2977 5891 : == GET_MODE_UNIT_PRECISION (mode)))
2978 5364 : return gen_rtx_ROTATE (mode, XEXP (opright, 0), ashift_amnt);
2979 : }
2980 :
2981 : /* Same, but for ashift that has been "simplified" to a wider mode
2982 : by simplify_shift_const. */
2983 263714061 : scalar_int_mode int_mode, inner_mode;
2984 :
2985 263714061 : if (GET_CODE (opleft) == SUBREG
2986 267313445 : && is_a <scalar_int_mode> (mode, &int_mode)
2987 3594020 : && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (opleft)),
2988 : &inner_mode)
2989 3562992 : && GET_CODE (SUBREG_REG (opleft)) == ASHIFT
2990 104013 : && GET_CODE (opright) == LSHIFTRT
2991 1106 : && GET_CODE (XEXP (opright, 0)) == SUBREG
2992 251 : && known_eq (SUBREG_BYTE (opleft), SUBREG_BYTE (XEXP (opright, 0)))
2993 498 : && GET_MODE_SIZE (int_mode) < GET_MODE_SIZE (inner_mode)
2994 235 : && rtx_equal_p (XEXP (SUBREG_REG (opleft), 0),
2995 235 : SUBREG_REG (XEXP (opright, 0)))
2996 19 : && CONST_INT_P (XEXP (SUBREG_REG (opleft), 1))
2997 19 : && CONST_INT_P (XEXP (opright, 1))
2998 263714061 : && (INTVAL (XEXP (SUBREG_REG (opleft), 1))
2999 19 : + INTVAL (XEXP (opright, 1))
3000 19 : == GET_MODE_PRECISION (int_mode)))
3001 15 : return gen_rtx_ROTATE (int_mode, XEXP (opright, 0),
3002 : XEXP (SUBREG_REG (opleft), 1));
3003 : return NULL_RTX;
3004 : }
3005 :
3006 : /* Returns true if OP0 and OP1 match the pattern (OP (plus (A - 1)) (neg A)),
3007 : and the pattern can be simplified (there are no side effects). */
3008 :
3009 : static bool
3010 40148865 : match_plus_neg_pattern (rtx op0, rtx op1, machine_mode mode)
3011 : {
3012 : /* Remove SUBREG from OP0 and OP1, if needed. */
3013 40148865 : if (GET_CODE (op0) == SUBREG
3014 7054555 : && GET_CODE (op1) == SUBREG
3015 310241 : && subreg_lowpart_p (op0)
3016 40457706 : && subreg_lowpart_p (op1))
3017 : {
3018 308832 : op0 = XEXP (op0, 0);
3019 308832 : op1 = XEXP (op1, 0);
3020 : }
3021 :
3022 : /* Check for the pattern (OP (plus (A - 1)) (neg A)). */
3023 40148865 : if (((GET_CODE (op1) == NEG
3024 3672 : && GET_CODE (op0) == PLUS
3025 2209 : && XEXP (op0, 1) == CONSTM1_RTX (mode))
3026 40148203 : || (GET_CODE (op0) == NEG
3027 78802 : && GET_CODE (op1) == PLUS
3028 0 : && XEXP (op1, 1) == CONSTM1_RTX (mode)))
3029 662 : && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3030 40148867 : && !side_effects_p (XEXP (op0, 0)))
3031 : return true;
3032 : return false;
3033 : }
3034 :
3035 : /* Check if OP matches the pattern of (subreg (not X)) and the subreg is
3036 : non-paradoxical. */
3037 :
3038 : static bool
3039 76095877 : non_paradoxical_subreg_not_p (rtx op)
3040 : {
3041 76095877 : return GET_CODE (op) == SUBREG
3042 8590111 : && !paradoxical_subreg_p (op)
3043 79038333 : && GET_CODE (SUBREG_REG (op)) == NOT;
3044 : }
3045 :
3046 : /* Convert (binop (subreg (not X)) Y) into (binop (not (subreg X)) Y), or
3047 : (binop X (subreg (not Y))) into (binop X (not (subreg Y))) to expose
3048 : opportunities to combine another binary logical operation with NOT. */
3049 :
3050 : static rtx
3051 38049110 : simplify_with_subreg_not (rtx_code binop, machine_mode mode, rtx op0, rtx op1)
3052 : {
3053 38049110 : rtx opn = NULL_RTX;
3054 38049110 : if (non_paradoxical_subreg_not_p (op0))
3055 : opn = op0;
3056 38046767 : else if (non_paradoxical_subreg_not_p (op1))
3057 : opn = op1;
3058 :
3059 2346 : if (opn == NULL_RTX)
3060 : return NULL_RTX;
3061 :
3062 4692 : rtx new_subreg = simplify_gen_subreg (mode,
3063 : XEXP (SUBREG_REG (opn), 0),
3064 2346 : GET_MODE (SUBREG_REG (opn)),
3065 2346 : SUBREG_BYTE (opn));
3066 :
3067 2346 : if (!new_subreg)
3068 : return NULL_RTX;
3069 :
3070 2291 : rtx new_not = simplify_gen_unary (NOT, mode, new_subreg, mode);
3071 2291 : if (opn == op0)
3072 2288 : return simplify_gen_binary (binop, mode, new_not, op1);
3073 : else
3074 3 : return simplify_gen_binary (binop, mode, op0, new_not);
3075 : }
3076 :
3077 : /* Return TRUE iff NOP is a negated form of OP, or vice-versa. */
3078 : static bool
3079 6999910 : negated_ops_p (rtx nop, rtx op)
3080 : {
3081 : /* Explicit negation. */
3082 6999910 : if (GET_CODE (nop) == NOT
3083 6999910 : && rtx_equal_p (XEXP (nop, 0), op))
3084 : return true;
3085 6996301 : if (GET_CODE (op) == NOT
3086 6996301 : && rtx_equal_p (XEXP (op, 0), nop))
3087 : return true;
3088 :
3089 : /* (~C <r A) is a negated form of (C << A) if C == 1. */
3090 6995621 : if (GET_CODE (op) == ASHIFT
3091 1409886 : && GET_CODE (nop) == ROTATE
3092 0 : && XEXP (op, 0) == CONST1_RTX (GET_MODE (op))
3093 0 : && CONST_INT_P (XEXP (nop, 0))
3094 0 : && INTVAL (XEXP (nop, 0)) == -2
3095 6995621 : && rtx_equal_p (XEXP (op, 1), XEXP (nop, 1)))
3096 : return true;
3097 6995621 : if (GET_CODE (nop) == ASHIFT
3098 203711 : && GET_CODE (op) == ROTATE
3099 0 : && XEXP (nop, 0) == CONST1_RTX (GET_MODE (op))
3100 0 : && CONST_INT_P (XEXP (nop, 0))
3101 0 : && INTVAL (XEXP (nop, 0)) == -2
3102 6995621 : && rtx_equal_p (XEXP (op, 1), XEXP (nop, 1)))
3103 : return true;
3104 :
3105 : /* ??? Should we consider rotations of C and ~C by the same amount? */
3106 :
3107 : return false;
3108 : }
3109 :
3110 : /* Subroutine of simplify_binary_operation. Simplify a binary operation
3111 : CODE with result mode MODE, operating on OP0 and OP1. If OP0 and/or
3112 : OP1 are constant pool references, TRUEOP0 and TRUEOP1 represent the
3113 : actual constants. */
3114 :
3115 : rtx
3116 443809010 : simplify_context::simplify_binary_operation_1 (rtx_code code,
3117 : machine_mode mode,
3118 : rtx op0, rtx op1,
3119 : rtx trueop0, rtx trueop1)
3120 : {
3121 443809010 : rtx tem, reversed, elt0, elt1;
3122 443809010 : HOST_WIDE_INT val;
3123 443809010 : scalar_int_mode int_mode, inner_mode;
3124 443809010 : poly_int64 offset;
3125 :
3126 : /* Even if we can't compute a constant result,
3127 : there are some cases worth simplifying. */
3128 :
3129 443809010 : switch (code)
3130 : {
3131 252395061 : case PLUS:
3132 : /* Maybe simplify x + 0 to x. The two expressions are equivalent
3133 : when x is NaN, infinite, or finite and nonzero. They aren't
3134 : when x is -0 and the rounding mode is not towards -infinity,
3135 : since (-0) + 0 is then 0. */
3136 500883796 : if (!HONOR_SIGNED_ZEROS (mode) && !HONOR_SNANS (mode)
3137 500883784 : && trueop1 == CONST0_RTX (mode))
3138 : return op0;
3139 :
3140 : /* ((-a) + b) -> (b - a) and similarly for (a + (-b)). These
3141 : transformations are safe even for IEEE. */
3142 251083257 : if (GET_CODE (op0) == NEG)
3143 26034 : return simplify_gen_binary (MINUS, mode, op1, XEXP (op0, 0));
3144 251057223 : else if (GET_CODE (op1) == NEG)
3145 7397 : return simplify_gen_binary (MINUS, mode, op0, XEXP (op1, 0));
3146 :
3147 : /* (~a) + 1 -> -a */
3148 251049826 : if (INTEGRAL_MODE_P (mode)
3149 246238260 : && GET_CODE (op0) == NOT
3150 627204 : && trueop1 == const1_rtx)
3151 3568 : return simplify_gen_unary (NEG, mode, XEXP (op0, 0), mode);
3152 :
3153 : /* Handle both-operands-constant cases. We can only add
3154 : CONST_INTs to constants since the sum of relocatable symbols
3155 : can't be handled by most assemblers. Don't add CONST_INT
3156 : to CONST_INT since overflow won't be computed properly if wider
3157 : than HOST_BITS_PER_WIDE_INT. */
3158 :
3159 251046258 : if ((GET_CODE (op0) == CONST
3160 251046258 : || GET_CODE (op0) == SYMBOL_REF
3161 248532929 : || GET_CODE (op0) == LABEL_REF)
3162 251046258 : && poly_int_rtx_p (op1, &offset))
3163 2512376 : return plus_constant (mode, op0, offset);
3164 248533882 : else if ((GET_CODE (op1) == CONST
3165 248533882 : || GET_CODE (op1) == SYMBOL_REF
3166 244831013 : || GET_CODE (op1) == LABEL_REF)
3167 248533882 : && poly_int_rtx_p (op0, &offset))
3168 0 : return plus_constant (mode, op1, offset);
3169 :
3170 : /* See if this is something like X * C - X or vice versa or
3171 : if the multiplication is written as a shift. If so, we can
3172 : distribute and make a new multiply, shift, or maybe just
3173 : have X (if C is 2 in the example above). But don't make
3174 : something more expensive than we had before. */
3175 :
3176 248533882 : if (is_a <scalar_int_mode> (mode, &int_mode))
3177 : {
3178 241607084 : rtx lhs = op0, rhs = op1;
3179 :
3180 241607084 : wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
3181 241607084 : wide_int coeff1 = wi::one (GET_MODE_PRECISION (int_mode));
3182 :
3183 241607084 : if (GET_CODE (lhs) == NEG)
3184 : {
3185 0 : coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
3186 0 : lhs = XEXP (lhs, 0);
3187 : }
3188 241607084 : else if (GET_CODE (lhs) == MULT
3189 6750545 : && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
3190 : {
3191 5652528 : coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
3192 5652528 : lhs = XEXP (lhs, 0);
3193 : }
3194 235954556 : else if (GET_CODE (lhs) == ASHIFT
3195 10974872 : && CONST_INT_P (XEXP (lhs, 1))
3196 10903206 : && INTVAL (XEXP (lhs, 1)) >= 0
3197 246857750 : && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
3198 : {
3199 10903194 : coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
3200 21806388 : GET_MODE_PRECISION (int_mode));
3201 10903194 : lhs = XEXP (lhs, 0);
3202 : }
3203 :
3204 241607084 : if (GET_CODE (rhs) == NEG)
3205 : {
3206 0 : coeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
3207 0 : rhs = XEXP (rhs, 0);
3208 : }
3209 241607084 : else if (GET_CODE (rhs) == MULT
3210 289908 : && CONST_INT_P (XEXP (rhs, 1)))
3211 : {
3212 179883 : coeff1 = rtx_mode_t (XEXP (rhs, 1), int_mode);
3213 179883 : rhs = XEXP (rhs, 0);
3214 : }
3215 241427201 : else if (GET_CODE (rhs) == ASHIFT
3216 538207 : && CONST_INT_P (XEXP (rhs, 1))
3217 528428 : && INTVAL (XEXP (rhs, 1)) >= 0
3218 241955629 : && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
3219 : {
3220 528428 : coeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
3221 1056856 : GET_MODE_PRECISION (int_mode));
3222 528428 : rhs = XEXP (rhs, 0);
3223 : }
3224 :
3225 : /* Keep PLUS of 2 volatile memory references. */
3226 241607084 : if (rtx_equal_p (lhs, rhs)
3227 241607084 : && (!MEM_P (lhs) || !MEM_VOLATILE_P (lhs)))
3228 : {
3229 774761 : rtx orig = gen_rtx_PLUS (int_mode, op0, op1);
3230 774761 : rtx coeff;
3231 774761 : bool speed = optimize_function_for_speed_p (cfun);
3232 :
3233 774761 : coeff = immed_wide_int_const (coeff0 + coeff1, int_mode);
3234 :
3235 774761 : tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
3236 774761 : return (set_src_cost (tem, int_mode, speed)
3237 774761 : <= set_src_cost (orig, int_mode, speed) ? tem : 0);
3238 : }
3239 :
3240 : /* Optimize (X - 1) * Y + Y to X * Y. */
3241 240832323 : lhs = op0;
3242 240832323 : rhs = op1;
3243 240832323 : if (GET_CODE (op0) == MULT)
3244 : {
3245 6730398 : if (((GET_CODE (XEXP (op0, 0)) == PLUS
3246 278840 : && XEXP (XEXP (op0, 0), 1) == constm1_rtx)
3247 6689090 : || (GET_CODE (XEXP (op0, 0)) == MINUS
3248 36545 : && XEXP (XEXP (op0, 0), 1) == const1_rtx))
3249 6771706 : && rtx_equal_p (XEXP (op0, 1), op1))
3250 78 : lhs = XEXP (XEXP (op0, 0), 0);
3251 6730320 : else if (((GET_CODE (XEXP (op0, 1)) == PLUS
3252 1284 : && XEXP (XEXP (op0, 1), 1) == constm1_rtx)
3253 6730286 : || (GET_CODE (XEXP (op0, 1)) == MINUS
3254 339 : && XEXP (XEXP (op0, 1), 1) == const1_rtx))
3255 6730354 : && rtx_equal_p (XEXP (op0, 0), op1))
3256 0 : lhs = XEXP (XEXP (op0, 1), 0);
3257 : }
3258 234101925 : else if (GET_CODE (op1) == MULT)
3259 : {
3260 135961 : if (((GET_CODE (XEXP (op1, 0)) == PLUS
3261 42 : && XEXP (XEXP (op1, 0), 1) == constm1_rtx)
3262 135956 : || (GET_CODE (XEXP (op1, 0)) == MINUS
3263 27 : && XEXP (XEXP (op1, 0), 1) == const1_rtx))
3264 135966 : && rtx_equal_p (XEXP (op1, 1), op0))
3265 0 : rhs = XEXP (XEXP (op1, 0), 0);
3266 135961 : else if (((GET_CODE (XEXP (op1, 1)) == PLUS
3267 0 : && XEXP (XEXP (op1, 1), 1) == constm1_rtx)
3268 135961 : || (GET_CODE (XEXP (op1, 1)) == MINUS
3269 0 : && XEXP (XEXP (op1, 1), 1) == const1_rtx))
3270 135961 : && rtx_equal_p (XEXP (op1, 0), op0))
3271 0 : rhs = XEXP (XEXP (op1, 1), 0);
3272 : }
3273 240832323 : if (lhs != op0 || rhs != op1)
3274 78 : return simplify_gen_binary (MULT, int_mode, lhs, rhs);
3275 241607084 : }
3276 :
3277 : /* (plus (xor X C1) C2) is (xor X (C1^C2)) if C2 is signbit. */
3278 247759043 : if (CONST_SCALAR_INT_P (op1)
3279 190894861 : && GET_CODE (op0) == XOR
3280 20759 : && CONST_SCALAR_INT_P (XEXP (op0, 1))
3281 247771308 : && mode_signbit_p (mode, op1))
3282 121 : return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3283 : simplify_gen_binary (XOR, mode, op1,
3284 121 : XEXP (op0, 1)));
3285 :
3286 : /* (plus (xor X C1) C2) is (xor X (C1^C2)) if X is either 0 or 1 and
3287 : 2 * ((X ^ C1) & C2) == 0; based on A + B == A ^ B + 2 * (A & B). */
3288 247758922 : if (CONST_SCALAR_INT_P (op1)
3289 190894740 : && GET_CODE (op0) == XOR
3290 20638 : && CONST_SCALAR_INT_P (XEXP (op0, 1))
3291 12144 : && nonzero_bits (XEXP (op0, 0), mode) == 1
3292 191 : && 2 * (INTVAL (XEXP (op0, 1)) & INTVAL (op1)) == 0
3293 247758922 : && 2 * ((1 ^ INTVAL (XEXP (op0, 1))) & INTVAL (op1)) == 0)
3294 0 : return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3295 : simplify_gen_binary (XOR, mode, op1,
3296 0 : XEXP (op0, 1)));
3297 :
3298 : /* Canonicalize (plus (mult (neg B) C) A) to (minus A (mult B C)). */
3299 247758922 : if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3300 247756442 : && GET_CODE (op0) == MULT
3301 254854101 : && GET_CODE (XEXP (op0, 0)) == NEG)
3302 : {
3303 5665 : rtx in1, in2;
3304 :
3305 5665 : in1 = XEXP (XEXP (op0, 0), 0);
3306 5665 : in2 = XEXP (op0, 1);
3307 5665 : return simplify_gen_binary (MINUS, mode, op1,
3308 : simplify_gen_binary (MULT, mode,
3309 5665 : in1, in2));
3310 : }
3311 :
3312 : /* (plus (comparison A B) C) can become (neg (rev-comp A B)) if
3313 : C is 1 and STORE_FLAG_VALUE is -1 or if C is -1 and STORE_FLAG_VALUE
3314 : is 1. */
3315 247753257 : if (COMPARISON_P (op0)
3316 1459254 : && ((STORE_FLAG_VALUE == -1 && trueop1 == const1_rtx)
3317 1459254 : || (STORE_FLAG_VALUE == 1 && trueop1 == constm1_rtx))
3318 247806324 : && (reversed = reversed_comparison (op0, mode)))
3319 52718 : return
3320 52718 : simplify_gen_unary (NEG, mode, reversed, mode);
3321 :
3322 : /* Convert (plus (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3323 : mode size to (rotate A CX). */
3324 247700539 : if ((tem = simplify_rotate_op (op0, op1, mode)))
3325 : return tem;
3326 :
3327 : /* If one of the operands is a PLUS or a MINUS, see if we can
3328 : simplify this by the associative law.
3329 : Don't use the associative law for floating point.
3330 : The inaccuracy makes it nonassociative,
3331 : and subtle programs can break if operations are associated. */
3332 :
3333 247699069 : if (INTEGRAL_MODE_P (mode)
3334 242887552 : && (plus_minus_operand_p (op0)
3335 209695901 : || plus_minus_operand_p (op1))
3336 34201499 : && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3337 : return tem;
3338 :
3339 : /* Reassociate floating point addition only when the user
3340 : specifies associative math operations. */
3341 214217179 : if (FLOAT_MODE_P (mode)
3342 4811517 : && flag_associative_math)
3343 : {
3344 905171 : tem = simplify_associative_operation (code, mode, op0, op1);
3345 905171 : if (tem)
3346 : return tem;
3347 : }
3348 :
3349 : /* Handle vector series. */
3350 214203588 : if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3351 : {
3352 1948448 : tem = simplify_binary_operation_series (code, mode, op0, op1);
3353 1948448 : if (tem)
3354 : return tem;
3355 : }
3356 : break;
3357 :
3358 : case COMPARE:
3359 : break;
3360 :
3361 42168315 : case MINUS:
3362 : /* We can't assume x-x is 0 even with non-IEEE floating point,
3363 : but since it is zero except in very strange circumstances, we
3364 : will treat it as zero with -ffinite-math-only. */
3365 42168315 : if (rtx_equal_p (trueop0, trueop1)
3366 219316 : && ! side_effects_p (op0)
3367 42386370 : && (!FLOAT_MODE_P (mode) || !HONOR_NANS (mode)))
3368 215592 : return CONST0_RTX (mode);
3369 :
3370 : /* Change subtraction from zero into negation. (0 - x) is the
3371 : same as -x when x is NaN, infinite, or finite and nonzero.
3372 : But if the mode has signed zeros, and does not round towards
3373 : -infinity, then 0 - 0 is 0, not -0. */
3374 41952723 : if (!HONOR_SIGNED_ZEROS (mode) && trueop0 == CONST0_RTX (mode))
3375 306446 : return simplify_gen_unary (NEG, mode, op1, mode);
3376 :
3377 : /* (-1 - a) is ~a, unless the expression contains symbolic
3378 : constants, in which case not retaining additions and
3379 : subtractions could cause invalid assembly to be produced. */
3380 41646277 : if (trueop0 == CONSTM1_RTX (mode)
3381 41646277 : && !contains_symbolic_reference_p (op1))
3382 355476 : return simplify_gen_unary (NOT, mode, op1, mode);
3383 :
3384 : /* Subtracting 0 has no effect unless the mode has signalling NaNs,
3385 : or has signed zeros and supports rounding towards -infinity.
3386 : In such a case, 0 - 0 is -0. */
3387 41986177 : if (!(HONOR_SIGNED_ZEROS (mode)
3388 695376 : && HONOR_SIGN_DEPENDENT_ROUNDING (mode))
3389 41289595 : && !HONOR_SNANS (mode)
3390 82580360 : && trueop1 == CONST0_RTX (mode))
3391 : return op0;
3392 :
3393 : /* See if this is something like X * C - X or vice versa or
3394 : if the multiplication is written as a shift. If so, we can
3395 : distribute and make a new multiply, shift, or maybe just
3396 : have X (if C is 2 in the example above). But don't make
3397 : something more expensive than we had before. */
3398 :
3399 40342085 : if (is_a <scalar_int_mode> (mode, &int_mode))
3400 : {
3401 39077822 : rtx lhs = op0, rhs = op1;
3402 :
3403 39077822 : wide_int coeff0 = wi::one (GET_MODE_PRECISION (int_mode));
3404 39077822 : wide_int negcoeff1 = wi::minus_one (GET_MODE_PRECISION (int_mode));
3405 :
3406 39077822 : if (GET_CODE (lhs) == NEG)
3407 : {
3408 113586 : coeff0 = wi::minus_one (GET_MODE_PRECISION (int_mode));
3409 113586 : lhs = XEXP (lhs, 0);
3410 : }
3411 38964236 : else if (GET_CODE (lhs) == MULT
3412 229637 : && CONST_SCALAR_INT_P (XEXP (lhs, 1)))
3413 : {
3414 82536 : coeff0 = rtx_mode_t (XEXP (lhs, 1), int_mode);
3415 82536 : lhs = XEXP (lhs, 0);
3416 : }
3417 38881700 : else if (GET_CODE (lhs) == ASHIFT
3418 327562 : && CONST_INT_P (XEXP (lhs, 1))
3419 324261 : && INTVAL (XEXP (lhs, 1)) >= 0
3420 39205940 : && INTVAL (XEXP (lhs, 1)) < GET_MODE_PRECISION (int_mode))
3421 : {
3422 324240 : coeff0 = wi::set_bit_in_zero (INTVAL (XEXP (lhs, 1)),
3423 648480 : GET_MODE_PRECISION (int_mode));
3424 324240 : lhs = XEXP (lhs, 0);
3425 : }
3426 :
3427 39077822 : if (GET_CODE (rhs) == NEG)
3428 : {
3429 8508 : negcoeff1 = wi::one (GET_MODE_PRECISION (int_mode));
3430 8508 : rhs = XEXP (rhs, 0);
3431 : }
3432 39069314 : else if (GET_CODE (rhs) == MULT
3433 150896 : && CONST_INT_P (XEXP (rhs, 1)))
3434 : {
3435 117371 : negcoeff1 = wi::neg (rtx_mode_t (XEXP (rhs, 1), int_mode));
3436 117371 : rhs = XEXP (rhs, 0);
3437 : }
3438 38951943 : else if (GET_CODE (rhs) == ASHIFT
3439 383172 : && CONST_INT_P (XEXP (rhs, 1))
3440 382663 : && INTVAL (XEXP (rhs, 1)) >= 0
3441 39334606 : && INTVAL (XEXP (rhs, 1)) < GET_MODE_PRECISION (int_mode))
3442 : {
3443 382663 : negcoeff1 = wi::set_bit_in_zero (INTVAL (XEXP (rhs, 1)),
3444 765326 : GET_MODE_PRECISION (int_mode));
3445 382663 : negcoeff1 = -negcoeff1;
3446 382663 : rhs = XEXP (rhs, 0);
3447 : }
3448 :
3449 39077822 : if (rtx_equal_p (lhs, rhs))
3450 : {
3451 98865 : rtx orig = gen_rtx_MINUS (int_mode, op0, op1);
3452 98865 : rtx coeff;
3453 98865 : bool speed = optimize_function_for_speed_p (cfun);
3454 :
3455 98865 : coeff = immed_wide_int_const (coeff0 + negcoeff1, int_mode);
3456 :
3457 98865 : tem = simplify_gen_binary (MULT, int_mode, lhs, coeff);
3458 98865 : return (set_src_cost (tem, int_mode, speed)
3459 98865 : <= set_src_cost (orig, int_mode, speed) ? tem : 0);
3460 : }
3461 :
3462 : /* Optimize (X + 1) * Y - Y to X * Y. */
3463 38978957 : lhs = op0;
3464 38978957 : if (GET_CODE (op0) == MULT)
3465 : {
3466 229517 : if (((GET_CODE (XEXP (op0, 0)) == PLUS
3467 4932 : && XEXP (XEXP (op0, 0), 1) == const1_rtx)
3468 227925 : || (GET_CODE (XEXP (op0, 0)) == MINUS
3469 1835 : && XEXP (XEXP (op0, 0), 1) == constm1_rtx))
3470 231109 : && rtx_equal_p (XEXP (op0, 1), op1))
3471 2 : lhs = XEXP (XEXP (op0, 0), 0);
3472 229515 : else if (((GET_CODE (XEXP (op0, 1)) == PLUS
3473 26 : && XEXP (XEXP (op0, 1), 1) == const1_rtx)
3474 229509 : || (GET_CODE (XEXP (op0, 1)) == MINUS
3475 84 : && XEXP (XEXP (op0, 1), 1) == constm1_rtx))
3476 229521 : && rtx_equal_p (XEXP (op0, 0), op1))
3477 0 : lhs = XEXP (XEXP (op0, 1), 0);
3478 : }
3479 38978957 : if (lhs != op0)
3480 2 : return simplify_gen_binary (MULT, int_mode, lhs, op1);
3481 39077822 : }
3482 :
3483 : /* (a - (-b)) -> (a + b). True even for IEEE. */
3484 40243218 : if (GET_CODE (op1) == NEG)
3485 8470 : return simplify_gen_binary (PLUS, mode, op0, XEXP (op1, 0));
3486 :
3487 : /* (-x - c) may be simplified as (-c - x). */
3488 40234748 : if (GET_CODE (op0) == NEG
3489 117750 : && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1)))
3490 : {
3491 694 : tem = simplify_unary_operation (NEG, mode, op1, mode);
3492 694 : if (tem)
3493 694 : return simplify_gen_binary (MINUS, mode, tem, XEXP (op0, 0));
3494 : }
3495 :
3496 40234054 : if ((GET_CODE (op0) == CONST
3497 40234054 : || GET_CODE (op0) == SYMBOL_REF
3498 35216213 : || GET_CODE (op0) == LABEL_REF)
3499 40234054 : && poly_int_rtx_p (op1, &offset))
3500 50702 : return plus_constant (mode, op0, trunc_int_for_mode (-offset, mode));
3501 :
3502 : /* Don't let a relocatable value get a negative coeff. */
3503 40183352 : if (is_a <scalar_int_mode> (mode)
3504 38919120 : && poly_int_rtx_p (op1)
3505 47313467 : && GET_MODE (op0) != VOIDmode)
3506 7130115 : return simplify_gen_binary (PLUS, mode,
3507 : op0,
3508 7130115 : neg_poly_int_rtx (mode, op1));
3509 :
3510 : /* (x - (x & y)) -> (x & ~y) */
3511 33053237 : if (INTEGRAL_MODE_P (mode) && GET_CODE (op1) == AND)
3512 : {
3513 289415 : if (rtx_equal_p (op0, XEXP (op1, 0)))
3514 : {
3515 502 : tem = simplify_gen_unary (NOT, mode, XEXP (op1, 1),
3516 251 : GET_MODE (XEXP (op1, 1)));
3517 251 : return simplify_gen_binary (AND, mode, op0, tem);
3518 : }
3519 289164 : if (rtx_equal_p (op0, XEXP (op1, 1)))
3520 : {
3521 2004 : tem = simplify_gen_unary (NOT, mode, XEXP (op1, 0),
3522 1002 : GET_MODE (XEXP (op1, 0)));
3523 1002 : return simplify_gen_binary (AND, mode, op0, tem);
3524 : }
3525 : }
3526 :
3527 : /* If STORE_FLAG_VALUE is 1, (minus 1 (comparison foo bar)) can be done
3528 : by reversing the comparison code if valid. */
3529 33051984 : if (STORE_FLAG_VALUE == 1
3530 33051984 : && trueop0 == const1_rtx
3531 1114817 : && COMPARISON_P (op1)
3532 33156928 : && (reversed = reversed_comparison (op1, mode)))
3533 : return reversed;
3534 :
3535 : /* Canonicalize (minus A (mult (neg B) C)) to (plus (mult B C) A). */
3536 32947063 : if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3537 32945656 : && GET_CODE (op1) == MULT
3538 33197537 : && GET_CODE (XEXP (op1, 0)) == NEG)
3539 : {
3540 165 : rtx in1, in2;
3541 :
3542 165 : in1 = XEXP (XEXP (op1, 0), 0);
3543 165 : in2 = XEXP (op1, 1);
3544 165 : return simplify_gen_binary (PLUS, mode,
3545 : simplify_gen_binary (MULT, mode,
3546 : in1, in2),
3547 165 : op0);
3548 : }
3549 :
3550 : /* Canonicalize (minus (neg A) (mult B C)) to
3551 : (minus (mult (neg B) C) A). */
3552 32946898 : if (!HONOR_SIGN_DEPENDENT_ROUNDING (mode)
3553 32945491 : && GET_CODE (op1) == MULT
3554 33197207 : && GET_CODE (op0) == NEG)
3555 : {
3556 655 : rtx in1, in2;
3557 :
3558 655 : in1 = simplify_gen_unary (NEG, mode, XEXP (op1, 0), mode);
3559 655 : in2 = XEXP (op1, 1);
3560 655 : return simplify_gen_binary (MINUS, mode,
3561 : simplify_gen_binary (MULT, mode,
3562 : in1, in2),
3563 655 : XEXP (op0, 0));
3564 : }
3565 :
3566 : /* If one of the operands is a PLUS or a MINUS, see if we can
3567 : simplify this by the associative law. This will, for example,
3568 : canonicalize (minus A (plus B C)) to (minus (minus A B) C).
3569 : Don't use the associative law for floating point.
3570 : The inaccuracy makes it nonassociative,
3571 : and subtle programs can break if operations are associated. */
3572 :
3573 32946243 : if (INTEGRAL_MODE_P (mode)
3574 32131185 : && (plus_minus_operand_p (op0)
3575 29500495 : || plus_minus_operand_p (op1))
3576 3828473 : && (tem = simplify_plus_minus (code, mode, op0, op1)) != 0)
3577 : return tem;
3578 :
3579 : /* Handle vector series. */
3580 29257285 : if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
3581 : {
3582 427902 : tem = simplify_binary_operation_series (code, mode, op0, op1);
3583 427902 : if (tem)
3584 : return tem;
3585 : }
3586 : break;
3587 :
3588 12222889 : case MULT:
3589 12222889 : if (trueop1 == constm1_rtx)
3590 32334 : return simplify_gen_unary (NEG, mode, op0, mode);
3591 :
3592 12190555 : if (GET_CODE (op0) == NEG)
3593 : {
3594 33434 : rtx temp = simplify_unary_operation (NEG, mode, op1, mode);
3595 : /* If op1 is a MULT as well and simplify_unary_operation
3596 : just moved the NEG to the second operand, simplify_gen_binary
3597 : below could through simplify_associative_operation move
3598 : the NEG around again and recurse endlessly. */
3599 33434 : if (temp
3600 1527 : && GET_CODE (op1) == MULT
3601 0 : && GET_CODE (temp) == MULT
3602 0 : && XEXP (op1, 0) == XEXP (temp, 0)
3603 0 : && GET_CODE (XEXP (temp, 1)) == NEG
3604 0 : && XEXP (op1, 1) == XEXP (XEXP (temp, 1), 0))
3605 : temp = NULL_RTX;
3606 : if (temp)
3607 1527 : return simplify_gen_binary (MULT, mode, XEXP (op0, 0), temp);
3608 : }
3609 12189028 : if (GET_CODE (op1) == NEG)
3610 : {
3611 986 : rtx temp = simplify_unary_operation (NEG, mode, op0, mode);
3612 : /* If op0 is a MULT as well and simplify_unary_operation
3613 : just moved the NEG to the second operand, simplify_gen_binary
3614 : below could through simplify_associative_operation move
3615 : the NEG around again and recurse endlessly. */
3616 986 : if (temp
3617 418 : && GET_CODE (op0) == MULT
3618 298 : && GET_CODE (temp) == MULT
3619 298 : && XEXP (op0, 0) == XEXP (temp, 0)
3620 6 : && GET_CODE (XEXP (temp, 1)) == NEG
3621 5 : && XEXP (op0, 1) == XEXP (XEXP (temp, 1), 0))
3622 : temp = NULL_RTX;
3623 : if (temp)
3624 413 : return simplify_gen_binary (MULT, mode, temp, XEXP (op1, 0));
3625 : }
3626 :
3627 : /* Maybe simplify x * 0 to 0. The reduction is not valid if
3628 : x is NaN, since x * 0 is then also NaN. Nor is it valid
3629 : when the mode has signed zeros, since multiplying a negative
3630 : number by 0 will give -0, not 0. */
3631 12188615 : if (!HONOR_NANS (mode)
3632 11228093 : && !HONOR_SIGNED_ZEROS (mode)
3633 11227697 : && trueop1 == CONST0_RTX (mode)
3634 12231069 : && ! side_effects_p (op0))
3635 : return op1;
3636 :
3637 : /* In IEEE floating point, x*1 is not equivalent to x for
3638 : signalling NaNs. */
3639 12147424 : if (!HONOR_SNANS (mode)
3640 12147424 : && trueop1 == CONST1_RTX (mode))
3641 : return op0;
3642 :
3643 : /* Convert multiply by constant power of two into shift. */
3644 11627301 : if (mem_depth == 0 && CONST_SCALAR_INT_P (trueop1))
3645 : {
3646 6310706 : val = wi::exact_log2 (rtx_mode_t (trueop1, mode));
3647 6310706 : if (val >= 0)
3648 3025484 : return simplify_gen_binary (ASHIFT, mode, op0,
3649 3025484 : gen_int_shift_amount (mode, val));
3650 : }
3651 :
3652 : /* x*2 is x+x and x*(-1) is -x */
3653 8601817 : if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
3654 167029 : && SCALAR_FLOAT_MODE_P (GET_MODE (trueop1))
3655 167029 : && !DECIMAL_FLOAT_MODE_P (GET_MODE (trueop1))
3656 166745 : && GET_MODE (op0) == mode)
3657 : {
3658 166745 : const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
3659 :
3660 166745 : if (real_equal (d1, &dconst2))
3661 615 : return simplify_gen_binary (PLUS, mode, op0, copy_rtx (op0));
3662 :
3663 166130 : if (!HONOR_SNANS (mode)
3664 166130 : && real_equal (d1, &dconstm1))
3665 24 : return simplify_gen_unary (NEG, mode, op0, mode);
3666 : }
3667 :
3668 : /* Optimize -x * -x as x * x. */
3669 8601178 : if (FLOAT_MODE_P (mode)
3670 1377706 : && GET_CODE (op0) == NEG
3671 7941 : && GET_CODE (op1) == NEG
3672 0 : && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3673 0 : && !side_effects_p (XEXP (op0, 0)))
3674 0 : return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3675 :
3676 : /* Likewise, optimize abs(x) * abs(x) as x * x. */
3677 8601178 : if (SCALAR_FLOAT_MODE_P (mode)
3678 1090755 : && GET_CODE (op0) == ABS
3679 1339 : && GET_CODE (op1) == ABS
3680 0 : && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0))
3681 8601178 : && !side_effects_p (XEXP (op0, 0)))
3682 0 : return simplify_gen_binary (MULT, mode, XEXP (op0, 0), XEXP (op1, 0));
3683 :
3684 : /* Reassociate multiplication, but for floating point MULTs
3685 : only when the user specifies unsafe math optimizations. */
3686 8601178 : if (! FLOAT_MODE_P (mode)
3687 1377706 : || flag_unsafe_math_optimizations)
3688 : {
3689 7641814 : tem = simplify_associative_operation (code, mode, op0, op1);
3690 7641814 : if (tem)
3691 : return tem;
3692 : }
3693 : break;
3694 :
3695 15853214 : case IOR:
3696 15853214 : if (trueop1 == CONST0_RTX (mode))
3697 : return op0;
3698 15024658 : if (INTEGRAL_MODE_P (mode)
3699 14749602 : && trueop1 == CONSTM1_RTX (mode)
3700 9505 : && !side_effects_p (op0))
3701 : return op1;
3702 15015153 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
3703 : return op0;
3704 : /* A | (~A) -> -1 */
3705 75164 : if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
3706 14995616 : || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
3707 11 : && ! side_effects_p (op0)
3708 14995638 : && GET_MODE_CLASS (mode) != MODE_CC)
3709 11 : return CONSTM1_RTX (mode);
3710 :
3711 : /* IOR of two single bit bitfields extracted from the same object.
3712 : Bitfields are represented as an AND based extraction */
3713 14995616 : if (GET_CODE (op0) == AND
3714 4236492 : && GET_CODE (op1) == AND
3715 : /* Verify both AND operands are logical right shifts. */
3716 330450 : && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
3717 5203 : && GET_CODE (XEXP (op1, 0)) == LSHIFTRT
3718 : /* Verify both bitfields are extracted from the same object. */
3719 54 : && XEXP (XEXP (op0, 0), 0) == XEXP (XEXP (op1, 0), 0)
3720 : /* Verify both fields are a single bit (could be generalized). */
3721 54 : && XEXP (op0, 1) == CONST1_RTX (mode)
3722 0 : && XEXP (op1, 1) == CONST1_RTX (mode)
3723 : /* Verify bit positions (for cases with variable bit position). */
3724 0 : && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3725 0 : && CONST_INT_P (XEXP (XEXP (op1, 0), 1)))
3726 : {
3727 0 : unsigned HOST_WIDE_INT bitpos1 = INTVAL (XEXP (XEXP (op0, 0), 1));
3728 0 : unsigned HOST_WIDE_INT bitpos2 = INTVAL (XEXP (XEXP (op1, 0), 1));
3729 0 : unsigned HOST_WIDE_INT mask
3730 0 : = (HOST_WIDE_INT_1U << bitpos1) | (HOST_WIDE_INT_1U << bitpos2);
3731 :
3732 0 : rtx m = GEN_INT (mask);
3733 0 : rtx t = gen_rtx_AND (mode, XEXP (XEXP (op0, 0), 0), m);
3734 0 : t = gen_rtx_NE (mode, t, CONST0_RTX (mode));
3735 0 : return t;
3736 : }
3737 :
3738 : /* IOR of multiple single bit bitfields extracted from the same object
3739 : (building on previous case).
3740 : First bitfield is represented as an AND based extraction, as done
3741 : above. Second represented as NE based extraction, from
3742 : output above. */
3743 14995616 : if (GET_CODE (op0) == AND
3744 4236492 : && GET_CODE (op1) == NE
3745 : /* Verify AND operand is logical right shift. */
3746 4590 : && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
3747 : /* Verify NE operand is an AND (based on output above). */
3748 86 : && GET_CODE (XEXP (op1, 0)) == AND
3749 : /* Verify both bitfields are extracted from the same object. */
3750 0 : && XEXP (XEXP (op0, 0), 0) == XEXP (XEXP (op1, 0), 0)
3751 : /* Verify masking is with a single bit and that we have a NE 0
3752 : comparison for the other operand. */
3753 0 : && XEXP (op0, 1) == CONST1_RTX (mode)
3754 0 : && XEXP (op1, 1) == CONST0_RTX (mode)
3755 : /* Verify bit position. */
3756 0 : && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
3757 : {
3758 0 : unsigned HOST_WIDE_INT bitpos1 = INTVAL (XEXP (XEXP (op0, 0), 1));
3759 0 : unsigned HOST_WIDE_INT mask
3760 0 : = (HOST_WIDE_INT_1U << bitpos1) | INTVAL (XEXP (XEXP (op1, 0), 1));
3761 :
3762 0 : rtx m = GEN_INT (mask);
3763 0 : rtx t = gen_rtx_AND (mode, XEXP (XEXP (op0, 0), 0), m);
3764 0 : t = gen_rtx_NE (mode, t, CONST0_RTX (mode));
3765 0 : return t;
3766 : }
3767 :
3768 : /* Convert (ior (plus (A - 1)) (neg A)) to -1. */
3769 14995616 : if (match_plus_neg_pattern (op0, op1, mode))
3770 0 : return CONSTM1_RTX (mode);
3771 :
3772 : /* (ior A C) is C if all bits of A that might be nonzero are on in C. */
3773 14995616 : if (CONST_INT_P (op1)
3774 3836239 : && HWI_COMPUTABLE_MODE_P (mode)
3775 3779795 : && (nonzero_bits (op0, mode) & ~UINTVAL (op1)) == 0
3776 15362095 : && !side_effects_p (op0))
3777 : return op1;
3778 :
3779 : /* Canonicalize (X & C1) | C2. */
3780 14629137 : if (GET_CODE (op0) == AND
3781 4227756 : && CONST_INT_P (trueop1)
3782 706045 : && CONST_INT_P (XEXP (op0, 1)))
3783 : {
3784 546428 : HOST_WIDE_INT mask = GET_MODE_MASK (mode);
3785 546428 : HOST_WIDE_INT c1 = INTVAL (XEXP (op0, 1));
3786 546428 : HOST_WIDE_INT c2 = INTVAL (trueop1);
3787 :
3788 : /* If (C1&C2) == C1, then (X&C1)|C2 becomes C2. */
3789 546428 : if ((c1 & c2) == c1
3790 546428 : && !side_effects_p (XEXP (op0, 0)))
3791 : return trueop1;
3792 :
3793 : /* If (C1|C2) == ~0 then (X&C1)|C2 becomes X|C2. */
3794 546408 : if (((c1|c2) & mask) == mask)
3795 73892 : return simplify_gen_binary (IOR, mode, XEXP (op0, 0), op1);
3796 :
3797 : /* If (C1|C2) has a single bit clear, then adjust C1 so that
3798 : when split it'll match a single bit clear style insn.
3799 :
3800 : This could have been done with a target dependent splitter, but
3801 : then every target with single bit manipulation insns would need
3802 : to implement such splitters. */
3803 472516 : if (exact_log2 (~(c1 | c2)) >= 0)
3804 : {
3805 66158 : rtx temp = gen_rtx_AND (mode, XEXP (op0, 0), GEN_INT (c1 | c2));
3806 66158 : temp = gen_rtx_IOR (mode, temp, trueop1);
3807 66158 : return temp;
3808 : }
3809 : }
3810 :
3811 : /* Convert (A & B) | A to A. */
3812 14489067 : if (GET_CODE (op0) == AND
3813 4087686 : && (rtx_equal_p (XEXP (op0, 0), op1)
3814 4087575 : || rtx_equal_p (XEXP (op0, 1), op1))
3815 3851 : && ! side_effects_p (XEXP (op0, 0))
3816 14492918 : && ! side_effects_p (XEXP (op0, 1)))
3817 : return op1;
3818 :
3819 : /* Convert (ior (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
3820 : mode size to (rotate A CX). */
3821 14485216 : tem = simplify_rotate_op (op0, op1, mode);
3822 14485216 : if (tem)
3823 : return tem;
3824 :
3825 : /* If OP0 is (ashiftrt (plus ...) C), it might actually be
3826 : a (sign_extend (plus ...)). Then check if OP1 is a CONST_INT and
3827 : the PLUS does not affect any of the bits in OP1: then we can do
3828 : the IOR as a PLUS and we can associate. This is valid if OP1
3829 : can be safely shifted left C bits. */
3830 14482687 : if (CONST_INT_P (trueop1) && GET_CODE (op0) == ASHIFTRT
3831 6378 : && GET_CODE (XEXP (op0, 0)) == PLUS
3832 141 : && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
3833 87 : && CONST_INT_P (XEXP (op0, 1))
3834 87 : && INTVAL (XEXP (op0, 1)) < HOST_BITS_PER_WIDE_INT)
3835 : {
3836 87 : int count = INTVAL (XEXP (op0, 1));
3837 87 : HOST_WIDE_INT mask = UINTVAL (trueop1) << count;
3838 :
3839 87 : if (mask >> count == INTVAL (trueop1)
3840 80 : && trunc_int_for_mode (mask, mode) == mask
3841 154 : && (mask & nonzero_bits (XEXP (op0, 0), mode)) == 0)
3842 0 : return simplify_gen_binary (ASHIFTRT, mode,
3843 : plus_constant (mode, XEXP (op0, 0),
3844 0 : mask),
3845 : XEXP (op0, 1));
3846 : }
3847 :
3848 : /* The following happens with bitfield merging.
3849 : (X & C) | ((X | Y) & ~C) -> X | (Y & ~C) */
3850 14482687 : if (GET_CODE (op0) == AND
3851 4083835 : && GET_CODE (op1) == AND
3852 330450 : && CONST_INT_P (XEXP (op0, 1))
3853 160977 : && CONST_INT_P (XEXP (op1, 1))
3854 155520 : && (INTVAL (XEXP (op0, 1))
3855 155520 : == ~INTVAL (XEXP (op1, 1))))
3856 : {
3857 : /* The IOR may be on both sides. */
3858 34471 : rtx top0 = NULL_RTX, top1 = NULL_RTX;
3859 34471 : if (GET_CODE (XEXP (op1, 0)) == IOR)
3860 : top0 = op0, top1 = op1;
3861 34415 : else if (GET_CODE (XEXP (op0, 0)) == IOR)
3862 3 : top0 = op1, top1 = op0;
3863 34471 : if (top0 && top1)
3864 : {
3865 : /* X may be on either side of the inner IOR. */
3866 59 : rtx tem = NULL_RTX;
3867 59 : if (rtx_equal_p (XEXP (top0, 0),
3868 59 : XEXP (XEXP (top1, 0), 0)))
3869 42 : tem = XEXP (XEXP (top1, 0), 1);
3870 17 : else if (rtx_equal_p (XEXP (top0, 0),
3871 17 : XEXP (XEXP (top1, 0), 1)))
3872 8 : tem = XEXP (XEXP (top1, 0), 0);
3873 50 : if (tem)
3874 50 : return simplify_gen_binary (IOR, mode, XEXP (top0, 0),
3875 : simplify_gen_binary
3876 50 : (AND, mode, tem, XEXP (top1, 1)));
3877 : }
3878 : }
3879 :
3880 : /* Convert (ior (and A C) (and B C)) into (and (ior A B) C). */
3881 14482637 : if (GET_CODE (op0) == GET_CODE (op1)
3882 3619571 : && (GET_CODE (op0) == AND
3883 : || GET_CODE (op0) == IOR
3884 3619571 : || GET_CODE (op0) == LSHIFTRT
3885 3287899 : || GET_CODE (op0) == ASHIFTRT
3886 3287805 : || GET_CODE (op0) == ASHIFT
3887 3266893 : || GET_CODE (op0) == ROTATE
3888 3266893 : || GET_CODE (op0) == ROTATERT))
3889 : {
3890 352678 : tem = simplify_distributive_operation (code, mode, op0, op1);
3891 352678 : if (tem)
3892 : return tem;
3893 : }
3894 :
3895 : /* Convert (ior (and (not A) B) A) into A | B. */
3896 14390727 : if (GET_CODE (op0) == AND
3897 14390727 : && negated_ops_p (XEXP (op0, 0), op1))
3898 4261 : return simplify_gen_binary (IOR, mode, XEXP (op0, 1), op1);
3899 :
3900 14386466 : tem = simplify_with_subreg_not (code, mode, op0, op1);
3901 14386466 : if (tem)
3902 : return tem;
3903 :
3904 14386461 : tem = simplify_byte_swapping_operation (code, mode, op0, op1);
3905 14386461 : if (tem)
3906 : return tem;
3907 :
3908 14386428 : tem = simplify_associative_operation (code, mode, op0, op1);
3909 14386428 : if (tem)
3910 : return tem;
3911 :
3912 14073516 : tem = simplify_logical_relational_operation (code, mode, op0, op1);
3913 14073516 : if (tem)
3914 : return tem;
3915 : break;
3916 :
3917 1806399 : case XOR:
3918 1806399 : if (trueop1 == CONST0_RTX (mode))
3919 : return op0;
3920 1750740 : if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
3921 24640 : return simplify_gen_unary (NOT, mode, op0, mode);
3922 1726100 : if (rtx_equal_p (trueop0, trueop1)
3923 2421 : && ! side_effects_p (op0)
3924 1728517 : && GET_MODE_CLASS (mode) != MODE_CC)
3925 2417 : return CONST0_RTX (mode);
3926 :
3927 : /* Canonicalize XOR of the most significant bit to PLUS. */
3928 1723683 : if (CONST_SCALAR_INT_P (op1)
3929 1723683 : && mode_signbit_p (mode, op1))
3930 40257 : return simplify_gen_binary (PLUS, mode, op0, op1);
3931 : /* (xor (plus X C1) C2) is (xor X (C1^C2)) if C1 is signbit. */
3932 1683426 : if (CONST_SCALAR_INT_P (op1)
3933 561273 : && GET_CODE (op0) == PLUS
3934 2511 : && CONST_SCALAR_INT_P (XEXP (op0, 1))
3935 1685021 : && mode_signbit_p (mode, XEXP (op0, 1)))
3936 189 : return simplify_gen_binary (XOR, mode, XEXP (op0, 0),
3937 : simplify_gen_binary (XOR, mode, op1,
3938 189 : XEXP (op0, 1)));
3939 :
3940 : /* If we are XORing two things that have no bits in common,
3941 : convert them into an IOR. This helps to detect rotation encoded
3942 : using those methods and possibly other simplifications. */
3943 :
3944 1683237 : if (HWI_COMPUTABLE_MODE_P (mode)
3945 1390108 : && (nonzero_bits (op0, mode)
3946 1390108 : & nonzero_bits (op1, mode)) == 0)
3947 10899 : return (simplify_gen_binary (IOR, mode, op0, op1));
3948 :
3949 : /* Convert (xor (plus (A - 1)) (neg A)) to -1. */
3950 1672338 : if (match_plus_neg_pattern (op0, op1, mode))
3951 0 : return CONSTM1_RTX (mode);
3952 :
3953 : /* Convert (XOR (NOT x) (NOT y)) to (XOR x y).
3954 : Also convert (XOR (NOT x) y) to (NOT (XOR x y)), similarly for
3955 : (NOT y). */
3956 1672338 : {
3957 1672338 : int num_negated = 0;
3958 :
3959 1672338 : if (GET_CODE (op0) == NOT)
3960 948 : num_negated++, op0 = XEXP (op0, 0);
3961 1672338 : if (GET_CODE (op1) == NOT)
3962 0 : num_negated++, op1 = XEXP (op1, 0);
3963 :
3964 0 : if (num_negated == 2)
3965 0 : return simplify_gen_binary (XOR, mode, op0, op1);
3966 1672338 : else if (num_negated == 1)
3967 948 : return simplify_gen_unary (NOT, mode,
3968 : simplify_gen_binary (XOR, mode, op0, op1),
3969 948 : mode);
3970 : }
3971 :
3972 : /* Convert (xor (and A B) B) to (and (not A) B). The latter may
3973 : correspond to a machine insn or result in further simplifications
3974 : if B is a constant. */
3975 :
3976 1671390 : if (GET_CODE (op0) == AND
3977 176177 : && rtx_equal_p (XEXP (op0, 1), op1)
3978 1699231 : && ! side_effects_p (op1))
3979 27841 : return simplify_gen_binary (AND, mode,
3980 : simplify_gen_unary (NOT, mode,
3981 : XEXP (op0, 0), mode),
3982 27841 : op1);
3983 :
3984 1643549 : else if (GET_CODE (op0) == AND
3985 148336 : && rtx_equal_p (XEXP (op0, 0), op1)
3986 1644922 : && ! side_effects_p (op1))
3987 1373 : return simplify_gen_binary (AND, mode,
3988 : simplify_gen_unary (NOT, mode,
3989 : XEXP (op0, 1), mode),
3990 1373 : op1);
3991 :
3992 : /* Given (xor (ior (xor A B) C) D), where B, C and D are
3993 : constants, simplify to (xor (ior A C) (B&~C)^D), canceling
3994 : out bits inverted twice and not set by C. Similarly, given
3995 : (xor (and (xor A B) C) D), simplify without inverting C in
3996 : the xor operand: (xor (and A C) (B&C)^D).
3997 : */
3998 1642176 : else if ((GET_CODE (op0) == IOR || GET_CODE (op0) == AND)
3999 168613 : && GET_CODE (XEXP (op0, 0)) == XOR
4000 7283 : && CONST_INT_P (op1)
4001 331 : && CONST_INT_P (XEXP (op0, 1))
4002 286 : && CONST_INT_P (XEXP (XEXP (op0, 0), 1)))
4003 : {
4004 38 : enum rtx_code op = GET_CODE (op0);
4005 38 : rtx a = XEXP (XEXP (op0, 0), 0);
4006 38 : rtx b = XEXP (XEXP (op0, 0), 1);
4007 38 : rtx c = XEXP (op0, 1);
4008 38 : rtx d = op1;
4009 38 : HOST_WIDE_INT bval = INTVAL (b);
4010 38 : HOST_WIDE_INT cval = INTVAL (c);
4011 38 : HOST_WIDE_INT dval = INTVAL (d);
4012 38 : HOST_WIDE_INT xcval;
4013 :
4014 38 : if (op == IOR)
4015 8 : xcval = ~cval;
4016 : else
4017 : xcval = cval;
4018 :
4019 38 : return simplify_gen_binary (XOR, mode,
4020 : simplify_gen_binary (op, mode, a, c),
4021 38 : gen_int_mode ((bval & xcval) ^ dval,
4022 : mode));
4023 : }
4024 :
4025 : /* Given (xor (and A B) C), using P^Q == (~P&Q) | (~Q&P),
4026 : we can transform like this:
4027 : (A&B)^C == ~(A&B)&C | ~C&(A&B)
4028 : == (~A|~B)&C | ~C&(A&B) * DeMorgan's Law
4029 : == ~A&C | ~B&C | A&(~C&B) * Distribute and re-order
4030 : Attempt a few simplifications when B and C are both constants. */
4031 1642138 : if (GET_CODE (op0) == AND
4032 146933 : && CONST_INT_P (op1)
4033 13559 : && CONST_INT_P (XEXP (op0, 1)))
4034 : {
4035 11821 : rtx a = XEXP (op0, 0);
4036 11821 : rtx b = XEXP (op0, 1);
4037 11821 : rtx c = op1;
4038 11821 : HOST_WIDE_INT bval = INTVAL (b);
4039 11821 : HOST_WIDE_INT cval = INTVAL (c);
4040 :
4041 : /* Instead of computing ~A&C, we compute its negated value,
4042 : ~(A|~C). If it yields -1, ~A&C is zero, so we can
4043 : optimize for sure. If it does not simplify, we still try
4044 : to compute ~A&C below, but since that always allocates
4045 : RTL, we don't try that before committing to returning a
4046 : simplified expression. */
4047 11821 : rtx n_na_c = simplify_binary_operation (IOR, mode, a,
4048 : GEN_INT (~cval));
4049 :
4050 11821 : if ((~cval & bval) == 0)
4051 : {
4052 518 : rtx na_c = NULL_RTX;
4053 518 : if (n_na_c)
4054 0 : na_c = simplify_gen_unary (NOT, mode, n_na_c, mode);
4055 : else
4056 : {
4057 : /* If ~A does not simplify, don't bother: we don't
4058 : want to simplify 2 operations into 3, and if na_c
4059 : were to simplify with na, n_na_c would have
4060 : simplified as well. */
4061 518 : rtx na = simplify_unary_operation (NOT, mode, a, mode);
4062 518 : if (na)
4063 0 : na_c = simplify_gen_binary (AND, mode, na, c);
4064 : }
4065 :
4066 : /* Try to simplify ~A&C | ~B&C. */
4067 0 : if (na_c != NULL_RTX)
4068 0 : return simplify_gen_binary (IOR, mode, na_c,
4069 0 : gen_int_mode (~bval & cval, mode));
4070 : }
4071 : else
4072 : {
4073 : /* If ~A&C is zero, simplify A&(~C&B) | ~B&C. */
4074 11303 : if (n_na_c == CONSTM1_RTX (mode))
4075 : {
4076 0 : rtx a_nc_b = simplify_gen_binary (AND, mode, a,
4077 0 : gen_int_mode (~cval & bval,
4078 : mode));
4079 0 : return simplify_gen_binary (IOR, mode, a_nc_b,
4080 0 : gen_int_mode (~bval & cval,
4081 : mode));
4082 : }
4083 : }
4084 : }
4085 :
4086 : /* If we have (xor (and (xor A B) C) A) with C a constant we can instead
4087 : do (ior (and A ~C) (and B C)) which is a machine instruction on some
4088 : machines, and also has shorter instruction path length. */
4089 1642138 : if (GET_CODE (op0) == AND
4090 146933 : && GET_CODE (XEXP (op0, 0)) == XOR
4091 6798 : && CONST_INT_P (XEXP (op0, 1))
4092 1645666 : && rtx_equal_p (XEXP (XEXP (op0, 0), 0), trueop1))
4093 : {
4094 7 : rtx a = trueop1;
4095 7 : rtx b = XEXP (XEXP (op0, 0), 1);
4096 7 : rtx c = XEXP (op0, 1);
4097 7 : rtx nc = simplify_gen_unary (NOT, mode, c, mode);
4098 7 : rtx a_nc = simplify_gen_binary (AND, mode, a, nc);
4099 7 : rtx bc = simplify_gen_binary (AND, mode, b, c);
4100 7 : return simplify_gen_binary (IOR, mode, a_nc, bc);
4101 : }
4102 : /* Similarly, (xor (and (xor A B) C) B) as (ior (and A C) (and B ~C)) */
4103 1642131 : else if (GET_CODE (op0) == AND
4104 146926 : && GET_CODE (XEXP (op0, 0)) == XOR
4105 6791 : && CONST_INT_P (XEXP (op0, 1))
4106 1645652 : && rtx_equal_p (XEXP (XEXP (op0, 0), 1), trueop1))
4107 : {
4108 8 : rtx a = XEXP (XEXP (op0, 0), 0);
4109 8 : rtx b = trueop1;
4110 8 : rtx c = XEXP (op0, 1);
4111 8 : rtx nc = simplify_gen_unary (NOT, mode, c, mode);
4112 8 : rtx b_nc = simplify_gen_binary (AND, mode, b, nc);
4113 8 : rtx ac = simplify_gen_binary (AND, mode, a, c);
4114 8 : return simplify_gen_binary (IOR, mode, ac, b_nc);
4115 : }
4116 :
4117 : /* (xor (comparison foo bar) (const_int 1)) can become the reversed
4118 : comparison if STORE_FLAG_VALUE is 1. */
4119 1642123 : if (STORE_FLAG_VALUE == 1
4120 1642123 : && trueop1 == const1_rtx
4121 205744 : && COMPARISON_P (op0)
4122 1648316 : && (reversed = reversed_comparison (op0, mode)))
4123 : return reversed;
4124 :
4125 : /* (lshiftrt foo C) where C is the number of bits in FOO minus 1
4126 : is (lt foo (const_int 0)), so we can perform the above
4127 : simplification if STORE_FLAG_VALUE is 1. */
4128 :
4129 1635938 : if (is_a <scalar_int_mode> (mode, &int_mode)
4130 : && STORE_FLAG_VALUE == 1
4131 1346974 : && trueop1 == const1_rtx
4132 199559 : && GET_CODE (op0) == LSHIFTRT
4133 35172 : && CONST_INT_P (XEXP (op0, 1))
4134 35172 : && INTVAL (XEXP (op0, 1)) == GET_MODE_PRECISION (int_mode) - 1)
4135 34275 : return gen_rtx_GE (int_mode, XEXP (op0, 0), const0_rtx);
4136 :
4137 : /* (xor (comparison foo bar) (const_int sign-bit))
4138 : when STORE_FLAG_VALUE is the sign bit. */
4139 1601663 : if (is_a <scalar_int_mode> (mode, &int_mode)
4140 1312699 : && val_signbit_p (int_mode, STORE_FLAG_VALUE)
4141 0 : && trueop1 == const_true_rtx
4142 0 : && COMPARISON_P (op0)
4143 0 : && (reversed = reversed_comparison (op0, int_mode)))
4144 : return reversed;
4145 :
4146 : /* Convert (xor (and A C) (and B C)) into (and (xor A B) C). */
4147 1601663 : if (GET_CODE (op0) == GET_CODE (op1)
4148 529276 : && (GET_CODE (op0) == AND
4149 529276 : || GET_CODE (op0) == LSHIFTRT
4150 457619 : || GET_CODE (op0) == ASHIFTRT
4151 457555 : || GET_CODE (op0) == ASHIFT
4152 457439 : || GET_CODE (op0) == ROTATE
4153 457331 : || GET_CODE (op0) == ROTATERT))
4154 : {
4155 72479 : tem = simplify_distributive_operation (code, mode, op0, op1);
4156 72479 : if (tem)
4157 : return tem;
4158 : }
4159 :
4160 : /* Convert (xor (ashift A CX) (lshiftrt A CY)) where CX+CY equals the
4161 : mode size to (rotate A CX). */
4162 1533670 : tem = simplify_rotate_op (op0, op1, mode);
4163 1533670 : if (tem)
4164 : return tem;
4165 :
4166 : /* Convert (xor (and (not A) B) A) into A | B. */
4167 1532290 : if (GET_CODE (op0) == AND
4168 1532290 : && negated_ops_p (XEXP (op0, 0), op1))
4169 1 : return simplify_gen_binary (IOR, mode, XEXP (op0, 1), op1);
4170 :
4171 : /* Convert (xor (and (rotate (~1) A) B) (ashift 1 A))
4172 : into B | (1 << A). */
4173 1532289 : if (SHIFT_COUNT_TRUNCATED
4174 : && GET_CODE (op0) == AND
4175 : && GET_CODE (XEXP (op0, 0)) == ROTATE
4176 : && CONST_INT_P (XEXP (XEXP (op0, 0), 0))
4177 : && INTVAL (XEXP (XEXP (op0, 0), 0)) == -2
4178 : && GET_CODE (op1) == ASHIFT
4179 : && CONST_INT_P (XEXP (op1, 0))
4180 : && INTVAL (XEXP (op1, 0)) == 1
4181 : && rtx_equal_p (XEXP (XEXP (op0, 0), 1), XEXP (op1, 1))
4182 : && !side_effects_p (XEXP (op1, 1)))
4183 : return simplify_gen_binary (IOR, mode, XEXP (op0, 1), op1);
4184 :
4185 1532289 : tem = simplify_with_subreg_not (code, mode, op0, op1);
4186 1532289 : if (tem)
4187 : return tem;
4188 :
4189 1532288 : tem = simplify_byte_swapping_operation (code, mode, op0, op1);
4190 1532288 : if (tem)
4191 : return tem;
4192 :
4193 1532288 : tem = simplify_associative_operation (code, mode, op0, op1);
4194 1532288 : if (tem)
4195 : return tem;
4196 : break;
4197 :
4198 24544119 : case AND:
4199 24544119 : if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4200 : return trueop1;
4201 24291475 : if (INTEGRAL_MODE_P (mode) && trueop1 == CONSTM1_RTX (mode))
4202 : return op0;
4203 23905317 : if (HWI_COMPUTABLE_MODE_P (mode))
4204 : {
4205 : /* When WORD_REGISTER_OPERATIONS is true, we need to know the
4206 : nonzero bits in WORD_MODE rather than MODE. */
4207 21007340 : scalar_int_mode tmode = as_a <scalar_int_mode> (mode);
4208 21007340 : if (WORD_REGISTER_OPERATIONS
4209 : && GET_MODE_BITSIZE (tmode) < BITS_PER_WORD)
4210 : tmode = word_mode;
4211 21007340 : HOST_WIDE_INT nzop0 = nonzero_bits (trueop0, tmode);
4212 21007340 : HOST_WIDE_INT nzop1;
4213 21007340 : if (CONST_INT_P (trueop1))
4214 : {
4215 17910462 : HOST_WIDE_INT val1 = INTVAL (trueop1);
4216 : /* If we are turning off bits already known off in OP0, we need
4217 : not do an AND. */
4218 17910462 : if ((nzop0 & ~val1) == 0)
4219 417003 : return op0;
4220 :
4221 : /* Canonicalize (and (subreg (lshiftrt X shift)) mask) into
4222 : (and (lshiftrt (subreg X) shift) mask).
4223 :
4224 : Keeps shift and AND in the same mode, improving recognition.
4225 : Only applied when subreg is a lowpart, shift is valid,
4226 : and no precision is lost. */
4227 17576259 : if (SUBREG_P (op0)
4228 6053325 : && subreg_lowpart_p (op0)
4229 6037385 : && !paradoxical_subreg_p (op0)
4230 926317 : && GET_CODE (XEXP (op0, 0)) == LSHIFTRT
4231 : /* simplify_subreg asserts the object being accessed is not
4232 : VOIDmode or BLKmode. We may have a REG_EQUAL note which
4233 : is not simplified and the source operand is a constant,
4234 : and thus VOIDmode. Guard against that. */
4235 118297 : && GET_MODE (XEXP (XEXP (op0, 0), 0)) != VOIDmode
4236 118247 : && GET_MODE (XEXP (XEXP (op0, 0), 0)) != BLKmode
4237 118247 : && !CONST_INT_P (XEXP (XEXP (op0, 0), 0))
4238 118247 : && CONST_INT_P (XEXP (XEXP (op0, 0), 1))
4239 96619 : && INTVAL (XEXP (XEXP (op0, 0), 1)) >= 0
4240 96619 : && INTVAL (XEXP (XEXP (op0, 0), 1)) < HOST_BITS_PER_WIDE_INT
4241 17672877 : && ((INTVAL (XEXP (XEXP (op0, 0), 1))
4242 96618 : + floor_log2 (val1))
4243 17576259 : < GET_MODE_PRECISION (as_a <scalar_int_mode> (mode))))
4244 : {
4245 10558 : tem = XEXP (XEXP (op0, 0), 0);
4246 10558 : if (SUBREG_P (tem))
4247 : {
4248 265 : if (subreg_lowpart_p (tem))
4249 265 : tem = SUBREG_REG (tem);
4250 : else
4251 : tem = NULL_RTX;
4252 : }
4253 265 : if (tem != NULL_RTX)
4254 : {
4255 10558 : offset = subreg_lowpart_offset (mode, GET_MODE (tem));
4256 10558 : tem = simplify_gen_subreg (mode, tem, GET_MODE (tem),
4257 10558 : offset);
4258 10558 : if (tem)
4259 : {
4260 10558 : unsigned shiftamt = INTVAL (XEXP (XEXP (op0, 0), 1));
4261 10558 : rtx shiftamtrtx = gen_int_shift_amount (mode,
4262 10558 : shiftamt);
4263 10558 : op0 = simplify_gen_binary (LSHIFTRT, mode, tem,
4264 : shiftamtrtx);
4265 10558 : return simplify_gen_binary (AND, mode, op0, op1);
4266 : }
4267 : }
4268 : }
4269 : }
4270 20662579 : nzop1 = nonzero_bits (trueop1, mode);
4271 : /* If we are clearing all the nonzero bits, the result is zero. */
4272 20662579 : if ((nzop1 & nzop0) == 0
4273 20662579 : && !side_effects_p (op0) && !side_effects_p (op1))
4274 72242 : return CONST0_RTX (mode);
4275 : }
4276 23491722 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0)
4277 23491718 : && GET_MODE_CLASS (mode) != MODE_CC)
4278 : return op0;
4279 : /* A & (~A) -> 0 */
4280 642791 : if (((GET_CODE (op0) == NOT && rtx_equal_p (XEXP (op0, 0), op1))
4281 23480956 : || (GET_CODE (op1) == NOT && rtx_equal_p (XEXP (op1, 0), op0)))
4282 4000 : && ! side_effects_p (op0)
4283 23488909 : && GET_MODE_CLASS (mode) != MODE_CC)
4284 3999 : return CONST0_RTX (mode);
4285 :
4286 : /* Convert (and (plus (A - 1)) (neg A)) to 0. */
4287 23480911 : if (match_plus_neg_pattern (op0, op1, mode))
4288 2 : return CONST0_RTX (mode);
4289 :
4290 : /* Transform (and (extend X) C) into (zero_extend (and X C)) if
4291 : there are no nonzero bits of C outside of X's mode. */
4292 46961818 : if ((GET_CODE (op0) == SIGN_EXTEND
4293 23480909 : || GET_CODE (op0) == ZERO_EXTEND)
4294 95861 : && CONST_SCALAR_INT_P (trueop1)
4295 81786 : && is_a <scalar_int_mode> (mode, &int_mode)
4296 81786 : && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4297 23562695 : && (wi::mask (GET_MODE_PRECISION (inner_mode), true,
4298 81786 : GET_MODE_PRECISION (int_mode))
4299 23562695 : & rtx_mode_t (trueop1, mode)) == 0)
4300 : {
4301 79756 : machine_mode imode = GET_MODE (XEXP (op0, 0));
4302 79756 : tem = immed_wide_int_const (rtx_mode_t (trueop1, mode), imode);
4303 79756 : tem = simplify_gen_binary (AND, imode, XEXP (op0, 0), tem);
4304 79756 : return simplify_gen_unary (ZERO_EXTEND, mode, tem, imode);
4305 : }
4306 :
4307 : /* Transform (and (truncate X) C) into (truncate (and X C)). This way
4308 : we might be able to further simplify the AND with X and potentially
4309 : remove the truncation altogether. */
4310 23401153 : if (GET_CODE (op0) == TRUNCATE && CONST_INT_P (trueop1))
4311 : {
4312 6 : rtx x = XEXP (op0, 0);
4313 6 : machine_mode xmode = GET_MODE (x);
4314 6 : tem = simplify_gen_binary (AND, xmode, x,
4315 6 : gen_int_mode (INTVAL (trueop1), xmode));
4316 6 : return simplify_gen_unary (TRUNCATE, mode, tem, xmode);
4317 : }
4318 :
4319 : /* Canonicalize (A | C1) & C2 as (A & C2) | (C1 & C2). */
4320 23401147 : if (GET_CODE (op0) == IOR
4321 1471090 : && CONST_INT_P (trueop1)
4322 235869 : && CONST_INT_P (XEXP (op0, 1)))
4323 : {
4324 138962 : HOST_WIDE_INT tmp = INTVAL (trueop1) & INTVAL (XEXP (op0, 1));
4325 138962 : return simplify_gen_binary (IOR, mode,
4326 : simplify_gen_binary (AND, mode,
4327 : XEXP (op0, 0), op1),
4328 138962 : gen_int_mode (tmp, mode));
4329 : }
4330 :
4331 : /* Convert (A ^ B) & A to A & (~B) since the latter is often a single
4332 : insn (and may simplify more). */
4333 23262185 : if (GET_CODE (op0) == XOR
4334 137640 : && rtx_equal_p (XEXP (op0, 0), op1)
4335 23263626 : && ! side_effects_p (op1))
4336 1441 : return simplify_gen_binary (AND, mode,
4337 : simplify_gen_unary (NOT, mode,
4338 : XEXP (op0, 1), mode),
4339 1441 : op1);
4340 :
4341 23260744 : if (GET_CODE (op0) == XOR
4342 136199 : && rtx_equal_p (XEXP (op0, 1), op1)
4343 23263946 : && ! side_effects_p (op1))
4344 3202 : return simplify_gen_binary (AND, mode,
4345 : simplify_gen_unary (NOT, mode,
4346 : XEXP (op0, 0), mode),
4347 3202 : op1);
4348 :
4349 : /* Similarly for (~(A ^ B)) & A. */
4350 23257542 : if (GET_CODE (op0) == NOT
4351 638838 : && GET_CODE (XEXP (op0, 0)) == XOR
4352 3464 : && rtx_equal_p (XEXP (XEXP (op0, 0), 0), op1)
4353 23257596 : && ! side_effects_p (op1))
4354 54 : return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 1), op1);
4355 :
4356 23257488 : if (GET_CODE (op0) == NOT
4357 638784 : && GET_CODE (XEXP (op0, 0)) == XOR
4358 3410 : && rtx_equal_p (XEXP (XEXP (op0, 0), 1), op1)
4359 23257525 : && ! side_effects_p (op1))
4360 37 : return simplify_gen_binary (AND, mode, XEXP (XEXP (op0, 0), 0), op1);
4361 :
4362 : /* Convert (A | B) & A to A. */
4363 23257451 : if (GET_CODE (op0) == IOR
4364 1332128 : && (rtx_equal_p (XEXP (op0, 0), op1)
4365 1331606 : || rtx_equal_p (XEXP (op0, 1), op1))
4366 725 : && ! side_effects_p (XEXP (op0, 0))
4367 23258176 : && ! side_effects_p (XEXP (op0, 1)))
4368 : return op1;
4369 :
4370 : /* For constants M and N, if M == (1LL << cst) - 1 && (N & M) == M,
4371 : ((A & N) + B) & M -> (A + B) & M
4372 : Similarly if (N & M) == 0,
4373 : ((A | N) + B) & M -> (A + B) & M
4374 : and for - instead of + and/or ^ instead of |.
4375 : Also, if (N & M) == 0, then
4376 : (A +- N) & M -> A & M. */
4377 23256726 : if (CONST_INT_P (trueop1)
4378 17376852 : && HWI_COMPUTABLE_MODE_P (mode)
4379 17348211 : && ~UINTVAL (trueop1)
4380 17348211 : && (UINTVAL (trueop1) & (UINTVAL (trueop1) + 1)) == 0
4381 34291188 : && (GET_CODE (op0) == PLUS || GET_CODE (op0) == MINUS))
4382 : {
4383 982382 : rtx pmop[2];
4384 982382 : int which;
4385 :
4386 982382 : pmop[0] = XEXP (op0, 0);
4387 982382 : pmop[1] = XEXP (op0, 1);
4388 :
4389 982382 : if (CONST_INT_P (pmop[1])
4390 523208 : && (UINTVAL (pmop[1]) & UINTVAL (trueop1)) == 0)
4391 170612 : return simplify_gen_binary (AND, mode, pmop[0], op1);
4392 :
4393 2458830 : for (which = 0; which < 2; which++)
4394 : {
4395 1639220 : tem = pmop[which];
4396 1639220 : switch (GET_CODE (tem))
4397 : {
4398 12115 : case AND:
4399 12115 : if (CONST_INT_P (XEXP (tem, 1))
4400 10605 : && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1))
4401 : == UINTVAL (trueop1))
4402 7701 : pmop[which] = XEXP (tem, 0);
4403 : break;
4404 1728 : case IOR:
4405 1728 : case XOR:
4406 1728 : if (CONST_INT_P (XEXP (tem, 1))
4407 699 : && (UINTVAL (XEXP (tem, 1)) & UINTVAL (trueop1)) == 0)
4408 139 : pmop[which] = XEXP (tem, 0);
4409 : break;
4410 : default:
4411 : break;
4412 : }
4413 : }
4414 :
4415 819610 : if (pmop[0] != XEXP (op0, 0) || pmop[1] != XEXP (op0, 1))
4416 : {
4417 7840 : tem = simplify_gen_binary (GET_CODE (op0), mode,
4418 : pmop[0], pmop[1]);
4419 7840 : return simplify_gen_binary (code, mode, tem, op1);
4420 : }
4421 : }
4422 :
4423 : /* (and X (ior (not X) Y) -> (and X Y) */
4424 23086114 : if (GET_CODE (op1) == IOR
4425 1000428 : && GET_CODE (XEXP (op1, 0)) == NOT
4426 23091591 : && rtx_equal_p (op0, XEXP (XEXP (op1, 0), 0)))
4427 0 : return simplify_gen_binary (AND, mode, op0, XEXP (op1, 1));
4428 :
4429 : /* (and (ior (not X) Y) X) -> (and X Y) */
4430 23086114 : if (GET_CODE (op0) == IOR
4431 1331403 : && GET_CODE (XEXP (op0, 0)) == NOT
4432 23137738 : && rtx_equal_p (op1, XEXP (XEXP (op0, 0), 0)))
4433 6 : return simplify_gen_binary (AND, mode, op1, XEXP (op0, 1));
4434 :
4435 : /* (and X (ior Y (not X)) -> (and X Y) */
4436 23086108 : if (GET_CODE (op1) == IOR
4437 1000428 : && GET_CODE (XEXP (op1, 1)) == NOT
4438 23086447 : && rtx_equal_p (op0, XEXP (XEXP (op1, 1), 0)))
4439 0 : return simplify_gen_binary (AND, mode, op0, XEXP (op1, 0));
4440 :
4441 : /* (and (ior Y (not X)) X) -> (and X Y) */
4442 23086108 : if (GET_CODE (op0) == IOR
4443 1331397 : && GET_CODE (XEXP (op0, 1)) == NOT
4444 23094986 : && rtx_equal_p (op1, XEXP (XEXP (op0, 1), 0)))
4445 43 : return simplify_gen_binary (AND, mode, op1, XEXP (op0, 0));
4446 :
4447 : /* (and (ior/xor X Y) (not Y)) -> X & ~Y */
4448 23086065 : if ((GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
4449 23086065 : && negated_ops_p (op1, XEXP (op0, 1)))
4450 23 : return simplify_gen_binary (AND, mode, XEXP (op0, 0),
4451 : simplify_gen_unary (NOT, mode,
4452 : XEXP (op0, 1),
4453 23 : mode));
4454 : /* (and (ior/xor Y X) (not Y)) -> X & ~Y */
4455 23086042 : if ((GET_CODE (op0) == IOR || GET_CODE (op0) == XOR)
4456 23086042 : && negated_ops_p (op1, XEXP (op0, 0)))
4457 4 : return simplify_gen_binary (AND, mode, XEXP (op0, 1),
4458 : simplify_gen_unary (NOT, mode,
4459 : XEXP (op0, 0),
4460 4 : mode));
4461 :
4462 : /* Convert (and (ior A C) (ior B C)) into (ior (and A B) C). */
4463 23086038 : if (GET_CODE (op0) == GET_CODE (op1)
4464 2207445 : && (GET_CODE (op0) == AND
4465 : || GET_CODE (op0) == IOR
4466 2207445 : || GET_CODE (op0) == LSHIFTRT
4467 1207679 : || GET_CODE (op0) == ASHIFTRT
4468 1207565 : || GET_CODE (op0) == ASHIFT
4469 1207414 : || GET_CODE (op0) == ROTATE
4470 1207414 : || GET_CODE (op0) == ROTATERT))
4471 : {
4472 1000031 : tem = simplify_distributive_operation (code, mode, op0, op1);
4473 1000031 : if (tem)
4474 : return tem;
4475 : }
4476 :
4477 : /* (and:v4si
4478 : (ashiftrt:v4si A 16)
4479 : (const_vector: 0xffff x4))
4480 : is just (lshiftrt:v4si A 16). */
4481 22130397 : if (VECTOR_MODE_P (mode) && GET_CODE (op0) == ASHIFTRT
4482 4465 : && (CONST_INT_P (XEXP (op0, 1))
4483 1958 : || (GET_CODE (XEXP (op0, 1)) == CONST_VECTOR
4484 94 : && const_vec_duplicate_p (XEXP (op0, 1))
4485 0 : && CONST_INT_P (XVECEXP (XEXP (op0, 1), 0, 0))))
4486 2507 : && GET_CODE (op1) == CONST_VECTOR
4487 22130422 : && const_vec_duplicate_p (op1)
4488 22130464 : && CONST_INT_P (XVECEXP (op1, 0, 0)))
4489 : {
4490 130 : unsigned HOST_WIDE_INT shift_count
4491 : = (CONST_INT_P (XEXP (op0, 1))
4492 65 : ? UINTVAL (XEXP (op0, 1))
4493 0 : : UINTVAL (XVECEXP (XEXP (op0, 1), 0, 0)));
4494 65 : unsigned HOST_WIDE_INT inner_prec
4495 130 : = GET_MODE_PRECISION (GET_MODE_INNER (mode));
4496 :
4497 : /* Avoid UD shift count. */
4498 65 : if (shift_count < inner_prec
4499 59 : && (UINTVAL (XVECEXP (op1, 0, 0))
4500 59 : == (HOST_WIDE_INT_1U << (inner_prec - shift_count)) - 1))
4501 42 : return simplify_gen_binary (LSHIFTRT, mode, XEXP (op0, 0), XEXP (op0, 1));
4502 : }
4503 :
4504 22130355 : tem = simplify_with_subreg_not (code, mode, op0, op1);
4505 22130355 : if (tem)
4506 : return tem;
4507 :
4508 22128070 : tem = simplify_byte_swapping_operation (code, mode, op0, op1);
4509 22128070 : if (tem)
4510 : return tem;
4511 :
4512 22127597 : tem = simplify_associative_operation (code, mode, op0, op1);
4513 22127597 : if (tem)
4514 : return tem;
4515 : break;
4516 :
4517 903660 : case UDIV:
4518 : /* 0/x is 0 (or x&0 if x has side-effects). */
4519 903660 : if (trueop0 == CONST0_RTX (mode)
4520 265 : && !cfun->can_throw_non_call_exceptions)
4521 : {
4522 265 : if (side_effects_p (op1))
4523 0 : return simplify_gen_binary (AND, mode, op1, trueop0);
4524 : return trueop0;
4525 : }
4526 : /* x/1 is x. */
4527 903395 : if (trueop1 == CONST1_RTX (mode))
4528 : {
4529 239486 : tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
4530 239486 : if (tem)
4531 : return tem;
4532 : }
4533 : /* Convert divide by power of two into shift. */
4534 663909 : if (CONST_INT_P (trueop1)
4535 994708 : && (val = exact_log2 (UINTVAL (trueop1))) > 0)
4536 330799 : return simplify_gen_binary (LSHIFTRT, mode, op0,
4537 330799 : gen_int_shift_amount (mode, val));
4538 : break;
4539 :
4540 1175456 : case DIV:
4541 : /* Handle floating point and integers separately. */
4542 1175456 : if (SCALAR_FLOAT_MODE_P (mode))
4543 : {
4544 : /* Maybe change 0.0 / x to 0.0. This transformation isn't
4545 : safe for modes with NaNs, since 0.0 / 0.0 will then be
4546 : NaN rather than 0.0. Nor is it safe for modes with signed
4547 : zeros, since dividing 0 by a negative number gives -0.0 */
4548 326579 : if (trueop0 == CONST0_RTX (mode)
4549 2886 : && !HONOR_NANS (mode)
4550 14 : && !HONOR_SIGNED_ZEROS (mode)
4551 326593 : && ! side_effects_p (op1))
4552 : return op0;
4553 : /* x/1.0 is x. */
4554 326565 : if (trueop1 == CONST1_RTX (mode)
4555 326565 : && !HONOR_SNANS (mode))
4556 : return op0;
4557 :
4558 326560 : if (CONST_DOUBLE_AS_FLOAT_P (trueop1)
4559 26869 : && trueop1 != CONST0_RTX (mode))
4560 : {
4561 20586 : const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
4562 :
4563 : /* x/-1.0 is -x. */
4564 20586 : if (real_equal (d1, &dconstm1)
4565 20586 : && !HONOR_SNANS (mode))
4566 0 : return simplify_gen_unary (NEG, mode, op0, mode);
4567 :
4568 : /* Change FP division by a constant into multiplication.
4569 : Only do this with -freciprocal-math. */
4570 20586 : if (flag_reciprocal_math
4571 20586 : && !real_equal (d1, &dconst0))
4572 : {
4573 7 : REAL_VALUE_TYPE d;
4574 7 : real_arithmetic (&d, RDIV_EXPR, &dconst1, d1);
4575 7 : tem = const_double_from_real_value (d, mode);
4576 7 : return simplify_gen_binary (MULT, mode, op0, tem);
4577 : }
4578 : }
4579 : }
4580 848877 : else if (SCALAR_INT_MODE_P (mode) || GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
4581 : {
4582 : /* 0/x is 0 (or x&0 if x has side-effects). */
4583 826277 : if (trueop0 == CONST0_RTX (mode)
4584 678 : && !cfun->can_throw_non_call_exceptions)
4585 : {
4586 601 : if (side_effects_p (op1))
4587 8 : return simplify_gen_binary (AND, mode, op1, trueop0);
4588 : return trueop0;
4589 : }
4590 : /* x/1 is x. */
4591 825676 : if (trueop1 == CONST1_RTX (mode))
4592 : {
4593 876 : tem = rtl_hooks.gen_lowpart_no_emit (mode, op0);
4594 876 : if (tem)
4595 : return tem;
4596 : }
4597 : /* x/-1 is -x. */
4598 824800 : if (trueop1 == CONSTM1_RTX (mode))
4599 : {
4600 215 : rtx x = rtl_hooks.gen_lowpart_no_emit (mode, op0);
4601 215 : if (x)
4602 215 : return simplify_gen_unary (NEG, mode, x, mode);
4603 : }
4604 : }
4605 : break;
4606 :
4607 931347 : case UMOD:
4608 : /* 0%x is 0 (or x&0 if x has side-effects). */
4609 931347 : if (trueop0 == CONST0_RTX (mode))
4610 : {
4611 767 : if (side_effects_p (op1))
4612 0 : return simplify_gen_binary (AND, mode, op1, trueop0);
4613 : return trueop0;
4614 : }
4615 : /* x%1 is 0 (of x&0 if x has side-effects). */
4616 930580 : if (trueop1 == CONST1_RTX (mode))
4617 : {
4618 273766 : if (side_effects_p (op0))
4619 0 : return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4620 273766 : return CONST0_RTX (mode);
4621 : }
4622 : /* Implement modulus by power of two as AND. */
4623 656814 : if (CONST_INT_P (trueop1)
4624 978374 : && exact_log2 (UINTVAL (trueop1)) > 0)
4625 321560 : return simplify_gen_binary (AND, mode, op0,
4626 321560 : gen_int_mode (UINTVAL (trueop1) - 1,
4627 : mode));
4628 : break;
4629 :
4630 359521 : case MOD:
4631 : /* 0%x is 0 (or x&0 if x has side-effects). */
4632 359521 : if (trueop0 == CONST0_RTX (mode))
4633 : {
4634 833 : if (side_effects_p (op1))
4635 8 : return simplify_gen_binary (AND, mode, op1, trueop0);
4636 : return trueop0;
4637 : }
4638 : /* x%1 and x%-1 is 0 (or x&0 if x has side-effects). */
4639 358688 : if (trueop1 == CONST1_RTX (mode) || trueop1 == constm1_rtx)
4640 : {
4641 440 : if (side_effects_p (op0))
4642 0 : return simplify_gen_binary (AND, mode, op0, CONST0_RTX (mode));
4643 440 : return CONST0_RTX (mode);
4644 : }
4645 : break;
4646 :
4647 137161 : case ROTATERT:
4648 137161 : case ROTATE:
4649 137161 : if (trueop1 == CONST0_RTX (mode))
4650 : return op0;
4651 : /* Canonicalize rotates by constant amount. If the condition of
4652 : reversing direction is met, then reverse the direction. */
4653 : #if defined(HAVE_rotate) && defined(HAVE_rotatert)
4654 137071 : if (reverse_rotate_by_imm_p (mode, (code == ROTATE), trueop1))
4655 : {
4656 11681 : int new_amount = GET_MODE_UNIT_PRECISION (mode) - INTVAL (trueop1);
4657 11681 : rtx new_amount_rtx = gen_int_shift_amount (mode, new_amount);
4658 12225 : return simplify_gen_binary (code == ROTATE ? ROTATERT : ROTATE,
4659 : mode, op0, new_amount_rtx);
4660 : }
4661 : #endif
4662 : /* ROTATE/ROTATERT:HI (X:HI, 8) is BSWAP:HI (X). Other combinations
4663 : such as SImode with a count of 16 do not correspond to RTL BSWAP
4664 : semantics. */
4665 125390 : tem = unwrap_const_vec_duplicate (trueop1);
4666 125390 : if (GET_MODE_UNIT_BITSIZE (mode) == (2 * BITS_PER_UNIT)
4667 125390 : && CONST_INT_P (tem) && INTVAL (tem) == BITS_PER_UNIT)
4668 599 : return simplify_gen_unary (BSWAP, mode, op0, mode);
4669 :
4670 : /* FALLTHRU */
4671 5227101 : case ASHIFTRT:
4672 5227101 : if (trueop1 == CONST0_RTX (mode))
4673 : return op0;
4674 5225267 : if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4675 : return op0;
4676 : /* Rotating ~0 always results in ~0. */
4677 5225095 : if (CONST_INT_P (trueop0)
4678 14585 : && HWI_COMPUTABLE_MODE_P (mode)
4679 14557 : && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4680 5225095 : && ! side_effects_p (op1))
4681 : return op0;
4682 :
4683 31510996 : canonicalize_shift:
4684 : /* Given:
4685 : scalar modes M1, M2
4686 : scalar constants c1, c2
4687 : size (M2) > size (M1)
4688 : c1 == size (M2) - size (M1)
4689 : optimize:
4690 : ([a|l]shiftrt:M1 (subreg:M1 (lshiftrt:M2 (reg:M2) (const_int <c1>))
4691 : <low_part>)
4692 : (const_int <c2>))
4693 : to:
4694 : (subreg:M1 ([a|l]shiftrt:M2 (reg:M2) (const_int <c1 + c2>))
4695 : <low_part>). */
4696 31510996 : if ((code == ASHIFTRT || code == LSHIFTRT)
4697 11924853 : && is_a <scalar_int_mode> (mode, &int_mode)
4698 11126555 : && SUBREG_P (op0)
4699 1203281 : && CONST_INT_P (op1)
4700 1201056 : && GET_CODE (SUBREG_REG (op0)) == LSHIFTRT
4701 19802 : && is_a <scalar_int_mode> (GET_MODE (SUBREG_REG (op0)),
4702 : &inner_mode)
4703 19802 : && CONST_INT_P (XEXP (SUBREG_REG (op0), 1))
4704 39298 : && GET_MODE_BITSIZE (inner_mode) > GET_MODE_BITSIZE (int_mode)
4705 19649 : && (INTVAL (XEXP (SUBREG_REG (op0), 1))
4706 39298 : == GET_MODE_BITSIZE (inner_mode) - GET_MODE_BITSIZE (int_mode))
4707 31530393 : && subreg_lowpart_p (op0))
4708 : {
4709 19397 : rtx tmp = gen_int_shift_amount
4710 19397 : (inner_mode, INTVAL (XEXP (SUBREG_REG (op0), 1)) + INTVAL (op1));
4711 :
4712 : /* Combine would usually zero out the value when combining two
4713 : local shifts and the range becomes larger or equal to the mode.
4714 : However since we fold away one of the shifts here combine won't
4715 : see it so we should immediately zero the result if it's out of
4716 : range. */
4717 19397 : if (code == LSHIFTRT
4718 35300 : && INTVAL (tmp) >= GET_MODE_BITSIZE (inner_mode))
4719 0 : tmp = const0_rtx;
4720 : else
4721 19397 : tmp = simplify_gen_binary (code,
4722 : inner_mode,
4723 19397 : XEXP (SUBREG_REG (op0), 0),
4724 : tmp);
4725 :
4726 19397 : return lowpart_subreg (int_mode, tmp, inner_mode);
4727 : }
4728 :
4729 31491599 : if (SHIFT_COUNT_TRUNCATED && CONST_INT_P (op1))
4730 : {
4731 : val = INTVAL (op1) & (GET_MODE_UNIT_PRECISION (mode) - 1);
4732 : if (val != INTVAL (op1))
4733 : return simplify_gen_binary (code, mode, op0,
4734 : gen_int_shift_amount (mode, val));
4735 : }
4736 :
4737 : /* Simplify:
4738 :
4739 : (code:M1
4740 : (subreg:M1
4741 : ([al]shiftrt:M2
4742 : (subreg:M2
4743 : (ashift:M1 X C1))
4744 : C2))
4745 : C3)
4746 :
4747 : to:
4748 :
4749 : (code:M1
4750 : ([al]shiftrt:M1
4751 : (ashift:M1 X C1+N)
4752 : C2+N)
4753 : C3)
4754 :
4755 : where M1 is N bits wider than M2. Optimizing the (subreg:M1 ...)
4756 : directly would be arithmetically correct, but restricting the
4757 : simplification to shifts by constants is more conservative,
4758 : since it is more likely to lead to further simplifications. */
4759 31491599 : if (is_a<scalar_int_mode> (mode, &int_mode)
4760 5648494 : && paradoxical_subreg_p (op0)
4761 5202149 : && is_a<scalar_int_mode> (GET_MODE (SUBREG_REG (op0)), &inner_mode)
4762 5202063 : && (GET_CODE (SUBREG_REG (op0)) == ASHIFTRT
4763 5202063 : || GET_CODE (SUBREG_REG (op0)) == LSHIFTRT)
4764 148000 : && CONST_INT_P (op1))
4765 : {
4766 148000 : auto xcode = GET_CODE (SUBREG_REG (op0));
4767 148000 : rtx xop0 = XEXP (SUBREG_REG (op0), 0);
4768 148000 : rtx xop1 = XEXP (SUBREG_REG (op0), 1);
4769 148000 : if (SUBREG_P (xop0)
4770 11205 : && GET_MODE (SUBREG_REG (xop0)) == mode
4771 11112 : && GET_CODE (SUBREG_REG (xop0)) == ASHIFT
4772 605 : && CONST_INT_P (xop1)
4773 148605 : && UINTVAL (xop1) < GET_MODE_PRECISION (inner_mode))
4774 : {
4775 605 : rtx yop0 = XEXP (SUBREG_REG (xop0), 0);
4776 605 : rtx yop1 = XEXP (SUBREG_REG (xop0), 1);
4777 605 : if (CONST_INT_P (yop1)
4778 605 : && UINTVAL (yop1) < GET_MODE_PRECISION (inner_mode))
4779 : {
4780 1210 : auto bias = (GET_MODE_BITSIZE (int_mode)
4781 605 : - GET_MODE_BITSIZE (inner_mode));
4782 605 : tem = simplify_gen_binary (ASHIFT, mode, yop0,
4783 605 : GEN_INT (INTVAL (yop1) + bias));
4784 605 : tem = simplify_gen_binary (xcode, mode, tem,
4785 605 : GEN_INT (INTVAL (xop1) + bias));
4786 605 : return simplify_gen_binary (code, mode, tem, op1);
4787 : }
4788 : }
4789 : }
4790 : break;
4791 :
4792 0 : case SS_ASHIFT:
4793 0 : if (CONST_INT_P (trueop0)
4794 0 : && HWI_COMPUTABLE_MODE_P (mode)
4795 0 : && (UINTVAL (trueop0) == (GET_MODE_MASK (mode) >> 1)
4796 0 : || mode_signbit_p (mode, trueop0))
4797 0 : && ! side_effects_p (op1))
4798 : return op0;
4799 0 : goto simplify_ashift;
4800 :
4801 0 : case US_ASHIFT:
4802 0 : if (CONST_INT_P (trueop0)
4803 0 : && HWI_COMPUTABLE_MODE_P (mode)
4804 0 : && UINTVAL (trueop0) == GET_MODE_MASK (mode)
4805 0 : && ! side_effects_p (op1))
4806 : return op0;
4807 : /* FALLTHRU */
4808 :
4809 19893859 : case ASHIFT:
4810 19893859 : simplify_ashift:
4811 19893859 : if (trueop1 == CONST0_RTX (mode))
4812 : return op0;
4813 19735450 : if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4814 : return op0;
4815 19706320 : if (mem_depth
4816 244937 : && code == ASHIFT
4817 244937 : && CONST_INT_P (trueop1)
4818 244929 : && is_a <scalar_int_mode> (mode, &int_mode)
4819 19951237 : && IN_RANGE (UINTVAL (trueop1),
4820 : 1, GET_MODE_PRECISION (int_mode) - 1))
4821 : {
4822 244917 : auto c = (wi::one (GET_MODE_PRECISION (int_mode))
4823 244917 : << UINTVAL (trueop1));
4824 244917 : rtx new_op1 = immed_wide_int_const (c, int_mode);
4825 244917 : return simplify_gen_binary (MULT, int_mode, op0, new_op1);
4826 244917 : }
4827 :
4828 : /* If we're shifting left a signed bitfield extraction and the
4829 : shift count + bitfield size is a natural integral mode and
4830 : the field starts at offset 0 (counting from the LSB), then
4831 : this can be simplified to a sign extension of a left shift.
4832 :
4833 : Some ISAs (RISC-V 64-bit) have inherent support for such
4834 : instructions and it's better for various optimizations to
4835 : express as a SIGN_EXTEND rather than a shifted SIGN_EXTRACT. */
4836 19461403 : if (GET_CODE (op0) == SIGN_EXTRACT
4837 27 : && REG_P (XEXP (op0, 0))
4838 : /* The size of the bitfield, the location of the bitfield and
4839 : shift count must be CONST_INTs. */
4840 21 : && CONST_INT_P (op1)
4841 21 : && CONST_INT_P (XEXP (op0, 1))
4842 21 : && CONST_INT_P (XEXP (op0, 2)))
4843 : {
4844 21 : int size = INTVAL (op1) + INTVAL (XEXP (op0, 1));
4845 21 : machine_mode smaller_mode;
4846 : /* Now we need to verify the size of the bitfield plus the shift
4847 : count is an integral mode and smaller than MODE. This is
4848 : requirement for using SIGN_EXTEND. We also need to verify the
4849 : field starts at bit location 0 and that the subreg lowpart also
4850 : starts at zero. */
4851 21 : if (int_mode_for_size (size, size).exists (&smaller_mode)
4852 3 : && mode > smaller_mode
4853 21 : && (subreg_lowpart_offset (smaller_mode, mode).to_constant ()
4854 3 : == UINTVAL (XEXP (op0, 2)))
4855 1 : && XEXP (op0, 2) == CONST0_RTX (mode))
4856 : {
4857 : /* Everything passed. So we just need to get the subreg of the
4858 : original input, shift it and sign extend the result. */
4859 1 : rtx op = gen_lowpart (smaller_mode, XEXP (op0, 0));
4860 1 : rtx x = gen_rtx_ASHIFT (smaller_mode, op, op1);
4861 1 : return gen_rtx_SIGN_EXTEND (mode, x);
4862 : }
4863 : }
4864 19461402 : goto canonicalize_shift;
4865 :
4866 8616217 : case LSHIFTRT:
4867 8616217 : if (trueop1 == CONST0_RTX (mode))
4868 : return op0;
4869 6825924 : if (trueop0 == CONST0_RTX (mode) && ! side_effects_p (op1))
4870 : return op0;
4871 : /* Optimize (lshiftrt (clz X) C) as (eq X 0). */
4872 6824499 : if (GET_CODE (op0) == CLZ
4873 0 : && is_a <scalar_int_mode> (GET_MODE (XEXP (op0, 0)), &inner_mode)
4874 0 : && CONST_INT_P (trueop1)
4875 : && STORE_FLAG_VALUE == 1
4876 6824499 : && INTVAL (trueop1) < GET_MODE_UNIT_PRECISION (mode))
4877 : {
4878 0 : unsigned HOST_WIDE_INT zero_val = 0;
4879 :
4880 0 : if (CLZ_DEFINED_VALUE_AT_ZERO (inner_mode, zero_val)
4881 0 : && zero_val == GET_MODE_PRECISION (inner_mode)
4882 0 : && INTVAL (trueop1) == exact_log2 (zero_val))
4883 0 : return simplify_gen_relational (EQ, mode, inner_mode,
4884 0 : XEXP (op0, 0), const0_rtx);
4885 : }
4886 6824499 : goto canonicalize_shift;
4887 :
4888 236385 : case SMIN:
4889 236385 : if (HWI_COMPUTABLE_MODE_P (mode)
4890 215394 : && mode_signbit_p (mode, trueop1)
4891 0 : && ! side_effects_p (op0))
4892 : return op1;
4893 236385 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4894 : return op0;
4895 236229 : tem = simplify_associative_operation (code, mode, op0, op1);
4896 236229 : if (tem)
4897 : return tem;
4898 : break;
4899 :
4900 484251 : case SMAX:
4901 484251 : if (HWI_COMPUTABLE_MODE_P (mode)
4902 457087 : && CONST_INT_P (trueop1)
4903 424719 : && (UINTVAL (trueop1) == GET_MODE_MASK (mode) >> 1)
4904 0 : && ! side_effects_p (op0))
4905 : return op1;
4906 484251 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4907 : return op0;
4908 484146 : tem = simplify_associative_operation (code, mode, op0, op1);
4909 484146 : if (tem)
4910 : return tem;
4911 : break;
4912 :
4913 341656 : case UMIN:
4914 341656 : if (trueop1 == CONST0_RTX (mode) && ! side_effects_p (op0))
4915 : return op1;
4916 341643 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4917 : return op0;
4918 341526 : tem = simplify_associative_operation (code, mode, op0, op1);
4919 341526 : if (tem)
4920 : return tem;
4921 : break;
4922 :
4923 316236 : case UMAX:
4924 316236 : if (trueop1 == constm1_rtx && ! side_effects_p (op0))
4925 : return op1;
4926 316236 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4927 : return op0;
4928 316146 : tem = simplify_associative_operation (code, mode, op0, op1);
4929 316146 : if (tem)
4930 : return tem;
4931 : break;
4932 :
4933 11647 : case SS_PLUS:
4934 11647 : case US_PLUS:
4935 11647 : case SS_MINUS:
4936 11647 : case US_MINUS:
4937 : /* Simplify x +/- 0 to x, if possible. */
4938 11647 : if (trueop1 == CONST0_RTX (mode))
4939 : return op0;
4940 : return 0;
4941 :
4942 0 : case SS_MULT:
4943 0 : case US_MULT:
4944 : /* Simplify x * 0 to 0, if possible. */
4945 0 : if (trueop1 == CONST0_RTX (mode)
4946 0 : && !side_effects_p (op0))
4947 : return op1;
4948 :
4949 : /* Simplify x * 1 to x, if possible. */
4950 0 : if (trueop1 == CONST1_RTX (mode))
4951 : return op0;
4952 : return 0;
4953 :
4954 476271 : case SMUL_HIGHPART:
4955 476271 : case UMUL_HIGHPART:
4956 : /* Simplify x * 0 to 0, if possible. */
4957 476271 : if (trueop1 == CONST0_RTX (mode)
4958 476271 : && !side_effects_p (op0))
4959 : return op1;
4960 : return 0;
4961 :
4962 0 : case SS_DIV:
4963 0 : case US_DIV:
4964 : /* Simplify x / 1 to x, if possible. */
4965 0 : if (trueop1 == CONST1_RTX (mode))
4966 : return op0;
4967 : return 0;
4968 :
4969 0 : case COPYSIGN:
4970 0 : if (rtx_equal_p (trueop0, trueop1) && ! side_effects_p (op0))
4971 : return op0;
4972 0 : if (CONST_DOUBLE_AS_FLOAT_P (trueop1))
4973 : {
4974 0 : REAL_VALUE_TYPE f1;
4975 0 : real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (trueop1));
4976 0 : rtx tmp = simplify_gen_unary (ABS, mode, op0, mode);
4977 0 : if (REAL_VALUE_NEGATIVE (f1))
4978 0 : tmp = simplify_unary_operation (NEG, mode, tmp, mode);
4979 0 : return tmp;
4980 : }
4981 0 : if (GET_CODE (op0) == NEG || GET_CODE (op0) == ABS)
4982 0 : return simplify_gen_binary (COPYSIGN, mode, XEXP (op0, 0), op1);
4983 0 : if (GET_CODE (op1) == ABS
4984 0 : && ! side_effects_p (op1))
4985 0 : return simplify_gen_unary (ABS, mode, op0, mode);
4986 0 : if (GET_CODE (op0) == COPYSIGN
4987 0 : && ! side_effects_p (XEXP (op0, 1)))
4988 0 : return simplify_gen_binary (COPYSIGN, mode, XEXP (op0, 0), op1);
4989 0 : if (GET_CODE (op1) == COPYSIGN
4990 0 : && ! side_effects_p (XEXP (op1, 0)))
4991 0 : return simplify_gen_binary (COPYSIGN, mode, op0, XEXP (op1, 1));
4992 : return 0;
4993 :
4994 1107 : case VEC_SERIES:
4995 2214 : if (op1 == CONST0_RTX (GET_MODE_INNER (mode)))
4996 92 : return gen_vec_duplicate (mode, op0);
4997 1015 : if (valid_for_const_vector_p (mode, op0)
4998 1015 : && valid_for_const_vector_p (mode, op1))
4999 93 : return gen_const_vec_series (mode, op0, op1);
5000 : return 0;
5001 :
5002 3479136 : case VEC_SELECT:
5003 3479136 : if (!VECTOR_MODE_P (mode))
5004 : {
5005 958079 : gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
5006 1916158 : gcc_assert (mode == GET_MODE_INNER (GET_MODE (trueop0)));
5007 958079 : gcc_assert (GET_CODE (trueop1) == PARALLEL);
5008 958079 : gcc_assert (XVECLEN (trueop1, 0) == 1);
5009 :
5010 : /* We can't reason about selections made at runtime. */
5011 958079 : if (!CONST_INT_P (XVECEXP (trueop1, 0, 0)))
5012 443809010 : return 0;
5013 :
5014 958079 : if (vec_duplicate_p (trueop0, &elt0))
5015 2142 : return elt0;
5016 :
5017 955937 : if (GET_CODE (trueop0) == CONST_VECTOR)
5018 7242 : return CONST_VECTOR_ELT (trueop0, INTVAL (XVECEXP
5019 : (trueop1, 0, 0)));
5020 :
5021 : /* Extract a scalar element from a nested VEC_SELECT expression
5022 : (with optional nested VEC_CONCAT expression). Some targets
5023 : (i386) extract scalar element from a vector using chain of
5024 : nested VEC_SELECT expressions. When input operand is a memory
5025 : operand, this operation can be simplified to a simple scalar
5026 : load from an offseted memory address. */
5027 948695 : int n_elts;
5028 948695 : if (GET_CODE (trueop0) == VEC_SELECT
5029 1021759 : && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
5030 73064 : .is_constant (&n_elts)))
5031 : {
5032 73064 : rtx op0 = XEXP (trueop0, 0);
5033 73064 : rtx op1 = XEXP (trueop0, 1);
5034 :
5035 73064 : int i = INTVAL (XVECEXP (trueop1, 0, 0));
5036 73064 : int elem;
5037 :
5038 73064 : rtvec vec;
5039 73064 : rtx tmp_op, tmp;
5040 :
5041 73064 : gcc_assert (GET_CODE (op1) == PARALLEL);
5042 73064 : gcc_assert (i < XVECLEN (op1, 0));
5043 :
5044 : /* Select element, pointed by nested selector. */
5045 73064 : elem = INTVAL (XVECEXP (op1, 0, i));
5046 :
5047 73064 : gcc_assert (elem < n_elts);
5048 :
5049 : /* Handle the case when nested VEC_SELECT wraps VEC_CONCAT. */
5050 73064 : if (GET_CODE (op0) == VEC_CONCAT)
5051 : {
5052 29407 : rtx op00 = XEXP (op0, 0);
5053 29407 : rtx op01 = XEXP (op0, 1);
5054 :
5055 29407 : machine_mode mode00, mode01;
5056 29407 : int n_elts00, n_elts01;
5057 :
5058 29407 : mode00 = GET_MODE (op00);
5059 29407 : mode01 = GET_MODE (op01);
5060 :
5061 : /* Find out the number of elements of each operand.
5062 : Since the concatenated result has a constant number
5063 : of elements, the operands must too. */
5064 29407 : n_elts00 = GET_MODE_NUNITS (mode00).to_constant ();
5065 29407 : n_elts01 = GET_MODE_NUNITS (mode01).to_constant ();
5066 :
5067 29407 : gcc_assert (n_elts == n_elts00 + n_elts01);
5068 :
5069 : /* Select correct operand of VEC_CONCAT
5070 : and adjust selector. */
5071 29407 : if (elem < n_elts01)
5072 : tmp_op = op00;
5073 : else
5074 : {
5075 43 : tmp_op = op01;
5076 43 : elem -= n_elts00;
5077 : }
5078 : }
5079 : else
5080 : tmp_op = op0;
5081 :
5082 73064 : vec = rtvec_alloc (1);
5083 73064 : RTVEC_ELT (vec, 0) = GEN_INT (elem);
5084 :
5085 73064 : tmp = gen_rtx_fmt_ee (code, mode,
5086 : tmp_op, gen_rtx_PARALLEL (VOIDmode, vec));
5087 73064 : return tmp;
5088 : }
5089 : }
5090 : else
5091 : {
5092 2521057 : gcc_assert (VECTOR_MODE_P (GET_MODE (trueop0)));
5093 7563171 : gcc_assert (GET_MODE_INNER (mode)
5094 : == GET_MODE_INNER (GET_MODE (trueop0)));
5095 2521057 : gcc_assert (GET_CODE (trueop1) == PARALLEL);
5096 :
5097 2521057 : if (vec_duplicate_p (trueop0, &elt0))
5098 : /* It doesn't matter which elements are selected by trueop1,
5099 : because they are all the same. */
5100 15492 : return gen_vec_duplicate (mode, elt0);
5101 :
5102 2505565 : if (GET_CODE (trueop0) == CONST_VECTOR)
5103 : {
5104 17374 : unsigned n_elts = XVECLEN (trueop1, 0);
5105 17374 : rtvec v = rtvec_alloc (n_elts);
5106 17374 : unsigned int i;
5107 :
5108 34748 : gcc_assert (known_eq (n_elts, GET_MODE_NUNITS (mode)));
5109 89342 : for (i = 0; i < n_elts; i++)
5110 : {
5111 71968 : rtx x = XVECEXP (trueop1, 0, i);
5112 :
5113 71968 : if (!CONST_INT_P (x))
5114 : return 0;
5115 :
5116 71968 : RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0,
5117 : INTVAL (x));
5118 : }
5119 :
5120 17374 : return gen_rtx_CONST_VECTOR (mode, v);
5121 : }
5122 :
5123 : /* Recognize the identity. */
5124 2488191 : if (GET_MODE (trueop0) == mode)
5125 : {
5126 586115 : bool maybe_ident = true;
5127 586115 : for (int i = 0; i < XVECLEN (trueop1, 0); i++)
5128 : {
5129 585732 : rtx j = XVECEXP (trueop1, 0, i);
5130 585732 : if (!CONST_INT_P (j) || INTVAL (j) != i)
5131 : {
5132 : maybe_ident = false;
5133 : break;
5134 : }
5135 : }
5136 358509 : if (maybe_ident)
5137 : return trueop0;
5138 : }
5139 :
5140 : /* If we select a low-part subreg, return that. */
5141 2487808 : if (vec_series_lowpart_p (mode, GET_MODE (trueop0), trueop1))
5142 : {
5143 0 : rtx new_rtx = lowpart_subreg (mode, trueop0,
5144 0 : GET_MODE (trueop0));
5145 0 : if (new_rtx != NULL_RTX)
5146 : return new_rtx;
5147 : }
5148 :
5149 : /* If we build {a,b} then permute it, build the result directly. */
5150 2487808 : if (XVECLEN (trueop1, 0) == 2
5151 592360 : && CONST_INT_P (XVECEXP (trueop1, 0, 0))
5152 592360 : && CONST_INT_P (XVECEXP (trueop1, 0, 1))
5153 592360 : && GET_CODE (trueop0) == VEC_CONCAT
5154 178562 : && GET_CODE (XEXP (trueop0, 0)) == VEC_CONCAT
5155 78 : && GET_MODE (XEXP (trueop0, 0)) == mode
5156 78 : && GET_CODE (XEXP (trueop0, 1)) == VEC_CONCAT
5157 54 : && GET_MODE (XEXP (trueop0, 1)) == mode)
5158 : {
5159 54 : unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
5160 54 : unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
5161 54 : rtx subop0, subop1;
5162 :
5163 54 : gcc_assert (i0 < 4 && i1 < 4);
5164 54 : subop0 = XEXP (XEXP (trueop0, i0 / 2), i0 % 2);
5165 54 : subop1 = XEXP (XEXP (trueop0, i1 / 2), i1 % 2);
5166 :
5167 54 : return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
5168 : }
5169 :
5170 2487754 : if (XVECLEN (trueop1, 0) == 2
5171 592306 : && CONST_INT_P (XVECEXP (trueop1, 0, 0))
5172 592306 : && CONST_INT_P (XVECEXP (trueop1, 0, 1))
5173 592306 : && GET_CODE (trueop0) == VEC_CONCAT
5174 178508 : && GET_MODE (trueop0) == mode)
5175 : {
5176 2 : unsigned int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
5177 2 : unsigned int i1 = INTVAL (XVECEXP (trueop1, 0, 1));
5178 2 : rtx subop0, subop1;
5179 :
5180 2 : gcc_assert (i0 < 2 && i1 < 2);
5181 2 : subop0 = XEXP (trueop0, i0);
5182 2 : subop1 = XEXP (trueop0, i1);
5183 :
5184 2 : return simplify_gen_binary (VEC_CONCAT, mode, subop0, subop1);
5185 : }
5186 :
5187 : /* If we select one half of a vec_concat, return that. */
5188 2487752 : int l0, l1;
5189 2487752 : if (GET_CODE (trueop0) == VEC_CONCAT
5190 3085436 : && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 0)))
5191 1542718 : .is_constant (&l0))
5192 3085436 : && (GET_MODE_NUNITS (GET_MODE (XEXP (trueop0, 1)))
5193 1542718 : .is_constant (&l1))
5194 4030470 : && CONST_INT_P (XVECEXP (trueop1, 0, 0)))
5195 : {
5196 1542718 : rtx subop0 = XEXP (trueop0, 0);
5197 1542718 : rtx subop1 = XEXP (trueop0, 1);
5198 1542718 : machine_mode mode0 = GET_MODE (subop0);
5199 1542718 : machine_mode mode1 = GET_MODE (subop1);
5200 1542718 : int i0 = INTVAL (XVECEXP (trueop1, 0, 0));
5201 1542718 : if (i0 == 0 && !side_effects_p (op1) && mode == mode0)
5202 : {
5203 985673 : bool success = true;
5204 985673 : for (int i = 1; i < l0; ++i)
5205 : {
5206 985344 : rtx j = XVECEXP (trueop1, 0, i);
5207 985344 : if (!CONST_INT_P (j) || INTVAL (j) != i)
5208 : {
5209 : success = false;
5210 : break;
5211 : }
5212 : }
5213 900298 : if (success)
5214 : return subop0;
5215 : }
5216 1542389 : if (i0 == l0 && !side_effects_p (op0) && mode == mode1)
5217 : {
5218 590 : bool success = true;
5219 590 : for (int i = 1; i < l1; ++i)
5220 : {
5221 543 : rtx j = XVECEXP (trueop1, 0, i);
5222 543 : if (!CONST_INT_P (j) || INTVAL (j) != i0 + i)
5223 : {
5224 : success = false;
5225 : break;
5226 : }
5227 : }
5228 76 : if (success)
5229 : return subop1;
5230 : }
5231 : }
5232 :
5233 : /* Simplify vec_select of a subreg of X to just a vec_select of X
5234 : when X has same component mode as vec_select. */
5235 2487376 : unsigned HOST_WIDE_INT subreg_offset = 0;
5236 2487376 : if (GET_CODE (trueop0) == SUBREG
5237 366347 : && GET_MODE_INNER (mode)
5238 732694 : == GET_MODE_INNER (GET_MODE (SUBREG_REG (trueop0)))
5239 28688 : && GET_MODE_NUNITS (mode).is_constant (&l1)
5240 2853723 : && constant_multiple_p (subreg_memory_offset (trueop0),
5241 28688 : GET_MODE_UNIT_BITSIZE (mode),
5242 : &subreg_offset))
5243 : {
5244 14344 : poly_uint64 nunits
5245 28688 : = GET_MODE_NUNITS (GET_MODE (SUBREG_REG (trueop0)));
5246 14344 : bool success = true;
5247 81398 : for (int i = 0; i != l1; i++)
5248 : {
5249 77403 : rtx idx = XVECEXP (trueop1, 0, i);
5250 77403 : if (!CONST_INT_P (idx)
5251 77403 : || maybe_ge (UINTVAL (idx) + subreg_offset, nunits))
5252 : {
5253 : success = false;
5254 : break;
5255 : }
5256 : }
5257 :
5258 14344 : if (success)
5259 : {
5260 3995 : rtx par = trueop1;
5261 3995 : if (subreg_offset)
5262 : {
5263 0 : rtvec vec = rtvec_alloc (l1);
5264 0 : for (int i = 0; i < l1; i++)
5265 0 : RTVEC_ELT (vec, i)
5266 0 : = GEN_INT (INTVAL (XVECEXP (trueop1, 0, i))
5267 : + subreg_offset);
5268 0 : par = gen_rtx_PARALLEL (VOIDmode, vec);
5269 : }
5270 3995 : return gen_rtx_VEC_SELECT (mode, SUBREG_REG (trueop0), par);
5271 : }
5272 : }
5273 : }
5274 :
5275 3359012 : if (XVECLEN (trueop1, 0) == 1
5276 875715 : && CONST_INT_P (XVECEXP (trueop1, 0, 0))
5277 875715 : && GET_CODE (trueop0) == VEC_CONCAT)
5278 : {
5279 1384 : rtx vec = trueop0;
5280 2768 : offset = INTVAL (XVECEXP (trueop1, 0, 0)) * GET_MODE_SIZE (mode);
5281 :
5282 : /* Try to find the element in the VEC_CONCAT. */
5283 1384 : while (GET_MODE (vec) != mode
5284 2768 : && GET_CODE (vec) == VEC_CONCAT)
5285 : {
5286 1384 : poly_int64 vec_size;
5287 :
5288 1384 : if (CONST_INT_P (XEXP (vec, 0)))
5289 : {
5290 : /* vec_concat of two const_ints doesn't make sense with
5291 : respect to modes. */
5292 3 : if (CONST_INT_P (XEXP (vec, 1)))
5293 380938264 : return 0;
5294 :
5295 3 : vec_size = GET_MODE_SIZE (GET_MODE (trueop0))
5296 9 : - GET_MODE_SIZE (GET_MODE (XEXP (vec, 1)));
5297 : }
5298 : else
5299 2762 : vec_size = GET_MODE_SIZE (GET_MODE (XEXP (vec, 0)));
5300 :
5301 1384 : if (known_lt (offset, vec_size))
5302 : vec = XEXP (vec, 0);
5303 323 : else if (known_ge (offset, vec_size))
5304 : {
5305 323 : offset -= vec_size;
5306 323 : vec = XEXP (vec, 1);
5307 : }
5308 : else
5309 : break;
5310 1384 : vec = avoid_constant_pool_reference (vec);
5311 : }
5312 :
5313 1384 : if (GET_MODE (vec) == mode)
5314 : return vec;
5315 : }
5316 :
5317 : /* If we select elements in a vec_merge that all come from the same
5318 : operand, select from that operand directly. */
5319 3357808 : if (GET_CODE (op0) == VEC_MERGE)
5320 : {
5321 10020 : rtx trueop02 = avoid_constant_pool_reference (XEXP (op0, 2));
5322 10020 : if (CONST_INT_P (trueop02))
5323 : {
5324 3297 : unsigned HOST_WIDE_INT sel = UINTVAL (trueop02);
5325 3297 : bool all_operand0 = true;
5326 3297 : bool all_operand1 = true;
5327 10987 : for (int i = 0; i < XVECLEN (trueop1, 0); i++)
5328 : {
5329 7690 : rtx j = XVECEXP (trueop1, 0, i);
5330 7690 : if (sel & (HOST_WIDE_INT_1U << UINTVAL (j)))
5331 : all_operand1 = false;
5332 : else
5333 3445 : all_operand0 = false;
5334 : }
5335 3297 : if (all_operand0 && !side_effects_p (XEXP (op0, 1)))
5336 1443 : return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 0), op1);
5337 1854 : if (all_operand1 && !side_effects_p (XEXP (op0, 0)))
5338 47 : return simplify_gen_binary (VEC_SELECT, mode, XEXP (op0, 1), op1);
5339 : }
5340 : }
5341 :
5342 : /* If we have two nested selects that are inverses of each
5343 : other, replace them with the source operand. */
5344 3356318 : if (GET_CODE (trueop0) == VEC_SELECT
5345 70612 : && GET_MODE (XEXP (trueop0, 0)) == mode)
5346 : {
5347 1275 : rtx op0_subop1 = XEXP (trueop0, 1);
5348 1275 : gcc_assert (GET_CODE (op0_subop1) == PARALLEL);
5349 2550 : gcc_assert (known_eq (XVECLEN (trueop1, 0), GET_MODE_NUNITS (mode)));
5350 :
5351 : /* Apply the outer ordering vector to the inner one. (The inner
5352 : ordering vector is expressly permitted to be of a different
5353 : length than the outer one.) If the result is { 0, 1, ..., n-1 }
5354 : then the two VEC_SELECTs cancel. */
5355 1581 : for (int i = 0; i < XVECLEN (trueop1, 0); ++i)
5356 : {
5357 1581 : rtx x = XVECEXP (trueop1, 0, i);
5358 1581 : if (!CONST_INT_P (x))
5359 : return 0;
5360 1581 : rtx y = XVECEXP (op0_subop1, 0, INTVAL (x));
5361 1581 : if (!CONST_INT_P (y) || i != INTVAL (y))
5362 : return 0;
5363 : }
5364 : return XEXP (trueop0, 0);
5365 : }
5366 :
5367 : return 0;
5368 4153068 : case VEC_CONCAT:
5369 4153068 : {
5370 4153068 : machine_mode op0_mode = (GET_MODE (trueop0) != VOIDmode
5371 4153068 : ? GET_MODE (trueop0)
5372 4153068 : : GET_MODE_INNER (mode));
5373 4153068 : machine_mode op1_mode = (GET_MODE (trueop1) != VOIDmode
5374 4153068 : ? GET_MODE (trueop1)
5375 4153068 : : GET_MODE_INNER (mode));
5376 :
5377 4153068 : gcc_assert (VECTOR_MODE_P (mode));
5378 16612272 : gcc_assert (known_eq (GET_MODE_SIZE (op0_mode)
5379 : + GET_MODE_SIZE (op1_mode),
5380 : GET_MODE_SIZE (mode)));
5381 :
5382 4153068 : if (VECTOR_MODE_P (op0_mode))
5383 5804145 : gcc_assert (GET_MODE_INNER (mode)
5384 : == GET_MODE_INNER (op0_mode));
5385 : else
5386 4436706 : gcc_assert (GET_MODE_INNER (mode) == op0_mode);
5387 :
5388 4153068 : if (VECTOR_MODE_P (op1_mode))
5389 5804145 : gcc_assert (GET_MODE_INNER (mode)
5390 : == GET_MODE_INNER (op1_mode));
5391 : else
5392 4436706 : gcc_assert (GET_MODE_INNER (mode) == op1_mode);
5393 :
5394 4153068 : unsigned int n_elts, in_n_elts;
5395 4153068 : if ((GET_CODE (trueop0) == CONST_VECTOR
5396 4153068 : || CONST_SCALAR_INT_P (trueop0)
5397 3997605 : || CONST_DOUBLE_AS_FLOAT_P (trueop0))
5398 156900 : && (GET_CODE (trueop1) == CONST_VECTOR
5399 156900 : || CONST_SCALAR_INT_P (trueop1)
5400 156900 : || CONST_DOUBLE_AS_FLOAT_P (trueop1))
5401 0 : && GET_MODE_NUNITS (mode).is_constant (&n_elts)
5402 4153068 : && GET_MODE_NUNITS (op0_mode).is_constant (&in_n_elts))
5403 : {
5404 0 : rtvec v = rtvec_alloc (n_elts);
5405 0 : unsigned int i;
5406 0 : for (i = 0; i < n_elts; i++)
5407 : {
5408 0 : if (i < in_n_elts)
5409 : {
5410 0 : if (!VECTOR_MODE_P (op0_mode))
5411 0 : RTVEC_ELT (v, i) = trueop0;
5412 : else
5413 0 : RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop0, i);
5414 : }
5415 : else
5416 : {
5417 0 : if (!VECTOR_MODE_P (op1_mode))
5418 0 : RTVEC_ELT (v, i) = trueop1;
5419 : else
5420 0 : RTVEC_ELT (v, i) = CONST_VECTOR_ELT (trueop1,
5421 : i - in_n_elts);
5422 : }
5423 : }
5424 :
5425 0 : return gen_rtx_CONST_VECTOR (mode, v);
5426 : }
5427 :
5428 : /* Try to merge two VEC_SELECTs from the same vector into a single one.
5429 : Restrict the transformation to avoid generating a VEC_SELECT with a
5430 : mode unrelated to its operand. */
5431 4153068 : if (GET_CODE (trueop0) == VEC_SELECT
5432 133362 : && GET_CODE (trueop1) == VEC_SELECT
5433 28740 : && rtx_equal_p (XEXP (trueop0, 0), XEXP (trueop1, 0))
5434 4169169 : && GET_MODE_INNER (GET_MODE (XEXP (trueop0, 0)))
5435 32202 : == GET_MODE_INNER(mode))
5436 : {
5437 16101 : rtx par0 = XEXP (trueop0, 1);
5438 16101 : rtx par1 = XEXP (trueop1, 1);
5439 16101 : int len0 = XVECLEN (par0, 0);
5440 16101 : int len1 = XVECLEN (par1, 0);
5441 16101 : rtvec vec = rtvec_alloc (len0 + len1);
5442 99363 : for (int i = 0; i < len0; i++)
5443 83262 : RTVEC_ELT (vec, i) = XVECEXP (par0, 0, i);
5444 99363 : for (int i = 0; i < len1; i++)
5445 83262 : RTVEC_ELT (vec, len0 + i) = XVECEXP (par1, 0, i);
5446 16101 : return simplify_gen_binary (VEC_SELECT, mode, XEXP (trueop0, 0),
5447 16101 : gen_rtx_PARALLEL (VOIDmode, vec));
5448 : }
5449 : /* (vec_concat:
5450 : (subreg_lowpart:N OP)
5451 : (vec_select:N OP P)) --> OP when P selects the high half
5452 : of the OP. */
5453 4136967 : if (GET_CODE (trueop0) == SUBREG
5454 494303 : && subreg_lowpart_p (trueop0)
5455 494096 : && GET_CODE (trueop1) == VEC_SELECT
5456 49 : && SUBREG_REG (trueop0) == XEXP (trueop1, 0)
5457 0 : && !side_effects_p (XEXP (trueop1, 0))
5458 4136967 : && vec_series_highpart_p (op1_mode, mode, XEXP (trueop1, 1)))
5459 0 : return XEXP (trueop1, 0);
5460 : }
5461 : return 0;
5462 :
5463 0 : default:
5464 0 : gcc_unreachable ();
5465 : }
5466 :
5467 372956203 : if (mode == GET_MODE (op0)
5468 319113414 : && mode == GET_MODE (op1)
5469 97929085 : && vec_duplicate_p (op0, &elt0)
5470 373073924 : && vec_duplicate_p (op1, &elt1))
5471 : {
5472 : /* Try applying the operator to ELT and see if that simplifies.
5473 : We can duplicate the result if so.
5474 :
5475 : The reason we don't use simplify_gen_binary is that it isn't
5476 : necessarily a win to convert things like:
5477 :
5478 : (plus:V (vec_duplicate:V (reg:S R1))
5479 : (vec_duplicate:V (reg:S R2)))
5480 :
5481 : to:
5482 :
5483 : (vec_duplicate:V (plus:S (reg:S R1) (reg:S R2)))
5484 :
5485 : The first might be done entirely in vector registers while the
5486 : second might need a move between register files. */
5487 148 : tem = simplify_binary_operation (code, GET_MODE_INNER (mode),
5488 : elt0, elt1);
5489 74 : if (tem)
5490 2 : return gen_vec_duplicate (mode, tem);
5491 : }
5492 :
5493 : return 0;
5494 : }
5495 :
5496 : /* Return true if binary operation OP distributes over addition in operand
5497 : OPNO, with the other operand being held constant. OPNO counts from 1. */
5498 :
5499 : static bool
5500 7489 : distributes_over_addition_p (rtx_code op, int opno)
5501 : {
5502 0 : switch (op)
5503 : {
5504 : case PLUS:
5505 : case MINUS:
5506 : case MULT:
5507 : return true;
5508 :
5509 0 : case ASHIFT:
5510 0 : return opno == 1;
5511 :
5512 0 : default:
5513 0 : return false;
5514 : }
5515 : }
5516 :
5517 : rtx
5518 477294626 : simplify_const_binary_operation (enum rtx_code code, machine_mode mode,
5519 : rtx op0, rtx op1)
5520 : {
5521 477294626 : if (VECTOR_MODE_P (mode)
5522 14962557 : && code != VEC_CONCAT
5523 10797903 : && GET_CODE (op0) == CONST_VECTOR
5524 193716 : && GET_CODE (op1) == CONST_VECTOR)
5525 : {
5526 8206 : bool step_ok_p;
5527 8206 : if (CONST_VECTOR_STEPPED_P (op0)
5528 8206 : && CONST_VECTOR_STEPPED_P (op1))
5529 : /* We can operate directly on the encoding if:
5530 :
5531 : a3 - a2 == a2 - a1 && b3 - b2 == b2 - b1
5532 : implies
5533 : (a3 op b3) - (a2 op b2) == (a2 op b2) - (a1 op b1)
5534 :
5535 : Addition and subtraction are the supported operators
5536 : for which this is true. */
5537 717 : step_ok_p = (code == PLUS || code == MINUS);
5538 7489 : else if (CONST_VECTOR_STEPPED_P (op0))
5539 : /* We can operate directly on stepped encodings if:
5540 :
5541 : a3 - a2 == a2 - a1
5542 : implies:
5543 : (a3 op c) - (a2 op c) == (a2 op c) - (a1 op c)
5544 :
5545 : which is true if (x -> x op c) distributes over addition. */
5546 1059 : step_ok_p = distributes_over_addition_p (code, 1);
5547 : else
5548 : /* Similarly in reverse. */
5549 6430 : step_ok_p = distributes_over_addition_p (code, 2);
5550 8206 : rtx_vector_builder builder;
5551 8206 : if (!builder.new_binary_operation (mode, op0, op1, step_ok_p))
5552 : return 0;
5553 :
5554 8206 : unsigned int count = builder.encoded_nelts ();
5555 51955 : for (unsigned int i = 0; i < count; i++)
5556 : {
5557 87798 : rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
5558 : CONST_VECTOR_ELT (op0, i),
5559 43899 : CONST_VECTOR_ELT (op1, i));
5560 43899 : if (!x || !valid_for_const_vector_p (mode, x))
5561 150 : return 0;
5562 43749 : builder.quick_push (x);
5563 : }
5564 8056 : return builder.build ();
5565 8206 : }
5566 :
5567 477286420 : if (VECTOR_MODE_P (mode)
5568 14954351 : && code == VEC_CONCAT
5569 4164654 : && (CONST_SCALAR_INT_P (op0)
5570 4018973 : || CONST_FIXED_P (op0)
5571 4018973 : || CONST_DOUBLE_AS_FLOAT_P (op0)
5572 4016173 : || CONST_VECTOR_P (op0))
5573 168486 : && (CONST_SCALAR_INT_P (op1)
5574 165509 : || CONST_DOUBLE_AS_FLOAT_P (op1)
5575 164146 : || CONST_FIXED_P (op1)
5576 164146 : || CONST_VECTOR_P (op1)))
5577 : {
5578 : /* Both inputs have a constant number of elements, so the result
5579 : must too. */
5580 11586 : unsigned n_elts = GET_MODE_NUNITS (mode).to_constant ();
5581 11586 : rtvec v = rtvec_alloc (n_elts);
5582 :
5583 11586 : gcc_assert (n_elts >= 2);
5584 11586 : if (n_elts == 2)
5585 : {
5586 4340 : gcc_assert (GET_CODE (op0) != CONST_VECTOR);
5587 4340 : gcc_assert (GET_CODE (op1) != CONST_VECTOR);
5588 :
5589 4340 : RTVEC_ELT (v, 0) = op0;
5590 4340 : RTVEC_ELT (v, 1) = op1;
5591 : }
5592 : else
5593 : {
5594 7246 : unsigned op0_n_elts = GET_MODE_NUNITS (GET_MODE (op0)).to_constant ();
5595 7246 : unsigned op1_n_elts = GET_MODE_NUNITS (GET_MODE (op1)).to_constant ();
5596 7246 : unsigned i;
5597 :
5598 7246 : gcc_assert (GET_CODE (op0) == CONST_VECTOR);
5599 7246 : gcc_assert (GET_CODE (op1) == CONST_VECTOR);
5600 7246 : gcc_assert (op0_n_elts + op1_n_elts == n_elts);
5601 :
5602 62524 : for (i = 0; i < op0_n_elts; ++i)
5603 55278 : RTVEC_ELT (v, i) = CONST_VECTOR_ELT (op0, i);
5604 62716 : for (i = 0; i < op1_n_elts; ++i)
5605 55470 : RTVEC_ELT (v, op0_n_elts+i) = CONST_VECTOR_ELT (op1, i);
5606 : }
5607 :
5608 11586 : return gen_rtx_CONST_VECTOR (mode, v);
5609 : }
5610 :
5611 465589776 : if (VECTOR_MODE_P (mode)
5612 14942765 : && GET_CODE (op0) == CONST_VECTOR
5613 198269 : && (CONST_SCALAR_INT_P (op1) || CONST_DOUBLE_AS_FLOAT_P (op1))
5614 477274834 : && (CONST_VECTOR_DUPLICATE_P (op0)
5615 : || CONST_VECTOR_NUNITS (op0).is_constant ()))
5616 : {
5617 141525 : switch (code)
5618 : {
5619 141525 : case PLUS:
5620 141525 : case MINUS:
5621 141525 : case MULT:
5622 141525 : case DIV:
5623 141525 : case MOD:
5624 141525 : case UDIV:
5625 141525 : case UMOD:
5626 141525 : case AND:
5627 141525 : case IOR:
5628 141525 : case XOR:
5629 141525 : case SMIN:
5630 141525 : case SMAX:
5631 141525 : case UMIN:
5632 141525 : case UMAX:
5633 141525 : case LSHIFTRT:
5634 141525 : case ASHIFTRT:
5635 141525 : case ASHIFT:
5636 141525 : case ROTATE:
5637 141525 : case ROTATERT:
5638 141525 : case SS_PLUS:
5639 141525 : case US_PLUS:
5640 141525 : case SS_MINUS:
5641 141525 : case US_MINUS:
5642 141525 : case SS_ASHIFT:
5643 141525 : case US_ASHIFT:
5644 141525 : case COPYSIGN:
5645 141525 : break;
5646 : default:
5647 : return NULL_RTX;
5648 : }
5649 :
5650 141525 : unsigned int npatterns = (CONST_VECTOR_DUPLICATE_P (op0)
5651 141525 : ? CONST_VECTOR_NPATTERNS (op0)
5652 149280 : : CONST_VECTOR_NUNITS (op0).to_constant ());
5653 141525 : rtx_vector_builder builder (mode, npatterns, 1);
5654 296203 : for (unsigned i = 0; i < npatterns; i++)
5655 : {
5656 309356 : rtx x = simplify_binary_operation (code, GET_MODE_INNER (mode),
5657 154678 : CONST_VECTOR_ELT (op0, i), op1);
5658 154678 : if (!x || !valid_for_const_vector_p (mode, x))
5659 0 : return 0;
5660 154678 : builder.quick_push (x);
5661 : }
5662 141525 : return builder.build ();
5663 : }
5664 :
5665 477133309 : if (SCALAR_FLOAT_MODE_P (mode)
5666 6384418 : && CONST_DOUBLE_AS_FLOAT_P (op0)
5667 76038 : && CONST_DOUBLE_AS_FLOAT_P (op1)
5668 11648 : && mode == GET_MODE (op0) && mode == GET_MODE (op1))
5669 : {
5670 11648 : if (code == AND
5671 : || code == IOR
5672 11648 : || code == XOR)
5673 : {
5674 2516 : long tmp0[4];
5675 2516 : long tmp1[4];
5676 2516 : REAL_VALUE_TYPE r;
5677 2516 : int i;
5678 :
5679 2516 : real_to_target (tmp0, CONST_DOUBLE_REAL_VALUE (op0),
5680 2516 : GET_MODE (op0));
5681 2516 : real_to_target (tmp1, CONST_DOUBLE_REAL_VALUE (op1),
5682 2516 : GET_MODE (op1));
5683 12580 : for (i = 0; i < 4; i++)
5684 : {
5685 10064 : switch (code)
5686 : {
5687 5272 : case AND:
5688 5272 : tmp0[i] &= tmp1[i];
5689 5272 : break;
5690 2512 : case IOR:
5691 2512 : tmp0[i] |= tmp1[i];
5692 2512 : break;
5693 2280 : case XOR:
5694 2280 : tmp0[i] ^= tmp1[i];
5695 2280 : break;
5696 : default:
5697 : gcc_unreachable ();
5698 : }
5699 : }
5700 2516 : real_from_target (&r, tmp0, mode);
5701 2516 : return const_double_from_real_value (r, mode);
5702 : }
5703 9132 : else if (code == COPYSIGN)
5704 : {
5705 0 : REAL_VALUE_TYPE f0, f1;
5706 0 : real_convert (&f0, mode, CONST_DOUBLE_REAL_VALUE (op0));
5707 0 : real_convert (&f1, mode, CONST_DOUBLE_REAL_VALUE (op1));
5708 0 : real_copysign (&f0, &f1);
5709 0 : return const_double_from_real_value (f0, mode);
5710 : }
5711 : else
5712 : {
5713 9132 : REAL_VALUE_TYPE f0, f1, value, result;
5714 9132 : const REAL_VALUE_TYPE *opr0, *opr1;
5715 9132 : bool inexact;
5716 :
5717 9132 : opr0 = CONST_DOUBLE_REAL_VALUE (op0);
5718 9132 : opr1 = CONST_DOUBLE_REAL_VALUE (op1);
5719 :
5720 9132 : if (HONOR_SNANS (mode)
5721 9132 : && (REAL_VALUE_ISSIGNALING_NAN (*opr0)
5722 803 : || REAL_VALUE_ISSIGNALING_NAN (*opr1)))
5723 10 : return 0;
5724 :
5725 9122 : real_convert (&f0, mode, opr0);
5726 9122 : real_convert (&f1, mode, opr1);
5727 :
5728 9122 : if (code == DIV
5729 4232 : && real_equal (&f1, &dconst0)
5730 12891 : && (flag_trapping_math || ! MODE_HAS_INFINITIES (mode)))
5731 3765 : return 0;
5732 :
5733 26684 : if (MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
5734 5265 : && flag_trapping_math
5735 5189 : && REAL_VALUE_ISINF (f0) && REAL_VALUE_ISINF (f1))
5736 : {
5737 9 : int s0 = REAL_VALUE_NEGATIVE (f0);
5738 9 : int s1 = REAL_VALUE_NEGATIVE (f1);
5739 :
5740 9 : switch (code)
5741 : {
5742 0 : case PLUS:
5743 : /* Inf + -Inf = NaN plus exception. */
5744 0 : if (s0 != s1)
5745 : return 0;
5746 : break;
5747 0 : case MINUS:
5748 : /* Inf - Inf = NaN plus exception. */
5749 0 : if (s0 == s1)
5750 : return 0;
5751 : break;
5752 : case DIV:
5753 : /* Inf / Inf = NaN plus exception. */
5754 : return 0;
5755 : default:
5756 : break;
5757 : }
5758 : }
5759 :
5760 7932 : if (code == MULT && MODE_HAS_INFINITIES (mode) && HONOR_NANS (mode)
5761 1949 : && flag_trapping_math
5762 7251 : && ((REAL_VALUE_ISINF (f0) && real_equal (&f1, &dconst0))
5763 1895 : || (REAL_VALUE_ISINF (f1)
5764 10 : && real_equal (&f0, &dconst0))))
5765 : /* Inf * 0 = NaN plus exception. */
5766 18 : return 0;
5767 :
5768 5330 : inexact = real_arithmetic (&value, rtx_to_tree_code (code),
5769 : &f0, &f1);
5770 5330 : real_convert (&result, mode, &value);
5771 :
5772 : /* Don't constant fold this floating point operation if
5773 : the result has overflowed and flag_trapping_math. */
5774 :
5775 5330 : if (flag_trapping_math
5776 20648 : && MODE_HAS_INFINITIES (mode)
5777 5162 : && REAL_VALUE_ISINF (result)
5778 1104 : && !REAL_VALUE_ISINF (f0)
5779 6420 : && !REAL_VALUE_ISINF (f1))
5780 : /* Overflow plus exception. */
5781 1090 : return 0;
5782 :
5783 : /* Don't constant fold this floating point operation if the
5784 : result may dependent upon the run-time rounding mode and
5785 : flag_rounding_math is set, or if GCC's software emulation
5786 : is unable to accurately represent the result. */
5787 :
5788 4240 : if ((flag_rounding_math
5789 26999 : || (MODE_COMPOSITE_P (mode) && !flag_unsafe_math_optimizations))
5790 4240 : && (inexact || !real_identical (&result, &value)))
5791 378 : return NULL_RTX;
5792 :
5793 3862 : return const_double_from_real_value (result, mode);
5794 : }
5795 : }
5796 :
5797 : /* We can fold some multi-word operations. */
5798 477121661 : scalar_int_mode int_mode;
5799 477121661 : if (is_a <scalar_int_mode> (mode, &int_mode)
5800 407747912 : && CONST_SCALAR_INT_P (op0)
5801 39925275 : && CONST_SCALAR_INT_P (op1)
5802 33318844 : && GET_MODE_PRECISION (int_mode) <= MAX_BITSIZE_MODE_ANY_INT)
5803 : {
5804 33318844 : wide_int result;
5805 33318844 : wi::overflow_type overflow;
5806 33318844 : rtx_mode_t pop0 = rtx_mode_t (op0, int_mode);
5807 33318844 : rtx_mode_t pop1 = rtx_mode_t (op1, int_mode);
5808 :
5809 : #if TARGET_SUPPORTS_WIDE_INT == 0
5810 : /* This assert keeps the simplification from producing a result
5811 : that cannot be represented in a CONST_DOUBLE but a lot of
5812 : upstream callers expect that this function never fails to
5813 : simplify something and so you if you added this to the test
5814 : above the code would die later anyway. If this assert
5815 : happens, you just need to make the port support wide int. */
5816 : gcc_assert (GET_MODE_PRECISION (int_mode) <= HOST_BITS_PER_DOUBLE_INT);
5817 : #endif
5818 33318844 : switch (code)
5819 : {
5820 1123280 : case MINUS:
5821 1123280 : result = wi::sub (pop0, pop1);
5822 1123280 : break;
5823 :
5824 26018197 : case PLUS:
5825 26018197 : result = wi::add (pop0, pop1);
5826 26018197 : break;
5827 :
5828 315673 : case MULT:
5829 315673 : result = wi::mul (pop0, pop1);
5830 315673 : break;
5831 :
5832 6205 : case DIV:
5833 6205 : result = wi::div_trunc (pop0, pop1, SIGNED, &overflow);
5834 6205 : if (overflow)
5835 : return NULL_RTX;
5836 : break;
5837 :
5838 251 : case MOD:
5839 251 : result = wi::mod_trunc (pop0, pop1, SIGNED, &overflow);
5840 251 : if (overflow)
5841 : return NULL_RTX;
5842 : break;
5843 :
5844 6388 : case UDIV:
5845 6388 : result = wi::div_trunc (pop0, pop1, UNSIGNED, &overflow);
5846 6388 : if (overflow)
5847 : return NULL_RTX;
5848 : break;
5849 :
5850 16088 : case UMOD:
5851 16088 : result = wi::mod_trunc (pop0, pop1, UNSIGNED, &overflow);
5852 16088 : if (overflow)
5853 : return NULL_RTX;
5854 : break;
5855 :
5856 594971 : case AND:
5857 594971 : result = wi::bit_and (pop0, pop1);
5858 594971 : break;
5859 :
5860 281778 : case IOR:
5861 281778 : result = wi::bit_or (pop0, pop1);
5862 281778 : break;
5863 :
5864 70271 : case XOR:
5865 70271 : result = wi::bit_xor (pop0, pop1);
5866 70271 : break;
5867 :
5868 1761 : case SMIN:
5869 1761 : result = wi::smin (pop0, pop1);
5870 1761 : break;
5871 :
5872 1849 : case SMAX:
5873 1849 : result = wi::smax (pop0, pop1);
5874 1849 : break;
5875 :
5876 3405 : case UMIN:
5877 3405 : result = wi::umin (pop0, pop1);
5878 3405 : break;
5879 :
5880 3000 : case UMAX:
5881 3000 : result = wi::umax (pop0, pop1);
5882 3000 : break;
5883 :
5884 4832686 : case LSHIFTRT:
5885 4832686 : case ASHIFTRT:
5886 4832686 : case ASHIFT:
5887 4832686 : case SS_ASHIFT:
5888 4832686 : case US_ASHIFT:
5889 4832686 : {
5890 : /* The shift count might be in SImode while int_mode might
5891 : be narrower. On IA-64 it is even DImode. If the shift
5892 : count is too large and doesn't fit into int_mode, we'd
5893 : ICE. So, if int_mode is narrower than
5894 : HOST_BITS_PER_WIDE_INT, use DImode for the shift count. */
5895 4832686 : if (GET_MODE (op1) == VOIDmode
5896 4832686 : && GET_MODE_PRECISION (int_mode) < HOST_BITS_PER_WIDE_INT)
5897 1896674 : pop1 = rtx_mode_t (op1, DImode);
5898 :
5899 4832686 : wide_int wop1 = pop1;
5900 4832686 : if (SHIFT_COUNT_TRUNCATED)
5901 : wop1 = wi::umod_trunc (wop1, GET_MODE_PRECISION (int_mode));
5902 4832686 : else if (wi::geu_p (wop1, GET_MODE_PRECISION (int_mode)))
5903 132 : return NULL_RTX;
5904 :
5905 4832554 : switch (code)
5906 : {
5907 2768672 : case LSHIFTRT:
5908 2768672 : result = wi::lrshift (pop0, wop1);
5909 2768672 : break;
5910 :
5911 66851 : case ASHIFTRT:
5912 66851 : result = wi::arshift (pop0, wop1);
5913 66851 : break;
5914 :
5915 1997031 : case ASHIFT:
5916 1997031 : result = wi::lshift (pop0, wop1);
5917 1997031 : break;
5918 :
5919 0 : case SS_ASHIFT:
5920 0 : if (wi::leu_p (wop1, wi::clrsb (pop0)))
5921 0 : result = wi::lshift (pop0, wop1);
5922 0 : else if (wi::neg_p (pop0))
5923 0 : result = wi::min_value (int_mode, SIGNED);
5924 : else
5925 0 : result = wi::max_value (int_mode, SIGNED);
5926 : break;
5927 :
5928 0 : case US_ASHIFT:
5929 0 : if (wi::eq_p (pop0, 0))
5930 0 : result = pop0;
5931 0 : else if (wi::leu_p (wop1, wi::clz (pop0)))
5932 0 : result = wi::lshift (pop0, wop1);
5933 : else
5934 0 : result = wi::max_value (int_mode, UNSIGNED);
5935 : break;
5936 :
5937 0 : default:
5938 0 : gcc_unreachable ();
5939 : }
5940 4832554 : break;
5941 4832686 : }
5942 34477 : case ROTATE:
5943 34477 : case ROTATERT:
5944 34477 : {
5945 : /* The rotate count might be in SImode while int_mode might
5946 : be narrower. On IA-64 it is even DImode. If the shift
5947 : count is too large and doesn't fit into int_mode, we'd
5948 : ICE. So, if int_mode is narrower than
5949 : HOST_BITS_PER_WIDE_INT, use DImode for the shift count. */
5950 34477 : if (GET_MODE (op1) == VOIDmode
5951 34477 : && GET_MODE_PRECISION (int_mode) < HOST_BITS_PER_WIDE_INT)
5952 27505 : pop1 = rtx_mode_t (op1, DImode);
5953 :
5954 34477 : if (wi::neg_p (pop1))
5955 : return NULL_RTX;
5956 :
5957 34377 : switch (code)
5958 : {
5959 11569 : case ROTATE:
5960 11569 : result = wi::lrotate (pop0, pop1);
5961 11569 : break;
5962 :
5963 22808 : case ROTATERT:
5964 22808 : result = wi::rrotate (pop0, pop1);
5965 22808 : break;
5966 :
5967 0 : default:
5968 0 : gcc_unreachable ();
5969 : }
5970 : break;
5971 : }
5972 :
5973 2270 : case SS_PLUS:
5974 2270 : result = wi::add (pop0, pop1, SIGNED, &overflow);
5975 4484 : clamp_signed_saturation:
5976 4484 : if (overflow == wi::OVF_OVERFLOW)
5977 314 : result = wi::max_value (GET_MODE_PRECISION (int_mode), SIGNED);
5978 4170 : else if (overflow == wi::OVF_UNDERFLOW)
5979 278 : result = wi::min_value (GET_MODE_PRECISION (int_mode), SIGNED);
5980 3892 : else if (overflow != wi::OVF_NONE)
5981 : return NULL_RTX;
5982 : break;
5983 :
5984 2220 : case US_PLUS:
5985 2220 : result = wi::add (pop0, pop1, UNSIGNED, &overflow);
5986 2220 : clamp_unsigned_saturation:
5987 2220 : if (overflow != wi::OVF_NONE)
5988 461 : result = wi::max_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5989 : break;
5990 :
5991 2214 : case SS_MINUS:
5992 2214 : result = wi::sub (pop0, pop1, SIGNED, &overflow);
5993 2214 : goto clamp_signed_saturation;
5994 :
5995 1852 : case US_MINUS:
5996 1852 : result = wi::sub (pop0, pop1, UNSIGNED, &overflow);
5997 1852 : if (overflow != wi::OVF_NONE)
5998 1203 : result = wi::min_value (GET_MODE_PRECISION (int_mode), UNSIGNED);
5999 : break;
6000 :
6001 0 : case SS_MULT:
6002 0 : result = wi::mul (pop0, pop1, SIGNED, &overflow);
6003 0 : goto clamp_signed_saturation;
6004 :
6005 0 : case US_MULT:
6006 0 : result = wi::mul (pop0, pop1, UNSIGNED, &overflow);
6007 0 : goto clamp_unsigned_saturation;
6008 :
6009 8 : case SMUL_HIGHPART:
6010 8 : result = wi::mul_high (pop0, pop1, SIGNED);
6011 8 : break;
6012 :
6013 0 : case UMUL_HIGHPART:
6014 0 : result = wi::mul_high (pop0, pop1, UNSIGNED);
6015 0 : break;
6016 :
6017 : default:
6018 : return NULL_RTX;
6019 : }
6020 33318071 : return immed_wide_int_const (result, int_mode);
6021 33318844 : }
6022 :
6023 : /* Handle polynomial integers. */
6024 : if (NUM_POLY_INT_COEFFS > 1
6025 : && is_a <scalar_int_mode> (mode, &int_mode)
6026 : && poly_int_rtx_p (op0)
6027 : && poly_int_rtx_p (op1))
6028 : {
6029 : poly_wide_int result;
6030 : switch (code)
6031 : {
6032 : case PLUS:
6033 : result = wi::to_poly_wide (op0, mode) + wi::to_poly_wide (op1, mode);
6034 : break;
6035 :
6036 : case MINUS:
6037 : result = wi::to_poly_wide (op0, mode) - wi::to_poly_wide (op1, mode);
6038 : break;
6039 :
6040 : case MULT:
6041 : if (CONST_SCALAR_INT_P (op1))
6042 : result = wi::to_poly_wide (op0, mode) * rtx_mode_t (op1, mode);
6043 : else
6044 : return NULL_RTX;
6045 : break;
6046 :
6047 : case ASHIFT:
6048 : if (CONST_SCALAR_INT_P (op1))
6049 : {
6050 : wide_int shift
6051 : = rtx_mode_t (op1,
6052 : GET_MODE (op1) == VOIDmode
6053 : && (GET_MODE_PRECISION (int_mode)
6054 : < HOST_BITS_PER_WIDE_INT)
6055 : ? DImode : mode);
6056 : if (SHIFT_COUNT_TRUNCATED)
6057 : shift = wi::umod_trunc (shift, GET_MODE_PRECISION (int_mode));
6058 : else if (wi::geu_p (shift, GET_MODE_PRECISION (int_mode)))
6059 : return NULL_RTX;
6060 : result = wi::to_poly_wide (op0, mode) << shift;
6061 : }
6062 : else
6063 : return NULL_RTX;
6064 : break;
6065 :
6066 : case IOR:
6067 : if (!CONST_SCALAR_INT_P (op1)
6068 : || !can_ior_p (wi::to_poly_wide (op0, mode),
6069 : rtx_mode_t (op1, mode), &result))
6070 : return NULL_RTX;
6071 : break;
6072 :
6073 : default:
6074 : return NULL_RTX;
6075 : }
6076 : return immed_wide_int_const (result, int_mode);
6077 : }
6078 :
6079 : return NULL_RTX;
6080 : }
6081 :
6082 :
6083 :
6084 : /* Return a positive integer if X should sort after Y. The value
6085 : returned is 1 if and only if X and Y are both regs. */
6086 :
6087 : static int
6088 113179987 : simplify_plus_minus_op_data_cmp (rtx x, rtx y)
6089 : {
6090 113179987 : int result;
6091 :
6092 113179987 : result = (commutative_operand_precedence (y)
6093 113179987 : - commutative_operand_precedence (x));
6094 113179987 : if (result)
6095 79214870 : return result + result;
6096 :
6097 : /* Group together equal REGs to do more simplification. */
6098 33965117 : if (REG_P (x) && REG_P (y))
6099 8637493 : return REGNO (x) > REGNO (y);
6100 :
6101 : return 0;
6102 : }
6103 :
6104 : /* Simplify and canonicalize a PLUS or MINUS, at least one of whose
6105 : operands may be another PLUS or MINUS.
6106 :
6107 : Rather than test for specific case, we do this by a brute-force method
6108 : and do all possible simplifications until no more changes occur. Then
6109 : we rebuild the operation.
6110 :
6111 : May return NULL_RTX when no changes were made. */
6112 :
6113 : rtx
6114 38029972 : simplify_context::simplify_plus_minus (rtx_code code, machine_mode mode,
6115 : rtx op0, rtx op1)
6116 : {
6117 38029972 : struct simplify_plus_minus_op_data
6118 : {
6119 : rtx op;
6120 : short neg;
6121 : } ops[16];
6122 38029972 : rtx result, tem;
6123 38029972 : int n_ops = 2;
6124 38029972 : int changed, n_constants, canonicalized = 0;
6125 38029972 : int i, j;
6126 :
6127 38029972 : memset (ops, 0, sizeof ops);
6128 :
6129 : /* Set up the two operands and then expand them until nothing has been
6130 : changed. If we run out of room in our array, give up; this should
6131 : almost never happen. */
6132 :
6133 38029972 : ops[0].op = op0;
6134 38029972 : ops[0].neg = 0;
6135 38029972 : ops[1].op = op1;
6136 38029972 : ops[1].neg = (code == MINUS);
6137 :
6138 77416714 : do
6139 : {
6140 77416714 : changed = 0;
6141 77416714 : n_constants = 0;
6142 :
6143 313545232 : for (i = 0; i < n_ops; i++)
6144 : {
6145 236128533 : rtx this_op = ops[i].op;
6146 236128533 : int this_neg = ops[i].neg;
6147 236128533 : enum rtx_code this_code = GET_CODE (this_op);
6148 :
6149 236128533 : switch (this_code)
6150 : {
6151 38528990 : case PLUS:
6152 38528990 : case MINUS:
6153 38528990 : if (n_ops == ARRAY_SIZE (ops))
6154 : return NULL_RTX;
6155 :
6156 38528975 : ops[n_ops].op = XEXP (this_op, 1);
6157 38528975 : ops[n_ops].neg = (this_code == MINUS) ^ this_neg;
6158 38528975 : n_ops++;
6159 :
6160 38528975 : ops[i].op = XEXP (this_op, 0);
6161 38528975 : changed = 1;
6162 : /* If this operand was negated then we will potentially
6163 : canonicalize the expression. Similarly if we don't
6164 : place the operands adjacent we're re-ordering the
6165 : expression and thus might be performing a
6166 : canonicalization. Ignore register re-ordering.
6167 : ??? It might be better to shuffle the ops array here,
6168 : but then (plus (plus (A, B), plus (C, D))) wouldn't
6169 : be seen as non-canonical. */
6170 38528975 : if (this_neg
6171 37829945 : || (i != n_ops - 2
6172 37141265 : && !(REG_P (ops[i].op) && REG_P (ops[n_ops - 1].op))))
6173 236128518 : canonicalized = 1;
6174 : break;
6175 :
6176 1682 : case NEG:
6177 1682 : ops[i].op = XEXP (this_op, 0);
6178 1682 : ops[i].neg = ! this_neg;
6179 1682 : changed = 1;
6180 1682 : canonicalized = 1;
6181 1682 : break;
6182 :
6183 1486941 : case CONST:
6184 1486941 : if (n_ops != ARRAY_SIZE (ops)
6185 1486941 : && GET_CODE (XEXP (this_op, 0)) == PLUS
6186 1364674 : && CONSTANT_P (XEXP (XEXP (this_op, 0), 0))
6187 1345065 : && CONSTANT_P (XEXP (XEXP (this_op, 0), 1)))
6188 : {
6189 1345065 : ops[i].op = XEXP (XEXP (this_op, 0), 0);
6190 1345065 : ops[n_ops].op = XEXP (XEXP (this_op, 0), 1);
6191 1345065 : ops[n_ops].neg = this_neg;
6192 1345065 : n_ops++;
6193 1345065 : changed = 1;
6194 1345065 : canonicalized = 1;
6195 : }
6196 : break;
6197 :
6198 40040 : case NOT:
6199 : /* ~a -> (-a - 1) */
6200 40040 : if (n_ops != ARRAY_SIZE (ops))
6201 : {
6202 40040 : ops[n_ops].op = CONSTM1_RTX (mode);
6203 40040 : ops[n_ops++].neg = this_neg;
6204 40040 : ops[i].op = XEXP (this_op, 0);
6205 40040 : ops[i].neg = !this_neg;
6206 40040 : changed = 1;
6207 40040 : canonicalized = 1;
6208 : }
6209 : break;
6210 :
6211 118675202 : CASE_CONST_SCALAR_INT:
6212 118675202 : case CONST_POLY_INT:
6213 118675202 : n_constants++;
6214 118675202 : if (this_neg)
6215 : {
6216 1174644 : ops[i].op = neg_poly_int_rtx (mode, this_op);
6217 1174644 : ops[i].neg = 0;
6218 1174644 : changed = 1;
6219 1174644 : canonicalized = 1;
6220 : }
6221 : break;
6222 :
6223 : default:
6224 : break;
6225 : }
6226 : }
6227 : }
6228 77416699 : while (changed);
6229 :
6230 38029957 : if (n_constants > 1)
6231 23574558 : canonicalized = 1;
6232 :
6233 38029957 : gcc_assert (n_ops >= 2);
6234 :
6235 : /* If we only have two operands, we can avoid the loops. */
6236 38029957 : if (n_ops == 2)
6237 : {
6238 0 : enum rtx_code code = ops[0].neg || ops[1].neg ? MINUS : PLUS;
6239 0 : rtx lhs, rhs;
6240 :
6241 : /* Get the two operands. Be careful with the order, especially for
6242 : the cases where code == MINUS. */
6243 0 : if (ops[0].neg && ops[1].neg)
6244 : {
6245 0 : lhs = gen_rtx_NEG (mode, ops[0].op);
6246 0 : rhs = ops[1].op;
6247 : }
6248 0 : else if (ops[0].neg)
6249 : {
6250 0 : lhs = ops[1].op;
6251 0 : rhs = ops[0].op;
6252 : }
6253 : else
6254 : {
6255 0 : lhs = ops[0].op;
6256 0 : rhs = ops[1].op;
6257 : }
6258 :
6259 0 : return simplify_const_binary_operation (code, mode, lhs, rhs);
6260 : }
6261 :
6262 : /* Now simplify each pair of operands until nothing changes. */
6263 62386931 : while (1)
6264 : {
6265 : /* Insertion sort is good enough for a small array. */
6266 165175316 : for (i = 1; i < n_ops; i++)
6267 : {
6268 102788385 : struct simplify_plus_minus_op_data save;
6269 102788385 : int cmp;
6270 :
6271 102788385 : j = i - 1;
6272 102788385 : cmp = simplify_plus_minus_op_data_cmp (ops[j].op, ops[i].op);
6273 102788385 : if (cmp <= 0)
6274 90582936 : continue;
6275 : /* Just swapping registers doesn't count as canonicalization. */
6276 12205449 : if (cmp != 1)
6277 9215873 : canonicalized = 1;
6278 :
6279 12205449 : save = ops[i];
6280 14612977 : do
6281 14612977 : ops[j + 1] = ops[j];
6282 14612977 : while (j--
6283 26818426 : && simplify_plus_minus_op_data_cmp (ops[j].op, save.op) > 0);
6284 12205449 : ops[j + 1] = save;
6285 : }
6286 :
6287 62386931 : changed = 0;
6288 165175316 : for (i = n_ops - 1; i > 0; i--)
6289 246882078 : for (j = i - 1; j >= 0; j--)
6290 : {
6291 144914494 : rtx lhs = ops[j].op, rhs = ops[i].op;
6292 144914494 : int lneg = ops[j].neg, rneg = ops[i].neg;
6293 :
6294 144914494 : if (lhs != 0 && rhs != 0)
6295 : {
6296 119711770 : enum rtx_code ncode = PLUS;
6297 :
6298 119711770 : if (lneg != rneg)
6299 : {
6300 10565329 : ncode = MINUS;
6301 10565329 : if (lneg)
6302 6781018 : std::swap (lhs, rhs);
6303 : }
6304 109146441 : else if (swap_commutative_operands_p (lhs, rhs))
6305 215560 : std::swap (lhs, rhs);
6306 :
6307 119711770 : if ((GET_CODE (lhs) == CONST || CONST_INT_P (lhs))
6308 28126051 : && (GET_CODE (rhs) == CONST || CONST_INT_P (rhs)))
6309 : {
6310 23724788 : rtx tem_lhs, tem_rhs;
6311 :
6312 23724788 : tem_lhs = GET_CODE (lhs) == CONST ? XEXP (lhs, 0) : lhs;
6313 23724788 : tem_rhs = GET_CODE (rhs) == CONST ? XEXP (rhs, 0) : rhs;
6314 23724788 : tem = simplify_binary_operation (ncode, mode, tem_lhs,
6315 : tem_rhs);
6316 :
6317 23724788 : if (tem && !CONSTANT_P (tem))
6318 1752 : tem = gen_rtx_CONST (GET_MODE (tem), tem);
6319 : }
6320 : else
6321 95986982 : tem = simplify_binary_operation (ncode, mode, lhs, rhs);
6322 :
6323 95988734 : if (tem)
6324 : {
6325 : /* Reject "simplifications" that just wrap the two
6326 : arguments in a CONST. Failure to do so can result
6327 : in infinite recursion with simplify_binary_operation
6328 : when it calls us to simplify CONST operations.
6329 : Also, if we find such a simplification, don't try
6330 : any more combinations with this rhs: We must have
6331 : something like symbol+offset, ie. one of the
6332 : trivial CONST expressions we handle later. */
6333 25660067 : if (GET_CODE (tem) == CONST
6334 822553 : && GET_CODE (XEXP (tem, 0)) == ncode
6335 822004 : && XEXP (XEXP (tem, 0), 0) == lhs
6336 820801 : && XEXP (XEXP (tem, 0), 1) == rhs)
6337 : break;
6338 24839266 : lneg &= rneg;
6339 24839266 : if (GET_CODE (tem) == NEG)
6340 45644 : tem = XEXP (tem, 0), lneg = !lneg;
6341 24839266 : if (poly_int_rtx_p (tem) && lneg)
6342 0 : tem = neg_poly_int_rtx (mode, tem), lneg = 0;
6343 :
6344 24839266 : ops[i].op = tem;
6345 24839266 : ops[i].neg = lneg;
6346 24839266 : ops[j].op = NULL_RTX;
6347 24839266 : changed = 1;
6348 24839266 : canonicalized = 1;
6349 : }
6350 : }
6351 : }
6352 :
6353 62386931 : if (!changed)
6354 : break;
6355 :
6356 : /* Pack all the operands to the lower-numbered entries. */
6357 98397772 : for (i = 0, j = 0; j < n_ops; j++)
6358 74040798 : if (ops[j].op)
6359 : {
6360 49201532 : ops[i] = ops[j];
6361 49201532 : i++;
6362 : }
6363 : n_ops = i;
6364 : }
6365 :
6366 : /* If nothing changed, check that rematerialization of rtl instructions
6367 : is still required. */
6368 38029957 : if (!canonicalized)
6369 : {
6370 : /* Perform rematerialization if only all operands are registers and
6371 : all operations are PLUS. */
6372 : /* ??? Also disallow (non-global, non-frame) fixed registers to work
6373 : around rs6000 and how it uses the CA register. See PR67145. */
6374 5130123 : for (i = 0; i < n_ops; i++)
6375 4140638 : if (ops[i].neg
6376 3861042 : || !REG_P (ops[i].op)
6377 7422167 : || (REGNO (ops[i].op) < FIRST_PSEUDO_REGISTER
6378 317365 : && fixed_regs[REGNO (ops[i].op)]
6379 193 : && !global_regs[REGNO (ops[i].op)]
6380 193 : && ops[i].op != frame_pointer_rtx
6381 101 : && ops[i].op != arg_pointer_rtx
6382 88 : && ops[i].op != stack_pointer_rtx))
6383 : return NULL_RTX;
6384 989485 : goto gen_result;
6385 : }
6386 :
6387 : /* Create (minus -C X) instead of (neg (const (plus X C))). */
6388 36181363 : if (n_ops == 2
6389 22639817 : && CONST_INT_P (ops[1].op)
6390 22185843 : && CONSTANT_P (ops[0].op)
6391 162 : && ops[0].neg)
6392 56 : return gen_rtx_fmt_ee (MINUS, mode, ops[1].op, ops[0].op);
6393 :
6394 : /* We suppressed creation of trivial CONST expressions in the
6395 : combination loop to avoid recursion. Create one manually now.
6396 : The combination loop should have ensured that there is exactly
6397 : one CONST_INT, and the sort will have ensured that it is last
6398 : in the array and that any other constant will be next-to-last. */
6399 :
6400 36181307 : if (n_ops > 1
6401 35635309 : && poly_int_rtx_p (ops[n_ops - 1].op)
6402 69648224 : && CONSTANT_P (ops[n_ops - 2].op))
6403 : {
6404 1416073 : rtx value = ops[n_ops - 1].op;
6405 1416073 : if (ops[n_ops - 1].neg ^ ops[n_ops - 2].neg)
6406 680692 : value = neg_poly_int_rtx (mode, value);
6407 1416073 : if (CONST_INT_P (value))
6408 : {
6409 2832146 : ops[n_ops - 2].op = plus_constant (mode, ops[n_ops - 2].op,
6410 1416073 : INTVAL (value));
6411 1416073 : n_ops--;
6412 : }
6413 : }
6414 :
6415 : /* Put a non-negated operand first, if possible. */
6416 :
6417 37883958 : for (i = 0; i < n_ops && ops[i].neg; i++)
6418 1702651 : continue;
6419 36181307 : if (i == n_ops)
6420 9503 : ops[0].op = gen_rtx_NEG (mode, ops[0].op);
6421 36171804 : else if (i != 0)
6422 : {
6423 1601199 : tem = ops[0].op;
6424 1601199 : ops[0] = ops[i];
6425 1601199 : ops[i].op = tem;
6426 1601199 : ops[i].neg = 1;
6427 : }
6428 :
6429 : /* Now make the result by performing the requested operations. */
6430 34570605 : gen_result:
6431 37170792 : result = ops[0].op;
6432 87139135 : for (i = 1; i < n_ops; i++)
6433 99936686 : result = gen_rtx_fmt_ee (ops[i].neg ? MINUS : PLUS,
6434 : mode, result, ops[i].op);
6435 :
6436 : return result;
6437 1702651 : }
6438 :
6439 : /* Check whether an operand is suitable for calling simplify_plus_minus. */
6440 : static bool
6441 514215133 : plus_minus_operand_p (const_rtx x)
6442 : {
6443 514215133 : return GET_CODE (x) == PLUS
6444 514215133 : || GET_CODE (x) == MINUS
6445 514215133 : || (GET_CODE (x) == CONST
6446 1853752 : && GET_CODE (XEXP (x, 0)) == PLUS
6447 1246527 : && CONSTANT_P (XEXP (XEXP (x, 0), 0))
6448 1174023 : && CONSTANT_P (XEXP (XEXP (x, 0), 1)));
6449 : }
6450 :
6451 : /* Like simplify_binary_operation except used for relational operators.
6452 : MODE is the mode of the result. If MODE is VOIDmode, both operands must
6453 : not also be VOIDmode.
6454 :
6455 : CMP_MODE specifies in which mode the comparison is done in, so it is
6456 : the mode of the operands. If CMP_MODE is VOIDmode, it is taken from
6457 : the operands or, if both are VOIDmode, the operands are compared in
6458 : "infinite precision". */
6459 : rtx
6460 130213457 : simplify_context::simplify_relational_operation (rtx_code code,
6461 : machine_mode mode,
6462 : machine_mode cmp_mode,
6463 : rtx op0, rtx op1)
6464 : {
6465 130213457 : rtx tem, trueop0, trueop1;
6466 :
6467 130213457 : if (cmp_mode == VOIDmode)
6468 28744771 : cmp_mode = GET_MODE (op0);
6469 28744771 : if (cmp_mode == VOIDmode)
6470 511256 : cmp_mode = GET_MODE (op1);
6471 :
6472 130213457 : tem = simplify_const_relational_operation (code, cmp_mode, op0, op1);
6473 130213457 : if (tem)
6474 878743 : return relational_result (mode, cmp_mode, tem);
6475 :
6476 : /* For the following tests, ensure const0_rtx is op1. */
6477 129334714 : if (swap_commutative_operands_p (op0, op1)
6478 129334714 : || (op0 == const0_rtx && op1 != const0_rtx))
6479 2760401 : std::swap (op0, op1), code = swap_condition (code);
6480 :
6481 : /* If op0 is a compare, extract the comparison arguments from it. */
6482 129334714 : if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6483 13996101 : return simplify_gen_relational (code, mode, VOIDmode,
6484 13996101 : XEXP (op0, 0), XEXP (op0, 1));
6485 :
6486 115338613 : if (GET_MODE_CLASS (cmp_mode) == MODE_CC)
6487 : return NULL_RTX;
6488 :
6489 84917415 : trueop0 = avoid_constant_pool_reference (op0);
6490 84917415 : trueop1 = avoid_constant_pool_reference (op1);
6491 84917415 : return simplify_relational_operation_1 (code, mode, cmp_mode,
6492 84917415 : trueop0, trueop1);
6493 : }
6494 :
6495 : /* This part of simplify_relational_operation is only used when CMP_MODE
6496 : is not in class MODE_CC (i.e. it is a real comparison).
6497 :
6498 : MODE is the mode of the result, while CMP_MODE specifies in which
6499 : mode the comparison is done in, so it is the mode of the operands. */
6500 :
6501 : rtx
6502 84917415 : simplify_context::simplify_relational_operation_1 (rtx_code code,
6503 : machine_mode mode,
6504 : machine_mode cmp_mode,
6505 : rtx op0, rtx op1)
6506 : {
6507 84917415 : enum rtx_code op0code = GET_CODE (op0);
6508 :
6509 84917415 : if (op1 == const0_rtx && COMPARISON_P (op0))
6510 : {
6511 : /* If op0 is a comparison, extract the comparison arguments
6512 : from it. */
6513 453628 : if (code == NE)
6514 : {
6515 209705 : if (GET_MODE (op0) == mode)
6516 164 : return simplify_rtx (op0);
6517 : else
6518 209541 : return simplify_gen_relational (GET_CODE (op0), mode, VOIDmode,
6519 209541 : XEXP (op0, 0), XEXP (op0, 1));
6520 : }
6521 243923 : else if (code == EQ)
6522 : {
6523 119341 : enum rtx_code new_code = reversed_comparison_code (op0, NULL);
6524 119341 : if (new_code != UNKNOWN)
6525 119024 : return simplify_gen_relational (new_code, mode, VOIDmode,
6526 119024 : XEXP (op0, 0), XEXP (op0, 1));
6527 : }
6528 : }
6529 :
6530 : /* (LTU/GEU (PLUS a C) C), where C is constant, can be simplified to
6531 : (GEU/LTU a -C). Likewise for (LTU/GEU (PLUS a C) a). */
6532 84588686 : if ((code == LTU || code == GEU)
6533 5231179 : && GET_CODE (op0) == PLUS
6534 693132 : && CONST_INT_P (XEXP (op0, 1))
6535 463422 : && (rtx_equal_p (op1, XEXP (op0, 0))
6536 349603 : || rtx_equal_p (op1, XEXP (op0, 1)))
6537 : /* (LTU/GEU (PLUS a 0) 0) is not the same as (GEU/LTU a 0). */
6538 84768223 : && XEXP (op0, 1) != const0_rtx)
6539 : {
6540 179537 : rtx new_cmp
6541 179537 : = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
6542 181520 : return simplify_gen_relational ((code == LTU ? GEU : LTU), mode,
6543 179537 : cmp_mode, XEXP (op0, 0), new_cmp);
6544 : }
6545 :
6546 : /* (GTU (PLUS a C) (C - 1)) where C is a non-zero constant can be
6547 : transformed into (LTU a -C). */
6548 84409149 : if (code == GTU && GET_CODE (op0) == PLUS && CONST_INT_P (op1)
6549 342669 : && CONST_INT_P (XEXP (op0, 1))
6550 265150 : && (UINTVAL (op1) == UINTVAL (XEXP (op0, 1)) - 1)
6551 28757 : && XEXP (op0, 1) != const0_rtx)
6552 : {
6553 28757 : rtx new_cmp
6554 28757 : = simplify_gen_unary (NEG, cmp_mode, XEXP (op0, 1), cmp_mode);
6555 28757 : return simplify_gen_relational (LTU, mode, cmp_mode,
6556 28757 : XEXP (op0, 0), new_cmp);
6557 : }
6558 :
6559 : /* Canonicalize (LTU/GEU (PLUS a b) b) as (LTU/GEU (PLUS a b) a). */
6560 84380392 : if ((code == LTU || code == GEU)
6561 5051642 : && GET_CODE (op0) == PLUS
6562 513595 : && rtx_equal_p (op1, XEXP (op0, 1))
6563 : /* Don't recurse "infinitely" for (LTU/GEU (PLUS b b) b). */
6564 84385808 : && !rtx_equal_p (op1, XEXP (op0, 0)))
6565 5416 : return simplify_gen_relational (code, mode, cmp_mode, op0,
6566 5416 : copy_rtx (XEXP (op0, 0)));
6567 :
6568 84374976 : if (op1 == const0_rtx)
6569 : {
6570 : /* Canonicalize (GTU x 0) as (NE x 0). */
6571 37139118 : if (code == GTU)
6572 172201 : return simplify_gen_relational (NE, mode, cmp_mode, op0, op1);
6573 : /* Canonicalize (LEU x 0) as (EQ x 0). */
6574 36966917 : if (code == LEU)
6575 40180 : return simplify_gen_relational (EQ, mode, cmp_mode, op0, op1);
6576 :
6577 36926737 : if ((code == NE || code == EQ)
6578 : /* Verify op0 is IOR */
6579 33289369 : && GET_CODE (op0) == IOR
6580 : /* only enters if op1 is 0 */
6581 : /* Verify IOR operand is NE */
6582 546497 : && GET_CODE (XEXP (op0, 0)) == NE
6583 15711 : && GET_MODE (XEXP (XEXP (op0, 0), 0)) == cmp_mode
6584 : /* Verify second NE operand is 0 */
6585 110 : && XEXP (XEXP (op0, 0), 1) == CONST0_RTX (cmp_mode))
6586 : {
6587 31 : rtx t = gen_rtx_IOR (cmp_mode, XEXP (XEXP (op0, 0), 0), XEXP (op0, 1));
6588 31 : t = gen_rtx_fmt_ee (code, mode, t, CONST0_RTX (mode));
6589 31 : return t;
6590 : }
6591 :
6592 : }
6593 47235858 : else if (op1 == const1_rtx)
6594 : {
6595 3107460 : switch (code)
6596 : {
6597 9101 : case GE:
6598 : /* Canonicalize (GE x 1) as (GT x 0). */
6599 9101 : return simplify_gen_relational (GT, mode, cmp_mode,
6600 9101 : op0, const0_rtx);
6601 171670 : case GEU:
6602 : /* Canonicalize (GEU x 1) as (NE x 0). */
6603 171670 : return simplify_gen_relational (NE, mode, cmp_mode,
6604 171670 : op0, const0_rtx);
6605 10444 : case LT:
6606 : /* Canonicalize (LT x 1) as (LE x 0). */
6607 10444 : return simplify_gen_relational (LE, mode, cmp_mode,
6608 10444 : op0, const0_rtx);
6609 52249 : case LTU:
6610 : /* Canonicalize (LTU x 1) as (EQ x 0). */
6611 52249 : return simplify_gen_relational (EQ, mode, cmp_mode,
6612 52249 : op0, const0_rtx);
6613 : default:
6614 : break;
6615 : }
6616 : }
6617 44128398 : else if (op1 == constm1_rtx)
6618 : {
6619 : /* Canonicalize (LE x -1) as (LT x 0). */
6620 1089994 : if (code == LE)
6621 1662 : return simplify_gen_relational (LT, mode, cmp_mode, op0, const0_rtx);
6622 : /* Canonicalize (GT x -1) as (GE x 0). */
6623 1088332 : if (code == GT)
6624 5209 : return simplify_gen_relational (GE, mode, cmp_mode, op0, const0_rtx);
6625 : }
6626 :
6627 : /* (eq/ne (plus x cst1) cst2) simplifies to (eq/ne x (cst2 - cst1)) */
6628 80274861 : if ((code == EQ || code == NE)
6629 62693719 : && (op0code == PLUS || op0code == MINUS)
6630 2533686 : && CONSTANT_P (op1)
6631 930988 : && CONSTANT_P (XEXP (op0, 1))
6632 537484 : && (INTEGRAL_MODE_P (cmp_mode) || flag_unsafe_math_optimizations))
6633 : {
6634 537451 : rtx x = XEXP (op0, 0);
6635 537451 : rtx c = XEXP (op0, 1);
6636 537451 : enum rtx_code invcode = op0code == PLUS ? MINUS : PLUS;
6637 537451 : rtx tem = simplify_gen_binary (invcode, cmp_mode, op1, c);
6638 :
6639 : /* Detect an infinite recursive condition, where we oscillate at this
6640 : simplification case between:
6641 : A + B == C <---> C - B == A,
6642 : where A, B, and C are all constants with non-simplifiable expressions,
6643 : usually SYMBOL_REFs. */
6644 537451 : if (GET_CODE (tem) == invcode
6645 46 : && CONSTANT_P (x)
6646 537469 : && rtx_equal_p (c, XEXP (tem, 1)))
6647 : return NULL_RTX;
6648 :
6649 537433 : return simplify_gen_relational (code, mode, cmp_mode, x, tem);
6650 : }
6651 :
6652 : /* (ne:SI (zero_extract:SI FOO (const_int 1) BAR) (const_int 0))) is
6653 : the same as (zero_extract:SI FOO (const_int 1) BAR). */
6654 62156268 : scalar_int_mode int_mode, int_cmp_mode;
6655 62156268 : if (code == NE
6656 32883685 : && op1 == const0_rtx
6657 2175705 : && is_int_mode (mode, &int_mode)
6658 83318929 : && is_a <scalar_int_mode> (cmp_mode, &int_cmp_mode)
6659 : /* ??? Work-around BImode bugs in the ia64 backend. */
6660 2175705 : && int_mode != BImode
6661 2175681 : && int_cmp_mode != BImode
6662 2175681 : && nonzero_bits (op0, int_cmp_mode) == 1
6663 62156268 : && STORE_FLAG_VALUE == 1)
6664 111698 : return GET_MODE_SIZE (int_mode) > GET_MODE_SIZE (int_cmp_mode)
6665 55849 : ? simplify_gen_unary (ZERO_EXTEND, int_mode, op0, int_cmp_mode)
6666 23354 : : lowpart_subreg (int_mode, op0, int_cmp_mode);
6667 :
6668 : /* (eq/ne (xor x y) 0) simplifies to (eq/ne x y). */
6669 83318929 : if ((code == EQ || code == NE)
6670 62100419 : && op1 == const0_rtx
6671 33159982 : && op0code == XOR)
6672 13197 : return simplify_gen_relational (code, mode, cmp_mode,
6673 13197 : XEXP (op0, 0), XEXP (op0, 1));
6674 :
6675 : /* (eq/ne (xor x y) x) simplifies to (eq/ne y 0). */
6676 62087222 : if ((code == EQ || code == NE)
6677 62087222 : && op0code == XOR
6678 4479 : && rtx_equal_p (XEXP (op0, 0), op1)
6679 6 : && !side_effects_p (XEXP (op0, 0)))
6680 0 : return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 1),
6681 0 : CONST0_RTX (mode));
6682 :
6683 : /* Likewise (eq/ne (xor x y) y) simplifies to (eq/ne x 0). */
6684 83305732 : if ((code == EQ || code == NE)
6685 62087222 : && op0code == XOR
6686 4479 : && rtx_equal_p (XEXP (op0, 1), op1)
6687 83305885 : && !side_effects_p (XEXP (op0, 1)))
6688 153 : return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
6689 153 : CONST0_RTX (mode));
6690 :
6691 : /* (eq/ne (xor x C1) C2) simplifies to (eq/ne x (C1^C2)). */
6692 83305579 : if ((code == EQ || code == NE)
6693 62087069 : && op0code == XOR
6694 4326 : && CONST_SCALAR_INT_P (op1)
6695 987 : && CONST_SCALAR_INT_P (XEXP (op0, 1)))
6696 449 : return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
6697 : simplify_gen_binary (XOR, cmp_mode,
6698 449 : XEXP (op0, 1), op1));
6699 :
6700 : /* Simplify eq/ne (and/ior x y) x/y) for targets with a BICS instruction or
6701 : constant folding if x/y is a constant. */
6702 62086620 : if ((code == EQ || code == NE)
6703 62086620 : && (op0code == AND || op0code == IOR)
6704 3877130 : && !side_effects_p (op1)
6705 3877024 : && op1 != CONST0_RTX (cmp_mode))
6706 : {
6707 : /* Both (eq/ne (and x y) x) and (eq/ne (ior x y) y) simplify to
6708 : (eq/ne (and (not y) x) 0). */
6709 469666 : if ((op0code == AND && rtx_equal_p (XEXP (op0, 0), op1))
6710 941015 : || (op0code == IOR && rtx_equal_p (XEXP (op0, 1), op1)))
6711 : {
6712 25129 : rtx not_y = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 1),
6713 : cmp_mode);
6714 25129 : rtx lhs = simplify_gen_binary (AND, cmp_mode, not_y, XEXP (op0, 0));
6715 :
6716 25129 : return simplify_gen_relational (code, mode, cmp_mode, lhs,
6717 25129 : CONST0_RTX (cmp_mode));
6718 : }
6719 :
6720 : /* Both (eq/ne (and x y) y) and (eq/ne (ior x y) x) simplify to
6721 : (eq/ne (and (not x) y) 0). */
6722 444614 : if ((op0code == AND && rtx_equal_p (XEXP (op0, 1), op1))
6723 873240 : || (op0code == IOR && rtx_equal_p (XEXP (op0, 0), op1)))
6724 : {
6725 42666 : rtx not_x = simplify_gen_unary (NOT, cmp_mode, XEXP (op0, 0),
6726 : cmp_mode);
6727 42666 : rtx lhs = simplify_gen_binary (AND, cmp_mode, not_x, XEXP (op0, 1));
6728 :
6729 42666 : return simplify_gen_relational (code, mode, cmp_mode, lhs,
6730 42666 : CONST0_RTX (cmp_mode));
6731 : }
6732 : }
6733 :
6734 : /* (eq/ne (bswap x) C1) simplifies to (eq/ne x C2) with C2 swapped. */
6735 83237335 : if ((code == EQ || code == NE)
6736 62018825 : && GET_CODE (op0) == BSWAP
6737 324 : && CONST_SCALAR_INT_P (op1))
6738 93 : return simplify_gen_relational (code, mode, cmp_mode, XEXP (op0, 0),
6739 : simplify_gen_unary (BSWAP, cmp_mode,
6740 93 : op1, cmp_mode));
6741 :
6742 : /* (eq/ne (bswap x) (bswap y)) simplifies to (eq/ne x y). */
6743 62018732 : if ((code == EQ || code == NE)
6744 62018732 : && GET_CODE (op0) == BSWAP
6745 231 : && GET_CODE (op1) == BSWAP)
6746 18 : return simplify_gen_relational (code, mode, cmp_mode,
6747 18 : XEXP (op0, 0), XEXP (op1, 0));
6748 :
6749 83237224 : if (op0code == POPCOUNT && op1 == const0_rtx)
6750 0 : switch (code)
6751 : {
6752 0 : case EQ:
6753 0 : case LE:
6754 0 : case LEU:
6755 : /* (eq (popcount x) (const_int 0)) -> (eq x (const_int 0)). */
6756 0 : return simplify_gen_relational (EQ, mode, GET_MODE (XEXP (op0, 0)),
6757 : XEXP (op0, 0),
6758 0 : CONST0_RTX (GET_MODE (XEXP (op0, 0))));
6759 :
6760 0 : case NE:
6761 0 : case GT:
6762 0 : case GTU:
6763 : /* (ne (popcount x) (const_int 0)) -> (ne x (const_int 0)). */
6764 0 : return simplify_gen_relational (NE, mode, GET_MODE (XEXP (op0, 0)),
6765 : XEXP (op0, 0),
6766 0 : CONST0_RTX (GET_MODE (XEXP (op0, 0))));
6767 :
6768 : default:
6769 : break;
6770 : }
6771 :
6772 : /* (ne:SI (subreg:QI (ashift:SI x 7) 0) 0) -> (and:SI x 1). */
6773 83237224 : if (code == NE
6774 32786596 : && op1 == const0_rtx
6775 16546296 : && (op0code == TRUNCATE
6776 147519 : || (partial_subreg_p (op0)
6777 146771 : && subreg_lowpart_p (op0)))
6778 123474 : && SCALAR_INT_MODE_P (mode)
6779 83237224 : && STORE_FLAG_VALUE == 1)
6780 : {
6781 29403 : rtx tmp = XEXP (op0, 0);
6782 29403 : if (GET_CODE (tmp) == ASHIFT
6783 1894 : && GET_MODE (tmp) == mode
6784 188 : && CONST_INT_P (XEXP (tmp, 1))
6785 188 : && is_int_mode (GET_MODE (op0), &int_mode)
6786 29591 : && INTVAL (XEXP (tmp, 1)) == GET_MODE_PRECISION (int_mode) - 1)
6787 188 : return simplify_gen_binary (AND, mode, XEXP (tmp, 0), const1_rtx);
6788 : }
6789 :
6790 : /* For two unsigned booleans A and B:
6791 :
6792 : A > B == ~B & A
6793 : A >= B == ~B | A
6794 : A < B == ~A & B
6795 : A <= B == ~A | B
6796 : A == B == ~A ^ B (== ~B ^ A)
6797 : A != B == A ^ B
6798 :
6799 : For signed comparisons, we have to take STORE_FLAG_VALUE into account,
6800 : with the rules above applying for positive STORE_FLAG_VALUE and with
6801 : the relations reversed for negative STORE_FLAG_VALUE. */
6802 83237036 : if (is_a<scalar_int_mode> (cmp_mode)
6803 80627665 : && COMPARISON_P (op0)
6804 83311282 : && COMPARISON_P (op1))
6805 : {
6806 9974 : rtx t = NULL_RTX;
6807 9974 : if (code == GTU || code == (STORE_FLAG_VALUE > 0 ? GT : LT))
6808 755 : t = simplify_logical_relational_operation (AND, mode, op1, op0, true);
6809 : else if (code == GEU || code == (STORE_FLAG_VALUE > 0 ? GE : LE))
6810 720 : t = simplify_logical_relational_operation (IOR, mode, op1, op0, true);
6811 : else if (code == LTU || code == (STORE_FLAG_VALUE > 0 ? LT : GT))
6812 720 : t = simplify_logical_relational_operation (AND, mode, op0, op1, true);
6813 : else if (code == LEU || code == (STORE_FLAG_VALUE > 0 ? LE : GE))
6814 720 : t = simplify_logical_relational_operation (IOR, mode, op0, op1, true);
6815 : else if (code == EQ)
6816 3249 : t = simplify_logical_relational_operation (XOR, mode, op0, op1, true);
6817 : else if (code == NE)
6818 3810 : t = simplify_logical_relational_operation (XOR, mode, op0, op1);
6819 9974 : if (t)
6820 : return t;
6821 : }
6822 :
6823 : return NULL_RTX;
6824 : }
6825 :
6826 : enum
6827 : {
6828 : CMP_EQ = 1,
6829 : CMP_LT = 2,
6830 : CMP_GT = 4,
6831 : CMP_LTU = 8,
6832 : CMP_GTU = 16
6833 : };
6834 :
6835 :
6836 : /* Convert the known results for EQ, LT, GT, LTU, GTU contained in
6837 : KNOWN_RESULT to a CONST_INT, based on the requested comparison CODE
6838 : For KNOWN_RESULT to make sense it should be either CMP_EQ, or the
6839 : logical OR of one of (CMP_LT, CMP_GT) and one of (CMP_LTU, CMP_GTU).
6840 : For floating-point comparisons, assume that the operands were ordered. */
6841 :
6842 : static rtx
6843 710953 : comparison_result (enum rtx_code code, int known_results)
6844 : {
6845 710953 : switch (code)
6846 : {
6847 131026 : case EQ:
6848 131026 : case UNEQ:
6849 131026 : return (known_results & CMP_EQ) ? const_true_rtx : const0_rtx;
6850 442384 : case NE:
6851 442384 : case LTGT:
6852 442384 : return (known_results & CMP_EQ) ? const0_rtx : const_true_rtx;
6853 :
6854 9509 : case LT:
6855 9509 : case UNLT:
6856 9509 : return (known_results & CMP_LT) ? const_true_rtx : const0_rtx;
6857 8617 : case GE:
6858 8617 : case UNGE:
6859 8617 : return (known_results & CMP_LT) ? const0_rtx : const_true_rtx;
6860 :
6861 12714 : case GT:
6862 12714 : case UNGT:
6863 12714 : return (known_results & CMP_GT) ? const_true_rtx : const0_rtx;
6864 14767 : case LE:
6865 14767 : case UNLE:
6866 14767 : return (known_results & CMP_GT) ? const0_rtx : const_true_rtx;
6867 :
6868 24394 : case LTU:
6869 24394 : return (known_results & CMP_LTU) ? const_true_rtx : const0_rtx;
6870 8828 : case GEU:
6871 8828 : return (known_results & CMP_LTU) ? const0_rtx : const_true_rtx;
6872 :
6873 47856 : case GTU:
6874 47856 : return (known_results & CMP_GTU) ? const_true_rtx : const0_rtx;
6875 10850 : case LEU:
6876 10850 : return (known_results & CMP_GTU) ? const0_rtx : const_true_rtx;
6877 :
6878 0 : case ORDERED:
6879 0 : return const_true_rtx;
6880 8 : case UNORDERED:
6881 8 : return const0_rtx;
6882 0 : default:
6883 0 : gcc_unreachable ();
6884 : }
6885 : }
6886 :
6887 : /* Check if the given comparison (done in the given MODE) is actually
6888 : a tautology or a contradiction. If the mode is VOIDmode, the
6889 : comparison is done in "infinite precision". If no simplification
6890 : is possible, this function returns zero. Otherwise, it returns
6891 : either const_true_rtx or const0_rtx. */
6892 :
6893 : rtx
6894 130303601 : simplify_const_relational_operation (enum rtx_code code,
6895 : machine_mode mode,
6896 : rtx op0, rtx op1)
6897 : {
6898 137268625 : rtx tem;
6899 137268625 : rtx trueop0;
6900 137268625 : rtx trueop1;
6901 :
6902 137268625 : gcc_assert (mode != VOIDmode
6903 : || (GET_MODE (op0) == VOIDmode
6904 : && GET_MODE (op1) == VOIDmode));
6905 :
6906 : /* We only handle MODE_CC comparisons that are COMPARE against zero. */
6907 137268625 : if (GET_MODE_CLASS (mode) == MODE_CC
6908 44423357 : && (op1 != const0_rtx
6909 44423357 : || GET_CODE (op0) != COMPARE))
6910 : return NULL_RTX;
6911 :
6912 : /* If op0 is a compare, extract the comparison arguments from it. */
6913 106847427 : if (GET_CODE (op0) == COMPARE && op1 == const0_rtx)
6914 : {
6915 14002159 : op1 = XEXP (op0, 1);
6916 14002159 : op0 = XEXP (op0, 0);
6917 :
6918 14002159 : if (GET_MODE (op0) != VOIDmode)
6919 13847213 : mode = GET_MODE (op0);
6920 154946 : else if (GET_MODE (op1) != VOIDmode)
6921 123133 : mode = GET_MODE (op1);
6922 : else
6923 : return 0;
6924 : }
6925 :
6926 : /* We can't simplify MODE_CC values since we don't know what the
6927 : actual comparison is. */
6928 106815614 : if (GET_MODE_CLASS (GET_MODE (op0)) == MODE_CC)
6929 : return 0;
6930 :
6931 : /* Make sure the constant is second. */
6932 106815614 : if (swap_commutative_operands_p (op0, op1))
6933 : {
6934 3256032 : std::swap (op0, op1);
6935 3256032 : code = swap_condition (code);
6936 : }
6937 :
6938 106815614 : trueop0 = avoid_constant_pool_reference (op0);
6939 106815614 : trueop1 = avoid_constant_pool_reference (op1);
6940 :
6941 : /* For integer comparisons of A and B maybe we can simplify A - B and can
6942 : then simplify a comparison of that with zero. If A and B are both either
6943 : a register or a CONST_INT, this can't help; testing for these cases will
6944 : prevent infinite recursion here and speed things up.
6945 :
6946 : We can only do this for EQ and NE comparisons as otherwise we may
6947 : lose or introduce overflow which we cannot disregard as undefined as
6948 : we do not know the signedness of the operation on either the left or
6949 : the right hand side of the comparison. */
6950 :
6951 106815614 : if (INTEGRAL_MODE_P (mode)
6952 104243244 : && trueop1 != CONST0_RTX (mode)
6953 53536419 : && (code == EQ || code == NE)
6954 34002993 : && ! ((REG_P (op0)
6955 9979780 : || CONST_SCALAR_INT_P (trueop0)
6956 9951198 : || CONST_VECTOR_P (trueop0))
6957 24051815 : && (REG_P (op1)
6958 14357970 : || CONST_SCALAR_INT_P (trueop1)
6959 3320676 : || CONST_VECTOR_P (trueop1)))
6960 13269624 : && (tem = simplify_binary_operation (MINUS, mode, op0, op1)) != 0
6961 : /* We cannot do this if tem is a nonzero address. */
6962 6965026 : && ! nonzero_address_p (tem))
6963 6965024 : return simplify_const_relational_operation (signed_condition (code),
6964 6965024 : mode, tem, CONST0_RTX (mode));
6965 :
6966 99850590 : if (! HONOR_NANS (mode) && code == ORDERED)
6967 0 : return const_true_rtx;
6968 :
6969 99850590 : if (! HONOR_NANS (mode) && code == UNORDERED)
6970 8 : return const0_rtx;
6971 :
6972 : /* For modes without NaNs, if the two operands are equal, we know the
6973 : result except if they have side-effects. Even with NaNs we know
6974 : the result of unordered comparisons and, if signaling NaNs are
6975 : irrelevant, also the result of LT/GT/LTGT. */
6976 99850582 : if ((! HONOR_NANS (trueop0)
6977 2086078 : || code == UNEQ || code == UNLE || code == UNGE
6978 : || ((code == LT || code == GT || code == LTGT)
6979 829980 : && ! HONOR_SNANS (trueop0)))
6980 98699641 : && rtx_equal_p (trueop0, trueop1)
6981 100357113 : && ! side_effects_p (trueop0))
6982 506442 : return comparison_result (code, CMP_EQ);
6983 :
6984 : /* If the operands are floating-point constants, see if we can fold
6985 : the result. */
6986 99344140 : if (CONST_DOUBLE_AS_FLOAT_P (trueop0)
6987 1283 : && CONST_DOUBLE_AS_FLOAT_P (trueop1)
6988 1283 : && SCALAR_FLOAT_MODE_P (GET_MODE (trueop0)))
6989 : {
6990 1283 : const REAL_VALUE_TYPE *d0 = CONST_DOUBLE_REAL_VALUE (trueop0);
6991 1283 : const REAL_VALUE_TYPE *d1 = CONST_DOUBLE_REAL_VALUE (trueop1);
6992 :
6993 : /* Comparisons are unordered iff at least one of the values is NaN. */
6994 1283 : if (REAL_VALUE_ISNAN (*d0) || REAL_VALUE_ISNAN (*d1))
6995 167 : switch (code)
6996 : {
6997 0 : case UNEQ:
6998 0 : case UNLT:
6999 0 : case UNGT:
7000 0 : case UNLE:
7001 0 : case UNGE:
7002 0 : case NE:
7003 0 : case UNORDERED:
7004 0 : return const_true_rtx;
7005 167 : case EQ:
7006 167 : case LT:
7007 167 : case GT:
7008 167 : case LE:
7009 167 : case GE:
7010 167 : case LTGT:
7011 167 : case ORDERED:
7012 167 : return const0_rtx;
7013 : default:
7014 : return 0;
7015 : }
7016 :
7017 1201 : return comparison_result (code,
7018 1201 : (real_equal (d0, d1) ? CMP_EQ :
7019 1201 : real_less (d0, d1) ? CMP_LT : CMP_GT));
7020 : }
7021 :
7022 : /* Otherwise, see if the operands are both integers. */
7023 99342857 : if ((GET_MODE_CLASS (mode) == MODE_INT || mode == VOIDmode)
7024 96341115 : && CONST_SCALAR_INT_P (trueop0) && CONST_SCALAR_INT_P (trueop1))
7025 : {
7026 : /* It would be nice if we really had a mode here. However, the
7027 : largest int representable on the target is as good as
7028 : infinite. */
7029 203395 : machine_mode cmode = (mode == VOIDmode) ? MAX_MODE_INT : mode;
7030 203395 : rtx_mode_t ptrueop0 = rtx_mode_t (trueop0, cmode);
7031 203395 : rtx_mode_t ptrueop1 = rtx_mode_t (trueop1, cmode);
7032 :
7033 203395 : if (wi::eq_p (ptrueop0, ptrueop1))
7034 0 : return comparison_result (code, CMP_EQ);
7035 : else
7036 : {
7037 203395 : int cr = wi::lts_p (ptrueop0, ptrueop1) ? CMP_LT : CMP_GT;
7038 203395 : cr |= wi::ltu_p (ptrueop0, ptrueop1) ? CMP_LTU : CMP_GTU;
7039 203395 : return comparison_result (code, cr);
7040 : }
7041 : }
7042 :
7043 : /* Optimize comparisons with upper and lower bounds. */
7044 99139462 : scalar_int_mode int_mode;
7045 99139462 : if (CONST_INT_P (trueop1)
7046 69358673 : && is_a <scalar_int_mode> (mode, &int_mode)
7047 69358673 : && HWI_COMPUTABLE_MODE_P (int_mode)
7048 168021829 : && !side_effects_p (trueop0))
7049 : {
7050 68732069 : int sign;
7051 68732069 : unsigned HOST_WIDE_INT nonzero = nonzero_bits (trueop0, int_mode);
7052 68732069 : HOST_WIDE_INT val = INTVAL (trueop1);
7053 68732069 : HOST_WIDE_INT mmin, mmax;
7054 :
7055 68732069 : if (code == GEU
7056 68732069 : || code == LEU
7057 65451912 : || code == GTU
7058 65451912 : || code == LTU)
7059 : sign = 0;
7060 : else
7061 68732069 : sign = 1;
7062 :
7063 : /* Get a reduced range if the sign bit is zero. */
7064 68732069 : if (nonzero <= (GET_MODE_MASK (int_mode) >> 1))
7065 : {
7066 6770985 : mmin = 0;
7067 6770985 : mmax = nonzero;
7068 : }
7069 : else
7070 : {
7071 61961084 : rtx mmin_rtx, mmax_rtx;
7072 61961084 : get_mode_bounds (int_mode, sign, int_mode, &mmin_rtx, &mmax_rtx);
7073 :
7074 61961084 : mmin = INTVAL (mmin_rtx);
7075 61961084 : mmax = INTVAL (mmax_rtx);
7076 61961084 : if (sign)
7077 : {
7078 55822859 : unsigned int sign_copies
7079 55822859 : = num_sign_bit_copies (trueop0, int_mode);
7080 :
7081 55822859 : mmin >>= (sign_copies - 1);
7082 55822859 : mmax >>= (sign_copies - 1);
7083 : }
7084 : }
7085 :
7086 68732069 : switch (code)
7087 : {
7088 : /* x >= y is always true for y <= mmin, always false for y > mmax. */
7089 568532 : case GEU:
7090 568532 : if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
7091 12259 : return const_true_rtx;
7092 556273 : if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
7093 49 : return const0_rtx;
7094 : break;
7095 928757 : case GE:
7096 928757 : if (val <= mmin)
7097 2350 : return const_true_rtx;
7098 926407 : if (val > mmax)
7099 0 : return const0_rtx;
7100 : break;
7101 :
7102 : /* x <= y is always true for y >= mmax, always false for y < mmin. */
7103 2711625 : case LEU:
7104 2711625 : if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
7105 15273 : return const_true_rtx;
7106 2696352 : if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
7107 0 : return const0_rtx;
7108 : break;
7109 2273305 : case LE:
7110 2273305 : if (val >= mmax)
7111 463 : return const_true_rtx;
7112 2272842 : if (val < mmin)
7113 0 : return const0_rtx;
7114 : break;
7115 :
7116 25624508 : case EQ:
7117 : /* x == y is always false for y out of range. */
7118 25624508 : if (val < mmin || val > mmax)
7119 431 : return const0_rtx;
7120 : break;
7121 :
7122 : /* x > y is always false for y >= mmax, always true for y < mmin. */
7123 2577787 : case GTU:
7124 2577787 : if ((unsigned HOST_WIDE_INT) val >= (unsigned HOST_WIDE_INT) mmax)
7125 129556 : return const0_rtx;
7126 2448231 : if ((unsigned HOST_WIDE_INT) val < (unsigned HOST_WIDE_INT) mmin)
7127 0 : return const_true_rtx;
7128 : break;
7129 1776183 : case GT:
7130 1776183 : if (val >= mmax)
7131 335 : return const0_rtx;
7132 1775848 : if (val < mmin)
7133 5 : return const_true_rtx;
7134 : break;
7135 :
7136 : /* x < y is always false for y <= mmin, always true for y > mmax. */
7137 848949 : case LTU:
7138 848949 : if ((unsigned HOST_WIDE_INT) val <= (unsigned HOST_WIDE_INT) mmin)
7139 2951 : return const0_rtx;
7140 845998 : if ((unsigned HOST_WIDE_INT) val > (unsigned HOST_WIDE_INT) mmax)
7141 87437 : return const_true_rtx;
7142 : break;
7143 1115010 : case LT:
7144 1115010 : if (val <= mmin)
7145 2308 : return const0_rtx;
7146 1112702 : if (val > mmax)
7147 3478 : return const_true_rtx;
7148 : break;
7149 :
7150 30307413 : case NE:
7151 : /* x != y is always true for y out of range. */
7152 30307413 : if (val < mmin || val > mmax)
7153 128 : return const_true_rtx;
7154 : break;
7155 :
7156 : default:
7157 : break;
7158 : }
7159 : }
7160 :
7161 : /* Optimize integer comparisons with zero. */
7162 98882439 : if (is_a <scalar_int_mode> (mode, &int_mode)
7163 95923778 : && trueop1 == const0_rtx
7164 49971527 : && !side_effects_p (trueop0))
7165 : {
7166 : /* Some addresses are known to be nonzero. We don't know
7167 : their sign, but equality comparisons are known. */
7168 49819157 : if (nonzero_address_p (trueop0))
7169 : {
7170 532 : if (code == EQ || code == LEU)
7171 251 : return const0_rtx;
7172 281 : if (code == NE || code == GTU)
7173 281 : return const_true_rtx;
7174 : }
7175 :
7176 : /* See if the first operand is an IOR with a constant. If so, we
7177 : may be able to determine the result of this comparison. */
7178 49818625 : if (GET_CODE (op0) == IOR)
7179 : {
7180 628947 : rtx inner_const = avoid_constant_pool_reference (XEXP (op0, 1));
7181 628947 : if (CONST_INT_P (inner_const) && inner_const != const0_rtx)
7182 : {
7183 290 : int sign_bitnum = GET_MODE_PRECISION (int_mode) - 1;
7184 580 : int has_sign = (HOST_BITS_PER_WIDE_INT >= sign_bitnum
7185 290 : && (UINTVAL (inner_const)
7186 290 : & (HOST_WIDE_INT_1U
7187 : << sign_bitnum)));
7188 :
7189 290 : switch (code)
7190 : {
7191 : case EQ:
7192 : case LEU:
7193 : return const0_rtx;
7194 4 : case NE:
7195 4 : case GTU:
7196 4 : return const_true_rtx;
7197 70 : case LT:
7198 70 : case LE:
7199 70 : if (has_sign)
7200 2 : return const_true_rtx;
7201 : break;
7202 210 : case GT:
7203 210 : case GE:
7204 210 : if (has_sign)
7205 : return const0_rtx;
7206 : break;
7207 : default:
7208 : break;
7209 : }
7210 : }
7211 : }
7212 : }
7213 :
7214 : /* Optimize comparison of ABS with zero. */
7215 50309056 : if (trueop1 == CONST0_RTX (mode) && !side_effects_p (trueop0)
7216 149038076 : && (GET_CODE (trueop0) == ABS
7217 50155790 : || (GET_CODE (trueop0) == FLOAT_EXTEND
7218 38 : && GET_CODE (XEXP (trueop0, 0)) == ABS)))
7219 : {
7220 583 : switch (code)
7221 : {
7222 60 : case LT:
7223 : /* Optimize abs(x) < 0.0. */
7224 60 : if (!INTEGRAL_MODE_P (mode) && !HONOR_SNANS (mode))
7225 0 : return const0_rtx;
7226 : break;
7227 :
7228 42 : case GE:
7229 : /* Optimize abs(x) >= 0.0. */
7230 42 : if (!INTEGRAL_MODE_P (mode) && !HONOR_NANS (mode))
7231 0 : return const_true_rtx;
7232 : break;
7233 :
7234 0 : case UNGE:
7235 : /* Optimize ! (abs(x) < 0.0). */
7236 0 : return const_true_rtx;
7237 :
7238 : default:
7239 : break;
7240 : }
7241 : }
7242 :
7243 : return 0;
7244 : }
7245 :
7246 : /* Recognize expressions of the form (X CMP 0) ? VAL : OP (X)
7247 : where OP is CLZ or CTZ and VAL is the value from CLZ_DEFINED_VALUE_AT_ZERO
7248 : or CTZ_DEFINED_VALUE_AT_ZERO respectively and return OP (X) if the expression
7249 : can be simplified to that or NULL_RTX if not.
7250 : Assume X is compared against zero with CMP_CODE and the true
7251 : arm is TRUE_VAL and the false arm is FALSE_VAL. */
7252 :
7253 : rtx
7254 30893382 : simplify_context::simplify_cond_clz_ctz (rtx x, rtx_code cmp_code,
7255 : rtx true_val, rtx false_val)
7256 : {
7257 30893382 : if (cmp_code != EQ && cmp_code != NE)
7258 : return NULL_RTX;
7259 :
7260 : /* Result on X == 0 and X !=0 respectively. */
7261 22348426 : rtx on_zero, on_nonzero;
7262 22348426 : if (cmp_code == EQ)
7263 : {
7264 : on_zero = true_val;
7265 : on_nonzero = false_val;
7266 : }
7267 : else
7268 : {
7269 11954399 : on_zero = false_val;
7270 11954399 : on_nonzero = true_val;
7271 : }
7272 :
7273 22348426 : rtx_code op_code = GET_CODE (on_nonzero);
7274 22348426 : if ((op_code != CLZ && op_code != CTZ)
7275 1961 : || !rtx_equal_p (XEXP (on_nonzero, 0), x)
7276 22349452 : || !CONST_INT_P (on_zero))
7277 22348124 : return NULL_RTX;
7278 :
7279 302 : HOST_WIDE_INT op_val;
7280 302 : scalar_int_mode mode ATTRIBUTE_UNUSED
7281 302 : = as_a <scalar_int_mode> (GET_MODE (XEXP (on_nonzero, 0)));
7282 0 : if (((op_code == CLZ && CLZ_DEFINED_VALUE_AT_ZERO (mode, op_val))
7283 604 : || (op_code == CTZ && CTZ_DEFINED_VALUE_AT_ZERO (mode, op_val)))
7284 326 : && op_val == INTVAL (on_zero))
7285 : return on_nonzero;
7286 :
7287 : return NULL_RTX;
7288 : }
7289 :
7290 : /* Try to simplify X given that it appears within operand OP of a
7291 : VEC_MERGE operation whose mask is MASK. X need not use the same
7292 : vector mode as the VEC_MERGE, but it must have the same number of
7293 : elements.
7294 :
7295 : Return the simplified X on success, otherwise return NULL_RTX. */
7296 :
7297 : rtx
7298 1654810 : simplify_context::simplify_merge_mask (rtx x, rtx mask, int op)
7299 : {
7300 1654810 : gcc_assert (VECTOR_MODE_P (GET_MODE (x)));
7301 3309620 : poly_uint64 nunits = GET_MODE_NUNITS (GET_MODE (x));
7302 1654810 : if (GET_CODE (x) == VEC_MERGE && rtx_equal_p (XEXP (x, 2), mask))
7303 : {
7304 5484 : if (side_effects_p (XEXP (x, 1 - op)))
7305 : return NULL_RTX;
7306 :
7307 5260 : return XEXP (x, op);
7308 : }
7309 1649326 : if (UNARY_P (x)
7310 193970 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
7311 1707134 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits))
7312 : {
7313 24337 : rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
7314 24337 : if (top0)
7315 448 : return simplify_gen_unary (GET_CODE (x), GET_MODE (x), top0,
7316 448 : GET_MODE (XEXP (x, 0)));
7317 : }
7318 1648878 : if (BINARY_P (x)
7319 205777 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
7320 411248 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
7321 180396 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
7322 1935784 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits))
7323 : {
7324 143453 : rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
7325 143453 : rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
7326 143453 : if (top0 || top1)
7327 : {
7328 952 : if (COMPARISON_P (x))
7329 0 : return simplify_gen_relational (GET_CODE (x), GET_MODE (x),
7330 0 : GET_MODE (XEXP (x, 0)) != VOIDmode
7331 : ? GET_MODE (XEXP (x, 0))
7332 0 : : GET_MODE (XEXP (x, 1)),
7333 : top0 ? top0 : XEXP (x, 0),
7334 0 : top1 ? top1 : XEXP (x, 1));
7335 : else
7336 952 : return simplify_gen_binary (GET_CODE (x), GET_MODE (x),
7337 : top0 ? top0 : XEXP (x, 0),
7338 952 : top1 ? top1 : XEXP (x, 1));
7339 : }
7340 : }
7341 1647926 : if (GET_RTX_CLASS (GET_CODE (x)) == RTX_TERNARY
7342 35690 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 0)))
7343 71380 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 0))), nunits)
7344 35690 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 1)))
7345 71380 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 1))), nunits)
7346 35690 : && VECTOR_MODE_P (GET_MODE (XEXP (x, 2)))
7347 1666106 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (x, 2))), nunits))
7348 : {
7349 9090 : rtx top0 = simplify_merge_mask (XEXP (x, 0), mask, op);
7350 9090 : rtx top1 = simplify_merge_mask (XEXP (x, 1), mask, op);
7351 9090 : rtx top2 = simplify_merge_mask (XEXP (x, 2), mask, op);
7352 9090 : if (top0 || top1 || top2)
7353 448 : return simplify_gen_ternary (GET_CODE (x), GET_MODE (x),
7354 448 : GET_MODE (XEXP (x, 0)),
7355 : top0 ? top0 : XEXP (x, 0),
7356 : top1 ? top1 : XEXP (x, 1),
7357 448 : top2 ? top2 : XEXP (x, 2));
7358 : }
7359 : return NULL_RTX;
7360 : }
7361 :
7362 :
7363 : /* Simplify CODE, an operation with result mode MODE and three operands,
7364 : OP0, OP1, and OP2. OP0_MODE was the mode of OP0 before it became
7365 : a constant. Return 0 if no simplifications is possible. */
7366 :
7367 : rtx
7368 42758949 : simplify_context::simplify_ternary_operation (rtx_code code, machine_mode mode,
7369 : machine_mode op0_mode,
7370 : rtx op0, rtx op1, rtx op2)
7371 : {
7372 42758949 : bool any_change = false;
7373 42758949 : rtx tem, trueop2;
7374 42758949 : scalar_int_mode int_mode, int_op0_mode;
7375 42758949 : unsigned int n_elts;
7376 :
7377 42758949 : switch (code)
7378 : {
7379 337143 : case FMA:
7380 : /* Simplify negations around the multiplication. */
7381 : /* -a * -b + c => a * b + c. */
7382 337143 : if (GET_CODE (op0) == NEG)
7383 : {
7384 81168 : tem = simplify_unary_operation (NEG, mode, op1, mode);
7385 81168 : if (tem)
7386 259 : op1 = tem, op0 = XEXP (op0, 0), any_change = true;
7387 : }
7388 255975 : else if (GET_CODE (op1) == NEG)
7389 : {
7390 1086 : tem = simplify_unary_operation (NEG, mode, op0, mode);
7391 1086 : if (tem)
7392 0 : op0 = tem, op1 = XEXP (op1, 0), any_change = true;
7393 : }
7394 :
7395 : /* Canonicalize the two multiplication operands. */
7396 : /* a * -b + c => -b * a + c. */
7397 337143 : if (swap_commutative_operands_p (op0, op1))
7398 : std::swap (op0, op1), any_change = true;
7399 :
7400 308796 : if (any_change)
7401 28597 : return gen_rtx_FMA (mode, op0, op1, op2);
7402 : return NULL_RTX;
7403 :
7404 672485 : case SIGN_EXTRACT:
7405 672485 : case ZERO_EXTRACT:
7406 672485 : if (CONST_INT_P (op0)
7407 17964 : && CONST_INT_P (op1)
7408 17964 : && CONST_INT_P (op2)
7409 42758981 : && is_a <scalar_int_mode> (mode, &int_mode)
7410 32 : && INTVAL (op1) + INTVAL (op2) <= GET_MODE_PRECISION (int_mode)
7411 672517 : && HWI_COMPUTABLE_MODE_P (int_mode))
7412 : {
7413 : /* Extracting a bit-field from a constant */
7414 32 : unsigned HOST_WIDE_INT val = UINTVAL (op0);
7415 32 : HOST_WIDE_INT op1val = INTVAL (op1);
7416 32 : HOST_WIDE_INT op2val = INTVAL (op2);
7417 32 : if (!BITS_BIG_ENDIAN)
7418 32 : val >>= op2val;
7419 : else if (is_a <scalar_int_mode> (op0_mode, &int_op0_mode))
7420 : val >>= GET_MODE_PRECISION (int_op0_mode) - op2val - op1val;
7421 : else
7422 : /* Not enough information to calculate the bit position. */
7423 : break;
7424 :
7425 32 : if (HOST_BITS_PER_WIDE_INT != op1val)
7426 : {
7427 : /* First zero-extend. */
7428 29 : val &= (HOST_WIDE_INT_1U << op1val) - 1;
7429 : /* If desired, propagate sign bit. */
7430 29 : if (code == SIGN_EXTRACT
7431 5 : && (val & (HOST_WIDE_INT_1U << (op1val - 1)))
7432 5 : != 0)
7433 2 : val |= ~ ((HOST_WIDE_INT_1U << op1val) - 1);
7434 : }
7435 :
7436 32 : return gen_int_mode (val, int_mode);
7437 : }
7438 : break;
7439 :
7440 40954274 : case IF_THEN_ELSE:
7441 40954274 : if (CONST_INT_P (op0))
7442 282480 : return op0 != const0_rtx ? op1 : op2;
7443 :
7444 : /* Convert c ? a : a into "a". */
7445 40763972 : if (rtx_equal_p (op1, op2) && ! side_effects_p (op0))
7446 : return op1;
7447 :
7448 : /* Convert a != b ? a : b into "a". */
7449 40760671 : if (GET_CODE (op0) == NE
7450 15796376 : && ! side_effects_p (op0)
7451 15754982 : && ! HONOR_NANS (mode)
7452 15749014 : && ! HONOR_SIGNED_ZEROS (mode)
7453 56509685 : && ((rtx_equal_p (XEXP (op0, 0), op1)
7454 106029 : && rtx_equal_p (XEXP (op0, 1), op2))
7455 15748675 : || (rtx_equal_p (XEXP (op0, 0), op2)
7456 5231 : && rtx_equal_p (XEXP (op0, 1), op1))))
7457 546 : return op1;
7458 :
7459 : /* Convert a == b ? a : b into "b". */
7460 40760125 : if (GET_CODE (op0) == EQ
7461 12954507 : && ! side_effects_p (op0)
7462 12928423 : && ! HONOR_NANS (mode)
7463 12790972 : && ! HONOR_SIGNED_ZEROS (mode)
7464 53551097 : && ((rtx_equal_p (XEXP (op0, 0), op1)
7465 14319 : && rtx_equal_p (XEXP (op0, 1), op2))
7466 12790962 : || (rtx_equal_p (XEXP (op0, 0), op2)
7467 7240 : && rtx_equal_p (XEXP (op0, 1), op1))))
7468 26 : return op2;
7469 :
7470 : /* Convert a != 0 ? -a : 0 into "-a". */
7471 40760099 : if (GET_CODE (op0) == NE
7472 15795830 : && ! side_effects_p (op0)
7473 15754436 : && ! HONOR_NANS (mode)
7474 15748468 : && ! HONOR_SIGNED_ZEROS (mode)
7475 15748468 : && XEXP (op0, 1) == CONST0_RTX (mode)
7476 11948321 : && op2 == CONST0_RTX (mode)
7477 181872 : && GET_CODE (op1) == NEG
7478 40760151 : && rtx_equal_p (XEXP (op0, 0), XEXP (op1, 0)))
7479 : return op1;
7480 :
7481 : /* Convert a == 0 ? 0 : -a into "-a". */
7482 40760090 : if (GET_CODE (op0) == EQ
7483 12954481 : && ! side_effects_p (op0)
7484 12928397 : && ! HONOR_NANS (mode)
7485 12790946 : && ! HONOR_SIGNED_ZEROS (mode)
7486 12790946 : && op1 == CONST0_RTX (mode)
7487 31783 : && XEXP (op0, 1) == CONST0_RTX (mode)
7488 14279 : && GET_CODE (op2) == NEG
7489 40760096 : && rtx_equal_p (XEXP (op0, 0), XEXP (op2, 0)))
7490 : return op2;
7491 :
7492 : /* Convert (!c) != {0,...,0} ? a : b into
7493 : c != {0,...,0} ? b : a for vector modes. */
7494 40760084 : if (VECTOR_MODE_P (GET_MODE (op1))
7495 14895 : && GET_CODE (op0) == NE
7496 450 : && GET_CODE (XEXP (op0, 0)) == NOT
7497 0 : && GET_CODE (XEXP (op0, 1)) == CONST_VECTOR)
7498 : {
7499 0 : rtx cv = XEXP (op0, 1);
7500 0 : int nunits;
7501 0 : bool ok = true;
7502 0 : if (!CONST_VECTOR_NUNITS (cv).is_constant (&nunits))
7503 : ok = false;
7504 : else
7505 0 : for (int i = 0; i < nunits; ++i)
7506 0 : if (CONST_VECTOR_ELT (cv, i) != const0_rtx)
7507 : {
7508 : ok = false;
7509 : break;
7510 : }
7511 0 : if (ok)
7512 : {
7513 0 : rtx new_op0 = gen_rtx_NE (GET_MODE (op0),
7514 : XEXP (XEXP (op0, 0), 0),
7515 : XEXP (op0, 1));
7516 0 : rtx retval = gen_rtx_IF_THEN_ELSE (mode, new_op0, op2, op1);
7517 0 : return retval;
7518 : }
7519 : }
7520 :
7521 : /* Convert x == 0 ? N : clz (x) into clz (x) when
7522 : CLZ_DEFINED_VALUE_AT_ZERO is defined to N for the mode of x.
7523 : Similarly for ctz (x). */
7524 40759086 : if (COMPARISON_P (op0) && !side_effects_p (op0)
7525 81418760 : && XEXP (op0, 1) == const0_rtx)
7526 : {
7527 30893382 : rtx simplified
7528 30893382 : = simplify_cond_clz_ctz (XEXP (op0, 0), GET_CODE (op0),
7529 : op1, op2);
7530 30893382 : if (simplified)
7531 : return simplified;
7532 : }
7533 :
7534 40760084 : if (COMPARISON_P (op0) && ! side_effects_p (op0))
7535 : {
7536 81409986 : machine_mode cmp_mode = (GET_MODE (XEXP (op0, 0)) == VOIDmode
7537 40658676 : ? GET_MODE (XEXP (op0, 1))
7538 : : GET_MODE (XEXP (op0, 0)));
7539 40658676 : rtx temp;
7540 :
7541 : /* Look for happy constants in op1 and op2. */
7542 40658676 : if (CONST_INT_P (op1) && CONST_INT_P (op2))
7543 : {
7544 208663 : HOST_WIDE_INT t = INTVAL (op1);
7545 208663 : HOST_WIDE_INT f = INTVAL (op2);
7546 :
7547 208663 : if (t == STORE_FLAG_VALUE && f == 0)
7548 52619 : code = GET_CODE (op0);
7549 156044 : else if (t == 0 && f == STORE_FLAG_VALUE)
7550 : {
7551 31183 : enum rtx_code tmp;
7552 31183 : tmp = reversed_comparison_code (op0, NULL);
7553 31183 : if (tmp == UNKNOWN)
7554 : break;
7555 : code = tmp;
7556 : }
7557 : else
7558 : break;
7559 :
7560 78419 : return simplify_gen_relational (code, mode, cmp_mode,
7561 78419 : XEXP (op0, 0), XEXP (op0, 1));
7562 : }
7563 :
7564 40450013 : temp = simplify_relational_operation (GET_CODE (op0), op0_mode,
7565 : cmp_mode, XEXP (op0, 0),
7566 : XEXP (op0, 1));
7567 :
7568 : /* See if any simplifications were possible. */
7569 40450013 : if (temp)
7570 : {
7571 6910 : if (CONST_INT_P (temp))
7572 868 : return temp == const0_rtx ? op2 : op1;
7573 6087 : else if (temp)
7574 6087 : return gen_rtx_IF_THEN_ELSE (mode, temp, op1, op2);
7575 : }
7576 : }
7577 : break;
7578 :
7579 795047 : case VEC_MERGE:
7580 795047 : gcc_assert (GET_MODE (op0) == mode);
7581 795047 : gcc_assert (GET_MODE (op1) == mode);
7582 795047 : gcc_assert (VECTOR_MODE_P (mode));
7583 795047 : trueop2 = avoid_constant_pool_reference (op2);
7584 795047 : if (CONST_INT_P (trueop2)
7585 1269898 : && GET_MODE_NUNITS (mode).is_constant (&n_elts))
7586 : {
7587 474851 : unsigned HOST_WIDE_INT sel = UINTVAL (trueop2);
7588 474851 : unsigned HOST_WIDE_INT mask;
7589 474851 : if (n_elts == HOST_BITS_PER_WIDE_INT)
7590 : mask = -1;
7591 : else
7592 472397 : mask = (HOST_WIDE_INT_1U << n_elts) - 1;
7593 :
7594 474851 : if (!(sel & mask) && !side_effects_p (op0))
7595 : return op1;
7596 474414 : if ((sel & mask) == mask && !side_effects_p (op1))
7597 : return op0;
7598 :
7599 463666 : rtx trueop0 = avoid_constant_pool_reference (op0);
7600 463666 : rtx trueop1 = avoid_constant_pool_reference (op1);
7601 463666 : if (GET_CODE (trueop0) == CONST_VECTOR
7602 9240 : && GET_CODE (trueop1) == CONST_VECTOR)
7603 : {
7604 4804 : rtvec v = rtvec_alloc (n_elts);
7605 4804 : unsigned int i;
7606 :
7607 54262 : for (i = 0; i < n_elts; i++)
7608 44654 : RTVEC_ELT (v, i) = ((sel & (HOST_WIDE_INT_1U << i))
7609 44654 : ? CONST_VECTOR_ELT (trueop0, i)
7610 25070 : : CONST_VECTOR_ELT (trueop1, i));
7611 4804 : return gen_rtx_CONST_VECTOR (mode, v);
7612 : }
7613 :
7614 458862 : if (swap_commutative_operands_p (op0, op1)
7615 : /* Two operands have same precedence, then first bit of mask
7616 : select first operand. */
7617 458862 : || (!swap_commutative_operands_p (op1, op0) && !(sel & 1)))
7618 31469 : return simplify_gen_ternary (code, mode, mode, op1, op0,
7619 62938 : GEN_INT (~sel & mask));
7620 :
7621 : /* Replace (vec_merge (vec_merge a b m) c n) with (vec_merge b c n)
7622 : if no element from a appears in the result. */
7623 427393 : if (GET_CODE (op0) == VEC_MERGE)
7624 : {
7625 17218 : tem = avoid_constant_pool_reference (XEXP (op0, 2));
7626 17218 : if (CONST_INT_P (tem))
7627 : {
7628 1475 : unsigned HOST_WIDE_INT sel0 = UINTVAL (tem);
7629 1475 : if (!(sel & sel0 & mask) && !side_effects_p (XEXP (op0, 0)))
7630 104 : return simplify_gen_ternary (code, mode, mode,
7631 104 : XEXP (op0, 1), op1, op2);
7632 1371 : if (!(sel & ~sel0 & mask) && !side_effects_p (XEXP (op0, 1)))
7633 834 : return simplify_gen_ternary (code, mode, mode,
7634 834 : XEXP (op0, 0), op1, op2);
7635 : }
7636 : }
7637 426455 : if (GET_CODE (op1) == VEC_MERGE)
7638 : {
7639 588 : tem = avoid_constant_pool_reference (XEXP (op1, 2));
7640 588 : if (CONST_INT_P (tem))
7641 : {
7642 557 : unsigned HOST_WIDE_INT sel1 = UINTVAL (tem);
7643 557 : if (!(~sel & sel1 & mask) && !side_effects_p (XEXP (op1, 0)))
7644 526 : return simplify_gen_ternary (code, mode, mode,
7645 526 : op0, XEXP (op1, 1), op2);
7646 31 : if (!(~sel & ~sel1 & mask) && !side_effects_p (XEXP (op1, 1)))
7647 4 : return simplify_gen_ternary (code, mode, mode,
7648 4 : op0, XEXP (op1, 0), op2);
7649 : }
7650 : }
7651 :
7652 : /* Replace (vec_merge (vec_duplicate (vec_select a parallel (i))) a 1 << i)
7653 : with a. */
7654 425925 : if (GET_CODE (op0) == VEC_DUPLICATE
7655 142871 : && GET_CODE (XEXP (op0, 0)) == VEC_SELECT
7656 702 : && GET_CODE (XEXP (XEXP (op0, 0), 1)) == PARALLEL
7657 427329 : && known_eq (GET_MODE_NUNITS (GET_MODE (XEXP (op0, 0))), 1))
7658 : {
7659 634 : tem = XVECEXP ((XEXP (XEXP (op0, 0), 1)), 0, 0);
7660 634 : if (CONST_INT_P (tem) && CONST_INT_P (op2))
7661 : {
7662 634 : if (XEXP (XEXP (op0, 0), 0) == op1
7663 2 : && UINTVAL (op2) == HOST_WIDE_INT_1U << UINTVAL (tem))
7664 : return op1;
7665 : }
7666 : }
7667 : /* Replace (vec_merge (vec_duplicate (X)) (const_vector [A, B])
7668 : (const_int N))
7669 : with (vec_concat (X) (B)) if N == 1 or
7670 : (vec_concat (A) (X)) if N == 2. */
7671 425923 : if (GET_CODE (op0) == VEC_DUPLICATE
7672 142869 : && GET_CODE (op1) == CONST_VECTOR
7673 151570 : && known_eq (CONST_VECTOR_NUNITS (op1), 2)
7674 2344 : && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
7675 427095 : && IN_RANGE (sel, 1, 2))
7676 : {
7677 1170 : rtx newop0 = XEXP (op0, 0);
7678 1170 : rtx newop1 = CONST_VECTOR_ELT (op1, 2 - sel);
7679 1170 : if (sel == 2)
7680 123 : std::swap (newop0, newop1);
7681 1170 : return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
7682 : }
7683 : /* Replace (vec_merge (vec_duplicate x) (vec_concat (y) (z)) (const_int N))
7684 : with (vec_concat x z) if N == 1, or (vec_concat y x) if N == 2.
7685 : Only applies for vectors of two elements. */
7686 424753 : if (GET_CODE (op0) == VEC_DUPLICATE
7687 141699 : && GET_CODE (op1) == VEC_CONCAT
7688 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
7689 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
7690 424753 : && IN_RANGE (sel, 1, 2))
7691 : {
7692 0 : rtx newop0 = XEXP (op0, 0);
7693 0 : rtx newop1 = XEXP (op1, 2 - sel);
7694 0 : rtx otherop = XEXP (op1, sel - 1);
7695 0 : if (sel == 2)
7696 0 : std::swap (newop0, newop1);
7697 : /* Don't want to throw away the other part of the vec_concat if
7698 : it has side-effects. */
7699 0 : if (!side_effects_p (otherop))
7700 0 : return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
7701 : }
7702 :
7703 : /* Replace:
7704 :
7705 : (vec_merge:outer (vec_duplicate:outer x:inner)
7706 : (subreg:outer y:inner 0)
7707 : (const_int N))
7708 :
7709 : with (vec_concat:outer x:inner y:inner) if N == 1,
7710 : or (vec_concat:outer y:inner x:inner) if N == 2.
7711 :
7712 : Implicitly, this means we have a paradoxical subreg, but such
7713 : a check is cheap, so make it anyway.
7714 :
7715 : Only applies for vectors of two elements. */
7716 424753 : if (GET_CODE (op0) == VEC_DUPLICATE
7717 141699 : && GET_CODE (op1) == SUBREG
7718 44335 : && GET_MODE (op1) == GET_MODE (op0)
7719 44335 : && GET_MODE (SUBREG_REG (op1)) == GET_MODE (XEXP (op0, 0))
7720 0 : && paradoxical_subreg_p (op1)
7721 0 : && subreg_lowpart_p (op1)
7722 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
7723 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
7724 424753 : && IN_RANGE (sel, 1, 2))
7725 : {
7726 0 : rtx newop0 = XEXP (op0, 0);
7727 0 : rtx newop1 = SUBREG_REG (op1);
7728 0 : if (sel == 2)
7729 0 : std::swap (newop0, newop1);
7730 0 : return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
7731 : }
7732 :
7733 : /* Same as above but with switched operands:
7734 : Replace (vec_merge:outer (subreg:outer x:inner 0)
7735 : (vec_duplicate:outer y:inner)
7736 : (const_int N))
7737 :
7738 : with (vec_concat:outer x:inner y:inner) if N == 1,
7739 : or (vec_concat:outer y:inner x:inner) if N == 2. */
7740 424753 : if (GET_CODE (op1) == VEC_DUPLICATE
7741 29589 : && GET_CODE (op0) == SUBREG
7742 26567 : && GET_MODE (op0) == GET_MODE (op1)
7743 26567 : && GET_MODE (SUBREG_REG (op0)) == GET_MODE (XEXP (op1, 0))
7744 0 : && paradoxical_subreg_p (op0)
7745 0 : && subreg_lowpart_p (op0)
7746 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
7747 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
7748 424753 : && IN_RANGE (sel, 1, 2))
7749 : {
7750 0 : rtx newop0 = SUBREG_REG (op0);
7751 0 : rtx newop1 = XEXP (op1, 0);
7752 0 : if (sel == 2)
7753 0 : std::swap (newop0, newop1);
7754 0 : return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
7755 : }
7756 :
7757 : /* Replace (vec_merge (vec_duplicate x) (vec_duplicate y)
7758 : (const_int n))
7759 : with (vec_concat x y) or (vec_concat y x) depending on value
7760 : of N. */
7761 424753 : if (GET_CODE (op0) == VEC_DUPLICATE
7762 141699 : && GET_CODE (op1) == VEC_DUPLICATE
7763 198 : && known_eq (GET_MODE_NUNITS (GET_MODE (op0)), 2)
7764 0 : && known_eq (GET_MODE_NUNITS (GET_MODE (op1)), 2)
7765 424753 : && IN_RANGE (sel, 1, 2))
7766 : {
7767 0 : rtx newop0 = XEXP (op0, 0);
7768 0 : rtx newop1 = XEXP (op1, 0);
7769 0 : if (sel == 2)
7770 0 : std::swap (newop0, newop1);
7771 :
7772 0 : return simplify_gen_binary (VEC_CONCAT, mode, newop0, newop1);
7773 : }
7774 : }
7775 :
7776 744949 : if (rtx_equal_p (op0, op1)
7777 744949 : && !side_effects_p (op2) && !side_effects_p (op1))
7778 : return op0;
7779 :
7780 744656 : if (!side_effects_p (op2))
7781 : {
7782 740978 : rtx top0
7783 740978 : = may_trap_p (op0) ? NULL_RTX : simplify_merge_mask (op0, op2, 0);
7784 740978 : rtx top1
7785 740978 : = may_trap_p (op1) ? NULL_RTX : simplify_merge_mask (op1, op2, 1);
7786 740978 : if (top0 || top1)
7787 984 : return simplify_gen_ternary (code, mode, mode,
7788 : top0 ? top0 : op0,
7789 812 : top1 ? top1 : op1, op2);
7790 : }
7791 :
7792 : break;
7793 :
7794 0 : default:
7795 0 : gcc_unreachable ();
7796 : }
7797 :
7798 : return 0;
7799 : }
7800 :
7801 : /* Try to calculate NUM_BYTES bytes of the target memory image of X,
7802 : starting at byte FIRST_BYTE. Return true on success and add the
7803 : bytes to BYTES, such that each byte has BITS_PER_UNIT bits and such
7804 : that the bytes follow target memory order. Leave BYTES unmodified
7805 : on failure.
7806 :
7807 : MODE is the mode of X. The caller must reserve NUM_BYTES bytes in
7808 : BYTES before calling this function. */
7809 :
7810 : bool
7811 13276239 : native_encode_rtx (machine_mode mode, rtx x, vec<target_unit> &bytes,
7812 : unsigned int first_byte, unsigned int num_bytes)
7813 : {
7814 : /* Check the mode is sensible. */
7815 13276239 : gcc_assert (GET_MODE (x) == VOIDmode
7816 : ? is_a <scalar_int_mode> (mode)
7817 : : mode == GET_MODE (x));
7818 :
7819 13276239 : if (GET_CODE (x) == CONST_VECTOR)
7820 : {
7821 : /* CONST_VECTOR_ELT follows target memory order, so no shuffling
7822 : is necessary. The only complication is that MODE_VECTOR_BOOL
7823 : vectors can have several elements per byte. */
7824 988978 : unsigned int elt_bits = vector_element_size (GET_MODE_PRECISION (mode),
7825 : GET_MODE_NUNITS (mode));
7826 494489 : unsigned int elt = first_byte * BITS_PER_UNIT / elt_bits;
7827 494489 : if (elt_bits < BITS_PER_UNIT)
7828 : {
7829 : /* This is the only case in which elements can be smaller than
7830 : a byte. */
7831 0 : gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
7832 0 : auto mask = GET_MODE_MASK (GET_MODE_INNER (mode));
7833 0 : for (unsigned int i = 0; i < num_bytes; ++i)
7834 : {
7835 0 : target_unit value = 0;
7836 0 : for (unsigned int j = 0; j < BITS_PER_UNIT; j += elt_bits)
7837 : {
7838 0 : if (INTVAL (CONST_VECTOR_ELT (x, elt)))
7839 0 : value |= mask << j;
7840 0 : elt += 1;
7841 : }
7842 0 : bytes.quick_push (value);
7843 : }
7844 : return true;
7845 : }
7846 :
7847 494489 : unsigned int start = bytes.length ();
7848 494489 : unsigned int elt_bytes = GET_MODE_UNIT_SIZE (mode);
7849 : /* Make FIRST_BYTE relative to ELT. */
7850 494489 : first_byte %= elt_bytes;
7851 2580602 : while (num_bytes > 0)
7852 : {
7853 : /* Work out how many bytes we want from element ELT. */
7854 2086113 : unsigned int chunk_bytes = MIN (num_bytes, elt_bytes - first_byte);
7855 4172226 : if (!native_encode_rtx (GET_MODE_INNER (mode),
7856 : CONST_VECTOR_ELT (x, elt), bytes,
7857 : first_byte, chunk_bytes))
7858 : {
7859 0 : bytes.truncate (start);
7860 0 : return false;
7861 : }
7862 2086113 : elt += 1;
7863 2086113 : first_byte = 0;
7864 2086113 : num_bytes -= chunk_bytes;
7865 : }
7866 : return true;
7867 : }
7868 :
7869 : /* All subsequent cases are limited to scalars. */
7870 12781750 : scalar_mode smode;
7871 12812605 : if (!is_a <scalar_mode> (mode, &smode))
7872 : return false;
7873 :
7874 : /* Make sure that the region is in range. */
7875 12781750 : unsigned int end_byte = first_byte + num_bytes;
7876 12781750 : unsigned int mode_bytes = GET_MODE_SIZE (smode);
7877 12781750 : gcc_assert (end_byte <= mode_bytes);
7878 :
7879 12781750 : if (CONST_SCALAR_INT_P (x))
7880 : {
7881 : /* The target memory layout is affected by both BYTES_BIG_ENDIAN
7882 : and WORDS_BIG_ENDIAN. Use the subreg machinery to get the lsb
7883 : position of each byte. */
7884 12114119 : rtx_mode_t value (x, smode);
7885 12114119 : wide_int_ref value_wi (value);
7886 51786191 : for (unsigned int byte = first_byte; byte < end_byte; ++byte)
7887 : {
7888 : /* Always constant because the inputs are. */
7889 39672072 : unsigned int lsb
7890 39672072 : = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7891 : /* Operate directly on the encoding rather than using
7892 : wi::extract_uhwi, so that we preserve the sign or zero
7893 : extension for modes that are not a whole number of bits in
7894 : size. (Zero extension is only used for the combination of
7895 : innermode == BImode && STORE_FLAG_VALUE == 1). */
7896 39672072 : unsigned int elt = lsb / HOST_BITS_PER_WIDE_INT;
7897 39672072 : unsigned int shift = lsb % HOST_BITS_PER_WIDE_INT;
7898 39672072 : unsigned HOST_WIDE_INT uhwi = value_wi.elt (elt);
7899 39672072 : bytes.quick_push (uhwi >> shift);
7900 : }
7901 12114119 : return true;
7902 : }
7903 :
7904 667631 : if (CONST_DOUBLE_P (x))
7905 : {
7906 : /* real_to_target produces an array of integers in target memory order.
7907 : All integers before the last one have 32 bits; the last one may
7908 : have 32 bits or fewer, depending on whether the mode bitsize
7909 : is divisible by 32. Each of these integers is then laid out
7910 : in target memory as any other integer would be. */
7911 636776 : long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
7912 636776 : real_to_target (el32, CONST_DOUBLE_REAL_VALUE (x), smode);
7913 :
7914 : /* The (maximum) number of target bytes per element of el32. */
7915 636776 : unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
7916 636776 : gcc_assert (bytes_per_el32 != 0);
7917 :
7918 : /* Build up the integers in a similar way to the CONST_SCALAR_INT_P
7919 : handling above. */
7920 4359740 : for (unsigned int byte = first_byte; byte < end_byte; ++byte)
7921 : {
7922 3722964 : unsigned int index = byte / bytes_per_el32;
7923 3722964 : unsigned int subbyte = byte % bytes_per_el32;
7924 3722964 : unsigned int int_bytes = MIN (bytes_per_el32,
7925 : mode_bytes - index * bytes_per_el32);
7926 : /* Always constant because the inputs are. */
7927 3722964 : unsigned int lsb
7928 3722964 : = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
7929 3722964 : bytes.quick_push ((unsigned long) el32[index] >> lsb);
7930 : }
7931 636776 : return true;
7932 : }
7933 :
7934 30855 : if (GET_CODE (x) == CONST_FIXED)
7935 : {
7936 0 : for (unsigned int byte = first_byte; byte < end_byte; ++byte)
7937 : {
7938 : /* Always constant because the inputs are. */
7939 0 : unsigned int lsb
7940 0 : = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
7941 0 : unsigned HOST_WIDE_INT piece = CONST_FIXED_VALUE_LOW (x);
7942 0 : if (lsb >= HOST_BITS_PER_WIDE_INT)
7943 : {
7944 0 : lsb -= HOST_BITS_PER_WIDE_INT;
7945 0 : piece = CONST_FIXED_VALUE_HIGH (x);
7946 : }
7947 0 : bytes.quick_push (piece >> lsb);
7948 : }
7949 : return true;
7950 : }
7951 :
7952 : return false;
7953 : }
7954 :
7955 : /* Read a vector of mode MODE from the target memory image given by BYTES,
7956 : starting at byte FIRST_BYTE. The vector is known to be encodable using
7957 : NPATTERNS interleaved patterns with NELTS_PER_PATTERN elements each,
7958 : and BYTES is known to have enough bytes to supply NPATTERNS *
7959 : NELTS_PER_PATTERN vector elements. Each element of BYTES contains
7960 : BITS_PER_UNIT bits and the bytes are in target memory order.
7961 :
7962 : Return the vector on success, otherwise return NULL_RTX. */
7963 :
7964 : rtx
7965 231489 : native_decode_vector_rtx (machine_mode mode, const vec<target_unit> &bytes,
7966 : unsigned int first_byte, unsigned int npatterns,
7967 : unsigned int nelts_per_pattern)
7968 : {
7969 231489 : rtx_vector_builder builder (mode, npatterns, nelts_per_pattern);
7970 :
7971 462978 : unsigned int elt_bits = vector_element_size (GET_MODE_PRECISION (mode),
7972 : GET_MODE_NUNITS (mode));
7973 231489 : if (elt_bits < BITS_PER_UNIT)
7974 : {
7975 : /* This is the only case in which elements can be smaller than a byte.
7976 : Element 0 is always in the lsb of the containing byte. */
7977 0 : gcc_assert (GET_MODE_CLASS (mode) == MODE_VECTOR_BOOL);
7978 0 : for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7979 : {
7980 0 : unsigned int bit_index = first_byte * BITS_PER_UNIT + i * elt_bits;
7981 0 : unsigned int byte_index = bit_index / BITS_PER_UNIT;
7982 0 : unsigned int lsb = bit_index % BITS_PER_UNIT;
7983 0 : unsigned int value = bytes[byte_index] >> lsb;
7984 0 : builder.quick_push (gen_int_mode (value, GET_MODE_INNER (mode)));
7985 : }
7986 : }
7987 : else
7988 : {
7989 924740 : for (unsigned int i = 0; i < builder.encoded_nelts (); ++i)
7990 : {
7991 1386502 : rtx x = native_decode_rtx (GET_MODE_INNER (mode), bytes, first_byte);
7992 693251 : if (!x)
7993 0 : return NULL_RTX;
7994 693251 : builder.quick_push (x);
7995 693251 : first_byte += elt_bits / BITS_PER_UNIT;
7996 : }
7997 : }
7998 231489 : return builder.build ();
7999 231489 : }
8000 :
8001 : /* Extract a PRECISION-bit integer from bytes [FIRST_BYTE, FIRST_BYTE + SIZE)
8002 : of target memory image BYTES. */
8003 :
8004 : wide_int
8005 11173667 : native_decode_int (const vec<target_unit> &bytes, unsigned int first_byte,
8006 : unsigned int size, unsigned int precision)
8007 : {
8008 : /* Pull the bytes msb first, so that we can use simple
8009 : shift-and-insert wide_int operations. */
8010 11173667 : wide_int result (wi::zero (precision));
8011 51211317 : for (unsigned int i = 0; i < size; ++i)
8012 : {
8013 40037650 : unsigned int lsb = (size - i - 1) * BITS_PER_UNIT;
8014 : /* Always constant because the inputs are. */
8015 40037650 : unsigned int subbyte
8016 40037650 : = subreg_size_offset_from_lsb (1, size, lsb).to_constant ();
8017 40037650 : result <<= BITS_PER_UNIT;
8018 40037650 : result |= bytes[first_byte + subbyte];
8019 : }
8020 11173667 : return result;
8021 : }
8022 :
8023 : /* Read an rtx of mode MODE from the target memory image given by BYTES,
8024 : starting at byte FIRST_BYTE. Each element of BYTES contains BITS_PER_UNIT
8025 : bits and the bytes are in target memory order. The image has enough
8026 : values to specify all bytes of MODE.
8027 :
8028 : Return the rtx on success, otherwise return NULL_RTX. */
8029 :
8030 : rtx
8031 11472011 : native_decode_rtx (machine_mode mode, const vec<target_unit> &bytes,
8032 : unsigned int first_byte)
8033 : {
8034 11472011 : if (VECTOR_MODE_P (mode))
8035 : {
8036 : /* If we know at compile time how many elements there are,
8037 : pull each element directly from BYTES. */
8038 59434 : unsigned int nelts;
8039 118868 : if (GET_MODE_NUNITS (mode).is_constant (&nelts))
8040 59434 : return native_decode_vector_rtx (mode, bytes, first_byte, nelts, 1);
8041 : return NULL_RTX;
8042 : }
8043 :
8044 11412577 : scalar_int_mode imode;
8045 11412577 : if (is_a <scalar_int_mode> (mode, &imode)
8046 11173667 : && GET_MODE_PRECISION (imode) <= MAX_BITSIZE_MODE_ANY_INT)
8047 : {
8048 11173667 : auto result = native_decode_int (bytes, first_byte,
8049 11173667 : GET_MODE_SIZE (imode),
8050 22347334 : GET_MODE_PRECISION (imode));
8051 11173667 : return immed_wide_int_const (result, imode);
8052 11173667 : }
8053 :
8054 238910 : scalar_float_mode fmode;
8055 238910 : if (is_a <scalar_float_mode> (mode, &fmode))
8056 : {
8057 : /* We need to build an array of integers in target memory order.
8058 : All integers before the last one have 32 bits; the last one may
8059 : have 32 bits or fewer, depending on whether the mode bitsize
8060 : is divisible by 32. */
8061 238880 : long el32[MAX_BITSIZE_MODE_ANY_MODE / 32];
8062 238880 : unsigned int num_el32 = CEIL (GET_MODE_BITSIZE (fmode), 32);
8063 238880 : memset (el32, 0, num_el32 * sizeof (long));
8064 :
8065 : /* The (maximum) number of target bytes per element of el32. */
8066 238880 : unsigned int bytes_per_el32 = 32 / BITS_PER_UNIT;
8067 238880 : gcc_assert (bytes_per_el32 != 0);
8068 :
8069 238880 : unsigned int mode_bytes = GET_MODE_SIZE (fmode);
8070 1662532 : for (unsigned int byte = 0; byte < mode_bytes; ++byte)
8071 : {
8072 1423652 : unsigned int index = byte / bytes_per_el32;
8073 1423652 : unsigned int subbyte = byte % bytes_per_el32;
8074 1423652 : unsigned int int_bytes = MIN (bytes_per_el32,
8075 : mode_bytes - index * bytes_per_el32);
8076 : /* Always constant because the inputs are. */
8077 1423652 : unsigned int lsb
8078 1423652 : = subreg_size_lsb (1, int_bytes, subbyte).to_constant ();
8079 1423652 : el32[index] |= (unsigned long) bytes[first_byte + byte] << lsb;
8080 : }
8081 238880 : REAL_VALUE_TYPE r;
8082 238880 : real_from_target (&r, el32, fmode);
8083 238880 : return const_double_from_real_value (r, fmode);
8084 : }
8085 :
8086 30 : if (ALL_SCALAR_FIXED_POINT_MODE_P (mode))
8087 : {
8088 0 : scalar_mode smode = as_a <scalar_mode> (mode);
8089 0 : FIXED_VALUE_TYPE f;
8090 0 : f.data.low = 0;
8091 0 : f.data.high = 0;
8092 0 : f.mode = smode;
8093 :
8094 0 : unsigned int mode_bytes = GET_MODE_SIZE (smode);
8095 0 : for (unsigned int byte = 0; byte < mode_bytes; ++byte)
8096 : {
8097 : /* Always constant because the inputs are. */
8098 0 : unsigned int lsb
8099 0 : = subreg_size_lsb (1, mode_bytes, byte).to_constant ();
8100 0 : unsigned HOST_WIDE_INT unit = bytes[first_byte + byte];
8101 0 : if (lsb >= HOST_BITS_PER_WIDE_INT)
8102 0 : f.data.high |= unit << (lsb - HOST_BITS_PER_WIDE_INT);
8103 : else
8104 0 : f.data.low |= unit << lsb;
8105 : }
8106 0 : return CONST_FIXED_FROM_FIXED_VALUE (f, mode);
8107 : }
8108 :
8109 : return NULL_RTX;
8110 : }
8111 :
8112 : /* Simplify a byte offset BYTE into CONST_VECTOR X. The main purpose
8113 : is to convert a runtime BYTE value into a constant one. */
8114 :
8115 : static poly_uint64
8116 282068 : simplify_const_vector_byte_offset (rtx x, poly_uint64 byte)
8117 : {
8118 : /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
8119 282068 : machine_mode mode = GET_MODE (x);
8120 564136 : unsigned int elt_bits = vector_element_size (GET_MODE_PRECISION (mode),
8121 : GET_MODE_NUNITS (mode));
8122 : /* The number of bits needed to encode one element from each pattern. */
8123 282068 : unsigned int sequence_bits = CONST_VECTOR_NPATTERNS (x) * elt_bits;
8124 :
8125 : /* Identify the start point in terms of a sequence number and a byte offset
8126 : within that sequence. */
8127 282068 : poly_uint64 first_sequence;
8128 282068 : unsigned HOST_WIDE_INT subbit;
8129 282068 : if (can_div_trunc_p (byte * BITS_PER_UNIT, sequence_bits,
8130 : &first_sequence, &subbit))
8131 : {
8132 282068 : unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
8133 282068 : if (nelts_per_pattern == 1)
8134 : /* This is a duplicated vector, so the value of FIRST_SEQUENCE
8135 : doesn't matter. */
8136 229587 : byte = subbit / BITS_PER_UNIT;
8137 52481 : else if (nelts_per_pattern == 2 && known_gt (first_sequence, 0U))
8138 : {
8139 : /* The subreg drops the first element from each pattern and
8140 : only uses the second element. Find the first sequence
8141 : that starts on a byte boundary. */
8142 5568 : subbit += least_common_multiple (sequence_bits, BITS_PER_UNIT);
8143 5568 : byte = subbit / BITS_PER_UNIT;
8144 : }
8145 : }
8146 282068 : return byte;
8147 : }
8148 :
8149 : /* Subroutine of simplify_subreg in which:
8150 :
8151 : - X is known to be a CONST_VECTOR
8152 : - OUTERMODE is known to be a vector mode
8153 :
8154 : Try to handle the subreg by operating on the CONST_VECTOR encoding
8155 : rather than on each individual element of the CONST_VECTOR.
8156 :
8157 : Return the simplified subreg on success, otherwise return NULL_RTX. */
8158 :
8159 : static rtx
8160 179806 : simplify_const_vector_subreg (machine_mode outermode, rtx x,
8161 : machine_mode innermode, unsigned int first_byte)
8162 : {
8163 : /* Paradoxical subregs of vectors have dubious semantics. */
8164 179806 : if (paradoxical_subreg_p (outermode, innermode))
8165 : return NULL_RTX;
8166 :
8167 : /* We can only preserve the semantics of a stepped pattern if the new
8168 : vector element is the same as the original one. */
8169 179740 : if (CONST_VECTOR_STEPPED_P (x)
8170 200334 : && GET_MODE_INNER (outermode) != GET_MODE_INNER (innermode))
8171 : return NULL_RTX;
8172 :
8173 : /* Cope with MODE_VECTOR_BOOL by operating on bits rather than bytes. */
8174 172055 : unsigned int x_elt_bits
8175 172055 : = vector_element_size (GET_MODE_PRECISION (innermode),
8176 : GET_MODE_NUNITS (innermode));
8177 172055 : unsigned int out_elt_bits
8178 172055 : = vector_element_size (GET_MODE_PRECISION (outermode),
8179 : GET_MODE_NUNITS (outermode));
8180 :
8181 : /* The number of bits needed to encode one element from every pattern
8182 : of the original vector. */
8183 172055 : unsigned int x_sequence_bits = CONST_VECTOR_NPATTERNS (x) * x_elt_bits;
8184 :
8185 : /* The number of bits needed to encode one element from every pattern
8186 : of the result. */
8187 172055 : unsigned int out_sequence_bits
8188 172055 : = least_common_multiple (x_sequence_bits, out_elt_bits);
8189 :
8190 : /* Work out the number of interleaved patterns in the output vector
8191 : and the number of encoded elements per pattern. */
8192 172055 : unsigned int out_npatterns = out_sequence_bits / out_elt_bits;
8193 172055 : unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
8194 :
8195 : /* The encoding scheme requires the number of elements to be a multiple
8196 : of the number of patterns, so that each pattern appears at least once
8197 : and so that the same number of elements appear from each pattern. */
8198 344110 : bool ok_p = multiple_p (GET_MODE_NUNITS (outermode), out_npatterns);
8199 172055 : unsigned int const_nunits;
8200 344110 : if (GET_MODE_NUNITS (outermode).is_constant (&const_nunits)
8201 172055 : && (!ok_p || out_npatterns * nelts_per_pattern > const_nunits))
8202 : {
8203 : /* Either the encoding is invalid, or applying it would give us
8204 : more elements than we need. Just encode each element directly. */
8205 : out_npatterns = const_nunits;
8206 : nelts_per_pattern = 1;
8207 : }
8208 : else if (!ok_p)
8209 : return NULL_RTX;
8210 :
8211 : /* Get enough bytes of X to form the new encoding. */
8212 172055 : unsigned int buffer_bits = out_npatterns * nelts_per_pattern * out_elt_bits;
8213 172055 : unsigned int buffer_bytes = CEIL (buffer_bits, BITS_PER_UNIT);
8214 172055 : auto_vec<target_unit, 128> buffer (buffer_bytes);
8215 172055 : if (!native_encode_rtx (innermode, x, buffer, first_byte, buffer_bytes))
8216 : return NULL_RTX;
8217 :
8218 : /* Reencode the bytes as OUTERMODE. */
8219 172055 : return native_decode_vector_rtx (outermode, buffer, 0, out_npatterns,
8220 172055 : nelts_per_pattern);
8221 172055 : }
8222 :
8223 : /* Try to simplify a subreg of a constant by encoding the subreg region
8224 : as a sequence of target bytes and reading them back in the new mode.
8225 : Return the new value on success, otherwise return null.
8226 :
8227 : The subreg has outer mode OUTERMODE, inner mode INNERMODE, inner value X
8228 : and byte offset FIRST_BYTE. */
8229 :
8230 : static rtx
8231 10488875 : simplify_immed_subreg (fixed_size_mode outermode, rtx x,
8232 : machine_mode innermode, unsigned int first_byte)
8233 : {
8234 10488875 : unsigned int buffer_bytes = GET_MODE_SIZE (outermode);
8235 10488875 : auto_vec<target_unit, 128> buffer (buffer_bytes);
8236 :
8237 : /* Some ports misuse CCmode. */
8238 10488875 : if (GET_MODE_CLASS (outermode) == MODE_CC && CONST_INT_P (x))
8239 : return x;
8240 :
8241 : /* Paradoxical subregs read undefined values for bytes outside of the
8242 : inner value. However, we have traditionally always sign-extended
8243 : integer constants and zero-extended others. */
8244 10486937 : unsigned int inner_bytes = buffer_bytes;
8245 10486937 : if (paradoxical_subreg_p (outermode, innermode))
8246 : {
8247 759368 : if (!GET_MODE_SIZE (innermode).is_constant (&inner_bytes))
8248 0 : return NULL_RTX;
8249 :
8250 379684 : target_unit filler = 0;
8251 379684 : if (CONST_SCALAR_INT_P (x) && wi::neg_p (rtx_mode_t (x, innermode)))
8252 43703 : filler = -1;
8253 :
8254 : /* Add any leading bytes due to big-endian layout. The number of
8255 : bytes must be constant because both modes have constant size. */
8256 379684 : unsigned int leading_bytes
8257 379684 : = -byte_lowpart_offset (outermode, innermode).to_constant ();
8258 379684 : for (unsigned int i = 0; i < leading_bytes; ++i)
8259 0 : buffer.quick_push (filler);
8260 :
8261 379684 : if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
8262 0 : return NULL_RTX;
8263 :
8264 : /* Add any trailing bytes due to little-endian layout. */
8265 4606688 : while (buffer.length () < buffer_bytes)
8266 1923660 : buffer.quick_push (filler);
8267 : }
8268 10107253 : else if (!native_encode_rtx (innermode, x, buffer, first_byte, inner_bytes))
8269 : return NULL_RTX;
8270 10486937 : rtx ret = native_decode_rtx (outermode, buffer, 0);
8271 10486937 : if (ret && FLOAT_MODE_P (outermode))
8272 : {
8273 124989 : auto_vec<target_unit, 128> buffer2 (buffer_bytes);
8274 124989 : if (!native_encode_rtx (outermode, ret, buffer2, 0, buffer_bytes))
8275 : return NULL_RTX;
8276 1385720 : for (unsigned int i = 0; i < buffer_bytes; ++i)
8277 1260766 : if (buffer[i] != buffer2[i])
8278 : return NULL_RTX;
8279 124989 : }
8280 : return ret;
8281 10488875 : }
8282 :
8283 : /* Simplify SUBREG:OUTERMODE(OP:INNERMODE, BYTE)
8284 : Return 0 if no simplifications are possible. */
8285 : rtx
8286 68469958 : simplify_context::simplify_subreg (machine_mode outermode, rtx op,
8287 : machine_mode innermode, poly_uint64 byte)
8288 : {
8289 : /* Little bit of sanity checking. */
8290 68469958 : gcc_assert (innermode != VOIDmode);
8291 68469958 : gcc_assert (outermode != VOIDmode);
8292 68469958 : gcc_assert (innermode != BLKmode);
8293 68469958 : gcc_assert (outermode != BLKmode);
8294 :
8295 68469958 : gcc_assert (GET_MODE (op) == innermode
8296 : || GET_MODE (op) == VOIDmode);
8297 :
8298 136939916 : poly_uint64 outersize = GET_MODE_SIZE (outermode);
8299 68469958 : if (!multiple_p (byte, outersize))
8300 : return NULL_RTX;
8301 :
8302 136939876 : poly_uint64 innersize = GET_MODE_SIZE (innermode);
8303 68469938 : if (maybe_ge (byte, innersize))
8304 : return NULL_RTX;
8305 :
8306 68469938 : if (outermode == innermode && known_eq (byte, 0U))
8307 4610308 : return op;
8308 :
8309 63859630 : if (GET_CODE (op) == CONST_VECTOR)
8310 282068 : byte = simplify_const_vector_byte_offset (op, byte);
8311 :
8312 127719260 : if (multiple_p (byte, GET_MODE_UNIT_SIZE (innermode)))
8313 : {
8314 58030885 : rtx elt;
8315 :
8316 49410956 : if (VECTOR_MODE_P (outermode)
8317 25859787 : && GET_MODE_INNER (outermode) == GET_MODE_INNER (innermode)
8318 59737542 : && vec_duplicate_p (op, &elt))
8319 12213 : return gen_vec_duplicate (outermode, elt);
8320 :
8321 58026978 : if (outermode == GET_MODE_INNER (innermode)
8322 58026978 : && vec_duplicate_p (op, &elt))
8323 8306 : return elt;
8324 : }
8325 :
8326 63847417 : if (CONST_SCALAR_INT_P (op)
8327 53518574 : || CONST_DOUBLE_AS_FLOAT_P (op)
8328 53461617 : || CONST_FIXED_P (op)
8329 53461617 : || GET_CODE (op) == CONST_VECTOR)
8330 : {
8331 10660930 : unsigned HOST_WIDE_INT cbyte;
8332 10660930 : if (byte.is_constant (&cbyte))
8333 : {
8334 10660930 : if (GET_CODE (op) == CONST_VECTOR && VECTOR_MODE_P (outermode))
8335 : {
8336 179806 : rtx tmp = simplify_const_vector_subreg (outermode, op,
8337 : innermode, cbyte);
8338 179806 : if (tmp)
8339 10660930 : return tmp;
8340 : }
8341 :
8342 10488875 : fixed_size_mode fs_outermode;
8343 10488875 : if (is_a <fixed_size_mode> (outermode, &fs_outermode))
8344 10488875 : return simplify_immed_subreg (fs_outermode, op, innermode, cbyte);
8345 : }
8346 : }
8347 :
8348 : /* Changing mode twice with SUBREG => just change it once,
8349 : or not at all if changing back op starting mode. */
8350 53186487 : if (GET_CODE (op) == SUBREG)
8351 : {
8352 1318354 : machine_mode innermostmode = GET_MODE (SUBREG_REG (op));
8353 2636708 : poly_uint64 innermostsize = GET_MODE_SIZE (innermostmode);
8354 1318354 : rtx newx;
8355 :
8356 : /* Make sure that the relationship between the two subregs is
8357 : known at compile time. */
8358 1318354 : if (!ordered_p (outersize, innermostsize))
8359 : return NULL_RTX;
8360 :
8361 1318354 : if (outermode == innermostmode
8362 737792 : && known_eq (byte, subreg_lowpart_offset (outermode, innermode))
8363 2056145 : && known_eq (SUBREG_BYTE (op),
8364 : subreg_lowpart_offset (innermode, innermostmode)))
8365 737791 : return SUBREG_REG (op);
8366 :
8367 : /* Work out the memory offset of the final OUTERMODE value relative
8368 : to the inner value of OP. */
8369 580563 : poly_int64 mem_offset = subreg_memory_offset (outermode,
8370 : innermode, byte);
8371 580563 : poly_int64 op_mem_offset = subreg_memory_offset (op);
8372 580563 : poly_int64 final_offset = mem_offset + op_mem_offset;
8373 :
8374 : /* See whether resulting subreg will be paradoxical. */
8375 580563 : if (!paradoxical_subreg_p (outermode, innermostmode))
8376 : {
8377 : /* Bail out in case resulting subreg would be incorrect. */
8378 943810 : if (maybe_lt (final_offset, 0)
8379 943801 : || maybe_ge (poly_uint64 (final_offset), innermostsize)
8380 943809 : || !multiple_p (final_offset, outersize))
8381 9 : return NULL_RTX;
8382 : }
8383 : else
8384 : {
8385 108658 : poly_int64 required_offset = subreg_memory_offset (outermode,
8386 : innermostmode, 0);
8387 108658 : if (maybe_ne (final_offset, required_offset))
8388 0 : return NULL_RTX;
8389 : /* Paradoxical subregs always have byte offset 0. */
8390 108658 : final_offset = 0;
8391 : }
8392 :
8393 : /* Recurse for further possible simplifications. */
8394 580554 : newx = simplify_subreg (outermode, SUBREG_REG (op), innermostmode,
8395 580554 : final_offset);
8396 580554 : if (newx)
8397 : return newx;
8398 580175 : if (validate_subreg (outermode, innermostmode,
8399 580175 : SUBREG_REG (op), final_offset))
8400 : {
8401 521495 : newx = gen_rtx_SUBREG (outermode, SUBREG_REG (op), final_offset);
8402 521495 : if (SUBREG_PROMOTED_VAR_P (op)
8403 574 : && SUBREG_PROMOTED_SIGN (op) >= 0
8404 574 : && GET_MODE_CLASS (outermode) == MODE_INT
8405 570 : && known_ge (outersize, innersize)
8406 357 : && known_le (outersize, innermostsize)
8407 521499 : && subreg_lowpart_p (newx))
8408 : {
8409 4 : SUBREG_PROMOTED_VAR_P (newx) = 1;
8410 4 : SUBREG_PROMOTED_SET (newx, SUBREG_PROMOTED_GET (op));
8411 : }
8412 521495 : return newx;
8413 : }
8414 : return NULL_RTX;
8415 : }
8416 :
8417 : /* SUBREG of a hard register => just change the register number
8418 : and/or mode. If the hard register is not valid in that mode,
8419 : suppress this simplification. If the hard register is the stack,
8420 : frame, or argument pointer, leave this as a SUBREG. */
8421 :
8422 51868133 : if (REG_P (op) && HARD_REGISTER_P (op))
8423 : {
8424 10709649 : unsigned int regno, final_regno;
8425 :
8426 10709649 : regno = REGNO (op);
8427 10709649 : final_regno = simplify_subreg_regno (regno, innermode, byte, outermode);
8428 10709649 : if (HARD_REGISTER_NUM_P (final_regno))
8429 : {
8430 10684769 : rtx x = gen_rtx_REG_offset (op, outermode, final_regno,
8431 : subreg_memory_offset (outermode,
8432 : innermode, byte));
8433 :
8434 : /* Propagate original regno. We don't have any way to specify
8435 : the offset inside original regno, so do so only for lowpart.
8436 : The information is used only by alias analysis that cannot
8437 : grog partial register anyway. */
8438 :
8439 10684769 : if (known_eq (subreg_lowpart_offset (outermode, innermode), byte))
8440 8009853 : ORIGINAL_REGNO (x) = ORIGINAL_REGNO (op);
8441 10684769 : return x;
8442 : }
8443 : }
8444 :
8445 : /* If we have a SUBREG of a register that we are replacing and we are
8446 : replacing it with a MEM, make a new MEM and try replacing the
8447 : SUBREG with it. Don't do this if the MEM has a mode-dependent address
8448 : or if we would be widening it. */
8449 :
8450 41183364 : if (MEM_P (op)
8451 1722563 : && ! mode_dependent_address_p (XEXP (op, 0), MEM_ADDR_SPACE (op))
8452 : /* Allow splitting of volatile memory references in case we don't
8453 : have instruction to move the whole thing. */
8454 1722560 : && (! MEM_VOLATILE_P (op)
8455 44289 : || ! have_insn_for (SET, innermode))
8456 : && !(STRICT_ALIGNMENT && MEM_ALIGN (op) < GET_MODE_ALIGNMENT (outermode))
8457 42861635 : && known_le (outersize, innersize))
8458 812998 : return adjust_address_nv (op, outermode, byte);
8459 :
8460 : /* Handle complex or vector values represented as CONCAT or VEC_CONCAT
8461 : of two parts. */
8462 40370366 : if (GET_CODE (op) == CONCAT
8463 40370366 : || GET_CODE (op) == VEC_CONCAT)
8464 : {
8465 195982 : poly_uint64 final_offset;
8466 195982 : rtx part, res;
8467 :
8468 195982 : machine_mode part_mode = GET_MODE (XEXP (op, 0));
8469 195982 : if (part_mode == VOIDmode)
8470 11 : part_mode = GET_MODE_INNER (GET_MODE (op));
8471 391964 : poly_uint64 part_size = GET_MODE_SIZE (part_mode);
8472 195982 : if (known_lt (byte, part_size))
8473 : {
8474 194426 : part = XEXP (op, 0);
8475 194426 : final_offset = byte;
8476 : }
8477 1556 : else if (known_ge (byte, part_size))
8478 : {
8479 1556 : part = XEXP (op, 1);
8480 1556 : final_offset = byte - part_size;
8481 : }
8482 : else
8483 : return NULL_RTX;
8484 :
8485 195982 : if (maybe_gt (final_offset + outersize, part_size))
8486 : return NULL_RTX;
8487 :
8488 128498 : part_mode = GET_MODE (part);
8489 128498 : if (part_mode == VOIDmode)
8490 0 : part_mode = GET_MODE_INNER (GET_MODE (op));
8491 128498 : res = simplify_subreg (outermode, part, part_mode, final_offset);
8492 128498 : if (res)
8493 : return res;
8494 295 : if (GET_MODE (part) != VOIDmode
8495 295 : && validate_subreg (outermode, part_mode, part, final_offset))
8496 295 : return gen_rtx_SUBREG (outermode, part, final_offset);
8497 0 : return NULL_RTX;
8498 : }
8499 :
8500 : /* Simplify
8501 : (subreg (vec_merge (X)
8502 : (vector)
8503 : (const_int ((1 << N) | M)))
8504 : (N * sizeof (outermode)))
8505 : to
8506 : (subreg (X) (N * sizeof (outermode)))
8507 : */
8508 40174384 : unsigned int idx;
8509 80348768 : if (constant_multiple_p (byte, GET_MODE_SIZE (outermode), &idx)
8510 40174384 : && idx < HOST_BITS_PER_WIDE_INT
8511 40174384 : && GET_CODE (op) == VEC_MERGE
8512 638590 : && GET_MODE_INNER (innermode) == outermode
8513 4891 : && CONST_INT_P (XEXP (op, 2))
8514 40178693 : && (UINTVAL (XEXP (op, 2)) & (HOST_WIDE_INT_1U << idx)) != 0)
8515 4300 : return simplify_gen_subreg (outermode, XEXP (op, 0), innermode, byte);
8516 :
8517 : /* A SUBREG resulting from a zero extension may fold to zero if
8518 : it extracts higher bits that the ZERO_EXTEND's source bits. */
8519 40170084 : if (GET_CODE (op) == ZERO_EXTEND && SCALAR_INT_MODE_P (innermode))
8520 : {
8521 217502 : poly_uint64 bitpos = subreg_lsb_1 (outermode, innermode, byte);
8522 217502 : if (known_ge (bitpos, GET_MODE_PRECISION (GET_MODE (XEXP (op, 0)))))
8523 54837 : return CONST0_RTX (outermode);
8524 : }
8525 :
8526 : /* Optimize SUBREGS of scalar integral ASHIFT by a valid constant. */
8527 40115247 : if (GET_CODE (op) == ASHIFT
8528 713049 : && SCALAR_INT_MODE_P (innermode)
8529 685666 : && CONST_INT_P (XEXP (op, 1))
8530 606386 : && INTVAL (XEXP (op, 1)) > 0
8531 41434632 : && known_gt (GET_MODE_BITSIZE (innermode), INTVAL (XEXP (op, 1))))
8532 : {
8533 606336 : HOST_WIDE_INT val = INTVAL (XEXP (op, 1));
8534 : /* A lowpart SUBREG of a ASHIFT by a constant may fold to zero. */
8535 606336 : if (known_eq (subreg_lowpart_offset (outermode, innermode), byte)
8536 1175266 : && known_le (GET_MODE_BITSIZE (outermode), val))
8537 195307 : return CONST0_RTX (outermode);
8538 : /* Optimize the highpart SUBREG of a suitable ASHIFT (ZERO_EXTEND). */
8539 445908 : if (GET_CODE (XEXP (op, 0)) == ZERO_EXTEND
8540 35558 : && GET_MODE (XEXP (XEXP (op, 0), 0)) == outermode
8541 70802 : && known_eq (GET_MODE_BITSIZE (outermode), val)
8542 69758 : && known_eq (GET_MODE_BITSIZE (innermode), 2 * val)
8543 481466 : && known_eq (subreg_highpart_offset (outermode, innermode), byte))
8544 34879 : return XEXP (XEXP (op, 0), 0);
8545 : }
8546 :
8547 41405638 : auto distribute_subreg = [&](rtx op)
8548 : {
8549 1485698 : return simplify_subreg (outermode, op, innermode, byte);
8550 39919940 : };
8551 :
8552 : /* Try distributing the subreg through logic operations, if that
8553 : leads to all subexpressions being simplified. For example,
8554 : distributing the outer subreg in:
8555 :
8556 : (subreg:SI (not:QI (subreg:QI (reg:SI X) <lowpart>)) 0)
8557 :
8558 : gives:
8559 :
8560 : (not:SI (reg:SI X))
8561 :
8562 : This should be a win if the outermode is word_mode, since logical
8563 : operations on word_mode should (a) be no more expensive than logical
8564 : operations on subword modes and (b) are likely to be cheaper than
8565 : logical operations on multiword modes.
8566 :
8567 : Otherwise, handle the case where the subreg is non-narrowing and does
8568 : not change the number of words. The non-narrowing condition ensures
8569 : that we don't convert word_mode operations to subword operations. */
8570 39919940 : scalar_int_mode int_outermode, int_innermode;
8571 39919940 : if (is_a <scalar_int_mode> (outermode, &int_outermode)
8572 33059186 : && is_a <scalar_int_mode> (innermode, &int_innermode)
8573 71794237 : && (outermode == word_mode
8574 17953749 : || ((GET_MODE_PRECISION (int_outermode)
8575 17953749 : >= GET_MODE_PRECISION (int_innermode))
8576 4534436 : && (CEIL (GET_MODE_SIZE (int_outermode), UNITS_PER_WORD)
8577 4453000 : <= CEIL (GET_MODE_SIZE (int_innermode), UNITS_PER_WORD)))))
8578 18313803 : switch (GET_CODE (op))
8579 : {
8580 31430 : case NOT:
8581 31430 : if (rtx op0 = distribute_subreg (XEXP (op, 0)))
8582 1444 : return simplify_gen_unary (GET_CODE (op), outermode, op0, outermode);
8583 : break;
8584 :
8585 463152 : case AND:
8586 463152 : case IOR:
8587 463152 : case XOR:
8588 463152 : if (rtx op0 = distribute_subreg (XEXP (op, 0)))
8589 207019 : if (rtx op1 = distribute_subreg (XEXP (op, 1)))
8590 201998 : return simplify_gen_binary (GET_CODE (op), outermode, op0, op1);
8591 : break;
8592 :
8593 : default:
8594 : break;
8595 : }
8596 :
8597 39716498 : if (is_a <scalar_int_mode> (outermode, &int_outermode)
8598 32855744 : && is_a <scalar_int_mode> (innermode, &int_innermode)
8599 72572242 : && known_eq (byte, subreg_lowpart_offset (int_outermode, int_innermode)))
8600 : {
8601 : /* Handle polynomial integers. The upper bits of a paradoxical
8602 : subreg are undefined, so this is safe regardless of whether
8603 : we're truncating or extending. */
8604 29489583 : if (CONST_POLY_INT_P (op))
8605 : {
8606 : poly_wide_int val
8607 : = poly_wide_int::from (const_poly_int_value (op),
8608 : GET_MODE_PRECISION (int_outermode),
8609 : SIGNED);
8610 : return immed_wide_int_const (val, int_outermode);
8611 : }
8612 :
8613 29489583 : if (GET_MODE_PRECISION (int_outermode)
8614 29489583 : < GET_MODE_PRECISION (int_innermode))
8615 : {
8616 16774892 : rtx tem = simplify_truncation (int_outermode, op, int_innermode);
8617 16774892 : if (tem)
8618 : return tem;
8619 : }
8620 : }
8621 :
8622 : /* If the outer mode is not integral, try taking a subreg with the equivalent
8623 : integer outer mode and then bitcasting the result.
8624 : Other simplifications rely on integer to integer subregs and we'd
8625 : potentially miss out on optimizations otherwise. */
8626 77505376 : if (known_gt (GET_MODE_SIZE (innermode),
8627 : GET_MODE_SIZE (outermode))
8628 19236090 : && SCALAR_INT_MODE_P (innermode)
8629 18081332 : && !SCALAR_INT_MODE_P (outermode)
8630 58166734 : && int_mode_for_size (GET_MODE_BITSIZE (outermode),
8631 88978 : 0).exists (&int_outermode))
8632 : {
8633 88978 : rtx tem = simplify_subreg (int_outermode, op, innermode, byte);
8634 88978 : if (tem)
8635 1984 : return lowpart_subreg (outermode, tem, int_outermode);
8636 : }
8637 :
8638 : /* If OP is a vector comparison and the subreg is not changing the
8639 : number of elements or the size of the elements, change the result
8640 : of the comparison to the new mode. */
8641 38750704 : if (COMPARISON_P (op)
8642 271827 : && VECTOR_MODE_P (outermode)
8643 199410 : && VECTOR_MODE_P (innermode)
8644 598206 : && known_eq (GET_MODE_NUNITS (outermode), GET_MODE_NUNITS (innermode))
8645 39121945 : && known_eq (GET_MODE_UNIT_SIZE (outermode),
8646 : GET_MODE_UNIT_SIZE (innermode)))
8647 123403 : return simplify_gen_relational (GET_CODE (op), outermode, innermode,
8648 123403 : XEXP (op, 0), XEXP (op, 1));
8649 :
8650 : /* Distribute non-paradoxical subregs through logic ops in cases where
8651 : one term disappears.
8652 :
8653 : (subreg:M1 (and:M2 X C1)) -> (subreg:M1 X)
8654 : (subreg:M1 (ior:M2 X C1)) -> (subreg:M1 C1)
8655 : (subreg:M1 (xor:M2 X C1)) -> (subreg:M1 (not:M2 X))
8656 :
8657 : if M2 is no smaller than M1 and (subreg:M1 C1) is all-ones.
8658 :
8659 : (subreg:M1 (and:M2 X C2)) -> (subreg:M1 C2)
8660 : (subreg:M1 (ior/xor:M2 X C2)) -> (subreg:M1 X)
8661 :
8662 : if M2 is no smaller than M1 and (subreg:M1 C2) is zero. */
8663 38627301 : if (known_ge (innersize, outersize)
8664 25436320 : && GET_MODE_CLASS (outermode) == GET_MODE_CLASS (innermode)
8665 23387489 : && (GET_CODE (op) == AND || GET_CODE (op) == IOR || GET_CODE (op) == XOR)
8666 40196505 : && CONSTANT_P (XEXP (op, 1)))
8667 : {
8668 776859 : rtx op1_subreg = distribute_subreg (XEXP (op, 1));
8669 776859 : if (op1_subreg == CONSTM1_RTX (outermode))
8670 : {
8671 116989 : if (GET_CODE (op) == IOR)
8672 : return op1_subreg;
8673 116755 : rtx op0 = XEXP (op, 0);
8674 116755 : if (GET_CODE (op) == XOR)
8675 801 : op0 = simplify_gen_unary (NOT, innermode, op0, innermode);
8676 116755 : return simplify_gen_subreg (outermode, op0, innermode, byte);
8677 : }
8678 :
8679 659870 : if (op1_subreg == CONST0_RTX (outermode))
8680 12399 : return (GET_CODE (op) == AND
8681 12399 : ? op1_subreg
8682 7238 : : distribute_subreg (XEXP (op, 0)));
8683 : }
8684 :
8685 : return NULL_RTX;
8686 : }
8687 :
8688 : /* Make a SUBREG operation or equivalent if it folds. */
8689 :
8690 : rtx
8691 43000899 : simplify_context::simplify_gen_subreg (machine_mode outermode, rtx op,
8692 : machine_mode innermode,
8693 : poly_uint64 byte)
8694 : {
8695 43000899 : rtx newx;
8696 :
8697 43000899 : newx = simplify_subreg (outermode, op, innermode, byte);
8698 43000899 : if (newx)
8699 : return newx;
8700 :
8701 20160220 : if (GET_CODE (op) == SUBREG
8702 20160220 : || GET_CODE (op) == CONCAT
8703 20125546 : || CONST_SCALAR_INT_P (op)
8704 20125520 : || CONST_DOUBLE_AS_FLOAT_P (op)
8705 20125520 : || CONST_FIXED_P (op)
8706 20125520 : || GET_CODE (op) == CONST_VECTOR)
8707 : return NULL_RTX;
8708 :
8709 20125510 : if (validate_subreg (outermode, innermode, op, byte))
8710 20093700 : return gen_rtx_SUBREG (outermode, op, byte);
8711 :
8712 : return NULL_RTX;
8713 : }
8714 :
8715 : /* Generates a subreg to get the least significant part of EXPR (in mode
8716 : INNER_MODE) to OUTER_MODE. */
8717 :
8718 : rtx
8719 32045748 : simplify_context::lowpart_subreg (machine_mode outer_mode, rtx expr,
8720 : machine_mode inner_mode)
8721 : {
8722 32045748 : return simplify_gen_subreg (outer_mode, expr, inner_mode,
8723 32045748 : subreg_lowpart_offset (outer_mode, inner_mode));
8724 : }
8725 :
8726 : /* Generate RTX to select element at INDEX out of vector OP. */
8727 :
8728 : rtx
8729 646525 : simplify_context::simplify_gen_vec_select (rtx op, unsigned int index)
8730 : {
8731 646525 : gcc_assert (VECTOR_MODE_P (GET_MODE (op)));
8732 :
8733 646525 : scalar_mode imode = GET_MODE_INNER (GET_MODE (op));
8734 :
8735 1293050 : if (known_eq (index * GET_MODE_SIZE (imode),
8736 : subreg_lowpart_offset (imode, GET_MODE (op))))
8737 : {
8738 646375 : rtx res = lowpart_subreg (imode, op, GET_MODE (op));
8739 646375 : if (res)
8740 : return res;
8741 : }
8742 :
8743 472 : rtx tmp = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, GEN_INT (index)));
8744 472 : return gen_rtx_VEC_SELECT (imode, op, tmp);
8745 : }
8746 :
8747 :
8748 : /* Simplify X, an rtx expression.
8749 :
8750 : Return the simplified expression or NULL if no simplifications
8751 : were possible.
8752 :
8753 : This is the preferred entry point into the simplification routines;
8754 : however, we still allow passes to call the more specific routines.
8755 :
8756 : Right now GCC has three (yes, three) major bodies of RTL simplification
8757 : code that need to be unified.
8758 :
8759 : 1. fold_rtx in cse.cc. This code uses various CSE specific
8760 : information to aid in RTL simplification.
8761 :
8762 : 2. simplify_rtx in combine.cc. Similar to fold_rtx, except that
8763 : it uses combine specific information to aid in RTL
8764 : simplification.
8765 :
8766 : 3. The routines in this file.
8767 :
8768 :
8769 : Long term we want to only have one body of simplification code; to
8770 : get to that state I recommend the following steps:
8771 :
8772 : 1. Pour over fold_rtx & simplify_rtx and move any simplifications
8773 : which are not pass dependent state into these routines.
8774 :
8775 : 2. As code is moved by #1, change fold_rtx & simplify_rtx to
8776 : use this routine whenever possible.
8777 :
8778 : 3. Allow for pass dependent state to be provided to these
8779 : routines and add simplifications based on the pass dependent
8780 : state. Remove code from cse.cc & combine.cc that becomes
8781 : redundant/dead.
8782 :
8783 : It will take time, but ultimately the compiler will be easier to
8784 : maintain and improve. It's totally silly that when we add a
8785 : simplification that it needs to be added to 4 places (3 for RTL
8786 : simplification and 1 for tree simplification. */
8787 :
8788 : rtx
8789 46357735 : simplify_rtx (const_rtx x)
8790 : {
8791 46357735 : const enum rtx_code code = GET_CODE (x);
8792 46357735 : const machine_mode mode = GET_MODE (x);
8793 :
8794 46357735 : switch (GET_RTX_CLASS (code))
8795 : {
8796 877525 : case RTX_UNARY:
8797 1755050 : return simplify_unary_operation (code, mode,
8798 877525 : XEXP (x, 0), GET_MODE (XEXP (x, 0)));
8799 26819060 : case RTX_COMM_ARITH:
8800 26819060 : if (swap_commutative_operands_p (XEXP (x, 0), XEXP (x, 1)))
8801 534780 : return simplify_gen_binary (code, mode, XEXP (x, 1), XEXP (x, 0));
8802 :
8803 : /* Fall through. */
8804 :
8805 32291388 : case RTX_BIN_ARITH:
8806 32291388 : return simplify_binary_operation (code, mode, XEXP (x, 0), XEXP (x, 1));
8807 :
8808 64852 : case RTX_TERNARY:
8809 64852 : case RTX_BITFIELD_OPS:
8810 64852 : return simplify_ternary_operation (code, mode, GET_MODE (XEXP (x, 0)),
8811 64852 : XEXP (x, 0), XEXP (x, 1),
8812 64852 : XEXP (x, 2));
8813 :
8814 176266 : case RTX_COMPARE:
8815 176266 : case RTX_COMM_COMPARE:
8816 176266 : return simplify_relational_operation (code, mode,
8817 176266 : ((GET_MODE (XEXP (x, 0))
8818 : != VOIDmode)
8819 : ? GET_MODE (XEXP (x, 0))
8820 483 : : GET_MODE (XEXP (x, 1))),
8821 176266 : XEXP (x, 0),
8822 352532 : XEXP (x, 1));
8823 :
8824 231615 : case RTX_EXTRA:
8825 231615 : if (code == SUBREG)
8826 2444 : return simplify_subreg (mode, SUBREG_REG (x),
8827 2444 : GET_MODE (SUBREG_REG (x)),
8828 2444 : SUBREG_BYTE (x));
8829 : break;
8830 :
8831 6488432 : case RTX_OBJ:
8832 6488432 : if (code == LO_SUM)
8833 : {
8834 : /* Convert (lo_sum (high FOO) FOO) to FOO. */
8835 0 : if (GET_CODE (XEXP (x, 0)) == HIGH
8836 0 : && rtx_equal_p (XEXP (XEXP (x, 0), 0), XEXP (x, 1)))
8837 0 : return XEXP (x, 1);
8838 : }
8839 : break;
8840 :
8841 : default:
8842 : break;
8843 : }
8844 : return NULL;
8845 : }
8846 :
8847 : #if CHECKING_P
8848 :
8849 : namespace selftest {
8850 :
8851 : /* Make a unique pseudo REG of mode MODE for use by selftests. */
8852 :
8853 : static rtx
8854 2672 : make_test_reg (machine_mode mode)
8855 : {
8856 2672 : static int test_reg_num = LAST_VIRTUAL_REGISTER + 1;
8857 :
8858 2672 : return gen_rtx_REG (mode, test_reg_num++);
8859 : }
8860 :
8861 : static void
8862 40 : test_scalar_int_ops (machine_mode mode)
8863 : {
8864 40 : rtx op0 = make_test_reg (mode);
8865 40 : rtx op1 = make_test_reg (mode);
8866 40 : rtx six = GEN_INT (6);
8867 :
8868 40 : rtx neg_op0 = simplify_gen_unary (NEG, mode, op0, mode);
8869 40 : rtx not_op0 = simplify_gen_unary (NOT, mode, op0, mode);
8870 40 : rtx bswap_op0 = simplify_gen_unary (BSWAP, mode, op0, mode);
8871 :
8872 40 : rtx and_op0_op1 = simplify_gen_binary (AND, mode, op0, op1);
8873 40 : rtx ior_op0_op1 = simplify_gen_binary (IOR, mode, op0, op1);
8874 40 : rtx xor_op0_op1 = simplify_gen_binary (XOR, mode, op0, op1);
8875 :
8876 40 : rtx and_op0_6 = simplify_gen_binary (AND, mode, op0, six);
8877 40 : rtx and_op1_6 = simplify_gen_binary (AND, mode, op1, six);
8878 :
8879 : /* Test some binary identities. */
8880 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, op0, const0_rtx));
8881 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (PLUS, mode, const0_rtx, op0));
8882 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (MINUS, mode, op0, const0_rtx));
8883 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, op0, const1_rtx));
8884 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (MULT, mode, const1_rtx, op0));
8885 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (DIV, mode, op0, const1_rtx));
8886 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, constm1_rtx));
8887 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, constm1_rtx, op0));
8888 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, const0_rtx));
8889 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, const0_rtx, op0));
8890 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, op0, const0_rtx));
8891 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (XOR, mode, const0_rtx, op0));
8892 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFT, mode, op0, const0_rtx));
8893 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATE, mode, op0, const0_rtx));
8894 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (ASHIFTRT, mode, op0, const0_rtx));
8895 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (LSHIFTRT, mode, op0, const0_rtx));
8896 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (ROTATERT, mode, op0, const0_rtx));
8897 :
8898 : /* Test some self-inverse operations. */
8899 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (NEG, mode, neg_op0, mode));
8900 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (NOT, mode, not_op0, mode));
8901 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (BSWAP, mode, bswap_op0, mode));
8902 :
8903 : /* Test some reflexive operations. */
8904 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (AND, mode, op0, op0));
8905 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (IOR, mode, op0, op0));
8906 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (SMIN, mode, op0, op0));
8907 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (SMAX, mode, op0, op0));
8908 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (UMIN, mode, op0, op0));
8909 40 : ASSERT_RTX_EQ (op0, simplify_gen_binary (UMAX, mode, op0, op0));
8910 :
8911 40 : ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (MINUS, mode, op0, op0));
8912 40 : ASSERT_RTX_EQ (const0_rtx, simplify_gen_binary (XOR, mode, op0, op0));
8913 :
8914 : /* Test simplify_distributive_operation. */
8915 40 : ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, xor_op0_op1, six),
8916 : simplify_gen_binary (XOR, mode, and_op0_6, and_op1_6));
8917 40 : ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, ior_op0_op1, six),
8918 : simplify_gen_binary (IOR, mode, and_op0_6, and_op1_6));
8919 40 : ASSERT_RTX_EQ (simplify_gen_binary (AND, mode, and_op0_op1, six),
8920 : simplify_gen_binary (AND, mode, and_op0_6, and_op1_6));
8921 :
8922 : /* Test useless extensions are eliminated. */
8923 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (TRUNCATE, mode, op0, mode));
8924 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (ZERO_EXTEND, mode, op0, mode));
8925 40 : ASSERT_RTX_EQ (op0, simplify_gen_unary (SIGN_EXTEND, mode, op0, mode));
8926 40 : ASSERT_RTX_EQ (op0, lowpart_subreg (mode, op0, mode));
8927 40 : }
8928 :
8929 : /* Verify some simplifications of integer extension/truncation.
8930 : Machine mode BMODE is the guaranteed wider than SMODE. */
8931 :
8932 : static void
8933 24 : test_scalar_int_ext_ops (machine_mode bmode, machine_mode smode)
8934 : {
8935 24 : rtx sreg = make_test_reg (smode);
8936 :
8937 : /* Check truncation of extension. */
8938 24 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
8939 : simplify_gen_unary (ZERO_EXTEND, bmode,
8940 : sreg, smode),
8941 : bmode),
8942 : sreg);
8943 24 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
8944 : simplify_gen_unary (SIGN_EXTEND, bmode,
8945 : sreg, smode),
8946 : bmode),
8947 : sreg);
8948 24 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
8949 : lowpart_subreg (bmode, sreg, smode),
8950 : bmode),
8951 : sreg);
8952 :
8953 : /* Test extensions, followed by logic ops, followed by truncations. */
8954 24 : rtx bsubreg = lowpart_subreg (bmode, sreg, smode);
8955 24 : rtx smask = gen_int_mode (GET_MODE_MASK (smode), bmode);
8956 24 : rtx inv_smask = gen_int_mode (~GET_MODE_MASK (smode), bmode);
8957 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8958 : simplify_gen_binary (AND, bmode,
8959 : bsubreg, smask),
8960 : bmode),
8961 : sreg);
8962 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8963 : simplify_gen_binary (AND, bmode,
8964 : bsubreg, inv_smask),
8965 : bmode),
8966 : const0_rtx);
8967 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8968 : simplify_gen_binary (IOR, bmode,
8969 : bsubreg, smask),
8970 : bmode),
8971 : constm1_rtx);
8972 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8973 : simplify_gen_binary (IOR, bmode,
8974 : bsubreg, inv_smask),
8975 : bmode),
8976 : sreg);
8977 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8978 : simplify_gen_binary (XOR, bmode,
8979 : bsubreg, smask),
8980 : bmode),
8981 : lowpart_subreg (smode,
8982 : gen_rtx_NOT (bmode, bsubreg),
8983 : bmode));
8984 24 : ASSERT_RTX_EQ (lowpart_subreg (smode,
8985 : simplify_gen_binary (XOR, bmode,
8986 : bsubreg, inv_smask),
8987 : bmode),
8988 : sreg);
8989 :
8990 24 : if (known_le (GET_MODE_PRECISION (bmode), BITS_PER_WORD))
8991 : {
8992 24 : rtx breg1 = make_test_reg (bmode);
8993 24 : rtx breg2 = make_test_reg (bmode);
8994 24 : rtx ssubreg1 = lowpart_subreg (smode, breg1, bmode);
8995 24 : rtx ssubreg2 = lowpart_subreg (smode, breg2, bmode);
8996 24 : rtx not_1 = simplify_gen_unary (NOT, smode, ssubreg1, smode);
8997 24 : rtx and_12 = simplify_gen_binary (AND, smode, ssubreg1, ssubreg2);
8998 24 : rtx ior_12 = simplify_gen_binary (IOR, smode, ssubreg1, ssubreg2);
8999 24 : rtx xor_12 = simplify_gen_binary (XOR, smode, ssubreg1, ssubreg2);
9000 24 : rtx and_n12 = simplify_gen_binary (AND, smode, not_1, ssubreg2);
9001 24 : rtx ior_n12 = simplify_gen_binary (IOR, smode, not_1, ssubreg2);
9002 24 : rtx xor_12_c = simplify_gen_binary (XOR, smode, xor_12, const1_rtx);
9003 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, not_1, smode),
9004 : gen_rtx_NOT (bmode, breg1));
9005 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, and_12, smode),
9006 : gen_rtx_AND (bmode, breg1, breg2));
9007 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, ior_12, smode),
9008 : gen_rtx_IOR (bmode, breg1, breg2));
9009 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, xor_12, smode),
9010 : gen_rtx_XOR (bmode, breg1, breg2));
9011 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, and_n12, smode),
9012 : gen_rtx_AND (bmode, gen_rtx_NOT (bmode, breg1), breg2));
9013 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, ior_n12, smode),
9014 : gen_rtx_IOR (bmode, gen_rtx_NOT (bmode, breg1), breg2));
9015 24 : ASSERT_RTX_EQ (lowpart_subreg (bmode, xor_12_c, smode),
9016 : gen_rtx_XOR (bmode,
9017 : gen_rtx_XOR (bmode, breg1, breg2),
9018 : const1_rtx));
9019 : }
9020 24 : }
9021 :
9022 : /* Verify more simplifications of integer extension/truncation.
9023 : BMODE is wider than MMODE which is wider than SMODE. */
9024 :
9025 : static void
9026 16 : test_scalar_int_ext_ops2 (machine_mode bmode, machine_mode mmode,
9027 : machine_mode smode)
9028 : {
9029 16 : rtx breg = make_test_reg (bmode);
9030 16 : rtx mreg = make_test_reg (mmode);
9031 16 : rtx sreg = make_test_reg (smode);
9032 :
9033 : /* Check truncate of truncate. */
9034 16 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
9035 : simplify_gen_unary (TRUNCATE, mmode,
9036 : breg, bmode),
9037 : mmode),
9038 : simplify_gen_unary (TRUNCATE, smode, breg, bmode));
9039 :
9040 : /* Check extension of extension. */
9041 16 : ASSERT_RTX_EQ (simplify_gen_unary (ZERO_EXTEND, bmode,
9042 : simplify_gen_unary (ZERO_EXTEND, mmode,
9043 : sreg, smode),
9044 : mmode),
9045 : simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
9046 16 : ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
9047 : simplify_gen_unary (SIGN_EXTEND, mmode,
9048 : sreg, smode),
9049 : mmode),
9050 : simplify_gen_unary (SIGN_EXTEND, bmode, sreg, smode));
9051 16 : ASSERT_RTX_EQ (simplify_gen_unary (SIGN_EXTEND, bmode,
9052 : simplify_gen_unary (ZERO_EXTEND, mmode,
9053 : sreg, smode),
9054 : mmode),
9055 : simplify_gen_unary (ZERO_EXTEND, bmode, sreg, smode));
9056 :
9057 : /* Check truncation of extension. */
9058 16 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
9059 : simplify_gen_unary (ZERO_EXTEND, bmode,
9060 : mreg, mmode),
9061 : bmode),
9062 : simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
9063 16 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
9064 : simplify_gen_unary (SIGN_EXTEND, bmode,
9065 : mreg, mmode),
9066 : bmode),
9067 : simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
9068 16 : ASSERT_RTX_EQ (simplify_gen_unary (TRUNCATE, smode,
9069 : lowpart_subreg (bmode, mreg, mmode),
9070 : bmode),
9071 : simplify_gen_unary (TRUNCATE, smode, mreg, mmode));
9072 16 : }
9073 :
9074 : /* Test comparisons of comparisons, with the inner comparisons being
9075 : between values of mode MODE2 and producing results of mode MODE1,
9076 : and with the outer comparisons producing results of mode MODE0. */
9077 :
9078 : static void
9079 4 : test_comparisons (machine_mode mode0, machine_mode mode1, machine_mode mode2)
9080 : {
9081 4 : rtx reg0 = make_test_reg (mode2);
9082 4 : rtx reg1 = make_test_reg (mode2);
9083 :
9084 4 : static const rtx_code codes[] = {
9085 : EQ, NE, LT, LTU, LE, LEU, GE, GEU, GT, GTU
9086 : };
9087 4 : constexpr auto num_codes = ARRAY_SIZE (codes);
9088 4 : rtx cmps[num_codes];
9089 4 : rtx vals[] = { constm1_rtx, const0_rtx, const1_rtx };
9090 :
9091 44 : for (unsigned int i = 0; i < num_codes; ++i)
9092 40 : cmps[i] = gen_rtx_fmt_ee (codes[i], mode1, reg0, reg1);
9093 :
9094 44 : for (auto code : codes)
9095 440 : for (unsigned int i0 = 0; i0 < num_codes; ++i0)
9096 4400 : for (unsigned int i1 = 0; i1 < num_codes; ++i1)
9097 : {
9098 4000 : rtx cmp_res = simplify_relational_operation (code, mode0, mode1,
9099 : cmps[i0], cmps[i1]);
9100 4000 : if (i0 >= 2 && i1 >= 2 && (i0 ^ i1) & 1)
9101 1280 : ASSERT_TRUE (cmp_res == NULL_RTX);
9102 : else
9103 : {
9104 2720 : ASSERT_TRUE (cmp_res != NULL_RTX
9105 : && (CONSTANT_P (cmp_res)
9106 : || (COMPARISON_P (cmp_res)
9107 : && GET_MODE (cmp_res) == mode0
9108 : && REG_P (XEXP (cmp_res, 0))
9109 : && REG_P (XEXP (cmp_res, 1)))));
9110 10880 : for (rtx reg0_val : vals)
9111 32640 : for (rtx reg1_val : vals)
9112 : {
9113 24480 : rtx val0 = simplify_const_relational_operation
9114 24480 : (codes[i0], mode1, reg0_val, reg1_val);
9115 24480 : rtx val1 = simplify_const_relational_operation
9116 24480 : (codes[i1], mode1, reg0_val, reg1_val);
9117 24480 : rtx val = simplify_const_relational_operation
9118 24480 : (code, mode0, val0, val1);
9119 24480 : rtx folded = cmp_res;
9120 24480 : if (COMPARISON_P (cmp_res))
9121 16704 : folded = simplify_const_relational_operation
9122 16704 : (GET_CODE (cmp_res), mode0,
9123 16704 : XEXP (cmp_res, 0) == reg0 ? reg0_val : reg1_val,
9124 16704 : XEXP (cmp_res, 1) == reg0 ? reg0_val : reg1_val);
9125 24480 : ASSERT_RTX_EQ (val, folded);
9126 : }
9127 : }
9128 : }
9129 4 : }
9130 :
9131 :
9132 : /* Verify some simplifications involving scalar expressions. */
9133 :
9134 : static void
9135 4 : test_scalar_ops ()
9136 : {
9137 500 : for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
9138 : {
9139 496 : machine_mode mode = (machine_mode) i;
9140 496 : if (SCALAR_INT_MODE_P (mode) && mode != BImode)
9141 40 : test_scalar_int_ops (mode);
9142 : }
9143 :
9144 4 : test_scalar_int_ext_ops (HImode, QImode);
9145 4 : test_scalar_int_ext_ops (SImode, QImode);
9146 4 : test_scalar_int_ext_ops (SImode, HImode);
9147 4 : test_scalar_int_ext_ops (DImode, QImode);
9148 4 : test_scalar_int_ext_ops (DImode, HImode);
9149 4 : test_scalar_int_ext_ops (DImode, SImode);
9150 :
9151 4 : test_scalar_int_ext_ops2 (SImode, HImode, QImode);
9152 4 : test_scalar_int_ext_ops2 (DImode, HImode, QImode);
9153 4 : test_scalar_int_ext_ops2 (DImode, SImode, QImode);
9154 4 : test_scalar_int_ext_ops2 (DImode, SImode, HImode);
9155 :
9156 4 : test_comparisons (QImode, HImode, SImode);
9157 4 : }
9158 :
9159 : /* Test vector simplifications involving VEC_DUPLICATE in which the
9160 : operands and result have vector mode MODE. SCALAR_REG is a pseudo
9161 : register that holds one element of MODE. */
9162 :
9163 : static void
9164 224 : test_vector_ops_duplicate (machine_mode mode, rtx scalar_reg)
9165 : {
9166 224 : scalar_mode inner_mode = GET_MODE_INNER (mode);
9167 224 : rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
9168 448 : poly_uint64 nunits = GET_MODE_NUNITS (mode);
9169 224 : if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
9170 : {
9171 : /* Test some simple unary cases with VEC_DUPLICATE arguments. */
9172 124 : rtx not_scalar_reg = gen_rtx_NOT (inner_mode, scalar_reg);
9173 124 : rtx duplicate_not = gen_rtx_VEC_DUPLICATE (mode, not_scalar_reg);
9174 124 : ASSERT_RTX_EQ (duplicate,
9175 : simplify_unary_operation (NOT, mode,
9176 : duplicate_not, mode));
9177 :
9178 124 : rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
9179 124 : rtx duplicate_neg = gen_rtx_VEC_DUPLICATE (mode, neg_scalar_reg);
9180 124 : ASSERT_RTX_EQ (duplicate,
9181 : simplify_unary_operation (NEG, mode,
9182 : duplicate_neg, mode));
9183 :
9184 : /* Test some simple binary cases with VEC_DUPLICATE arguments. */
9185 124 : ASSERT_RTX_EQ (duplicate,
9186 : simplify_binary_operation (PLUS, mode, duplicate,
9187 : CONST0_RTX (mode)));
9188 :
9189 124 : ASSERT_RTX_EQ (duplicate,
9190 : simplify_binary_operation (MINUS, mode, duplicate,
9191 : CONST0_RTX (mode)));
9192 :
9193 124 : ASSERT_RTX_PTR_EQ (CONST0_RTX (mode),
9194 : simplify_binary_operation (MINUS, mode, duplicate,
9195 : duplicate));
9196 : }
9197 :
9198 : /* Test a scalar VEC_SELECT of a VEC_DUPLICATE. */
9199 224 : rtx zero_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, const0_rtx));
9200 224 : ASSERT_RTX_PTR_EQ (scalar_reg,
9201 : simplify_binary_operation (VEC_SELECT, inner_mode,
9202 : duplicate, zero_par));
9203 :
9204 224 : unsigned HOST_WIDE_INT const_nunits;
9205 224 : if (nunits.is_constant (&const_nunits))
9206 : {
9207 : /* And again with the final element. */
9208 224 : rtx last_index = gen_int_mode (const_nunits - 1, word_mode);
9209 224 : rtx last_par = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (1, last_index));
9210 224 : ASSERT_RTX_PTR_EQ (scalar_reg,
9211 : simplify_binary_operation (VEC_SELECT, inner_mode,
9212 : duplicate, last_par));
9213 :
9214 : /* Test a scalar subreg of a VEC_MERGE of a VEC_DUPLICATE. */
9215 : /* Skip this test for vectors of booleans, because offset is in bytes,
9216 : while vec_merge indices are in elements (usually bits). */
9217 224 : if (GET_MODE_CLASS (mode) != MODE_VECTOR_BOOL)
9218 : {
9219 224 : rtx vector_reg = make_test_reg (mode);
9220 3508 : for (unsigned HOST_WIDE_INT i = 0; i < const_nunits; i++)
9221 : {
9222 3288 : if (i >= HOST_BITS_PER_WIDE_INT)
9223 : break;
9224 3284 : rtx mask = GEN_INT ((HOST_WIDE_INT_1U << i) | (i + 1));
9225 3284 : rtx vm = gen_rtx_VEC_MERGE (mode, duplicate, vector_reg, mask);
9226 6568 : poly_uint64 offset = i * GET_MODE_SIZE (inner_mode);
9227 :
9228 3284 : ASSERT_RTX_EQ (scalar_reg,
9229 : simplify_gen_subreg (inner_mode, vm,
9230 : mode, offset));
9231 : }
9232 : }
9233 : }
9234 :
9235 : /* Test a scalar subreg of a VEC_DUPLICATE. */
9236 224 : poly_uint64 offset = subreg_lowpart_offset (inner_mode, mode);
9237 224 : ASSERT_RTX_EQ (scalar_reg,
9238 : simplify_gen_subreg (inner_mode, duplicate,
9239 : mode, offset));
9240 :
9241 224 : machine_mode narrower_mode;
9242 224 : if (maybe_ne (nunits, 2U)
9243 184 : && multiple_p (nunits, 2)
9244 396 : && mode_for_vector (inner_mode, 2).exists (&narrower_mode)
9245 396 : && VECTOR_MODE_P (narrower_mode))
9246 : {
9247 : /* Test VEC_DUPLICATE of a vector. */
9248 172 : rtx_vector_builder nbuilder (narrower_mode, 2, 1);
9249 172 : nbuilder.quick_push (const0_rtx);
9250 172 : nbuilder.quick_push (const1_rtx);
9251 172 : rtx_vector_builder builder (mode, 2, 1);
9252 172 : builder.quick_push (const0_rtx);
9253 172 : builder.quick_push (const1_rtx);
9254 172 : ASSERT_RTX_EQ (builder.build (),
9255 : simplify_unary_operation (VEC_DUPLICATE, mode,
9256 : nbuilder.build (),
9257 : narrower_mode));
9258 :
9259 : /* Test VEC_SELECT of a vector. */
9260 172 : rtx vec_par
9261 172 : = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, const1_rtx, const0_rtx));
9262 172 : rtx narrower_duplicate
9263 172 : = gen_rtx_VEC_DUPLICATE (narrower_mode, scalar_reg);
9264 172 : ASSERT_RTX_EQ (narrower_duplicate,
9265 : simplify_binary_operation (VEC_SELECT, narrower_mode,
9266 : duplicate, vec_par));
9267 :
9268 : /* Test a vector subreg of a VEC_DUPLICATE. */
9269 172 : poly_uint64 offset = subreg_lowpart_offset (narrower_mode, mode);
9270 172 : ASSERT_RTX_EQ (narrower_duplicate,
9271 : simplify_gen_subreg (narrower_mode, duplicate,
9272 : mode, offset));
9273 172 : }
9274 224 : }
9275 :
9276 : /* Test vector simplifications involving VEC_SERIES in which the
9277 : operands and result have vector mode MODE. SCALAR_REG is a pseudo
9278 : register that holds one element of MODE. */
9279 :
9280 : static void
9281 92 : test_vector_ops_series (machine_mode mode, rtx scalar_reg)
9282 : {
9283 : /* Test unary cases with VEC_SERIES arguments. */
9284 92 : scalar_mode inner_mode = GET_MODE_INNER (mode);
9285 92 : rtx duplicate = gen_rtx_VEC_DUPLICATE (mode, scalar_reg);
9286 92 : rtx neg_scalar_reg = gen_rtx_NEG (inner_mode, scalar_reg);
9287 92 : rtx series_0_r = gen_rtx_VEC_SERIES (mode, const0_rtx, scalar_reg);
9288 92 : rtx series_0_nr = gen_rtx_VEC_SERIES (mode, const0_rtx, neg_scalar_reg);
9289 92 : rtx series_nr_1 = gen_rtx_VEC_SERIES (mode, neg_scalar_reg, const1_rtx);
9290 92 : rtx series_r_m1 = gen_rtx_VEC_SERIES (mode, scalar_reg, constm1_rtx);
9291 92 : rtx series_r_r = gen_rtx_VEC_SERIES (mode, scalar_reg, scalar_reg);
9292 92 : rtx series_nr_nr = gen_rtx_VEC_SERIES (mode, neg_scalar_reg,
9293 : neg_scalar_reg);
9294 92 : ASSERT_RTX_EQ (series_0_r,
9295 : simplify_unary_operation (NEG, mode, series_0_nr, mode));
9296 92 : ASSERT_RTX_EQ (series_r_m1,
9297 : simplify_unary_operation (NEG, mode, series_nr_1, mode));
9298 92 : ASSERT_RTX_EQ (series_r_r,
9299 : simplify_unary_operation (NEG, mode, series_nr_nr, mode));
9300 :
9301 : /* Test that a VEC_SERIES with a zero step is simplified away. */
9302 92 : ASSERT_RTX_EQ (duplicate,
9303 : simplify_binary_operation (VEC_SERIES, mode,
9304 : scalar_reg, const0_rtx));
9305 :
9306 : /* Test PLUS and MINUS with VEC_SERIES. */
9307 92 : rtx series_0_1 = gen_const_vec_series (mode, const0_rtx, const1_rtx);
9308 92 : rtx series_0_m1 = gen_const_vec_series (mode, const0_rtx, constm1_rtx);
9309 92 : rtx series_r_1 = gen_rtx_VEC_SERIES (mode, scalar_reg, const1_rtx);
9310 92 : ASSERT_RTX_EQ (series_r_r,
9311 : simplify_binary_operation (PLUS, mode, series_0_r,
9312 : duplicate));
9313 92 : ASSERT_RTX_EQ (series_r_1,
9314 : simplify_binary_operation (PLUS, mode, duplicate,
9315 : series_0_1));
9316 92 : ASSERT_RTX_EQ (series_r_m1,
9317 : simplify_binary_operation (PLUS, mode, duplicate,
9318 : series_0_m1));
9319 92 : ASSERT_RTX_EQ (series_0_r,
9320 : simplify_binary_operation (MINUS, mode, series_r_r,
9321 : duplicate));
9322 92 : ASSERT_RTX_EQ (series_r_m1,
9323 : simplify_binary_operation (MINUS, mode, duplicate,
9324 : series_0_1));
9325 92 : ASSERT_RTX_EQ (series_r_1,
9326 : simplify_binary_operation (MINUS, mode, duplicate,
9327 : series_0_m1));
9328 92 : ASSERT_RTX_EQ (series_0_m1,
9329 : simplify_binary_operation (VEC_SERIES, mode, const0_rtx,
9330 : constm1_rtx));
9331 :
9332 : /* Test NEG on constant vector series. */
9333 92 : ASSERT_RTX_EQ (series_0_m1,
9334 : simplify_unary_operation (NEG, mode, series_0_1, mode));
9335 92 : ASSERT_RTX_EQ (series_0_1,
9336 : simplify_unary_operation (NEG, mode, series_0_m1, mode));
9337 :
9338 : /* Test PLUS and MINUS on constant vector series. */
9339 92 : rtx scalar2 = gen_int_mode (2, inner_mode);
9340 92 : rtx scalar3 = gen_int_mode (3, inner_mode);
9341 92 : rtx series_1_1 = gen_const_vec_series (mode, const1_rtx, const1_rtx);
9342 92 : rtx series_0_2 = gen_const_vec_series (mode, const0_rtx, scalar2);
9343 92 : rtx series_1_3 = gen_const_vec_series (mode, const1_rtx, scalar3);
9344 92 : ASSERT_RTX_EQ (series_1_1,
9345 : simplify_binary_operation (PLUS, mode, series_0_1,
9346 : CONST1_RTX (mode)));
9347 92 : ASSERT_RTX_EQ (series_0_m1,
9348 : simplify_binary_operation (PLUS, mode, CONST0_RTX (mode),
9349 : series_0_m1));
9350 92 : ASSERT_RTX_EQ (series_1_3,
9351 : simplify_binary_operation (PLUS, mode, series_1_1,
9352 : series_0_2));
9353 92 : ASSERT_RTX_EQ (series_0_1,
9354 : simplify_binary_operation (MINUS, mode, series_1_1,
9355 : CONST1_RTX (mode)));
9356 92 : ASSERT_RTX_EQ (series_1_1,
9357 : simplify_binary_operation (MINUS, mode, CONST1_RTX (mode),
9358 : series_0_m1));
9359 92 : ASSERT_RTX_EQ (series_1_1,
9360 : simplify_binary_operation (MINUS, mode, series_1_3,
9361 : series_0_2));
9362 :
9363 : /* Test MULT between constant vectors. */
9364 92 : rtx vec2 = gen_const_vec_duplicate (mode, scalar2);
9365 92 : rtx vec3 = gen_const_vec_duplicate (mode, scalar3);
9366 92 : rtx scalar9 = gen_int_mode (9, inner_mode);
9367 92 : rtx series_3_9 = gen_const_vec_series (mode, scalar3, scalar9);
9368 92 : ASSERT_RTX_EQ (series_0_2,
9369 : simplify_binary_operation (MULT, mode, series_0_1, vec2));
9370 92 : ASSERT_RTX_EQ (series_3_9,
9371 : simplify_binary_operation (MULT, mode, vec3, series_1_3));
9372 92 : if (!GET_MODE_NUNITS (mode).is_constant ())
9373 : ASSERT_FALSE (simplify_binary_operation (MULT, mode, series_0_1,
9374 : series_0_1));
9375 :
9376 : /* Test ASHIFT between constant vectors. */
9377 92 : ASSERT_RTX_EQ (series_0_2,
9378 : simplify_binary_operation (ASHIFT, mode, series_0_1,
9379 : CONST1_RTX (mode)));
9380 92 : if (!GET_MODE_NUNITS (mode).is_constant ())
9381 : ASSERT_FALSE (simplify_binary_operation (ASHIFT, mode, CONST1_RTX (mode),
9382 : series_0_1));
9383 92 : }
9384 :
9385 : static rtx
9386 3136 : simplify_merge_mask (rtx x, rtx mask, int op)
9387 : {
9388 0 : return simplify_context ().simplify_merge_mask (x, mask, op);
9389 : }
9390 :
9391 : /* Verify simplify_merge_mask works correctly. */
9392 :
9393 : static void
9394 224 : test_vec_merge (machine_mode mode)
9395 : {
9396 224 : rtx op0 = make_test_reg (mode);
9397 224 : rtx op1 = make_test_reg (mode);
9398 224 : rtx op2 = make_test_reg (mode);
9399 224 : rtx op3 = make_test_reg (mode);
9400 224 : rtx op4 = make_test_reg (mode);
9401 224 : rtx op5 = make_test_reg (mode);
9402 224 : rtx mask1 = make_test_reg (SImode);
9403 224 : rtx mask2 = make_test_reg (SImode);
9404 224 : rtx vm1 = gen_rtx_VEC_MERGE (mode, op0, op1, mask1);
9405 224 : rtx vm2 = gen_rtx_VEC_MERGE (mode, op2, op3, mask1);
9406 224 : rtx vm3 = gen_rtx_VEC_MERGE (mode, op4, op5, mask1);
9407 :
9408 : /* Simple vec_merge. */
9409 224 : ASSERT_EQ (op0, simplify_merge_mask (vm1, mask1, 0));
9410 224 : ASSERT_EQ (op1, simplify_merge_mask (vm1, mask1, 1));
9411 224 : ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 0));
9412 224 : ASSERT_EQ (NULL_RTX, simplify_merge_mask (vm1, mask2, 1));
9413 :
9414 : /* Nested vec_merge.
9415 : It's tempting to make this simplify right down to opN, but we don't
9416 : because all the simplify_* functions assume that the operands have
9417 : already been simplified. */
9418 224 : rtx nvm = gen_rtx_VEC_MERGE (mode, vm1, vm2, mask1);
9419 224 : ASSERT_EQ (vm1, simplify_merge_mask (nvm, mask1, 0));
9420 224 : ASSERT_EQ (vm2, simplify_merge_mask (nvm, mask1, 1));
9421 :
9422 : /* Intermediate unary op. */
9423 224 : rtx unop = gen_rtx_NOT (mode, vm1);
9424 224 : ASSERT_RTX_EQ (gen_rtx_NOT (mode, op0),
9425 : simplify_merge_mask (unop, mask1, 0));
9426 224 : ASSERT_RTX_EQ (gen_rtx_NOT (mode, op1),
9427 : simplify_merge_mask (unop, mask1, 1));
9428 :
9429 : /* Intermediate binary op. */
9430 224 : rtx binop = gen_rtx_PLUS (mode, vm1, vm2);
9431 224 : ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op0, op2),
9432 : simplify_merge_mask (binop, mask1, 0));
9433 224 : ASSERT_RTX_EQ (gen_rtx_PLUS (mode, op1, op3),
9434 : simplify_merge_mask (binop, mask1, 1));
9435 :
9436 : /* Intermediate ternary op. */
9437 224 : rtx tenop = gen_rtx_FMA (mode, vm1, vm2, vm3);
9438 224 : ASSERT_RTX_EQ (gen_rtx_FMA (mode, op0, op2, op4),
9439 : simplify_merge_mask (tenop, mask1, 0));
9440 224 : ASSERT_RTX_EQ (gen_rtx_FMA (mode, op1, op3, op5),
9441 : simplify_merge_mask (tenop, mask1, 1));
9442 :
9443 : /* Side effects. */
9444 224 : rtx badop0 = gen_rtx_PRE_INC (mode, op0);
9445 224 : rtx badvm = gen_rtx_VEC_MERGE (mode, badop0, op1, mask1);
9446 224 : ASSERT_EQ (badop0, simplify_merge_mask (badvm, mask1, 0));
9447 224 : ASSERT_EQ (NULL_RTX, simplify_merge_mask (badvm, mask1, 1));
9448 :
9449 : /* Called indirectly. */
9450 224 : ASSERT_RTX_EQ (gen_rtx_VEC_MERGE (mode, op0, op3, mask1),
9451 : simplify_rtx (nvm));
9452 224 : }
9453 :
9454 : /* Test that vector rotate formation works at RTL level. Try various
9455 : combinations of (REG << C) [|,^,+] (REG >> (<bitwidth> - C)). */
9456 :
9457 : static void
9458 92 : test_vector_rotate (rtx reg)
9459 : {
9460 92 : machine_mode mode = GET_MODE (reg);
9461 92 : unsigned bitwidth = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
9462 92 : rtx plus_rtx = gen_rtx_PLUS (mode, reg, reg);
9463 92 : rtx lshftrt_amnt = GEN_INT (bitwidth - 1);
9464 92 : lshftrt_amnt = gen_const_vec_duplicate (mode, lshftrt_amnt);
9465 92 : rtx lshiftrt_rtx = gen_rtx_LSHIFTRT (mode, reg, lshftrt_amnt);
9466 92 : rtx rotate_rtx = gen_rtx_ROTATE (mode, reg, CONST1_RTX (mode));
9467 : /* Test explicitly the case where ASHIFT (x, 1) is a PLUS (x, x). */
9468 92 : ASSERT_RTX_EQ (rotate_rtx,
9469 : simplify_rtx (gen_rtx_IOR (mode, plus_rtx, lshiftrt_rtx)));
9470 92 : ASSERT_RTX_EQ (rotate_rtx,
9471 : simplify_rtx (gen_rtx_XOR (mode, plus_rtx, lshiftrt_rtx)));
9472 92 : ASSERT_RTX_EQ (rotate_rtx,
9473 : simplify_rtx (gen_rtx_PLUS (mode, plus_rtx, lshiftrt_rtx)));
9474 :
9475 : /* Don't go through every possible rotate amount to save execution time.
9476 : Multiple of BITS_PER_UNIT amounts could conceivably be simplified to
9477 : other bswap operations sometimes. Go through just the odd amounts. */
9478 1380 : for (unsigned i = 3; i < bitwidth - 2; i += 2)
9479 : {
9480 1288 : rtx rot_amnt = gen_const_vec_duplicate (mode, GEN_INT (i));
9481 1288 : rtx ashift_rtx = gen_rtx_ASHIFT (mode, reg, rot_amnt);
9482 1288 : lshftrt_amnt = gen_const_vec_duplicate (mode, GEN_INT (bitwidth - i));
9483 1288 : lshiftrt_rtx = gen_rtx_LSHIFTRT (mode, reg, lshftrt_amnt);
9484 1288 : rotate_rtx = gen_rtx_ROTATE (mode, reg, rot_amnt);
9485 1288 : ASSERT_RTX_EQ (rotate_rtx,
9486 : simplify_rtx (gen_rtx_IOR (mode, ashift_rtx, lshiftrt_rtx)));
9487 1288 : ASSERT_RTX_EQ (rotate_rtx,
9488 : simplify_rtx (gen_rtx_XOR (mode, ashift_rtx, lshiftrt_rtx)));
9489 1288 : ASSERT_RTX_EQ (rotate_rtx,
9490 : simplify_rtx (gen_rtx_PLUS (mode, ashift_rtx, lshiftrt_rtx)));
9491 : }
9492 92 : }
9493 :
9494 : /* Test subregs of integer vector constant X, trying elements in
9495 : the range [ELT_BIAS, ELT_BIAS + constant_lower_bound (NELTS)),
9496 : where NELTS is the number of elements in X. Subregs involving
9497 : elements [ELT_BIAS, ELT_BIAS + FIRST_VALID) are expected to fail. */
9498 :
9499 : static void
9500 276 : test_vector_subregs_modes (rtx x, poly_uint64 elt_bias = 0,
9501 : unsigned int first_valid = 0)
9502 : {
9503 276 : machine_mode inner_mode = GET_MODE (x);
9504 276 : scalar_mode int_mode = GET_MODE_INNER (inner_mode);
9505 :
9506 34500 : for (unsigned int modei = 0; modei < NUM_MACHINE_MODES; ++modei)
9507 : {
9508 34224 : machine_mode outer_mode = (machine_mode) modei;
9509 34224 : if (!VECTOR_MODE_P (outer_mode))
9510 18768 : continue;
9511 :
9512 15456 : unsigned int outer_nunits;
9513 15456 : if (GET_MODE_INNER (outer_mode) == int_mode
9514 1932 : && GET_MODE_NUNITS (outer_mode).is_constant (&outer_nunits)
9515 20412 : && multiple_p (GET_MODE_NUNITS (inner_mode), outer_nunits))
9516 : {
9517 : /* Test subregs in which the outer mode is a smaller,
9518 : constant-sized vector of the same element type. */
9519 1092 : unsigned int limit
9520 1092 : = constant_lower_bound (GET_MODE_NUNITS (inner_mode));
9521 8028 : for (unsigned int elt = 0; elt < limit; elt += outer_nunits)
9522 : {
9523 6936 : rtx expected = NULL_RTX;
9524 6936 : if (elt >= first_valid)
9525 : {
9526 6936 : rtx_vector_builder builder (outer_mode, outer_nunits, 1);
9527 39768 : for (unsigned int i = 0; i < outer_nunits; ++i)
9528 32832 : builder.quick_push (CONST_VECTOR_ELT (x, elt + i));
9529 6936 : expected = builder.build ();
9530 6936 : }
9531 13872 : poly_uint64 byte = (elt_bias + elt) * GET_MODE_SIZE (int_mode);
9532 6936 : ASSERT_RTX_EQ (expected,
9533 : simplify_subreg (outer_mode, x,
9534 : inner_mode, byte));
9535 : }
9536 : }
9537 28728 : else if (known_eq (GET_MODE_SIZE (outer_mode),
9538 : GET_MODE_SIZE (inner_mode))
9539 2040 : && known_eq (elt_bias, 0U)
9540 2040 : && (GET_MODE_CLASS (outer_mode) != MODE_VECTOR_BOOL
9541 0 : || known_eq (GET_MODE_BITSIZE (outer_mode),
9542 : GET_MODE_NUNITS (outer_mode)))
9543 2040 : && (!FLOAT_MODE_P (outer_mode)
9544 15876 : || (FLOAT_MODE_FORMAT (outer_mode)->ieee_bits
9545 1104 : == GET_MODE_UNIT_PRECISION (outer_mode)))
9546 14364 : && (GET_MODE_SIZE (inner_mode).is_constant ()
9547 : || !CONST_VECTOR_STEPPED_P (x)))
9548 : {
9549 : /* Try converting to OUTER_MODE and back. */
9550 1800 : rtx outer_x = simplify_subreg (outer_mode, x, inner_mode, 0);
9551 1800 : ASSERT_TRUE (outer_x != NULL_RTX);
9552 1800 : ASSERT_RTX_EQ (x, simplify_subreg (inner_mode, outer_x,
9553 : outer_mode, 0));
9554 : }
9555 : }
9556 :
9557 276 : if (BYTES_BIG_ENDIAN == WORDS_BIG_ENDIAN)
9558 : {
9559 : /* Test each byte in the element range. */
9560 276 : unsigned int limit
9561 276 : = constant_lower_bound (GET_MODE_SIZE (inner_mode));
9562 14604 : for (unsigned int i = 0; i < limit; ++i)
9563 : {
9564 14328 : unsigned int elt = i / GET_MODE_SIZE (int_mode);
9565 14328 : rtx expected = NULL_RTX;
9566 14328 : if (elt >= first_valid)
9567 : {
9568 14328 : unsigned int byte_shift = i % GET_MODE_SIZE (int_mode);
9569 14328 : if (BYTES_BIG_ENDIAN)
9570 : byte_shift = GET_MODE_SIZE (int_mode) - byte_shift - 1;
9571 14328 : rtx_mode_t vec_elt (CONST_VECTOR_ELT (x, elt), int_mode);
9572 14328 : wide_int shifted_elt
9573 14328 : = wi::lrshift (vec_elt, byte_shift * BITS_PER_UNIT);
9574 14328 : expected = immed_wide_int_const (shifted_elt, QImode);
9575 14328 : }
9576 28656 : poly_uint64 byte = elt_bias * GET_MODE_SIZE (int_mode) + i;
9577 14328 : ASSERT_RTX_EQ (expected,
9578 : simplify_subreg (QImode, x, inner_mode, byte));
9579 : }
9580 : }
9581 276 : }
9582 :
9583 : /* Test constant subregs of integer vector mode INNER_MODE, using 1
9584 : element per pattern. */
9585 :
9586 : static void
9587 92 : test_vector_subregs_repeating (machine_mode inner_mode)
9588 : {
9589 184 : poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
9590 92 : unsigned int min_nunits = constant_lower_bound (nunits);
9591 92 : scalar_mode int_mode = GET_MODE_INNER (inner_mode);
9592 92 : unsigned int count = gcd (min_nunits, 8);
9593 :
9594 92 : rtx_vector_builder builder (inner_mode, count, 1);
9595 684 : for (unsigned int i = 0; i < count; ++i)
9596 592 : builder.quick_push (gen_int_mode (8 - i, int_mode));
9597 92 : rtx x = builder.build ();
9598 :
9599 92 : test_vector_subregs_modes (x);
9600 92 : if (!nunits.is_constant ())
9601 : test_vector_subregs_modes (x, nunits - min_nunits);
9602 92 : }
9603 :
9604 : /* Test constant subregs of integer vector mode INNER_MODE, using 2
9605 : elements per pattern. */
9606 :
9607 : static void
9608 92 : test_vector_subregs_fore_back (machine_mode inner_mode)
9609 : {
9610 184 : poly_uint64 nunits = GET_MODE_NUNITS (inner_mode);
9611 92 : unsigned int min_nunits = constant_lower_bound (nunits);
9612 92 : scalar_mode int_mode = GET_MODE_INNER (inner_mode);
9613 92 : unsigned int count = gcd (min_nunits, 4);
9614 :
9615 92 : rtx_vector_builder builder (inner_mode, count, 2);
9616 444 : for (unsigned int i = 0; i < count; ++i)
9617 352 : builder.quick_push (gen_int_mode (i, int_mode));
9618 444 : for (unsigned int i = 0; i < count; ++i)
9619 352 : builder.quick_push (gen_int_mode (-1 - (int) i, int_mode));
9620 92 : rtx x = builder.build ();
9621 :
9622 92 : test_vector_subregs_modes (x);
9623 92 : if (!nunits.is_constant ())
9624 : test_vector_subregs_modes (x, nunits - min_nunits, count);
9625 92 : }
9626 :
9627 : /* Test constant subregs of integer vector mode INNER_MODE, using 3
9628 : elements per pattern. */
9629 :
9630 : static void
9631 92 : test_vector_subregs_stepped (machine_mode inner_mode)
9632 : {
9633 : /* Build { 0, 1, 2, 3, ... }. */
9634 92 : scalar_mode int_mode = GET_MODE_INNER (inner_mode);
9635 92 : rtx_vector_builder builder (inner_mode, 1, 3);
9636 368 : for (unsigned int i = 0; i < 3; ++i)
9637 276 : builder.quick_push (gen_int_mode (i, int_mode));
9638 92 : rtx x = builder.build ();
9639 :
9640 92 : test_vector_subregs_modes (x);
9641 92 : }
9642 :
9643 : /* Test constant subregs of integer vector mode INNER_MODE. */
9644 :
9645 : static void
9646 92 : test_vector_subregs (machine_mode inner_mode)
9647 : {
9648 92 : test_vector_subregs_repeating (inner_mode);
9649 92 : test_vector_subregs_fore_back (inner_mode);
9650 92 : test_vector_subregs_stepped (inner_mode);
9651 92 : }
9652 :
9653 : /* Verify some simplifications involving vectors. */
9654 :
9655 : static void
9656 4 : test_vector_ops ()
9657 : {
9658 500 : for (unsigned int i = 0; i < NUM_MACHINE_MODES; ++i)
9659 : {
9660 496 : machine_mode mode = (machine_mode) i;
9661 496 : if (VECTOR_MODE_P (mode))
9662 : {
9663 448 : rtx scalar_reg = make_test_reg (GET_MODE_INNER (mode));
9664 224 : test_vector_ops_duplicate (mode, scalar_reg);
9665 224 : rtx vector_reg = make_test_reg (mode);
9666 224 : if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
9667 348 : && maybe_gt (GET_MODE_NUNITS (mode), 2))
9668 : {
9669 92 : test_vector_ops_series (mode, scalar_reg);
9670 92 : test_vector_subregs (mode);
9671 92 : test_vector_rotate (vector_reg);
9672 : }
9673 224 : test_vec_merge (mode);
9674 : }
9675 : }
9676 4 : }
9677 :
9678 : template<unsigned int N>
9679 : struct simplify_const_poly_int_tests
9680 : {
9681 : static void run ();
9682 : };
9683 :
9684 : template<>
9685 : struct simplify_const_poly_int_tests<1>
9686 : {
9687 : static void run () {}
9688 : };
9689 :
9690 : /* Test various CONST_POLY_INT properties. */
9691 :
9692 : template<unsigned int N>
9693 : void
9694 : simplify_const_poly_int_tests<N>::run ()
9695 : {
9696 : using poly_int64 = poly_int<N, HOST_WIDE_INT>;
9697 : rtx x1 = gen_int_mode (poly_int64 (1, 1), QImode);
9698 : rtx x2 = gen_int_mode (poly_int64 (-80, 127), QImode);
9699 : rtx x3 = gen_int_mode (poly_int64 (-79, -128), QImode);
9700 : rtx x4 = gen_int_mode (poly_int64 (5, 4), QImode);
9701 : rtx x5 = gen_int_mode (poly_int64 (30, 24), QImode);
9702 : rtx x6 = gen_int_mode (poly_int64 (20, 16), QImode);
9703 : rtx x7 = gen_int_mode (poly_int64 (7, 4), QImode);
9704 : rtx x8 = gen_int_mode (poly_int64 (30, 24), HImode);
9705 : rtx x9 = gen_int_mode (poly_int64 (-30, -24), HImode);
9706 : rtx x10 = gen_int_mode (poly_int64 (-31, -24), HImode);
9707 : rtx two = GEN_INT (2);
9708 : rtx six = GEN_INT (6);
9709 : poly_uint64 offset = subreg_lowpart_offset (QImode, HImode);
9710 :
9711 : /* These tests only try limited operation combinations. Fuller arithmetic
9712 : testing is done directly on poly_ints. */
9713 : ASSERT_EQ (simplify_unary_operation (NEG, HImode, x8, HImode), x9);
9714 : ASSERT_EQ (simplify_unary_operation (NOT, HImode, x8, HImode), x10);
9715 : ASSERT_EQ (simplify_unary_operation (TRUNCATE, QImode, x8, HImode), x5);
9716 : ASSERT_EQ (simplify_binary_operation (PLUS, QImode, x1, x2), x3);
9717 : ASSERT_EQ (simplify_binary_operation (MINUS, QImode, x3, x1), x2);
9718 : ASSERT_EQ (simplify_binary_operation (MULT, QImode, x4, six), x5);
9719 : ASSERT_EQ (simplify_binary_operation (MULT, QImode, six, x4), x5);
9720 : ASSERT_EQ (simplify_binary_operation (ASHIFT, QImode, x4, two), x6);
9721 : ASSERT_EQ (simplify_binary_operation (IOR, QImode, x4, two), x7);
9722 : ASSERT_EQ (simplify_subreg (HImode, x5, QImode, 0), x8);
9723 : ASSERT_EQ (simplify_subreg (QImode, x8, HImode, offset), x5);
9724 : }
9725 :
9726 : /* Run all of the selftests within this file. */
9727 :
9728 : void
9729 4 : simplify_rtx_cc_tests ()
9730 : {
9731 4 : test_scalar_ops ();
9732 4 : test_vector_ops ();
9733 4 : simplify_const_poly_int_tests<NUM_POLY_INT_COEFFS>::run ();
9734 4 : }
9735 :
9736 : } // namespace selftest
9737 :
9738 : #endif /* CHECKING_P */
|