Line data Source code
1 : /* Medium-level subroutines: convert bit-field store and extract
2 : and shifts, multiplies and divides to rtl instructions.
3 : Copyright (C) 1987-2026 Free Software Foundation, Inc.
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it under
8 : the terms of the GNU General Public License as published by the Free
9 : Software Foundation; either version 3, or (at your option) any later
10 : version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 : WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : /* Work around tree-optimization/91825. */
22 : #pragma GCC diagnostic warning "-Wmaybe-uninitialized"
23 :
24 : #include "config.h"
25 : #include "system.h"
26 : #include "coretypes.h"
27 : #include "backend.h"
28 : #include "target.h"
29 : #include "rtl.h"
30 : #include "tree.h"
31 : #include "predict.h"
32 : #include "memmodel.h"
33 : #include "tm_p.h"
34 : #include "optabs.h"
35 : #include "expmed.h"
36 : #include "regs.h"
37 : #include "emit-rtl.h"
38 : #include "diagnostic-core.h"
39 : #include "fold-const.h"
40 : #include "stor-layout.h"
41 : #include "dojump.h"
42 : #include "explow.h"
43 : #include "expr.h"
44 : #include "langhooks.h"
45 : #include "tree-vector-builder.h"
46 : #include "recog.h"
47 :
48 : struct target_expmed default_target_expmed;
49 : #if SWITCHABLE_TARGET
50 : struct target_expmed *this_target_expmed = &default_target_expmed;
51 : #endif
52 :
53 : static bool store_integral_bit_field (rtx, opt_scalar_int_mode,
54 : unsigned HOST_WIDE_INT,
55 : unsigned HOST_WIDE_INT,
56 : poly_uint64, poly_uint64,
57 : machine_mode, rtx, bool, bool);
58 : static void store_fixed_bit_field (rtx, opt_scalar_int_mode,
59 : unsigned HOST_WIDE_INT,
60 : unsigned HOST_WIDE_INT,
61 : poly_uint64, poly_uint64,
62 : rtx, scalar_int_mode, bool);
63 : static void store_fixed_bit_field_1 (rtx, scalar_int_mode,
64 : unsigned HOST_WIDE_INT,
65 : unsigned HOST_WIDE_INT,
66 : rtx, scalar_int_mode, bool);
67 : static void store_split_bit_field (rtx, opt_scalar_int_mode,
68 : unsigned HOST_WIDE_INT,
69 : unsigned HOST_WIDE_INT,
70 : poly_uint64, poly_uint64,
71 : rtx, scalar_int_mode, bool);
72 : static rtx extract_integral_bit_field (rtx, opt_scalar_int_mode,
73 : unsigned HOST_WIDE_INT,
74 : unsigned HOST_WIDE_INT, int, rtx,
75 : machine_mode, machine_mode, bool, bool);
76 : static rtx extract_fixed_bit_field (machine_mode, rtx, opt_scalar_int_mode,
77 : unsigned HOST_WIDE_INT,
78 : unsigned HOST_WIDE_INT, rtx, int, bool);
79 : static rtx extract_fixed_bit_field_1 (machine_mode, rtx, scalar_int_mode,
80 : unsigned HOST_WIDE_INT,
81 : unsigned HOST_WIDE_INT, rtx, int, bool);
82 : static rtx lshift_value (machine_mode, unsigned HOST_WIDE_INT, int);
83 : static rtx extract_split_bit_field (rtx, opt_scalar_int_mode,
84 : unsigned HOST_WIDE_INT,
85 : unsigned HOST_WIDE_INT, int, bool);
86 : static void do_cmp_and_jump (rtx, rtx, enum rtx_code, machine_mode, rtx_code_label *);
87 : static rtx expand_smod_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
88 : static rtx expand_sdiv_pow2 (scalar_int_mode, rtx, HOST_WIDE_INT);
89 :
90 : /* Return a constant integer mask value of mode MODE with BITSIZE ones
91 : followed by BITPOS zeros, or the complement of that if COMPLEMENT.
92 : The mask is truncated if necessary to the width of mode MODE. The
93 : mask is zero-extended if BITSIZE+BITPOS is too small for MODE. */
94 :
95 : static inline rtx
96 226584 : mask_rtx (scalar_int_mode mode, int bitpos, int bitsize, bool complement)
97 : {
/* Build the (possibly complemented) mask as a wide_int at the precision
   of MODE, then wrap it in a CONST_INT/CONST_WIDE_INT rtx.  */
98 226584 : return immed_wide_int_const
99 226584 : (wi::shifted_mask (bitpos, bitsize, complement,
100 226584 : GET_MODE_PRECISION (mode)), mode);
101 : }
102 :
103 : /* Test whether a value is zero or a power of two. */
104 : #define EXACT_POWER_OF_2_OR_ZERO_P(x) \
105 : (((x) & ((x) - HOST_WIDE_INT_1U)) == 0)
106 :
/* Scratch RTL expressions used by init_expmed and its subroutines to
   probe, via set_src_cost, the cost of each kind of operation.  The
   modes of these nodes are overwritten repeatedly as each mode of
   interest is probed.  */
107 : struct init_expmed_rtl
108 : {
109 : rtx reg;
110 : rtx plus;
111 : rtx neg;
112 : rtx mult;
113 : rtx sdiv;
114 : rtx udiv;
115 : rtx sdiv_32;
116 : rtx smod_32;
117 : rtx wide_mult;
118 : rtx wide_lshr;
119 : rtx wide_trunc;
120 : rtx shift;
121 : rtx shift_mult;
122 : rtx shift_add;
123 : rtx shift_sub0;
124 : rtx shift_sub1;
125 : rtx zext;
126 : rtx trunc;
127 :
/* Constants filled in by init_expmed: pow2[m] is 1 << m and cint[m]
   is the integer m, for m in [1, MAX_BITS_PER_WORD).  */
128 : rtx pow2[MAX_BITS_PER_WORD];
129 : rtx cint[MAX_BITS_PER_WORD];
130 : };
131 :
/* Record in the cost tables the cost of converting a value from
   FROM_MODE to TO_MODE, optimizing for speed if SPEED.  The cost is
   probed with set_src_cost on a truncation or zero-extension of
   ALL->reg; ALL->reg's mode is changed temporarily and restored on
   exit.  */
132 : static void
133 30131920 : init_expmed_one_conv (struct init_expmed_rtl *all, scalar_int_mode to_mode,
134 : scalar_int_mode from_mode, bool speed)
135 : {
136 30131920 : int to_size, from_size;
137 30131920 : rtx which;
138 :
139 30131920 : to_size = GET_MODE_PRECISION (to_mode);
140 30131920 : from_size = GET_MODE_PRECISION (from_mode);
141 :
142 : /* Most partial integers have a precision less than the "full"
143 : integer it requires for storage. In case one doesn't, for
144 : comparison purposes here, reduce the bit size by one in that
145 : case. */
146 30131920 : if (GET_MODE_CLASS (to_mode) == MODE_PARTIAL_INT
147 30131920 : && pow2p_hwi (to_size))
148 6026384 : to_size --;
149 30131920 : if (GET_MODE_CLASS (from_mode) == MODE_PARTIAL_INT
150 30131920 : && pow2p_hwi (from_size))
151 0 : from_size --;
152 :
153 : /* Assume cost of zero-extend and sign-extend is the same. */
154 30131920 : which = (to_size < from_size ? all->trunc : all->zext);
155 :
156 30131920 : PUT_MODE (all->reg, from_mode);
157 30131920 : set_convert_cost (to_mode, from_mode, speed,
158 : set_src_cost (which, to_mode, speed));
159 : /* Restore all->reg's mode. */
160 30131920 : PUT_MODE (all->reg, to_mode);
161 30131920 : }
162 :
/* Compute and cache, for mode MODE and tuning SPEED, the costs of the
   basic operations: add, negate, multiply, signed/unsigned divide,
   shift by each amount, shift-and-add/sub forms, and — for scalar
   integer modes — conversion, widening-multiply and highpart-multiply
   costs.  */
163 : static void
164 17648696 : init_expmed_one_mode (struct init_expmed_rtl *all,
165 : machine_mode mode, int speed)
166 : {
167 17648696 : int m, n, mode_bitsize;
168 17648696 : machine_mode mode_from;
169 :
170 17648696 : mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);
171 :
/* Retarget every scratch expression at the mode under test.  */
172 17648696 : PUT_MODE (all->reg, mode);
173 17648696 : PUT_MODE (all->plus, mode);
174 17648696 : PUT_MODE (all->neg, mode);
175 17648696 : PUT_MODE (all->mult, mode);
176 17648696 : PUT_MODE (all->sdiv, mode);
177 17648696 : PUT_MODE (all->udiv, mode);
178 17648696 : PUT_MODE (all->sdiv_32, mode);
179 17648696 : PUT_MODE (all->smod_32, mode);
180 17648696 : PUT_MODE (all->wide_trunc, mode);
181 17648696 : PUT_MODE (all->shift, mode);
182 17648696 : PUT_MODE (all->shift_mult, mode);
183 17648696 : PUT_MODE (all->shift_add, mode);
184 17648696 : PUT_MODE (all->shift_sub0, mode);
185 17648696 : PUT_MODE (all->shift_sub1, mode);
186 17648696 : PUT_MODE (all->zext, mode);
187 17648696 : PUT_MODE (all->trunc, mode);
188 :
189 17648696 : set_add_cost (speed, mode, set_src_cost (all->plus, mode, speed));
190 17648696 : set_neg_cost (speed, mode, set_src_cost (all->neg, mode, speed));
191 17648696 : set_mul_cost (speed, mode, set_src_cost (all->mult, mode, speed));
192 17648696 : set_sdiv_cost (speed, mode, set_src_cost (all->sdiv, mode, speed));
193 17648696 : set_udiv_cost (speed, mode, set_src_cost (all->udiv, mode, speed));
194 :
/* Record whether dividing/taking the remainder by a power of 2 (probed
   here with 32) is cheap relative to a couple of additions.  */
195 17648696 : set_sdiv_pow2_cheap (speed, mode, (set_src_cost (all->sdiv_32, mode, speed)
196 17648696 : <= 2 * add_cost (speed, mode)))
197 17648696 : set_smod_pow2_cheap (speed, mode, (set_src_cost (all->smod_32, mode, speed)
198 17648696 : <= 4 * add_cost (speed, mode)));
199 :
/* A shift by zero is free; a shift-and-add/sub by zero costs the add.  */
200 17648696 : set_shift_cost (speed, mode, 0, 0);
201 17648696 : {
202 17648696 : int cost = add_cost (speed, mode);
203 17648696 : set_shiftadd_cost (speed, mode, 0, cost);
204 17648696 : set_shiftsub0_cost (speed, mode, 0, cost);
205 17648696 : set_shiftsub1_cost (speed, mode, 0, cost);
206 : }
207 :
/* Probe each in-range shift amount M, reusing the scratch nodes with
   the constant M (for shifts) or 1<<M (for the multiply forms).  */
208 17648696 : n = MIN (MAX_BITS_PER_WORD, mode_bitsize);
209 592307456 : for (m = 1; m < n; m++)
210 : {
211 574658760 : XEXP (all->shift, 1) = all->cint[m];
212 574658760 : XEXP (all->shift_mult, 1) = all->pow2[m];
213 :
214 574658760 : set_shift_cost (speed, mode, m, set_src_cost (all->shift, mode, speed));
215 574658760 : set_shiftadd_cost (speed, mode, m, set_src_cost (all->shift_add, mode,
216 : speed));
217 574658760 : set_shiftsub0_cost (speed, mode, m, set_src_cost (all->shift_sub0, mode,
218 : speed));
219 574658760 : set_shiftsub1_cost (speed, mode, m, set_src_cost (all->shift_sub1, mode,
220 : speed));
221 : }
222 :
223 17648696 : scalar_int_mode int_mode_to;
224 17648696 : if (is_a <scalar_int_mode> (mode, &int_mode_to))
225 : {
/* Conversion costs from every integer mode into this one.  */
226 34436480 : for (mode_from = MIN_MODE_INT; mode_from <= MAX_MODE_INT;
227 30131920 : mode_from = (machine_mode)(mode_from + 1))
228 30131920 : init_expmed_one_conv (all, int_mode_to,
229 : as_a <scalar_int_mode> (mode_from), speed);
230 :
/* When a wider integer mode exists, also cost the widening multiply
   (zext x zext in the wider mode) and the highpart multiply
   (truncate of the widened product shifted right).  */
231 4304560 : scalar_int_mode wider_mode;
232 4304560 : if (GET_MODE_CLASS (int_mode_to) == MODE_INT
233 4304560 : && GET_MODE_WIDER_MODE (int_mode_to).exists (&wider_mode))
234 : {
235 2582736 : PUT_MODE (all->reg, mode);
236 2582736 : PUT_MODE (all->zext, wider_mode);
237 2582736 : PUT_MODE (all->wide_mult, wider_mode);
238 2582736 : PUT_MODE (all->wide_lshr, wider_mode);
239 2582736 : XEXP (all->wide_lshr, 1)
240 2582736 : = gen_int_shift_amount (wider_mode, mode_bitsize);
241 :
242 2582736 : set_mul_widen_cost (speed, wider_mode,
243 : set_src_cost (all->wide_mult, wider_mode, speed));
244 2582736 : set_mul_highpart_cost (speed, int_mode_to,
245 : set_src_cost (all->wide_trunc,
246 : int_mode_to, speed));
247 : }
248 : }
249 17648696 : }
250 :
/* Initialize the expmed cost tables: build the scratch RTL, probe every
   integer, partial-integer and vector-integer mode for both size
   (speed == 0) and speed (speed == 1) tuning, and reset the hash table
   of multiplication algorithms.  */
251 : void
252 215228 : init_expmed (void)
253 : {
254 215228 : struct init_expmed_rtl all;
255 215228 : machine_mode mode = QImode;
256 215228 : int m, speed;
257 :
258 215228 : memset (&all, 0, sizeof all);
/* Pre-build the shift-count and power-of-two constants shared by all
   cost probes.  */
259 13774592 : for (m = 1; m < MAX_BITS_PER_WORD; m++)
260 : {
261 13559364 : all.pow2[m] = GEN_INT (HOST_WIDE_INT_1 << m);
262 13559364 : all.cint[m] = GEN_INT (m);
263 : }
264 :
265 : /* Avoid using hard regs in ways which may be unsupported. */
266 215228 : all.reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
267 215228 : all.plus = gen_rtx_PLUS (mode, all.reg, all.reg);
268 215228 : all.neg = gen_rtx_NEG (mode, all.reg);
269 215228 : all.mult = gen_rtx_MULT (mode, all.reg, all.reg);
270 215228 : all.sdiv = gen_rtx_DIV (mode, all.reg, all.reg);
271 215228 : all.udiv = gen_rtx_UDIV (mode, all.reg, all.reg);
272 215228 : all.sdiv_32 = gen_rtx_DIV (mode, all.reg, all.pow2[5]);
273 215228 : all.smod_32 = gen_rtx_MOD (mode, all.reg, all.pow2[5]);
274 215228 : all.zext = gen_rtx_ZERO_EXTEND (mode, all.reg);
275 215228 : all.wide_mult = gen_rtx_MULT (mode, all.zext, all.zext);
276 215228 : all.wide_lshr = gen_rtx_LSHIFTRT (mode, all.wide_mult, all.reg);
277 215228 : all.wide_trunc = gen_rtx_TRUNCATE (mode, all.wide_lshr);
278 215228 : all.shift = gen_rtx_ASHIFT (mode, all.reg, all.reg);
279 215228 : all.shift_mult = gen_rtx_MULT (mode, all.reg, all.reg);
280 215228 : all.shift_add = gen_rtx_PLUS (mode, all.shift_mult, all.reg);
281 215228 : all.shift_sub0 = gen_rtx_MINUS (mode, all.shift_mult, all.reg);
282 215228 : all.shift_sub1 = gen_rtx_MINUS (mode, all.reg, all.shift_mult);
283 215228 : all.trunc = gen_rtx_TRUNCATE (mode, all.reg);
284 :
285 645684 : for (speed = 0; speed < 2; speed++)
286 : {
/* Make set_src_cost see the right tuning: insns look "hot" when
   probing for speed.  default_rtl_profile () below restores the
   normal profile.  */
287 430456 : crtl->maybe_hot_insn_p = speed;
288 430456 : set_zero_cost (speed, set_src_cost (const0_rtx, QImode, speed));
289 :
290 3443648 : for (mode = MIN_MODE_INT; mode <= MAX_MODE_INT;
291 3013192 : mode = (machine_mode)(mode + 1))
292 3013192 : init_expmed_one_mode (&all, mode, speed);
293 :
294 : if (MIN_MODE_PARTIAL_INT != VOIDmode)
295 1721824 : for (mode = MIN_MODE_PARTIAL_INT; mode <= MAX_MODE_PARTIAL_INT;
296 1291368 : mode = (machine_mode)(mode + 1))
297 1291368 : init_expmed_one_mode (&all, mode, speed);
298 :
299 : if (MIN_MODE_VECTOR_INT != VOIDmode)
300 13774592 : for (mode = MIN_MODE_VECTOR_INT; mode <= MAX_MODE_VECTOR_INT;
301 13344136 : mode = (machine_mode)(mode + 1))
302 13344136 : init_expmed_one_mode (&all, mode, speed);
303 : }
304 :
/* Clear any stale entries from the algorithm hash table, or mark it
   in use on the first call.  */
305 215228 : if (alg_hash_used_p ())
306 : {
307 1043 : struct alg_hash_entry *p = alg_hash_entry_ptr (0);
308 1043 : memset (p, 0, sizeof (*p) * NUM_ALG_HASH_ENTRIES);
309 : }
310 : else
311 214185 : set_alg_hash_used_p (true);
312 215228 : default_rtl_profile ();
313 :
/* Return the scratch expressions to the GC, newest first.  */
314 215228 : ggc_free (all.trunc);
315 215228 : ggc_free (all.shift_sub1);
316 215228 : ggc_free (all.shift_sub0);
317 215228 : ggc_free (all.shift_add);
318 215228 : ggc_free (all.shift_mult);
319 215228 : ggc_free (all.shift);
320 215228 : ggc_free (all.wide_trunc);
321 215228 : ggc_free (all.wide_lshr);
322 215228 : ggc_free (all.wide_mult);
323 215228 : ggc_free (all.zext);
324 215228 : ggc_free (all.smod_32);
325 215228 : ggc_free (all.sdiv_32);
326 215228 : ggc_free (all.udiv);
327 215228 : ggc_free (all.sdiv);
328 215228 : ggc_free (all.mult);
329 215228 : ggc_free (all.neg);
330 215228 : ggc_free (all.plus);
331 215228 : ggc_free (all.reg);
332 215228 : }
333 :
334 : /* Return an rtx representing minus the value of X.
335 : MODE is the intended mode of the result,
336 : useful if X is a CONST_INT. */
337 :
338 : rtx
339 1037636 : negate_rtx (machine_mode mode, rtx x)
340 : {
341 1037636 : rtx result = simplify_unary_operation (NEG, mode, x, mode);
342 :
/* If the negation could not be folded (e.g. X is not constant), emit a
   real NEG instruction.  */
343 1037636 : if (result == 0)
344 2053 : result = expand_unop (mode, neg_optab, x, NULL_RTX, 0);
345 :
346 1037636 : return result;
347 : }
348 :
349 : /* Whether reverse storage order is supported on the target:
   -1 = not yet determined, 0 = unsupported, 1 = supported. */
350 : static int reverse_storage_order_supported = -1;
351 :
352 : /* Check whether reverse storage order is supported on the target,
   caching the answer in reverse_storage_order_supported. */
353 :
354 : static void
355 286 : check_reverse_storage_order_support (void)
356 286 : {
/* flip_storage_order implements the flip as a whole-unit byte swap,
   which only reverses scalar storage order when byte order and word
   order agree.  */
357 286 : if (BYTES_BIG_ENDIAN != WORDS_BIG_ENDIAN)
358 : {
359 : reverse_storage_order_supported = 0;
360 : sorry ("reverse scalar storage order");
361 : }
362 : else
363 286 : reverse_storage_order_supported = 1;
364 286 : }
365 :
366 : /* Whether reverse FP storage order is supported on the target:
   -1 = not yet determined, 0 = unsupported, 1 = supported. */
367 : static int reverse_float_storage_order_supported = -1;
368 :
369 : /* Check whether reverse FP storage order is supported on the target,
   caching the answer in reverse_float_storage_order_supported. */
370 :
371 : static void
372 55 : check_reverse_float_storage_order_support (void)
373 55 : {
374 55 : if (FLOAT_WORDS_BIG_ENDIAN != WORDS_BIG_ENDIAN)
375 : {
376 : reverse_float_storage_order_supported = 0;
377 : sorry ("reverse floating-point scalar storage order");
378 : }
379 : else
380 55 : reverse_float_storage_order_supported = 1;
381 55 : }
382 :
383 : /* Return an rtx representing value of X with reverse storage order.
384 : MODE is the intended mode of the result,
385 : useful if X is a CONST_INT. */
386 :
387 : rtx
388 3265 : flip_storage_order (machine_mode mode, rtx x)
389 : {
390 3265 : scalar_int_mode int_mode;
391 3265 : rtx result;
392 :
/* A single byte has no storage order to flip.  */
393 3265 : if (mode == QImode)
394 : return x;
395 :
/* For complex values, flip the real and imaginary parts separately.  */
396 2415 : if (COMPLEX_MODE_P (mode))
397 : {
398 44 : rtx real = read_complex_part (x, false);
399 44 : rtx imag = read_complex_part (x, true);
400 :
401 88 : real = flip_storage_order (GET_MODE_INNER (mode), real);
402 88 : imag = flip_storage_order (GET_MODE_INNER (mode), imag);
403 :
404 44 : return gen_rtx_CONCAT (mode, real, imag);
405 : }
406 :
407 2371 : if (UNLIKELY (reverse_storage_order_supported < 0))
408 286 : check_reverse_storage_order_support ();
409 :
/* Pun non-integer modes through a scalar integer mode of the same
   precision, so the flip can be done as a byte swap.  */
410 2371 : if (!is_a <scalar_int_mode> (mode, &int_mode))
411 : {
412 243 : if (FLOAT_MODE_P (mode)
413 243 : && UNLIKELY (reverse_float_storage_order_supported < 0))
414 55 : check_reverse_float_storage_order_support ();
415 :
416 243 : if (!int_mode_for_size (GET_MODE_PRECISION (mode), 0).exists (&int_mode)
417 243 : || !targetm.scalar_mode_supported_p (int_mode))
418 : {
419 0 : sorry ("reverse storage order for %smode", GET_MODE_NAME (mode));
420 0 : return x;
421 : }
422 243 : x = gen_lowpart (int_mode, x);
423 : }
424 :
/* The flip itself is a byte swap, folded to a constant if possible.  */
425 2371 : result = simplify_unary_operation (BSWAP, int_mode, x, int_mode);
426 2371 : if (result == 0)
427 1051 : result = expand_unop (int_mode, bswap_optab, x, NULL_RTX, 1);
428 :
429 2371 : if (int_mode != mode)
430 243 : result = gen_lowpart (mode, result);
431 :
432 : return result;
433 : }
434 :
435 : /* If MODE is set, adjust bitfield memory MEM so that it points to the
436 : first unit of mode MODE that contains a bitfield of size BITSIZE at
437 : bit position BITNUM. If MODE is not set, return a BLKmode reference
438 : to every byte in the bitfield. Set *NEW_BITNUM to the bit position
439 : of the field within the new memory. */
440 :
441 : static rtx
442 421114 : narrow_bit_field_mem (rtx mem, opt_scalar_int_mode mode,
443 : unsigned HOST_WIDE_INT bitsize,
444 : unsigned HOST_WIDE_INT bitnum,
445 : unsigned HOST_WIDE_INT *new_bitnum)
446 : {
447 421114 : scalar_int_mode imode;
448 421114 : if (mode.exists (&imode))
449 : {
/* Step MEM forward to the IMODE-sized unit containing the field and
   express the bit position relative to that unit.  */
450 421114 : unsigned int unit = GET_MODE_BITSIZE (imode);
451 421114 : *new_bitnum = bitnum % unit;
452 421114 : HOST_WIDE_INT offset = (bitnum - *new_bitnum) / BITS_PER_UNIT;
453 421114 : return adjust_bitfield_address (mem, imode, offset);
454 : }
455 : else
456 : {
/* No unit mode: return a BLKmode reference covering the smallest
   whole number of bytes that contains the field.  */
457 0 : *new_bitnum = bitnum % BITS_PER_UNIT;
458 0 : HOST_WIDE_INT offset = bitnum / BITS_PER_UNIT;
459 0 : HOST_WIDE_INT size = ((*new_bitnum + bitsize + BITS_PER_UNIT - 1)
460 0 : / BITS_PER_UNIT);
461 0 : return adjust_bitfield_address_size (mem, BLKmode, offset, size);
462 : }
463 : }
464 :
465 : /* The caller wants to perform insertion or extraction PATTERN on a
466 : bitfield of size BITSIZE at BITNUM bits into memory operand OP0.
467 : BITREGION_START and BITREGION_END are as for store_bit_field
468 : and FIELDMODE is the natural mode of the field.
469 :
470 : Search for a mode that is compatible with the memory access
471 : restrictions and (where applicable) with a register insertion or
472 : extraction. Return the new memory on success, storing the adjusted
473 : bit position in *NEW_BITNUM. Return null otherwise. */
474 :
475 : static rtx
476 203425 : adjust_bit_field_mem_for_reg (enum extraction_pattern pattern,
477 : rtx op0, HOST_WIDE_INT bitsize,
478 : HOST_WIDE_INT bitnum,
479 : poly_uint64 bitregion_start,
480 : poly_uint64 bitregion_end,
481 : machine_mode fieldmode,
482 : unsigned HOST_WIDE_INT *new_bitnum)
483 : {
/* The iterator enumerates, narrowest first, the scalar integer modes
   that can legally access the field given its alignment, volatility
   and the C++ memory-model bit region.  */
484 406850 : bit_field_mode_iterator iter (bitsize, bitnum, bitregion_start,
485 203425 : bitregion_end, MEM_ALIGN (op0),
486 203429 : MEM_VOLATILE_P (op0));
487 203425 : scalar_int_mode best_mode;
488 203425 : if (iter.next_mode (&best_mode))
489 : {
490 : /* We can use a memory in BEST_MODE. See whether this is true for
491 : any wider modes. All other things being equal, we prefer to
492 : use the widest mode possible because it tends to expose more
493 : CSE opportunities. */
494 196944 : if (!iter.prefer_smaller_modes ())
495 : {
496 : /* Limit the search to the mode required by the corresponding
497 : register insertion or extraction instruction, if any. */
498 317 : scalar_int_mode limit_mode = word_mode;
499 317 : extraction_insn insn;
500 634 : if (get_best_reg_extraction_insn (&insn, pattern,
501 317 : GET_MODE_BITSIZE (best_mode),
502 : fieldmode))
503 317 : limit_mode = insn.field_mode;
504 :
505 317 : scalar_int_mode wider_mode;
506 317 : while (iter.next_mode (&wider_mode)
507 1052 : && GET_MODE_SIZE (wider_mode) <= GET_MODE_SIZE (limit_mode))
508 111 : best_mode = wider_mode;
509 : }
510 196944 : return narrow_bit_field_mem (op0, best_mode, bitsize, bitnum,
511 : new_bitnum);
512 : }
/* No mode satisfies the access restrictions.  */
513 : return NULL_RTX;
514 : }
515 :
516 : /* Return true if a bitfield of size BITSIZE at bit number BITNUM within
517 : a structure of mode STRUCT_MODE represents a lowpart subreg. The subreg
518 : offset is then BITNUM / BITS_PER_UNIT. */
519 :
520 : static bool
521 779541 : lowpart_bit_field_p (poly_uint64 bitnum, poly_uint64 bitsize,
522 : machine_mode struct_mode)
523 : {
524 779541 : poly_uint64 regsize = REGMODE_NATURAL_SIZE (struct_mode);
/* On big-endian targets the lowpart sits at the high bit positions:
   the field must be byte-aligned and must end either at the end of the
   structure or on a natural register boundary.  */
525 779541 : if (BYTES_BIG_ENDIAN)
526 : return (multiple_p (bitnum, BITS_PER_UNIT)
527 : && (known_eq (bitnum + bitsize, GET_MODE_BITSIZE (struct_mode))
528 : || multiple_p (bitnum + bitsize,
529 : regsize * BITS_PER_UNIT)));
530 : else
/* Little-endian: a lowpart simply starts on a natural register
   boundary.  */
531 779541 : return multiple_p (bitnum, regsize * BITS_PER_UNIT);
532 : }
533 :
534 : /* Return true if -fstrict-volatile-bitfields applies to an access of OP0
535 : containing BITSIZE bits starting at BITNUM, with field mode FIELDMODE.
536 : Return false if the access would touch memory outside the range
537 : BITREGION_START to BITREGION_END for conformance to the C++ memory
538 : model. */
539 :
540 : static bool
541 1556044 : strict_volatile_bitfield_p (rtx op0, unsigned HOST_WIDE_INT bitsize,
542 : unsigned HOST_WIDE_INT bitnum,
543 : scalar_int_mode fieldmode,
544 : poly_uint64 bitregion_start,
545 : poly_uint64 bitregion_end)
546 : {
547 1556044 : unsigned HOST_WIDE_INT modesize = GET_MODE_BITSIZE (fieldmode);
548 :
549 : /* -fstrict-volatile-bitfields must be enabled and we must have a
550 : volatile MEM. */
551 1556044 : if (!MEM_P (op0)
552 161444 : || !MEM_VOLATILE_P (op0)
553 1556256 : || flag_strict_volatile_bitfields <= 0)
554 : return false;
555 :
556 : /* The bit size must not be larger than the field mode, and
557 : the field mode must not be larger than a word. */
558 14 : if (bitsize > modesize || modesize > BITS_PER_WORD)
559 : return false;
560 :
561 : /* Check for cases of unaligned fields that must be split. */
562 14 : if (bitnum % modesize + bitsize > modesize)
563 : return false;
564 :
565 : /* The memory must be sufficiently aligned for a MODESIZE access.
566 : This condition guarantees that the memory access will not
567 : touch anything after the end of the structure. */
568 11 : if (MEM_ALIGN (op0) < modesize)
569 : return false;
570 :
571 : /* Check for cases where the C++ memory model applies. */
572 11 : if (maybe_ne (bitregion_end, 0U)
573 11 : && (maybe_lt (bitnum - bitnum % modesize, bitregion_start)
574 4 : || maybe_gt (bitnum - bitnum % modesize + modesize - 1,
575 : bitregion_end)))
576 0 : return false;
577 :
578 : return true;
579 : }
580 :
581 : /* Return true if OP0 is a memory and if a bitfield of size BITSIZE at
582 : bit number BITNUM can be treated as a simple value of mode MODE.
583 : Store the byte offset in *BYTENUM if so. */
584 :
585 : static bool
586 534456 : simple_mem_bitfield_p (rtx op0, poly_uint64 bitsize, poly_uint64 bitnum,
587 : machine_mode mode, poly_uint64 *bytenum)
588 : {
/* The field must be byte-aligned, exactly fill MODE, and either the
   target tolerates unaligned MODE accesses or the access is naturally
   aligned for MODE.  */
589 534456 : return (MEM_P (op0)
590 257024 : && multiple_p (bitnum, BITS_PER_UNIT, bytenum)
591 205199 : && known_eq (bitsize, GET_MODE_BITSIZE (mode))
592 585220 : && (!targetm.slow_unaligned_access (mode, MEM_ALIGN (op0))
593 0 : || (multiple_p (bitnum, GET_MODE_ALIGNMENT (mode))
594 0 : && MEM_ALIGN (op0) >= GET_MODE_ALIGNMENT (mode))));
595 : }
596 :
597 : /* Try to use instruction INSV to store VALUE into a field of OP0.
598 : If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is a
599 : BLKmode MEM. VALUE_MODE is the mode of VALUE. BITSIZE and BITNUM
600 : are as for store_bit_field. Return true on success; on failure any
   partially emitted insns are deleted and OP0 is left untouched. */
601 :
602 : static bool
603 108180 : store_bit_field_using_insv (const extraction_insn *insv, rtx op0,
604 : opt_scalar_int_mode op0_mode,
605 : unsigned HOST_WIDE_INT bitsize,
606 : unsigned HOST_WIDE_INT bitnum,
607 : rtx value, scalar_int_mode value_mode)
608 : {
609 108180 : class expand_operand ops[4];
610 108180 : rtx value1;
611 108180 : rtx xop0 = op0;
/* Remember the insn stream position so we can roll back on failure.  */
612 108180 : rtx_insn *last = get_last_insn ();
613 108180 : bool copy_back = false;
614 :
615 108180 : scalar_int_mode op_mode = insv->field_mode;
616 108180 : unsigned int unit = GET_MODE_BITSIZE (op_mode);
617 108180 : if (bitsize == 0 || bitsize > unit)
618 : return false;
619 :
620 108171 : if (MEM_P (xop0))
621 : /* Get a reference to the first byte of the field. */
622 0 : xop0 = narrow_bit_field_mem (xop0, insv->struct_mode, bitsize, bitnum,
623 : &bitnum);
624 : else
625 : {
626 : /* Convert from counting within OP0 to counting in OP_MODE. */
627 108171 : if (BYTES_BIG_ENDIAN)
628 : bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());
629 :
630 : /* If xop0 is a register, we need it in OP_MODE
631 : to make it acceptable to the format of insv. */
632 108171 : if (GET_CODE (xop0) == SUBREG)
633 : {
634 : /* If such a SUBREG can't be created, give up. */
635 33360 : if (!validate_subreg (op_mode, GET_MODE (SUBREG_REG (xop0)),
636 33360 : SUBREG_REG (xop0), SUBREG_BYTE (xop0)))
637 : return false;
638 : /* We can't just change the mode, because this might clobber op0,
639 : and we will need the original value of op0 if insv fails. */
640 33360 : xop0 = gen_rtx_SUBREG (op_mode, SUBREG_REG (xop0),
641 33360 : SUBREG_BYTE (xop0));
642 : }
643 108171 : if (REG_P (xop0) && GET_MODE (xop0) != op_mode)
644 26726 : xop0 = gen_lowpart_SUBREG (op_mode, xop0);
645 : }
646 :
647 : /* If the destination is a paradoxical subreg such that we need a
648 : truncate to the inner mode, perform the insertion on a temporary and
649 : truncate the result to the original destination. Note that we can't
650 : just truncate the paradoxical subreg as (truncate:N (subreg:W (reg:N
651 : X) 0)) is (reg:N X). */
652 108171 : if (GET_CODE (xop0) == SUBREG
653 60086 : && REG_P (SUBREG_REG (xop0))
654 168257 : && !TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (SUBREG_REG (xop0)),
655 : op_mode))
656 : {
657 0 : rtx tem = gen_reg_rtx (op_mode);
658 0 : emit_move_insn (tem, xop0);
659 0 : xop0 = tem;
660 0 : copy_back = true;
661 : }
662 :
663 : /* There is a similar overflow check at the start of store_bit_field_1,
664 : but it only handles the case where the field lies completely outside
665 : the register. The field can also lie partially in the register, in
666 : which case BITSIZE must be adjusted for the partial overflow.
667 : Without this adjustment, pr48335-2.c is broken on big-endian targets
668 : that have a bit-insert instruction, such as arm and aarch64.
669 : */
670 108171 : if (bitsize + bitnum > unit && bitnum < unit)
671 : {
672 2 : warning (OPT_Wextra, "write of %wu-bit data outside the bound of "
673 : "destination object, data truncated into %wu-bit",
674 : bitsize, unit - bitnum);
675 2 : bitsize = unit - bitnum;
676 : }
677 :
678 : /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
679 : "backwards" from the size of the unit we are inserting into.
680 : Otherwise, we count bits from the most significant on a
681 : BYTES/BITS_BIG_ENDIAN machine. */
682 :
683 108171 : if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
684 : bitnum = unit - bitsize - bitnum;
685 :
686 : /* Convert VALUE to op_mode (which insv insn wants) in VALUE1. */
687 108171 : value1 = value;
688 108171 : if (value_mode != op_mode)
689 : {
690 141432 : if (GET_MODE_BITSIZE (value_mode) >= bitsize)
691 : {
692 70716 : rtx tmp;
693 : /* Optimization: Don't bother really extending VALUE
694 : if it has all the bits we will actually use. However,
695 : if we must narrow it, be sure we do it correctly. */
696 :
697 212148 : if (GET_MODE_SIZE (value_mode) < GET_MODE_SIZE (op_mode))
698 : {
699 32025 : tmp = simplify_subreg (op_mode, value1, value_mode, 0);
700 32025 : if (! tmp)
701 31475 : tmp = simplify_gen_subreg (op_mode,
702 : force_reg (value_mode, value1),
703 : value_mode, 0);
704 : }
705 : else
706 : {
707 38691 : if (targetm.mode_rep_extended (op_mode, value_mode) != UNKNOWN)
708 0 : tmp = simplify_gen_unary (TRUNCATE, op_mode,
709 : value1, value_mode);
710 : else
711 : {
712 38691 : tmp = gen_lowpart_if_possible (op_mode, value1);
713 38691 : if (! tmp)
714 0 : tmp = gen_lowpart (op_mode, force_reg (value_mode, value1));
715 : }
716 : }
717 : value1 = tmp;
718 : }
719 0 : else if (CONST_INT_P (value))
720 0 : value1 = gen_int_mode (INTVAL (value), op_mode);
721 : else
722 : /* Parse phase is supposed to make VALUE's data type
723 : match that of the component reference, which is a type
724 : at least as wide as the field; so VALUE should have
725 : a mode that corresponds to that type. */
726 0 : gcc_assert (CONSTANT_P (value));
727 : }
728 :
/* Operands of the insv pattern: destination, field width, field
   position, and the value to insert.  */
729 108171 : create_fixed_operand (&ops[0], xop0);
730 108171 : create_integer_operand (&ops[1], bitsize);
731 108171 : create_integer_operand (&ops[2], bitnum);
732 108171 : create_input_operand (&ops[3], value1, op_mode);
733 108171 : if (maybe_expand_insn (insv->icode, 4, ops))
734 : {
735 2050 : if (copy_back)
736 0 : convert_move (op0, xop0, true);
737 2050 : return true;
738 : }
/* The pattern rejected the operands: discard everything emitted since
   LAST so the caller can try another strategy.  */
739 106121 : delete_insns_since (last);
740 106121 : return false;
741 : }
743 : /* A subroutine of store_bit_field, with the same arguments. Return true
744 : if the operation could be implemented.
745 :
746 : If FALLBACK_P is true, fall back to store_fixed_bit_field if we have
747 : no other way of implementing the operation. If FALLBACK_P is false,
748 : return false instead.
749 :
750 : if UNDEFINED_P is true then STR_RTX is undefined and may be set using
751 : a subreg instead. */
752 :
753 : static bool
754 878699 : store_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
755 : poly_uint64 bitregion_start, poly_uint64 bitregion_end,
756 : machine_mode fieldmode,
757 : rtx value, bool reverse, bool fallback_p, bool undefined_p)
758 : {
759 878699 : rtx op0 = str_rtx;
760 :
761 878705 : while (GET_CODE (op0) == SUBREG)
762 : {
763 6 : bitnum += subreg_memory_offset (op0) * BITS_PER_UNIT;
764 6 : op0 = SUBREG_REG (op0);
765 : }
766 :
767 : /* No action is needed if the target is a register and if the field
768 : lies completely outside that register. This can occur if the source
769 : code contains an out-of-bounds access to a small array. */
770 1682908 : if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
771 : return true;
772 :
773 : /* Use vec_set patterns for inserting parts of vectors whenever
774 : available. */
775 878696 : machine_mode outermode = GET_MODE (op0);
776 878696 : scalar_mode innermode = GET_MODE_INNER (outermode);
777 878696 : poly_uint64 pos;
778 876909 : if (VECTOR_MODE_P (outermode)
779 2135 : && !MEM_P (op0)
780 2129 : && optab_handler (vec_set_optab, outermode) != CODE_FOR_nothing
781 1043 : && fieldmode == innermode
782 891 : && known_eq (bitsize, GET_MODE_PRECISION (innermode))
783 879587 : && multiple_p (bitnum, GET_MODE_PRECISION (innermode), &pos))
784 : {
785 891 : class expand_operand ops[3];
786 891 : enum insn_code icode = optab_handler (vec_set_optab, outermode);
787 :
788 891 : create_fixed_operand (&ops[0], op0);
789 891 : create_input_operand (&ops[1], value, innermode);
790 891 : create_integer_operand (&ops[2], pos);
791 891 : if (maybe_expand_insn (icode, 3, ops))
792 891 : return true;
793 : }
794 :
795 : /* If the target is a register, overwriting the entire object, or storing
796 : a full-word or multi-word field can be done with just a SUBREG. */
797 877805 : if (!MEM_P (op0)
798 1681120 : && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode)))
799 : {
800 : /* Use the subreg machinery either to narrow OP0 to the required
801 : words or to cope with mode punning between equal-sized modes.
802 : In the latter case, use subreg on the rhs side, not lhs. */
803 736979 : rtx sub;
804 736979 : poly_uint64 bytenum;
805 736979 : poly_uint64 regsize = REGMODE_NATURAL_SIZE (GET_MODE (op0));
806 736979 : if (known_eq (bitnum, 0U)
807 1106358 : && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
808 : {
809 53897 : sub = force_subreg (GET_MODE (op0), value, fieldmode, 0);
810 53897 : if (sub)
811 : {
812 53897 : if (reverse)
813 1 : sub = flip_storage_order (GET_MODE (op0), sub);
814 53897 : emit_move_insn (op0, sub);
815 53897 : return true;
816 : }
817 : }
818 872260 : else if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
819 683077 : && (undefined_p
820 679431 : || (multiple_p (bitnum, regsize * BITS_PER_UNIT)
821 666844 : && multiple_p (bitsize, regsize * BITS_PER_UNIT)))
822 1269478 : && known_ge (GET_MODE_BITSIZE (GET_MODE (op0)), bitsize))
823 : {
824 634727 : sub = simplify_gen_subreg (fieldmode, op0, GET_MODE (op0), bytenum);
825 634727 : if (sub)
826 : {
827 634725 : if (reverse)
828 0 : value = flip_storage_order (fieldmode, value);
829 634725 : emit_move_insn (sub, value);
830 634725 : return true;
831 : }
832 : }
833 : }
834 :
835 : /* If the target is memory, storing any naturally aligned field can be
836 : done with a simple store. For targets that support fast unaligned
837 : memory, any naturally sized, unit aligned field can be done directly. */
838 189183 : poly_uint64 bytenum;
839 189183 : if (simple_mem_bitfield_p (op0, bitsize, bitnum, fieldmode, &bytenum))
840 : {
841 7001 : op0 = adjust_bitfield_address (op0, fieldmode, bytenum);
842 7001 : if (reverse)
843 0 : value = flip_storage_order (fieldmode, value);
844 7001 : emit_move_insn (op0, value);
845 7001 : return true;
846 : }
847 :
848 : /* It's possible we'll need to handle other cases here for
849 : polynomial bitnum and bitsize. */
850 :
851 : /* From here on we need to be looking at a fixed-size insertion. */
852 182182 : unsigned HOST_WIDE_INT ibitsize = bitsize.to_constant ();
853 182182 : unsigned HOST_WIDE_INT ibitnum = bitnum.to_constant ();
854 :
855 : /* Make sure we are playing with integral modes. Pun with subregs
856 : if we aren't. This must come after the entire register case above,
857 : since that case is valid for any mode. The following cases are only
858 : valid for integral modes. */
859 182182 : opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
860 182182 : scalar_int_mode imode;
861 182182 : if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
862 : {
863 19701 : if (MEM_P (op0))
864 15141 : op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
865 : 0, MEM_SIZE (op0));
866 4560 : else if (!op0_mode.exists ())
867 : {
868 0 : if (ibitnum == 0
869 0 : && known_eq (ibitsize, GET_MODE_BITSIZE (GET_MODE (op0)))
870 0 : && MEM_P (value)
871 0 : && !reverse)
872 : {
873 0 : value = adjust_address (value, GET_MODE (op0), 0);
874 0 : emit_move_insn (op0, value);
875 0 : return true;
876 : }
877 0 : if (!fallback_p)
878 : return false;
879 0 : rtx temp = assign_stack_temp (GET_MODE (op0),
880 0 : GET_MODE_SIZE (GET_MODE (op0)));
881 0 : emit_move_insn (temp, op0);
882 0 : store_bit_field_1 (temp, bitsize, bitnum, 0, 0, fieldmode, value,
883 : reverse, fallback_p, undefined_p);
884 0 : emit_move_insn (op0, temp);
885 0 : return true;
886 : }
887 : else
888 4560 : op0 = gen_lowpart (op0_mode.require (), op0);
889 : }
890 :
891 182182 : return store_integral_bit_field (op0, op0_mode, ibitsize, ibitnum,
892 : bitregion_start, bitregion_end,
893 182182 : fieldmode, value, reverse, fallback_p);
894 : }
895 :
896 : /* Subroutine of store_bit_field_1, with the same arguments, except
897 : that BITSIZE and BITNUM are constant. Handle cases specific to
898 : integral modes. If OP0_MODE is defined, it is the mode of OP0,
899 : otherwise OP0 is a BLKmode MEM. */
900 :
901 : static bool
902 182182 : store_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
903 : unsigned HOST_WIDE_INT bitsize,
904 : unsigned HOST_WIDE_INT bitnum,
905 : poly_uint64 bitregion_start,
906 : poly_uint64 bitregion_end,
907 : machine_mode fieldmode,
908 : rtx value, bool reverse, bool fallback_p)
909 : {
910 : /* Storing an lsb-aligned field in a register
911 : can be done with a movstrict instruction. */
912 :
913 182182 : if (!MEM_P (op0)
914 114693 : && !reverse
915 373032 : && lowpart_bit_field_p (bitnum, bitsize, op0_mode.require ())
916 81869 : && known_eq (bitsize, GET_MODE_BITSIZE (fieldmode))
917 217947 : && optab_handler (movstrict_optab, fieldmode) != CODE_FOR_nothing)
918 : {
919 5685 : class expand_operand ops[2];
920 5685 : enum insn_code icode = optab_handler (movstrict_optab, fieldmode);
921 5685 : rtx arg0 = op0;
922 5685 : unsigned HOST_WIDE_INT subreg_off;
923 :
924 5685 : if (GET_CODE (arg0) == SUBREG)
925 : {
926 : /* Else we've got some float mode source being extracted into
927 : a different float mode destination -- this combination of
928 : subregs results in Severe Tire Damage. */
929 422 : gcc_assert (GET_MODE (SUBREG_REG (arg0)) == fieldmode
930 : || GET_MODE_CLASS (fieldmode) == MODE_INT
931 : || GET_MODE_CLASS (fieldmode) == MODE_PARTIAL_INT);
932 : arg0 = SUBREG_REG (arg0);
933 : }
934 :
935 5685 : subreg_off = bitnum / BITS_PER_UNIT;
936 5710 : if (validate_subreg (fieldmode, GET_MODE (arg0), arg0, subreg_off)
937 : /* STRICT_LOW_PART must have a non-paradoxical subreg as
938 : operand. */
939 5685 : && !paradoxical_subreg_p (fieldmode, GET_MODE (arg0)))
940 : {
941 5660 : arg0 = gen_rtx_SUBREG (fieldmode, arg0, subreg_off);
942 :
943 5660 : create_fixed_operand (&ops[0], arg0);
944 : /* Shrink the source operand to FIELDMODE. */
945 5660 : create_convert_operand_to (&ops[1], value, fieldmode, false);
946 5660 : if (maybe_expand_insn (icode, 2, ops))
947 5659 : return true;
948 : }
949 : }
950 :
951 : /* Handle fields bigger than a word. */
952 :
953 178070 : if (bitsize > BITS_PER_WORD)
954 : {
955 : /* Here we transfer the words of the field
956 : in the order least significant first.
957 : This is because the most significant word is the one which may
958 : be less than full.
959 : However, only do that if the value is not BLKmode. */
960 :
961 902 : const bool backwards = WORDS_BIG_ENDIAN && fieldmode != BLKmode;
962 902 : const int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
963 902 : rtx_insn *last;
964 :
965 : /* This is the mode we must force value to, so that there will be enough
966 : subwords to extract. Note that fieldmode will often (always?) be
967 : VOIDmode, because that is what store_field uses to indicate that this
968 : is a bit field, but passing VOIDmode to operand_subword_force
969 : is not allowed.
970 :
971 : The mode must be fixed-size, since insertions into variable-sized
972 : objects are meant to be handled before calling this function. */
973 902 : fixed_size_mode value_mode = as_a <fixed_size_mode> (GET_MODE (value));
974 902 : if (value_mode == VOIDmode)
975 22 : value_mode
976 22 : = smallest_int_mode_for_size (nwords * BITS_PER_WORD).require ();
977 :
978 902 : last = get_last_insn ();
979 2701 : for (int i = 0; i < nwords; i++)
980 : {
981 : /* Number of bits to be stored in this iteration, i.e. BITS_PER_WORD
982 : except maybe for the last iteration. */
983 3608 : const unsigned HOST_WIDE_INT new_bitsize
984 1840 : = MIN (BITS_PER_WORD, bitsize - i * BITS_PER_WORD);
985 : /* Bit offset from the starting bit number in the target. */
986 3620 : const unsigned int bit_offset
987 : = backwards ^ reverse
988 1804 : ? MAX ((int) bitsize - (i + 1) * BITS_PER_WORD, 0)
989 : : i * BITS_PER_WORD;
990 :
991 : /* No further action is needed if the target is a register and if
992 : this field lies completely outside that register. */
993 2076 : if (REG_P (op0) && known_ge (bitnum + bit_offset,
994 : GET_MODE_BITSIZE (GET_MODE (op0))))
995 : {
996 5 : if (backwards ^ reverse)
997 0 : continue;
998 : /* For forward operation we are finished. */
999 182182 : return true;
1000 : }
1001 :
1002 : /* Starting word number in the value. */
1003 1799 : const unsigned int wordnum
1004 : = backwards
1005 1799 : ? GET_MODE_SIZE (value_mode) / UNITS_PER_WORD - (i + 1)
1006 : : i;
1007 : /* The chunk of the value in word_mode. We use bit-field extraction
1008 : in BLKmode to handle unaligned memory references and to shift the
1009 : last chunk right on big-endian machines if need be. */
1010 1799 : rtx value_word
1011 : = fieldmode == BLKmode
1012 1835 : ? extract_bit_field (value, new_bitsize, wordnum * BITS_PER_WORD,
1013 : 1, NULL_RTX, word_mode, word_mode, false,
1014 : NULL)
1015 1550 : : operand_subword_force (value, wordnum, value_mode);
1016 :
1017 1799 : if (!store_bit_field_1 (op0, new_bitsize,
1018 1799 : bitnum + bit_offset,
1019 : bitregion_start, bitregion_end,
1020 : word_mode,
1021 : value_word, reverse, fallback_p, false))
1022 : {
1023 0 : delete_insns_since (last);
1024 0 : return false;
1025 : }
1026 : }
1027 : return true;
1028 : }
1029 :
1030 : /* If VALUE has a floating-point or complex mode, access it as an
1031 : integer of the corresponding size. This can occur on a machine
1032 : with 64 bit registers that uses SFmode for float. It can also
1033 : occur for unaligned float or complex fields. */
1034 175621 : rtx orig_value = value;
1035 175621 : scalar_int_mode value_mode;
1036 175621 : if (GET_MODE (value) == VOIDmode)
1037 : /* By this point we've dealt with values that are bigger than a word,
1038 : so word_mode is a conservatively correct choice. */
1039 105809 : value_mode = word_mode;
1040 69812 : else if (!is_a <scalar_int_mode> (GET_MODE (value), &value_mode))
1041 : {
1042 1072 : value_mode = int_mode_for_mode (GET_MODE (value)).require ();
1043 1072 : value = gen_reg_rtx (value_mode);
1044 1072 : emit_move_insn (gen_lowpart (GET_MODE (orig_value), value), orig_value);
1045 : }
1046 :
1047 : /* If OP0 is a multi-word register, narrow it to the affected word.
1048 : If the region spans two words, defer to store_split_bit_field.
1049 : Don't do this if op0 is a single hard register wider than word
1050 : such as a float or vector register. */
1051 175621 : if (!MEM_P (op0)
1052 218680 : && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD
1053 209111 : && (!REG_P (op0)
1054 33469 : || !HARD_REGISTER_P (op0)
1055 142133 : || hard_regno_nregs (REGNO (op0), op0_mode.require ()) != 1))
1056 : {
1057 33563 : if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
1058 : {
1059 696 : if (!fallback_p)
1060 : return false;
1061 :
1062 71 : store_split_bit_field (op0, op0_mode, bitsize, bitnum,
1063 : bitregion_start, bitregion_end,
1064 : value, value_mode, reverse);
1065 71 : return true;
1066 : }
1067 32792 : op0 = simplify_gen_subreg (word_mode, op0, op0_mode.require (),
1068 32867 : bitnum / BITS_PER_WORD * UNITS_PER_WORD);
1069 32792 : gcc_assert (op0);
1070 32792 : op0_mode = word_mode;
1071 32867 : bitnum %= BITS_PER_WORD;
1072 : }
1073 :
1074 : /* From here on we can assume that the field to be stored in fits
1075 : within a word. If the destination is a register, it too fits
1076 : in a word. */
1077 :
1078 174925 : extraction_insn insv;
1079 174925 : if (!MEM_P (op0)
1080 108199 : && !reverse
1081 108182 : && get_best_reg_extraction_insn (&insv, EP_insv,
1082 216364 : GET_MODE_BITSIZE (op0_mode.require ()),
1083 : fieldmode)
1084 283105 : && store_bit_field_using_insv (&insv, op0, op0_mode,
1085 : bitsize, bitnum, value, value_mode))
1086 2050 : return true;
1087 :
1088 : /* If OP0 is a memory, try copying it to a register and seeing if a
1089 : cheap register alternative is available. */
1090 172875 : if (MEM_P (op0) && !reverse)
1091 : {
1092 66327 : if (get_best_mem_extraction_insn (&insv, EP_insv, bitsize, bitnum,
1093 : fieldmode)
1094 66327 : && store_bit_field_using_insv (&insv, op0, op0_mode,
1095 : bitsize, bitnum, value, value_mode))
1096 0 : return true;
1097 :
1098 66327 : rtx_insn *last = get_last_insn ();
1099 :
1100 : /* Try loading part of OP0 into a register, inserting the bitfield
1101 : into that, and then copying the result back to OP0. */
1102 66327 : unsigned HOST_WIDE_INT bitpos;
1103 66327 : rtx xop0 = adjust_bit_field_mem_for_reg (EP_insv, op0, bitsize, bitnum,
1104 : bitregion_start, bitregion_end,
1105 : fieldmode, &bitpos);
1106 66327 : if (xop0)
1107 : {
1108 62492 : rtx tempreg = copy_to_reg (xop0);
1109 62492 : if (store_bit_field_1 (tempreg, bitsize, bitpos,
1110 : bitregion_start, bitregion_end,
1111 : fieldmode, orig_value, reverse, false, false))
1112 : {
1113 0 : emit_move_insn (xop0, tempreg);
1114 0 : return true;
1115 : }
1116 62492 : delete_insns_since (last);
1117 : }
1118 : }
1119 :
1120 172875 : if (!fallback_p)
1121 : return false;
1122 :
1123 111008 : store_fixed_bit_field (op0, op0_mode, bitsize, bitnum, bitregion_start,
1124 : bitregion_end, value, value_mode, reverse);
1125 111008 : return true;
1126 : }
1127 :
1128 : /* Generate code to store value from rtx VALUE
1129 : into a bit-field within structure STR_RTX
1130 : containing BITSIZE bits starting at bit BITNUM.
1131 :
1132 : BITREGION_START is bitpos of the first bitfield in this region.
1133 : BITREGION_END is the bitpos of the ending bitfield in this region.
1134 : These two fields are 0, if the C++ memory model does not apply,
1135 : or we are not interested in keeping track of bitfield regions.
1136 :
1137 : FIELDMODE is the machine-mode of the FIELD_DECL node for this field.
1138 :
1139 : If REVERSE is true, the store is to be done in reverse order.
1140 :
1141 : If UNDEFINED_P is true then STR_RTX is currently undefined. */
1142 :
1143 : void
1144 814408 : store_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
1145 : poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1146 : machine_mode fieldmode,
1147 : rtx value, bool reverse, bool undefined_p)
1148 : {
1149 : /* Handle -fstrict-volatile-bitfields in the cases where it applies. */
1150 814408 : unsigned HOST_WIDE_INT ibitsize = 0, ibitnum = 0;
1151 814408 : scalar_int_mode int_mode;
1152 814408 : if (bitsize.is_constant (&ibitsize)
1153 814408 : && bitnum.is_constant (&ibitnum)
1154 1534335 : && is_a <scalar_int_mode> (fieldmode, &int_mode)
1155 719931 : && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum, int_mode,
1156 : bitregion_start, bitregion_end))
1157 : {
1158 : /* Storing of a full word can be done with a simple store.
1159 : We know here that the field can be accessed with one single
1160 : instruction. For targets that support unaligned memory,
1161 : an unaligned access may be necessary. */
1162 8 : if (ibitsize == GET_MODE_BITSIZE (int_mode))
1163 : {
1164 0 : str_rtx = adjust_bitfield_address (str_rtx, int_mode,
1165 : ibitnum / BITS_PER_UNIT);
1166 0 : if (reverse)
1167 0 : value = flip_storage_order (int_mode, value);
1168 0 : gcc_assert (ibitnum % BITS_PER_UNIT == 0);
1169 0 : emit_move_insn (str_rtx, value);
1170 : }
1171 : else
1172 : {
1173 4 : rtx temp;
1174 :
1175 4 : str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize,
1176 : ibitnum, &ibitnum);
1177 8 : gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
1178 4 : temp = copy_to_reg (str_rtx);
1179 4 : if (!store_bit_field_1 (temp, ibitsize, ibitnum, 0, 0,
1180 : int_mode, value, reverse, true, undefined_p))
1181 0 : gcc_unreachable ();
1182 :
1183 4 : emit_move_insn (str_rtx, temp);
1184 : }
1185 :
1186 4 : return;
1187 : }
1188 :
1189 : /* Under the C++0x memory model, we must not touch bits outside the
1190 : bit region. Adjust the address to start at the beginning of the
1191 : bit region. */
1192 814404 : if (MEM_P (str_rtx) && maybe_ne (bitregion_start, 0U))
1193 : {
1194 50529 : scalar_int_mode best_mode;
1195 50529 : machine_mode addr_mode = VOIDmode;
1196 :
1197 50529 : poly_uint64 offset = exact_div (bitregion_start, BITS_PER_UNIT);
1198 101058 : bitnum -= bitregion_start;
1199 50529 : poly_int64 size = bits_to_bytes_round_up (bitnum + bitsize);
1200 50529 : bitregion_end -= bitregion_start;
1201 50529 : bitregion_start = 0;
1202 50529 : if (bitsize.is_constant (&ibitsize)
1203 50529 : && bitnum.is_constant (&ibitnum)
1204 50529 : && get_best_mode (ibitsize, ibitnum,
1205 : bitregion_start, bitregion_end,
1206 50529 : MEM_ALIGN (str_rtx), INT_MAX,
1207 50529 : MEM_VOLATILE_P (str_rtx), &best_mode))
1208 47127 : addr_mode = best_mode;
1209 50529 : str_rtx = adjust_bitfield_address_size (str_rtx, addr_mode,
1210 : offset, size);
1211 : }
1212 :
1213 814404 : if (!store_bit_field_1 (str_rtx, bitsize, bitnum,
1214 : bitregion_start, bitregion_end,
1215 : fieldmode, value, reverse, true, undefined_p))
1216 0 : gcc_unreachable ();
1217 : }
1218 :
1219 : /* Use shifts and boolean operations to store VALUE into a bit field of
1220 : width BITSIZE in OP0, starting at bit BITNUM. If OP0_MODE is defined,
1221 : it is the mode of OP0, otherwise OP0 is a BLKmode MEM. VALUE_MODE is
1222 : the mode of VALUE.
1223 :
1224 : If REVERSE is true, the store is to be done in reverse order. */
1225 :
1226 : static void
1227 128453 : store_fixed_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1228 : unsigned HOST_WIDE_INT bitsize,
1229 : unsigned HOST_WIDE_INT bitnum,
1230 : poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1231 : rtx value, scalar_int_mode value_mode, bool reverse)
1232 : {
1233 : /* There is a case not handled here:
1234 : a structure with a known alignment of just a halfword
1235 : and a field split across two aligned halfwords within the structure.
1236 : Or likewise a structure with a known alignment of just a byte
1237 : and a field split across two bytes.
1238 : Such cases are not supposed to be able to occur. */
1239 :
1240 128453 : scalar_int_mode best_mode;
1241 128453 : if (MEM_P (op0))
1242 : {
1243 84029 : unsigned int max_bitsize = BITS_PER_WORD;
1244 84029 : scalar_int_mode imode;
1245 143387 : if (op0_mode.exists (&imode) && GET_MODE_BITSIZE (imode) < max_bitsize)
1246 89852 : max_bitsize = GET_MODE_BITSIZE (imode);
1247 :
1248 84029 : if (!get_best_mode (bitsize, bitnum, bitregion_start, bitregion_end,
1249 84029 : MEM_ALIGN (op0), max_bitsize, MEM_VOLATILE_P (op0),
1250 : &best_mode))
1251 : {
1252 : /* The only way this should occur is if the field spans word
1253 : boundaries. */
1254 6542 : store_split_bit_field (op0, op0_mode, bitsize, bitnum,
1255 : bitregion_start, bitregion_end,
1256 : value, value_mode, reverse);
1257 6542 : return;
1258 : }
1259 :
1260 77487 : op0 = narrow_bit_field_mem (op0, best_mode, bitsize, bitnum, &bitnum);
1261 : }
1262 : else
1263 44424 : best_mode = op0_mode.require ();
1264 :
1265 121911 : store_fixed_bit_field_1 (op0, best_mode, bitsize, bitnum,
1266 : value, value_mode, reverse);
1267 : }
1268 :
1269 : /* Helper function for store_fixed_bit_field, stores
1270 : the bit field always using MODE, which is the mode of OP0. The other
1271 : arguments are as for store_fixed_bit_field. */
1272 :
1273 : static void
1274 121911 : store_fixed_bit_field_1 (rtx op0, scalar_int_mode mode,
1275 : unsigned HOST_WIDE_INT bitsize,
1276 : unsigned HOST_WIDE_INT bitnum,
1277 : rtx value, scalar_int_mode value_mode, bool reverse)
1278 : {
1279 121911 : rtx temp;
1280 121911 : int all_zero = 0;
1281 121911 : int all_one = 0;
1282 :
1283 : /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
1284 : for invalid input, such as f5 from gcc.dg/pr48335-2.c. */
1285 :
1286 121911 : if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1287 : /* BITNUM is the distance between our msb
1288 : and that of the containing datum.
1289 : Convert it to the distance from the lsb. */
1290 1056 : bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;
1291 :
1292 : /* Now BITNUM is always the distance between our lsb
1293 : and that of OP0. */
1294 :
1295 : /* Shift VALUE left by BITNUM bits. If VALUE is not constant,
1296 : we must first convert its mode to MODE. */
1297 :
1298 121911 : if (CONST_INT_P (value))
1299 : {
1300 73483 : unsigned HOST_WIDE_INT v = UINTVAL (value);
1301 :
1302 73483 : if (bitsize < HOST_BITS_PER_WIDE_INT)
1303 73465 : v &= (HOST_WIDE_INT_1U << bitsize) - 1;
1304 :
1305 73483 : if (v == 0)
1306 : all_zero = 1;
1307 58574 : else if ((bitsize < HOST_BITS_PER_WIDE_INT
1308 58566 : && v == (HOST_WIDE_INT_1U << bitsize) - 1)
1309 50145 : || (bitsize == HOST_BITS_PER_WIDE_INT
1310 50145 : && v == HOST_WIDE_INT_M1U))
1311 8429 : all_one = 1;
1312 :
1313 73483 : value = lshift_value (mode, v, bitnum);
1314 : }
1315 : else
1316 : {
1317 48428 : int must_and = (GET_MODE_BITSIZE (value_mode) != bitsize
1318 75700 : && bitnum + bitsize != GET_MODE_BITSIZE (mode));
1319 :
1320 48428 : if (value_mode != mode)
1321 26405 : value = convert_to_mode (mode, value, 1);
1322 :
1323 48428 : if (must_and)
1324 20534 : value = expand_binop (mode, and_optab, value,
1325 : mask_rtx (mode, 0, bitsize, 0),
1326 : NULL_RTX, 1, OPTAB_LIB_WIDEN);
1327 48428 : if (bitnum > 0)
1328 13876 : value = expand_shift (LSHIFT_EXPR, mode, value,
1329 13876 : bitnum, NULL_RTX, 1);
1330 : }
1331 :
1332 121911 : if (reverse)
1333 528 : value = flip_storage_order (mode, value);
1334 :
1335 : /* Now clear the chosen bits in OP0,
1336 : except that if VALUE is -1 we need not bother. */
1337 : /* We keep the intermediates in registers to allow CSE to combine
1338 : consecutive bitfield assignments. */
1339 :
1340 121911 : temp = force_reg (mode, op0);
1341 :
1342 121911 : if (! all_one)
1343 : {
1344 113482 : rtx mask = mask_rtx (mode, bitnum, bitsize, 1);
1345 113482 : if (reverse)
1346 516 : mask = flip_storage_order (mode, mask);
1347 113482 : temp = expand_binop (mode, and_optab, temp, mask,
1348 : NULL_RTX, 1, OPTAB_LIB_WIDEN);
1349 113482 : temp = force_reg (mode, temp);
1350 : }
1351 :
1352 : /* Now logical-or VALUE into OP0, unless it is zero. */
1353 :
1354 121911 : if (! all_zero)
1355 : {
1356 107002 : temp = expand_binop (mode, ior_optab, temp, value,
1357 : NULL_RTX, 1, OPTAB_LIB_WIDEN);
1358 107002 : temp = force_reg (mode, temp);
1359 : }
1360 :
1361 121911 : if (op0 != temp)
1362 : {
1363 121911 : op0 = copy_rtx (op0);
1364 121911 : emit_move_insn (op0, temp);
1365 : }
1366 121911 : }
1367 :
1368 : /* Store a bit field that is split across multiple accessible memory objects.
1369 :
1370 : OP0 is the REG, SUBREG or MEM rtx for the first of the objects.
1371 : BITSIZE is the field width; BITPOS the position of its first bit
1372 : (within the word).
1373 : VALUE is the value to store, which has mode VALUE_MODE.
1374 : If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
1375 : a BLKmode MEM.
1376 :
1377 : If REVERSE is true, the store is to be done in reverse order.
1378 :
1379 : This does not yet handle fields wider than BITS_PER_WORD. */
1380 :
1381 : static void
1382 6613 : store_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1383 : unsigned HOST_WIDE_INT bitsize,
1384 : unsigned HOST_WIDE_INT bitpos,
1385 : poly_uint64 bitregion_start, poly_uint64 bitregion_end,
1386 : rtx value, scalar_int_mode value_mode, bool reverse)
1387 : {
1388 6613 : unsigned int unit, total_bits, bitsdone = 0;
1389 :
1390 : /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
1391 : much at a time. */
1392 6613 : if (REG_P (op0) || GET_CODE (op0) == SUBREG)
1393 71 : unit = BITS_PER_WORD;
1394 : else
1395 6542 : unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);
1396 :
1397 : /* If OP0 is a memory with a mode, then UNIT must not be larger than
1398 : OP0's mode as well. Otherwise, store_fixed_bit_field will call us
1399 : again, and we will mutually recurse forever. */
1400 6613 : if (MEM_P (op0) && op0_mode.exists ())
1401 5115 : unit = MIN (unit, GET_MODE_BITSIZE (op0_mode.require ()));
1402 :
1403 : /* If VALUE is a constant other than a CONST_INT, get it into a register in
1404 : WORD_MODE. If we can do this using gen_lowpart_common, do so. Note
1405 : that VALUE might be a floating-point constant. */
1406 6613 : if (CONSTANT_P (value) && !CONST_INT_P (value))
1407 : {
1408 0 : rtx word = gen_lowpart_common (word_mode, value);
1409 :
1410 0 : if (word && (value != word))
1411 : value = word;
1412 : else
1413 0 : value = gen_lowpart_common (word_mode, force_reg (value_mode, value));
1414 0 : value_mode = word_mode;
1415 : }
1416 :
1417 6613 : total_bits = GET_MODE_BITSIZE (value_mode);
1418 :
1419 30857 : while (bitsdone < bitsize)
1420 : {
1421 24244 : unsigned HOST_WIDE_INT thissize;
1422 24244 : unsigned HOST_WIDE_INT thispos;
1423 24244 : unsigned HOST_WIDE_INT offset;
1424 24244 : rtx part;
1425 :
1426 24244 : offset = (bitpos + bitsdone) / unit;
1427 24244 : thispos = (bitpos + bitsdone) % unit;
1428 :
1429 : /* When region of bytes we can touch is restricted, decrease
1430 : UNIT close to the end of the region as needed. If op0 is a REG
1431 : or SUBREG of REG, don't do this, as there can't be data races
1432 : on a register and we can expand shorter code in some cases. */
1433 31043 : if (maybe_ne (bitregion_end, 0U)
1434 24244 : && unit > BITS_PER_UNIT
1435 14209 : && maybe_gt (bitpos + bitsdone - thispos + unit, bitregion_end + 1)
1436 6863 : && !REG_P (op0)
1437 31043 : && (GET_CODE (op0) != SUBREG || !REG_P (SUBREG_REG (op0))))
1438 : {
1439 6799 : unit = unit / 2;
1440 6799 : continue;
1441 : }
1442 :
1443 : /* THISSIZE must not overrun a word boundary. Otherwise,
1444 : store_fixed_bit_field will call us again, and we will mutually
1445 : recurse forever. */
1446 17445 : thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
1447 17445 : thissize = MIN (thissize, unit - thispos);
1448 :
1449 17445 : if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
1450 : {
1451 : /* Fetch successively less significant portions. */
1452 214 : if (CONST_INT_P (value))
1453 108 : part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1454 : >> (bitsize - bitsdone - thissize))
1455 : & ((HOST_WIDE_INT_1 << thissize) - 1));
1456 : /* Likewise, but the source is little-endian. */
1457 106 : else if (reverse)
1458 106 : part = extract_fixed_bit_field (word_mode, value, value_mode,
1459 : thissize,
1460 : bitsize - bitsdone - thissize,
1461 : NULL_RTX, 1, false);
1462 : else
1463 : /* The args are chosen so that the last part includes the
1464 : lsb. Give extract_bit_field the value it needs (with
1465 : endianness compensation) to fetch the piece we want. */
1466 : part = extract_fixed_bit_field (word_mode, value, value_mode,
1467 : thissize,
1468 : total_bits - bitsize + bitsdone,
1469 : NULL_RTX, 1, false);
1470 : }
1471 : else
1472 : {
1473 : /* Fetch successively more significant portions. */
1474 17231 : if (CONST_INT_P (value))
1475 12867 : part = GEN_INT (((unsigned HOST_WIDE_INT) (INTVAL (value))
1476 : >> bitsdone)
1477 : & ((HOST_WIDE_INT_1 << thissize) - 1));
1478 : /* Likewise, but the source is big-endian. */
1479 4364 : else if (reverse)
1480 : part = extract_fixed_bit_field (word_mode, value, value_mode,
1481 : thissize,
1482 : total_bits - bitsdone - thissize,
1483 : NULL_RTX, 1, false);
1484 : else
1485 4364 : part = extract_fixed_bit_field (word_mode, value, value_mode,
1486 : thissize, bitsdone, NULL_RTX,
1487 : 1, false);
1488 : }
1489 :
1490 : /* If OP0 is a register, then handle OFFSET here. */
1491 17445 : rtx op0_piece = op0;
1492 17445 : opt_scalar_int_mode op0_piece_mode = op0_mode;
1493 17445 : if (SUBREG_P (op0) || REG_P (op0))
1494 : {
1495 142 : scalar_int_mode imode;
1496 142 : if (op0_mode.exists (&imode)
1497 142 : && GET_MODE_SIZE (imode) < UNITS_PER_WORD)
1498 : {
1499 0 : if (offset)
1500 0 : op0_piece = const0_rtx;
1501 : }
1502 : else
1503 : {
1504 142 : op0_piece = operand_subword_force (op0,
1505 142 : offset * unit / BITS_PER_WORD,
1506 142 : GET_MODE (op0));
1507 142 : op0_piece_mode = word_mode;
1508 : }
1509 142 : offset &= BITS_PER_WORD / unit - 1;
1510 : }
1511 :
1512 : /* OFFSET is in UNITs, and UNIT is in bits. If WORD is const0_rtx,
1513 : it is just an out-of-bounds access. Ignore it. */
1514 17445 : if (op0_piece != const0_rtx)
1515 17445 : store_fixed_bit_field (op0_piece, op0_piece_mode, thissize,
1516 17445 : offset * unit + thispos, bitregion_start,
1517 : bitregion_end, part, word_mode, reverse);
1518 17445 : bitsdone += thissize;
1519 : }
1520 6613 : }
1521 :
1522 : /* A subroutine of extract_bit_field_1 that converts return value X
1523 : to either MODE or TMODE. MODE, TMODE and UNSIGNEDP are arguments
1524 : to extract_bit_field. */
1525 :
1526 : static rtx
1527 852595 : convert_extracted_bit_field (rtx x, machine_mode mode,
1528 : machine_mode tmode, bool unsignedp)
1529 : {
1530 852595 : if (GET_MODE (x) == tmode || GET_MODE (x) == mode)
1531 : return x;
1532 :
1533 : /* If the x mode is not a scalar integral, first convert to the
1534 : integer mode of that size and then access it as a floating-point
1535 : value via a SUBREG. */
1536 21486 : if (!SCALAR_INT_MODE_P (tmode))
1537 : {
1538 11518 : scalar_int_mode int_mode = int_mode_for_mode (tmode).require ();
1539 11518 : x = convert_to_mode (int_mode, x, unsignedp);
1540 11518 : x = force_reg (int_mode, x);
1541 11518 : return gen_lowpart (tmode, x);
1542 : }
1543 :
1544 9968 : return convert_to_mode (tmode, x, unsignedp);
1545 : }
1546 :
1547 : /* Try to use an ext(z)v pattern to extract a field from OP0.
1548 : Return the extracted value on success, otherwise return null.
1549 : EXTV describes the extraction instruction to use. If OP0_MODE
1550 : is defined, it is the mode of OP0, otherwise OP0 is a BLKmode MEM.
1551 : The other arguments are as for extract_bit_field. */
1552 :
1553 : static rtx
1554 147826 : extract_bit_field_using_extv (const extraction_insn *extv, rtx op0,
1555 : opt_scalar_int_mode op0_mode,
1556 : unsigned HOST_WIDE_INT bitsize,
1557 : unsigned HOST_WIDE_INT bitnum,
1558 : int unsignedp, rtx target,
1559 : machine_mode mode, machine_mode tmode)
1560 : {
1561 147826 : class expand_operand ops[4];
1562 147826 : rtx spec_target = target;
1563 147826 : rtx spec_target_subreg = 0;
1564 147826 : scalar_int_mode ext_mode = extv->field_mode;
1565 147826 : unsigned unit = GET_MODE_BITSIZE (ext_mode);
1566 :
1567 147826 : if (bitsize == 0 || unit < bitsize)
1568 : return NULL_RTX;
1569 :
1570 147826 : if (MEM_P (op0))
1571 : /* Get a reference to the first byte of the field. */
1572 0 : op0 = narrow_bit_field_mem (op0, extv->struct_mode, bitsize, bitnum,
1573 : &bitnum);
1574 : else
1575 : {
1576 : /* Convert from counting within OP0 to counting in EXT_MODE. */
1577 147826 : if (BYTES_BIG_ENDIAN)
1578 : bitnum += unit - GET_MODE_BITSIZE (op0_mode.require ());
1579 :
1580 : /* If op0 is a register, we need it in EXT_MODE to make it
1581 : acceptable to the format of ext(z)v. */
1582 147826 : if (GET_CODE (op0) == SUBREG && op0_mode.require () != ext_mode)
1583 0 : return NULL_RTX;
1584 147826 : if (REG_P (op0) && op0_mode.require () != ext_mode)
1585 49345 : op0 = gen_lowpart_SUBREG (ext_mode, op0);
1586 : }
1587 :
1588 : /* If BITS_BIG_ENDIAN is zero on a BYTES_BIG_ENDIAN machine, we count
1589 : "backwards" from the size of the unit we are extracting from.
1590 : Otherwise, we count bits from the most significant on a
1591 : BYTES/BITS_BIG_ENDIAN machine. */
1592 :
1593 147826 : if (BITS_BIG_ENDIAN != BYTES_BIG_ENDIAN)
1594 : bitnum = unit - bitsize - bitnum;
1595 :
1596 147826 : if (target == 0)
1597 28581 : target = spec_target = gen_reg_rtx (tmode);
1598 :
1599 147826 : if (GET_MODE (target) != ext_mode)
1600 : {
1601 80661 : rtx temp;
1602 : /* Don't use LHS paradoxical subreg if explicit truncation is needed
1603 : between the mode of the extraction (word_mode) and the target
1604 : mode. Instead, create a temporary and use convert_move to set
1605 : the target. */
1606 80661 : if (REG_P (target)
1607 79575 : && TRULY_NOOP_TRUNCATION_MODES_P (GET_MODE (target), ext_mode)
1608 160236 : && (temp = gen_lowpart_if_possible (ext_mode, target)))
1609 : {
1610 79065 : target = temp;
1611 79065 : if (partial_subreg_p (GET_MODE (spec_target), ext_mode))
1612 77937 : spec_target_subreg = target;
1613 : }
1614 : else
1615 1596 : target = gen_reg_rtx (ext_mode);
1616 : }
1617 :
1618 147826 : create_output_operand (&ops[0], target, ext_mode);
1619 147826 : create_fixed_operand (&ops[1], op0);
1620 147826 : create_integer_operand (&ops[2], bitsize);
1621 147826 : create_integer_operand (&ops[3], bitnum);
1622 147826 : if (maybe_expand_insn (extv->icode, 4, ops))
1623 : {
1624 1749 : target = ops[0].value;
1625 1749 : if (target == spec_target)
1626 : return target;
1627 1749 : if (target == spec_target_subreg)
1628 : return spec_target;
1629 56 : return convert_extracted_bit_field (target, mode, tmode, unsignedp);
1630 : }
1631 : return NULL_RTX;
1632 : }
1633 :
1634 : /* See whether it would be valid to extract the part of OP0 with
1635 : mode OP0_MODE described by BITNUM and BITSIZE into a value of
1636 : mode MODE using a subreg operation.
1637 : Return the subreg if so, otherwise return null. */
1638 :
1639 : static rtx
1640 807741 : extract_bit_field_as_subreg (machine_mode mode, rtx op0,
1641 : machine_mode op0_mode,
1642 : poly_uint64 bitsize, poly_uint64 bitnum)
1643 : {
1644 807741 : poly_uint64 bytenum;
1645 807741 : if (multiple_p (bitnum, BITS_PER_UNIT, &bytenum)
1646 769148 : && known_eq (bitsize, GET_MODE_BITSIZE (mode))
1647 807741 : && lowpart_bit_field_p (bitnum, bitsize, op0_mode)
1648 1576889 : && TRULY_NOOP_TRUNCATION_MODES_P (mode, op0_mode))
1649 651139 : return force_subreg (mode, op0, op0_mode, bytenum);
1650 : return NULL_RTX;
1651 : }
1652 :
/* A subroutine of extract_bit_field, with the same arguments.
   If UNSIGNEDP is -1, the result need not be sign or zero extended.
   If FALLBACK_P is true, fall back to extract_fixed_bit_field
   if we can find no other means of implementing the operation.
   if FALLBACK_P is false, return NULL instead.

   STR_RTX is the structure (REG, SUBREG or MEM) containing the field;
   BITSIZE and BITNUM give the field's width and position in bits.
   MODE is the natural mode of the field value and TMODE the mode the
   caller would prefer for the result.  If REVERSE is true, the
   extraction is to be done in reverse storage order.  If the result
   ends up being stored directly in TARGET and ALT_RTL is non-null,
   *ALT_RTL is set to TARGET.  */

static rtx
extract_bit_field_1 (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
		     int unsignedp, rtx target, machine_mode mode,
		     machine_mode tmode, bool reverse, bool fallback_p,
		     rtx *alt_rtl)
{
  rtx op0 = str_rtx;
  machine_mode mode1;

  if (tmode == VOIDmode)
    tmode = mode;

  /* Peel off any SUBREGs, folding their byte offsets into BITNUM so
     that we operate directly on the underlying object.  */
  while (GET_CODE (op0) == SUBREG)
    {
      bitnum += SUBREG_BYTE (op0) * BITS_PER_UNIT;
      op0 = SUBREG_REG (op0);
    }

  /* If we have an out-of-bounds access to a register, just return an
     uninitialized register of the required mode.  This can occur if the
     source code contains an out-of-bounds access to a small array.  */
  if (REG_P (op0) && known_ge (bitnum, GET_MODE_BITSIZE (GET_MODE (op0))))
    return gen_reg_rtx (tmode);

  if (REG_P (op0)
      && mode == GET_MODE (op0)
      && known_eq (bitnum, 0U)
      && known_eq (bitsize, GET_MODE_BITSIZE (GET_MODE (op0))))
    {
      if (reverse)
	op0 = flip_storage_order (mode, op0);
      /* We're trying to extract a full register from itself.  */
      return op0;
    }

  /* First try to check for vector from vector extractions.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && VECTOR_MODE_P (tmode)
      && known_eq (bitsize, GET_MODE_PRECISION (tmode))
      && maybe_gt (GET_MODE_SIZE (GET_MODE (op0)), GET_MODE_SIZE (tmode)))
    {
      machine_mode new_mode = GET_MODE (op0);
      if (GET_MODE_INNER (new_mode) != GET_MODE_INNER (tmode))
	{
	  /* The element types differ; look for a same-size vector mode
	     whose element type matches TMODE's, so that the vec_extract
	     optab below can be queried in consistent modes.  */
	  scalar_mode inner_mode = GET_MODE_INNER (tmode);
	  poly_uint64 nunits;
	  if (!multiple_p (GET_MODE_BITSIZE (GET_MODE (op0)),
			   GET_MODE_UNIT_BITSIZE (tmode), &nunits)
	      || !related_vector_mode (tmode, inner_mode,
				       nunits).exists (&new_mode)
	      || maybe_ne (GET_MODE_SIZE (new_mode),
			   GET_MODE_SIZE (GET_MODE (op0))))
	    new_mode = VOIDmode;
	}
      poly_uint64 pos;
      if (new_mode != VOIDmode
	  && (convert_optab_handler (vec_extract_optab, new_mode, tmode)
	      != CODE_FOR_nothing)
	  /* The field must sit at a whole multiple of TMODE, i.e. be a
	     subvector at a valid extraction index.  */
	  && multiple_p (bitnum, GET_MODE_BITSIZE (tmode), &pos))
	{
	  class expand_operand ops[3];
	  machine_mode outermode = new_mode;
	  machine_mode innermode = tmode;
	  enum insn_code icode
	    = convert_optab_handler (vec_extract_optab, outermode, innermode);

	  if (new_mode != GET_MODE (op0))
	    op0 = gen_lowpart (new_mode, op0);
	  create_output_operand (&ops[0], target, innermode);
	  ops[0].target = 1;
	  create_input_operand (&ops[1], op0, outermode);
	  create_integer_operand (&ops[2], pos);
	  if (maybe_expand_insn (icode, 3, ops))
	    {
	      if (alt_rtl && ops[0].target)
		*alt_rtl = target;
	      target = ops[0].value;
	      if (GET_MODE (target) != mode)
		return gen_lowpart (tmode, target);
	      return target;
	    }
	}
    }

  /* See if we can get a better vector mode before extracting.  */
  if (VECTOR_MODE_P (GET_MODE (op0))
      && !MEM_P (op0)
      && GET_MODE_INNER (GET_MODE (op0)) != tmode)
    {
      machine_mode new_mode;

      /* Start the mode search from the smallest vector mode of the
	 class matching TMODE's class.  */
      if (GET_MODE_CLASS (tmode) == MODE_FLOAT)
	new_mode = MIN_MODE_VECTOR_FLOAT;
      else if (GET_MODE_CLASS (tmode) == MODE_FRACT)
	new_mode = MIN_MODE_VECTOR_FRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_UFRACT)
	new_mode = MIN_MODE_VECTOR_UFRACT;
      else if (GET_MODE_CLASS (tmode) == MODE_ACCUM)
	new_mode = MIN_MODE_VECTOR_ACCUM;
      else if (GET_MODE_CLASS (tmode) == MODE_UACCUM)
	new_mode = MIN_MODE_VECTOR_UACCUM;
      else
	new_mode = MIN_MODE_VECTOR_INT;

      /* Look for a vector mode of the same overall size as OP0 whose
	 elements match the field being extracted, and which the target
	 supports and can tie to OP0's current mode.  */
      FOR_EACH_MODE_FROM (new_mode, new_mode)
	if (known_eq (GET_MODE_SIZE (new_mode), GET_MODE_SIZE (GET_MODE (op0)))
	    && known_eq (GET_MODE_UNIT_SIZE (new_mode), GET_MODE_SIZE (tmode))
	    && known_eq (bitsize, GET_MODE_UNIT_PRECISION (new_mode))
	    && multiple_p (bitnum, GET_MODE_UNIT_PRECISION (new_mode))
	    && targetm.vector_mode_supported_p (new_mode)
	    && targetm.modes_tieable_p (GET_MODE (op0), new_mode))
	  break;
      if (new_mode != VOIDmode)
	op0 = gen_lowpart (new_mode, op0);
    }

  /* Use vec_extract patterns for extracting parts of vectors whenever
     available.  If that fails, see whether the current modes and bitregion
     give a natural subreg.  */
  machine_mode outermode = GET_MODE (op0);
  if (VECTOR_MODE_P (outermode) && !MEM_P (op0))
    {
      scalar_mode innermode = GET_MODE_INNER (outermode);

      enum insn_code icode
	= convert_optab_handler (vec_extract_optab, outermode, innermode);

      poly_uint64 pos;
      if (icode != CODE_FOR_nothing
	  && known_eq (bitsize, GET_MODE_PRECISION (innermode))
	  && multiple_p (bitnum, GET_MODE_PRECISION (innermode), &pos))
	{
	  class expand_operand ops[3];

	  create_output_operand (&ops[0], target,
				 insn_data[icode].operand[0].mode);
	  ops[0].target = 1;
	  create_input_operand (&ops[1], op0, outermode);
	  create_integer_operand (&ops[2], pos);
	  if (maybe_expand_insn (icode, 3, ops))
	    {
	      if (alt_rtl && ops[0].target)
		*alt_rtl = target;
	      target = ops[0].value;
	      if (GET_MODE (target) != mode)
		return gen_lowpart (tmode, target);
	      return target;
	    }
	}
      /* Using subregs is useful if we're extracting one register vector
	 from a multi-register vector.  extract_bit_field_as_subreg checks
	 for valid bitsize and bitnum, so we don't need to do that here.  */
      if (VECTOR_MODE_P (mode))
	{
	  rtx sub = extract_bit_field_as_subreg (mode, op0, outermode,
						 bitsize, bitnum);
	  if (sub)
	    return sub;
	}
    }

  /* Make sure we are playing with integral modes.  Pun with subregs
     if we aren't.  */
  opt_scalar_int_mode op0_mode = int_mode_for_mode (GET_MODE (op0));
  scalar_int_mode imode;
  if (!op0_mode.exists (&imode) || imode != GET_MODE (op0))
    {
      if (MEM_P (op0))
	op0 = adjust_bitfield_address_size (op0, op0_mode.else_blk (),
					    0, MEM_SIZE (op0));
      else if (op0_mode.exists (&imode))
	{
	  op0 = gen_lowpart (imode, op0);

	  /* If we got a SUBREG, force it into a register since we
	     aren't going to be able to do another SUBREG on it.  */
	  if (GET_CODE (op0) == SUBREG)
	    op0 = force_reg (imode, op0);
	}
      else
	{
	  /* No integer mode of the right size exists; spill OP0 to a
	     stack temporary and extract from memory instead.  */
	  poly_int64 size = GET_MODE_SIZE (GET_MODE (op0));
	  rtx mem = assign_stack_temp (GET_MODE (op0), size);
	  emit_move_insn (mem, op0);
	  op0 = adjust_bitfield_address_size (mem, BLKmode, 0, size);
	}
    }

  /* ??? We currently assume TARGET is at least as big as BITSIZE.
     If that's wrong, the solution is to test for it and set TARGET to 0
     if needed.  */

  /* Get the mode of the field to use for atomic access or subreg
     conversion.  */
  if (!SCALAR_INT_MODE_P (tmode)
      || !mode_for_size (bitsize, GET_MODE_CLASS (tmode), 0).exists (&mode1))
    mode1 = mode;
  gcc_assert (mode1 != BLKmode);

  /* Extraction of a full MODE1 value can be done with a subreg as long
     as the least significant bit of the value is the least significant
     bit of either OP0 or a word of OP0.  */
  if (!MEM_P (op0) && !reverse && op0_mode.exists (&imode))
    {
      rtx sub = extract_bit_field_as_subreg (mode1, op0, imode,
					     bitsize, bitnum);
      if (sub)
	return convert_extracted_bit_field (sub, mode, tmode, unsignedp);
    }

  /* Extraction of a full MODE1 value can be done with a load as long as
     the field is on a byte boundary and is sufficiently aligned.  */
  poly_uint64 bytenum;
  if (simple_mem_bitfield_p (op0, bitsize, bitnum, mode1, &bytenum))
    {
      op0 = adjust_bitfield_address (op0, mode1, bytenum);
      if (reverse)
	op0 = flip_storage_order (mode1, op0);
      return convert_extracted_bit_field (op0, mode, tmode, unsignedp);
    }

  /* If we have a memory source and a non-constant bit offset, restrict
     the memory to the referenced bytes.  This is a worst-case fallback
     but is useful for things like vector booleans.  */
  if (MEM_P (op0) && !bitnum.is_constant ())
    {
      bytenum = bits_to_bytes_round_down (bitnum);
      bitnum = num_trailing_bits (bitnum);
      poly_uint64 bytesize = bits_to_bytes_round_up (bitnum + bitsize);
      op0 = adjust_bitfield_address_size (op0, BLKmode, bytenum, bytesize);
      /* OP0 is now BLKmode; forget the previously recorded mode.  */
      op0_mode = opt_scalar_int_mode ();
    }

  /* It's possible we'll need to handle other cases here for
     polynomial bitnum and bitsize.  */

  /* From here on we need to be looking at a fixed-size insertion.  */
  return extract_integral_bit_field (op0, op0_mode, bitsize.to_constant (),
				     bitnum.to_constant (), unsignedp,
				     target, mode, tmode, reverse, fallback_p);
}
1901 :
1902 : /* Subroutine of extract_bit_field_1, with the same arguments, except
1903 : that BITSIZE and BITNUM are constant. Handle cases specific to
1904 : integral modes. If OP0_MODE is defined, it is the mode of OP0,
1905 : otherwise OP0 is a BLKmode MEM. */
1906 :
1907 : static rtx
1908 301527 : extract_integral_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
1909 : unsigned HOST_WIDE_INT bitsize,
1910 : unsigned HOST_WIDE_INT bitnum, int unsignedp,
1911 : rtx target, machine_mode mode, machine_mode tmode,
1912 : bool reverse, bool fallback_p)
1913 : {
1914 : /* Handle fields bigger than a word. */
1915 :
1916 304765 : if (bitsize > BITS_PER_WORD)
1917 : {
1918 : /* Here we transfer the words of the field
1919 : in the order least significant first.
1920 : This is because the most significant word is the one which may
1921 : be less than full. */
1922 :
1923 1496 : const bool backwards = WORDS_BIG_ENDIAN;
1924 1496 : unsigned int nwords = (bitsize + (BITS_PER_WORD - 1)) / BITS_PER_WORD;
1925 1496 : unsigned int i;
1926 1496 : rtx_insn *last;
1927 :
1928 1496 : if (target == 0 || !REG_P (target) || !valid_multiword_target_p (target))
1929 1474 : target = gen_reg_rtx (mode);
1930 :
1931 : /* In case we're about to clobber a base register or something
1932 : (see gcc.c-torture/execute/20040625-1.c). */
1933 1496 : if (reg_mentioned_p (target, op0))
1934 0 : target = gen_reg_rtx (mode);
1935 :
1936 : /* Indicate for flow that the entire target reg is being set. */
1937 1496 : emit_clobber (target);
1938 :
1939 : /* The mode must be fixed-size, since extract_bit_field_1 handles
1940 : extractions from variable-sized objects before calling this
1941 : function. */
1942 1496 : unsigned int target_size
1943 1496 : = GET_MODE_SIZE (GET_MODE (target)).to_constant ();
1944 1496 : last = get_last_insn ();
1945 4488 : for (i = 0; i < nwords; i++)
1946 : {
1947 : /* If I is 0, use the low-order word in both field and target;
1948 : if I is 1, use the next to lowest word; and so on. */
1949 : /* Word number in TARGET to use. */
1950 2992 : unsigned int wordnum
1951 : = (backwards ? target_size / UNITS_PER_WORD - i - 1 : i);
1952 : /* Offset from start of field in OP0. */
1953 5984 : unsigned int bit_offset = (backwards ^ reverse
1954 2992 : ? MAX ((int) bitsize - ((int) i + 1)
1955 : * BITS_PER_WORD,
1956 : 0)
1957 3056 : : (int) i * BITS_PER_WORD);
1958 2992 : rtx target_part = operand_subword (target, wordnum, 1, VOIDmode);
1959 2992 : rtx result_part
1960 3160 : = extract_bit_field_1 (op0, MIN (BITS_PER_WORD,
1961 : bitsize - i * BITS_PER_WORD),
1962 2992 : bitnum + bit_offset,
1963 : (unsignedp ? 1 : -1), target_part,
1964 : mode, word_mode, reverse, fallback_p, NULL);
1965 :
1966 2992 : gcc_assert (target_part);
1967 2992 : if (!result_part)
1968 : {
1969 0 : delete_insns_since (last);
1970 0 : return NULL;
1971 : }
1972 :
1973 2992 : if (result_part != target_part)
1974 2878 : emit_move_insn (target_part, result_part);
1975 : }
1976 :
1977 1496 : if (unsignedp)
1978 : {
1979 : /* Unless we've filled TARGET, the upper regs in a multi-reg value
1980 : need to be zero'd out. */
1981 1508 : if (target_size > nwords * UNITS_PER_WORD)
1982 : {
1983 0 : unsigned int i, total_words;
1984 :
1985 0 : total_words = target_size / UNITS_PER_WORD;
1986 0 : for (i = nwords; i < total_words; i++)
1987 0 : emit_move_insn
1988 0 : (operand_subword (target,
1989 0 : backwards ? total_words - i - 1 : i,
1990 : 1, VOIDmode),
1991 : const0_rtx);
1992 : }
1993 1476 : return target;
1994 : }
1995 :
1996 : /* Signed bit field: sign-extend with two arithmetic shifts. */
1997 40 : target = expand_shift (LSHIFT_EXPR, mode, target,
1998 20 : GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
1999 40 : return expand_shift (RSHIFT_EXPR, mode, target,
2000 20 : GET_MODE_BITSIZE (mode) - bitsize, NULL_RTX, 0);
2001 : }
2002 :
2003 : /* If OP0 is a multi-word register, narrow it to the affected word.
2004 : If the region spans two words, defer to extract_split_bit_field. */
2005 465263 : if (!MEM_P (op0) && GET_MODE_SIZE (op0_mode.require ()) > UNITS_PER_WORD)
2006 : {
2007 3876 : if (bitnum % BITS_PER_WORD + bitsize > BITS_PER_WORD)
2008 : {
2009 1140 : if (!fallback_p)
2010 : return NULL_RTX;
2011 62 : target = extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2012 : unsignedp, reverse);
2013 62 : return convert_extracted_bit_field (target, mode, tmode, unsignedp);
2014 : }
2015 : /* If OP0 is a hard register, copy it to a pseudo before calling
2016 : force_subreg. */
2017 2736 : if (REG_P (op0) && HARD_REGISTER_P (op0))
2018 1 : op0 = copy_to_reg (op0);
2019 2736 : op0 = force_subreg (word_mode, op0, op0_mode.require (),
2020 3200 : bitnum / BITS_PER_WORD * UNITS_PER_WORD);
2021 2736 : op0_mode = word_mode;
2022 2968 : bitnum %= BITS_PER_WORD;
2023 : }
2024 :
2025 : /* From here on we know the desired field is smaller than a word.
2026 : If OP0 is a register, it too fits within a word. */
2027 298891 : enum extraction_pattern pattern = unsignedp ? EP_extzv : EP_extv;
2028 298891 : extraction_insn extv;
2029 298891 : if (!MEM_P (op0)
2030 161590 : && !reverse
2031 : /* ??? We could limit the structure size to the part of OP0 that
2032 : contains the field, with appropriate checks for endianness
2033 : and TARGET_TRULY_NOOP_TRUNCATION. */
2034 460473 : && get_best_reg_extraction_insn (&extv, pattern,
2035 474229 : GET_MODE_BITSIZE (op0_mode.require ()),
2036 : tmode))
2037 : {
2038 147826 : rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
2039 : bitsize, bitnum,
2040 : unsignedp, target, mode,
2041 : tmode);
2042 147826 : if (result)
2043 : return result;
2044 : }
2045 :
2046 : /* If OP0 is a memory, try copying it to a register and seeing if a
2047 : cheap register alternative is available. */
2048 297142 : if (MEM_P (op0) & !reverse)
2049 : {
2050 137098 : if (get_best_mem_extraction_insn (&extv, pattern, bitsize, bitnum,
2051 : tmode))
2052 : {
2053 0 : rtx result = extract_bit_field_using_extv (&extv, op0, op0_mode,
2054 : bitsize, bitnum,
2055 : unsignedp, target, mode,
2056 : tmode);
2057 0 : if (result)
2058 0 : return result;
2059 : }
2060 :
2061 137098 : rtx_insn *last = get_last_insn ();
2062 :
2063 : /* Try loading part of OP0 into a register and extracting the
2064 : bitfield from that. */
2065 137098 : unsigned HOST_WIDE_INT bitpos;
2066 137098 : rtx xop0 = adjust_bit_field_mem_for_reg (pattern, op0, bitsize, bitnum,
2067 : 0, 0, tmode, &bitpos);
2068 137098 : if (xop0)
2069 : {
2070 134452 : xop0 = copy_to_reg (xop0);
2071 134452 : rtx result = extract_bit_field_1 (xop0, bitsize, bitpos,
2072 : unsignedp, target,
2073 : mode, tmode, reverse, false, NULL);
2074 134452 : if (result)
2075 : return result;
2076 134452 : delete_insns_since (last);
2077 : }
2078 : }
2079 :
2080 297142 : if (!fallback_p)
2081 : return NULL;
2082 :
2083 : /* Find a correspondingly-sized integer field, so we can apply
2084 : shifts and masks to it. */
2085 163768 : scalar_int_mode int_mode;
2086 163768 : if (!int_mode_for_mode (tmode).exists (&int_mode))
2087 : /* If this fails, we should probably push op0 out to memory and then
2088 : do a load. */
2089 0 : int_mode = int_mode_for_mode (mode).require ();
2090 :
2091 163768 : target = extract_fixed_bit_field (int_mode, op0, op0_mode, bitsize,
2092 : bitnum, target, unsignedp, reverse);
2093 :
2094 : /* Complex values must be reversed piecewise, so we need to undo the global
2095 : reversal, convert to the complex mode and reverse again. */
2096 163768 : if (reverse && COMPLEX_MODE_P (tmode))
2097 : {
2098 0 : target = flip_storage_order (int_mode, target);
2099 0 : target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2100 0 : target = flip_storage_order (tmode, target);
2101 : }
2102 : else
2103 163768 : target = convert_extracted_bit_field (target, mode, tmode, unsignedp);
2104 :
2105 : return target;
2106 : }
2107 :
/* Generate code to extract a byte-field from STR_RTX
   containing BITSIZE bits, starting at BITNUM,
   and put it in TARGET if possible (if TARGET is nonzero).
   Regardless of TARGET, we return the rtx for where the value is placed.

   STR_RTX is the structure containing the byte (a REG or MEM).
   UNSIGNEDP is nonzero if this is an unsigned bit field.
   MODE is the natural mode of the field value once extracted.
   TMODE is the mode the caller would like the value to have;
   but the value may be returned with type MODE instead.

   If REVERSE is true, the extraction is to be done in reverse order.

   If a TARGET is specified and we can store in it at no extra cost,
   we do so, and return TARGET.
   Otherwise, we return a REG of mode TMODE or MODE, with TMODE preferred
   if they are equally easy.

   If the result can be stored at TARGET, and ALT_RTL is non-NULL,
   then *ALT_RTL is set to TARGET (before legitimization).  */

rtx
extract_bit_field (rtx str_rtx, poly_uint64 bitsize, poly_uint64 bitnum,
		   int unsignedp, rtx target, machine_mode mode,
		   machine_mode tmode, bool reverse, rtx *alt_rtl)
{
  machine_mode mode1;

  /* Handle -fstrict-volatile-bitfields in the cases where it applies.  */
  /* Choose the mode used for the strict-volatile-bitfield check:
     prefer STR_RTX's own mode, then TARGET's, then TMODE.  */
  if (maybe_ne (GET_MODE_BITSIZE (GET_MODE (str_rtx)), 0))
    mode1 = GET_MODE (str_rtx);
  else if (target && maybe_ne (GET_MODE_BITSIZE (GET_MODE (target)), 0))
    mode1 = GET_MODE (target);
  else
    mode1 = tmode;

  /* The strict-volatile path requires constant size/position and a
     scalar integer access mode.  */
  unsigned HOST_WIDE_INT ibitsize, ibitnum;
  scalar_int_mode int_mode;
  if (bitsize.is_constant (&ibitsize)
      && bitnum.is_constant (&ibitnum)
      && is_a <scalar_int_mode> (mode1, &int_mode)
      && strict_volatile_bitfield_p (str_rtx, ibitsize, ibitnum,
				     int_mode, 0, 0))
    {
      /* Extraction of a full INT_MODE value can be done with a simple load.
	 We know here that the field can be accessed with one single
	 instruction.  For targets that support unaligned memory,
	 an unaligned access may be necessary.  */
      if (ibitsize == GET_MODE_BITSIZE (int_mode))
	{
	  rtx result = adjust_bitfield_address (str_rtx, int_mode,
						ibitnum / BITS_PER_UNIT);
	  if (reverse)
	    result = flip_storage_order (int_mode, result);
	  gcc_assert (ibitnum % BITS_PER_UNIT == 0);
	  return convert_extracted_bit_field (result, mode, tmode, unsignedp);
	}

      /* Otherwise narrow the memory access to the INT_MODE word that
	 contains the field and extract from a register copy of it, so
	 that exactly one INT_MODE access is performed.  */
      str_rtx = narrow_bit_field_mem (str_rtx, int_mode, ibitsize, ibitnum,
				      &ibitnum);
      gcc_assert (ibitnum + ibitsize <= GET_MODE_BITSIZE (int_mode));
      str_rtx = copy_to_reg (str_rtx);
      return extract_bit_field_1 (str_rtx, ibitsize, ibitnum, unsignedp,
				  target, mode, tmode, reverse, true, alt_rtl);
    }

  return extract_bit_field_1 (str_rtx, bitsize, bitnum, unsignedp,
			      target, mode, tmode, reverse, true, alt_rtl);
}
2177 :
2178 : /* Use shifts and boolean operations to extract a field of BITSIZE bits
2179 : from bit BITNUM of OP0. If OP0_MODE is defined, it is the mode of OP0,
2180 : otherwise OP0 is a BLKmode MEM.
2181 :
2182 : UNSIGNEDP is nonzero for an unsigned bit field (don't sign-extend value).
2183 : If REVERSE is true, the extraction is to be done in reverse order.
2184 :
2185 : If TARGET is nonzero, attempts to store the value there
2186 : and return TARGET, but this is not guaranteed.
2187 : If TARGET is not used, create a pseudo-reg of mode TMODE for the value. */
2188 :
2189 : static rtx
2190 181988 : extract_fixed_bit_field (machine_mode tmode, rtx op0,
2191 : opt_scalar_int_mode op0_mode,
2192 : unsigned HOST_WIDE_INT bitsize,
2193 : unsigned HOST_WIDE_INT bitnum, rtx target,
2194 : int unsignedp, bool reverse)
2195 : {
2196 181988 : scalar_int_mode mode;
2197 181988 : if (MEM_P (op0))
2198 : {
2199 150951 : if (!get_best_mode (bitsize, bitnum, 0, 0, MEM_ALIGN (op0),
2200 150951 : BITS_PER_WORD, MEM_VOLATILE_P (op0), &mode))
2201 : /* The only way this should occur is if the field spans word
2202 : boundaries. */
2203 4279 : return extract_split_bit_field (op0, op0_mode, bitsize, bitnum,
2204 4279 : unsignedp, reverse);
2205 :
2206 146672 : op0 = narrow_bit_field_mem (op0, mode, bitsize, bitnum, &bitnum);
2207 : }
2208 : else
2209 31037 : mode = op0_mode.require ();
2210 :
2211 177709 : return extract_fixed_bit_field_1 (tmode, op0, mode, bitsize, bitnum,
2212 177709 : target, unsignedp, reverse);
2213 : }
2214 :
/* Helper function for extract_fixed_bit_field, extracts
   the bit field always using MODE, which is the mode of OP0.
   If UNSIGNEDP is -1, the result need not be sign or zero extended.
   The other arguments are as for extract_fixed_bit_field.  */

static rtx
extract_fixed_bit_field_1 (machine_mode tmode, rtx op0, scalar_int_mode mode,
			   unsigned HOST_WIDE_INT bitsize,
			   unsigned HOST_WIDE_INT bitnum, rtx target,
			   int unsignedp, bool reverse)
{
  /* Note that bitsize + bitnum can be greater than GET_MODE_BITSIZE (mode)
     for invalid input, such as extract equivalent of f5 from
     gcc.dg/pr48335-2.c.  */

  if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
    /* BITNUM is the distance between our msb and that of OP0.
       Convert it to the distance from the lsb.  */
    bitnum = GET_MODE_BITSIZE (mode) - bitsize - bitnum;

  /* Now BITNUM is always the distance between the field's lsb and that of OP0.
     We have reduced the big-endian case to the little-endian case.  */
  if (reverse)
    op0 = flip_storage_order (mode, op0);

  if (unsignedp)
    {
      /* Unsigned (or "don't care") extraction: logical shift right,
	 then mask off the high bits if they could be nonzero.  */
      if (bitnum)
	{
	  /* If the field does not already start at the lsb,
	     shift it so it does.  */
	  /* Maybe propagate the target for the shift.  */
	  rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
	  if (tmode != mode)
	    subtarget = 0;
	  op0 = expand_shift (RSHIFT_EXPR, mode, op0, bitnum, subtarget, 1);
	}
      /* Convert the value to the desired mode.  TMODE must also be a
	 scalar integer for this conversion to make sense, since we
	 shouldn't reinterpret the bits.  */
      scalar_int_mode new_mode = as_a <scalar_int_mode> (tmode);
      if (mode != new_mode)
	op0 = convert_to_mode (new_mode, op0, 1);

      /* Unless the msb of the field used to be the msb when we shifted,
	 mask out the upper bits.  */

      /* When UNSIGNEDP is -1 the caller has said the upper bits are
	 irrelevant, so the masking can be skipped.  */
      if (GET_MODE_BITSIZE (mode) != bitnum + bitsize
	  && unsignedp != -1)
	return expand_binop (new_mode, and_optab, op0,
			     mask_rtx (new_mode, 0, bitsize, 0),
			     target, 1, OPTAB_LIB_WIDEN);
      return op0;
    }

  /* To extract a signed bit-field, first shift its msb to the msb of the word,
     then arithmetic-shift its lsb to the lsb of the word.  */
  op0 = force_reg (mode, op0);

  /* Find the narrowest integer mode that contains the field.  */

  opt_scalar_int_mode mode_iter;
  FOR_EACH_MODE_IN_CLASS (mode_iter, MODE_INT)
    if (GET_MODE_BITSIZE (mode_iter.require ()) >= bitsize + bitnum)
      break;

  mode = mode_iter.require ();
  op0 = convert_to_mode (mode, op0, 0);

  /* TARGET has mode TMODE; it cannot hold the intermediate MODE value.  */
  if (mode != tmode)
    target = 0;

  if (GET_MODE_BITSIZE (mode) != (bitsize + bitnum))
    {
      int amount = GET_MODE_BITSIZE (mode) - (bitsize + bitnum);
      /* Maybe propagate the target for the shift.  */
      rtx subtarget = (target != 0 && REG_P (target) ? target : 0);
      op0 = expand_shift (LSHIFT_EXPR, mode, op0, amount, subtarget, 1);
    }

  /* Arithmetic shift right sign-extends the field into place.  */
  return expand_shift (RSHIFT_EXPR, mode, op0,
		       GET_MODE_BITSIZE (mode) - bitsize, target, 0);
}
2298 :
/* Return a constant integer (CONST_INT or CONST_DOUBLE) rtx with the value
   VALUE << BITPOS.  The shift is performed in MODE's precision via the
   wide-int machinery, so bits shifted beyond MODE are dropped.  */

static rtx
lshift_value (machine_mode mode, unsigned HOST_WIDE_INT value,
	      int bitpos)
{
  return immed_wide_int_const (wi::lshift (value, bitpos), mode);
}
2308 :
/* Extract a bit field that is split across two words
   and return an RTX for the result.

   OP0 is the REG, SUBREG or MEM rtx for the first of the two words.
   BITSIZE is the field width; BITPOS, position of its first bit, in the word.
   UNSIGNEDP is 1 if should zero-extend the contents; else sign-extend.
   If OP0_MODE is defined, it is the mode of OP0, otherwise OP0 is
   a BLKmode MEM.

   If REVERSE is true, the extraction is to be done in reverse order.  */

static rtx
extract_split_bit_field (rtx op0, opt_scalar_int_mode op0_mode,
			 unsigned HOST_WIDE_INT bitsize,
			 unsigned HOST_WIDE_INT bitpos, int unsignedp,
			 bool reverse)
{
  unsigned int unit;
  unsigned int bitsdone = 0;	/* Number of field bits extracted so far.  */
  rtx result = NULL_RTX;	/* Accumulated (unsigned) result.  */
  int first = 1;

  /* Make sure UNIT isn't larger than BITS_PER_WORD, we can only handle that
     much at a time.  */
  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
    unit = BITS_PER_WORD;
  else
    unit = MIN (MEM_ALIGN (op0), BITS_PER_WORD);

  /* Extract the field one UNIT-bounded piece at a time and OR the
     pieces together.  */
  while (bitsdone < bitsize)
    {
      unsigned HOST_WIDE_INT thissize;
      rtx part;
      unsigned HOST_WIDE_INT thispos;
      unsigned HOST_WIDE_INT offset;

      offset = (bitpos + bitsdone) / unit;
      thispos = (bitpos + bitsdone) % unit;

      /* THISSIZE must not overrun a word boundary.  Otherwise,
	 extract_fixed_bit_field will call us again, and we will mutually
	 recurse forever.  */
      thissize = MIN (bitsize - bitsdone, BITS_PER_WORD);
      thissize = MIN (thissize, unit - thispos);

      /* If OP0 is a register, then handle OFFSET here.  */
      rtx op0_piece = op0;
      opt_scalar_int_mode op0_piece_mode = op0_mode;
      if (SUBREG_P (op0) || REG_P (op0))
	{
	  op0_piece = operand_subword_force (op0, offset, op0_mode.require ());
	  op0_piece_mode = word_mode;
	  offset = 0;
	}

      /* Extract the parts in bit-counting order,
	 whose meaning is determined by BYTES_PER_UNIT.
	 OFFSET is in UNITs, and UNIT is in bits.  */
      /* Each part is extracted zero-extended (unsignedp argument 1) so
	 that ORing the shifted parts reconstructs the field.  */
      part = extract_fixed_bit_field (word_mode, op0_piece, op0_piece_mode,
				      thissize, offset * unit + thispos,
				      0, 1, reverse);
      bitsdone += thissize;

      /* Shift this part into place for the result.  */
      if (reverse ? !BYTES_BIG_ENDIAN : BYTES_BIG_ENDIAN)
	{
	  if (bitsize != bitsdone)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsize - bitsdone, 0, 1);
	}
      else
	{
	  if (bitsdone != thissize)
	    part = expand_shift (LSHIFT_EXPR, word_mode, part,
				 bitsdone - thissize, 0, 1);
	}

      if (first)
	result = part;
      else
	/* Combine the parts with bitwise or.  This works
	   because we extracted each part as an unsigned bit field.  */
	result = expand_binop (word_mode, ior_optab, part, result, NULL_RTX, 1,
			       OPTAB_LIB_WIDEN);

      first = 0;
    }

  /* Unsigned bit field: we are done.  */
  if (unsignedp)
    return result;
  /* Signed bit field: sign-extend with two arithmetic shifts.  */
  result = expand_shift (LSHIFT_EXPR, word_mode, result,
			 BITS_PER_WORD - bitsize, NULL_RTX, 0);
  return expand_shift (RSHIFT_EXPR, word_mode, result,
		       BITS_PER_WORD - bitsize, NULL_RTX, 0);
}
2406 :
2407 : /* Try to read the low bits of SRC as an rvalue of mode MODE, preserving
2408 : the bit pattern. SRC_MODE is the mode of SRC; if this is smaller than
2409 : MODE, fill the upper bits with zeros. Fail if the layout of either
2410 : mode is unknown (as for CC modes) or if the extraction would involve
2411 : unprofitable mode punning. Return the value on success, otherwise
2412 : return null.
2413 :
2414 : This is different from gen_lowpart* in these respects:
2415 :
2416 : - the returned value must always be considered an rvalue
2417 :
2418 : - when MODE is wider than SRC_MODE, the extraction involves
2419 : a zero extension
2420 :
2421 : - when MODE is smaller than SRC_MODE, the extraction involves
2422 : a truncation (and is thus subject to TARGET_TRULY_NOOP_TRUNCATION).
2423 :
2424 : In other words, this routine performs a computation, whereas the
2425 : gen_lowpart* routines are conceptually lvalue or rvalue subreg
2426 : operations. */
2427 :
rtx
extract_low_bits (machine_mode mode, machine_mode src_mode, rtx src)
{
  scalar_int_mode int_mode, src_int_mode;

  /* Same mode: the bit pattern is trivially available as-is.  */
  if (mode == src_mode)
    return src;

  if (CONSTANT_P (src))
    {
      /* simplify_gen_subreg can't be used here, as if simplify_subreg
	 fails, it will happily create (subreg (symbol_ref)) or similar
	 invalid SUBREGs.  */
      poly_uint64 byte = subreg_lowpart_offset (mode, src_mode);
      rtx ret = simplify_subreg (mode, src, src_mode, byte);
      if (ret)
	return ret;

      /* A VOIDmode constant (e.g. a bare const_int) or an invalid subreg
	 combination cannot be represented; give up.  */
      if (GET_MODE (src) == VOIDmode
	  || !validate_subreg (mode, src_mode, src, byte))
	return NULL_RTX;

      /* Force the constant into a register so a well-formed SUBREG of it
	 can be built.  */
      src = force_reg (GET_MODE (src), src);
      return gen_rtx_SUBREG (mode, src, byte);
    }

  /* CC modes have no defined bit layout, so punning through them is
     meaningless.  */
  if (GET_MODE_CLASS (mode) == MODE_CC || GET_MODE_CLASS (src_mode) == MODE_CC)
    return NULL_RTX;

  /* Equal-sized, tieable modes: try the cheap direct lowpart first.  */
  if (known_eq (GET_MODE_BITSIZE (mode), GET_MODE_BITSIZE (src_mode))
      && targetm.modes_tieable_p (mode, src_mode))
    {
      rtx x = gen_lowpart_common (mode, src);
      if (x)
        return x;
    }

  /* Otherwise go via integer modes of the same sizes.  Fail if either
     mode has no integer equivalent (e.g. unusual vector sizes).  */
  if (!int_mode_for_mode (src_mode).exists (&src_int_mode)
      || !int_mode_for_mode (mode).exists (&int_mode))
    return NULL_RTX;

  /* Refuse the transformation when either mode change would be
     unprofitable mode punning according to the target.  */
  if (!targetm.modes_tieable_p (src_int_mode, src_mode))
    return NULL_RTX;
  if (!targetm.modes_tieable_p (int_mode, mode))
    return NULL_RTX;

  src = gen_lowpart (src_int_mode, src);
  if (!validate_subreg (int_mode, src_int_mode, src,
			subreg_lowpart_offset (int_mode, src_int_mode)))
    return NULL_RTX;

  /* Zero-extend (or truncate) between the integer modes, then reinterpret
     the result in the requested mode.  */
  src = convert_modes (int_mode, src_int_mode, src, true);
  src = gen_lowpart (mode, src);
  return src;
}
2483 :
2484 : /* Add INC into TARGET. */
2485 :
2486 : void
2487 1185 : expand_inc (rtx target, rtx inc)
2488 : {
2489 1185 : rtx value = expand_binop (GET_MODE (target), add_optab,
2490 : target, inc,
2491 : target, 0, OPTAB_LIB_WIDEN);
2492 1185 : if (value != target)
2493 61 : emit_move_insn (target, value);
2494 1185 : }
2495 :
2496 : /* Subtract DEC from TARGET. */
2497 :
2498 : void
2499 1220 : expand_dec (rtx target, rtx dec)
2500 : {
2501 1220 : rtx value = expand_binop (GET_MODE (target), sub_optab,
2502 : target, dec,
2503 : target, 0, OPTAB_LIB_WIDEN);
2504 1220 : if (value != target)
2505 0 : emit_move_insn (target, value);
2506 1220 : }
2507 :
2508 : /* Output a shift instruction for expression code CODE,
2509 : with SHIFTED being the rtx for the value to shift,
2510 : and AMOUNT the rtx for the amount to shift by.
2511 : Store the result in the rtx TARGET, if that is convenient.
2512 : If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2513 : Return the rtx for where the value is.
2514 : If that cannot be done, abort the compilation unless MAY_FAIL is true,
2515 : in which case 0 is returned. */
2516 :
static rtx
expand_shift_1 (enum tree_code code, machine_mode mode, rtx shifted,
		rtx amount, rtx target, int unsignedp, bool may_fail = false)
{
  rtx op1, temp = 0;
  int left = (code == LSHIFT_EXPR || code == LROTATE_EXPR);
  int rotate = (code == LROTATE_EXPR || code == RROTATE_EXPR);
  optab lshift_optab = ashl_optab;
  optab rshift_arith_optab = ashr_optab;
  optab rshift_uns_optab = lshr_optab;
  optab lrotate_optab = rotl_optab;
  optab rrotate_optab = rotr_optab;
  machine_mode op1_mode;
  /* For vector MODE this is the element mode; the amount-truncation and
     cost checks below are per-element.  */
  scalar_mode scalar_mode = GET_MODE_INNER (mode);
  int attempt;
  bool speed = optimize_insn_for_speed_p ();

  op1 = amount;
  op1_mode = GET_MODE (op1);

  /* Determine whether the shift/rotate amount is a vector, or scalar.  If the
     shift amount is a vector, use the vector/vector shift patterns.  */
  if (VECTOR_MODE_P (mode) && VECTOR_MODE_P (op1_mode))
    {
      lshift_optab = vashl_optab;
      rshift_arith_optab = vashr_optab;
      rshift_uns_optab = vlshr_optab;
      lrotate_optab = vrotl_optab;
      rrotate_optab = vrotr_optab;
    }

  /* Previously detected shift-counts computed by NEGATE_EXPR
     and shifted in the other direction; but that does not work
     on all machines.  */

  if (SHIFT_COUNT_TRUNCATED)
    {
      /* Reduce an out-of-range constant amount modulo the bitsize, and
	 strip a lowpart SUBREG from a variable amount (the discarded high
	 bits cannot matter when the target truncates shift counts).  */
      if (CONST_INT_P (op1)
	  && ((unsigned HOST_WIDE_INT) INTVAL (op1) >=
	      (unsigned HOST_WIDE_INT) GET_MODE_BITSIZE (scalar_mode)))
	op1 = gen_int_shift_amount (mode,
				    (unsigned HOST_WIDE_INT) INTVAL (op1)
				    % GET_MODE_BITSIZE (scalar_mode));
      else if (GET_CODE (op1) == SUBREG
	       && subreg_lowpart_p (op1)
	       && SCALAR_INT_MODE_P (GET_MODE (SUBREG_REG (op1)))
	       && SCALAR_INT_MODE_P (GET_MODE (op1)))
	op1 = SUBREG_REG (op1);
    }

  /* Canonicalize rotates by constant amount.  We may canonicalize
     to reduce the immediate or if the ISA can rotate by constants
     in only one direction.  */
  if (rotate && reverse_rotate_by_imm_p (scalar_mode, left, op1))
    {
      op1 = gen_int_shift_amount (mode, (GET_MODE_BITSIZE (scalar_mode)
					 - INTVAL (op1)));
      left = !left;
      code = left ? LROTATE_EXPR : RROTATE_EXPR;
    }

  /* Rotation of 16bit values by 8 bits is effectively equivalent to a bswaphi.
     Note that this is not the case for bigger values.  For instance a rotation
     of 0x01020304 by 16 bits gives 0x03040102 which is different from
     0x04030201 (bswapsi).  */
  if (rotate
      && CONST_INT_P (op1)
      && INTVAL (op1) == BITS_PER_UNIT
      && GET_MODE_SIZE (scalar_mode) == 2
      && optab_handler (bswap_optab, mode) != CODE_FOR_nothing)
    return expand_unop (mode, bswap_optab, shifted, NULL_RTX, unsignedp);

  /* Shifting (or rotating) by zero is a no-op.  */
  if (op1 == const0_rtx)
    return shifted;

  /* Check whether it's cheaper to implement a left shift by a constant
     bit count by a sequence of additions.  */
  if (code == LSHIFT_EXPR
      && CONST_INT_P (op1)
      && INTVAL (op1) > 0
      && INTVAL (op1) < GET_MODE_PRECISION (scalar_mode)
      && INTVAL (op1) < MAX_BITS_PER_WORD
      && (shift_cost (speed, mode, INTVAL (op1))
	  > INTVAL (op1) * add_cost (speed, mode))
      && shift_cost (speed, mode, INTVAL (op1)) != MAX_COST)
    {
      int i;
      /* Each self-addition doubles SHIFTED, i.e. shifts left by one.  */
      for (i = 0; i < INTVAL (op1); i++)
	{
	  temp = force_reg (mode, shifted);
	  shifted = expand_binop (mode, add_optab, temp, temp, NULL_RTX,
				  unsignedp, OPTAB_LIB_WIDEN);
	}
      return shifted;
    }

  /* Three attempts: direct expansion, then widening, then with library
     fallback.  Stop as soon as one of them produces a result.  */
  for (attempt = 0; temp == 0 && attempt < 3; attempt++)
    {
      enum optab_methods methods;

      if (attempt == 0)
	methods = OPTAB_DIRECT;
      else if (attempt == 1)
	methods = OPTAB_WIDEN;
      else
	methods = OPTAB_LIB_WIDEN;

      if (rotate)
	{
	  /* Widening does not work for rotation.  */
	  if (methods == OPTAB_WIDEN)
	    continue;
	  else if (methods == OPTAB_LIB_WIDEN)
	    {
	      /* If we have been unable to open-code this by a rotation,
		 do it as the IOR or PLUS of two shifts.  I.e., to rotate
		 A by N bits, compute
		 (A << N) | ((unsigned) A >> ((-N) & (C - 1)))
		 where C is the bitsize of A.  If N cannot be zero,
		 use PLUS instead of IOR.

		 It is theoretically possible that the target machine might
		 not be able to perform either shift and hence we would
		 be making two libcalls rather than just the one for the
		 shift (similarly if IOR could not be done).  We will allow
		 this extremely unlikely lossage to avoid complicating the
		 code below.  */

	      rtx subtarget = target == shifted ? 0 : target;
	      rtx new_amount, other_amount;
	      rtx temp1;

	      new_amount = op1;
	      if (op1 == const0_rtx)
		return shifted;
	      else if (CONST_INT_P (op1))
		other_amount = gen_int_shift_amount
		  (mode, GET_MODE_BITSIZE (scalar_mode) - INTVAL (op1));
	      else
		{
		  /* Variable amount: compute (-N) & (C - 1) at run time.  */
		  other_amount
		    = simplify_gen_unary (NEG, GET_MODE (op1),
					  op1, GET_MODE (op1));
		  HOST_WIDE_INT mask = GET_MODE_PRECISION (scalar_mode) - 1;
		  other_amount
		    = simplify_gen_binary (AND, GET_MODE (op1), other_amount,
					   gen_int_mode (mask, GET_MODE (op1)));
		}

	      /* SHIFTED is used twice below; pin it in a register.  */
	      shifted = force_reg (mode, shifted);

	      temp = expand_shift_1 (left ? LSHIFT_EXPR : RSHIFT_EXPR,
				     mode, shifted, new_amount, 0, 1);
	      temp1 = expand_shift_1 (left ? RSHIFT_EXPR : LSHIFT_EXPR,
				      mode, shifted, other_amount,
				      subtarget, 1);
	      return expand_binop (mode,
				   CONST_INT_P (op1) ? add_optab : ior_optab,
				   temp, temp1, target, unsignedp, methods);
	    }

	  temp = expand_binop (mode,
			       left ? lrotate_optab : rrotate_optab,
			       shifted, op1, target, unsignedp, methods);
	}
      else if (unsignedp)
	temp = expand_binop (mode,
			     left ? lshift_optab : rshift_uns_optab,
			     shifted, op1, target, unsignedp, methods);

      /* Do arithmetic shifts.
	 Also, if we are going to widen the operand, we can just as well
	 use an arithmetic right-shift instead of a logical one.  */
      if (temp == 0 && ! rotate
	  && (! unsignedp || (! left && methods == OPTAB_WIDEN)))
	{
	  enum optab_methods methods1 = methods;

	  /* If trying to widen a log shift to an arithmetic shift,
	     don't accept an arithmetic shift of the same size.  */
	  if (unsignedp)
	    methods1 = OPTAB_MUST_WIDEN;

	  /* Arithmetic shift */

	  temp = expand_binop (mode,
			       left ? lshift_optab : rshift_arith_optab,
			       shifted, op1, target, unsignedp, methods1);
	}

      /* We used to try extzv here for logical right shifts, but that was
	 only useful for one machine, the VAX, and caused poor code
	 generation there for lshrdi3, so the code was deleted and a
	 define_expand for lshrsi3 was added to vax.md.  */
    }

  /* TEMP is null only if every method failed; that is a hard error unless
     the caller opted in via MAY_FAIL.  */
  gcc_assert (temp != NULL_RTX || may_fail);
  return temp;
}
2716 :
2717 : /* Output a shift instruction for expression code CODE,
2718 : with SHIFTED being the rtx for the value to shift,
2719 : and AMOUNT the amount to shift by.
2720 : Store the result in the rtx TARGET, if that is convenient.
2721 : If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2722 : Return the rtx for where the value is. */
2723 :
2724 : rtx
2725 1200945 : expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2726 : poly_int64 amount, rtx target, int unsignedp)
2727 : {
2728 1200945 : return expand_shift_1 (code, mode, shifted,
2729 : gen_int_shift_amount (mode, amount),
2730 1200945 : target, unsignedp);
2731 : }
2732 :
2733 : /* Likewise, but return 0 if that cannot be done. */
2734 :
2735 : rtx
2736 302 : maybe_expand_shift (enum tree_code code, machine_mode mode, rtx shifted,
2737 : int amount, rtx target, int unsignedp)
2738 : {
2739 302 : return expand_shift_1 (code, mode,
2740 302 : shifted, GEN_INT (amount), target, unsignedp, true);
2741 : }
2742 :
2743 : /* Output a shift instruction for expression code CODE,
2744 : with SHIFTED being the rtx for the value to shift,
2745 : and AMOUNT the tree for the amount to shift by.
2746 : Store the result in the rtx TARGET, if that is convenient.
2747 : If UNSIGNEDP is nonzero, do a logical shift; otherwise, arithmetic.
2748 : Return the rtx for where the value is. */
2749 :
2750 : rtx
2751 280183 : expand_variable_shift (enum tree_code code, machine_mode mode, rtx shifted,
2752 : tree amount, rtx target, int unsignedp)
2753 : {
2754 280183 : return expand_shift_1 (code, mode,
2755 280183 : shifted, expand_normal (amount), target, unsignedp);
2756 : }
2757 :
2758 :
2759 : static void synth_mult (struct algorithm *, unsigned HOST_WIDE_INT,
2760 : const struct mult_cost *, machine_mode mode);
2761 : static rtx expand_mult_const (machine_mode, rtx, HOST_WIDE_INT, rtx,
2762 : const struct algorithm *, enum mult_variant);
2763 : static unsigned HOST_WIDE_INT invert_mod2n (unsigned HOST_WIDE_INT, int);
2764 : static rtx extract_high_half (scalar_int_mode, rtx);
2765 : static rtx expmed_mult_highpart (scalar_int_mode, rtx, rtx, rtx, int, int);
2766 :
2767 : /* Compute and return the best algorithm for multiplying by T.
2768 : The algorithm must cost less than cost_limit
2769 : If retval.cost >= COST_LIMIT, no algorithm was found and all
2770 : other field of the returned struct are undefined.
2771 : MODE is the machine mode of the multiplication. */
2772 :
static void
synth_mult (struct algorithm *alg_out, unsigned HOST_WIDE_INT t,
	    const struct mult_cost *cost_limit, machine_mode mode)
{
  int m;
  struct algorithm *alg_in, *best_alg;
  struct mult_cost best_cost;
  struct mult_cost new_limit;
  int op_cost, op_latency;
  /* Keep the original T: the signed-interpretation heuristics below need
     the un-masked value.  */
  unsigned HOST_WIDE_INT orig_t = t;
  unsigned HOST_WIDE_INT q;
  int maxm, hash_index;
  bool cache_hit = false;
  enum alg_code cache_alg = alg_zero;
  bool speed = optimize_insn_for_speed_p ();
  scalar_int_mode imode;
  struct alg_hash_entry *entry_ptr;

  /* Indicate that no algorithm is yet found.  If no algorithm
     is found, this value will be returned and indicate failure.  */
  alg_out->cost.cost = cost_limit->cost + 1;
  alg_out->cost.latency = cost_limit->latency + 1;

  if (cost_limit->cost < 0
      || (cost_limit->cost == 0 && cost_limit->latency <= 0))
    return;

  /* Be prepared for vector modes.  */
  imode = as_a <scalar_int_mode> (GET_MODE_INNER (mode));

  maxm = MIN (BITS_PER_WORD, GET_MODE_BITSIZE (imode));

  /* Restrict the bits of "t" to the multiplication's mode.  */
  t &= GET_MODE_MASK (imode);

  /* t == 1 can be done in zero cost.  */
  if (t == 1)
    {
      alg_out->ops = 1;
      alg_out->cost.cost = 0;
      alg_out->cost.latency = 0;
      alg_out->op[0] = alg_m;
      return;
    }

  /* t == 0 sometimes has a cost.  If it does and it exceeds our limit,
     fail now.  */
  if (t == 0)
    {
      if (MULT_COST_LESS (cost_limit, zero_cost (speed)))
	return;
      else
	{
	  alg_out->ops = 1;
	  alg_out->cost.cost = zero_cost (speed);
	  alg_out->cost.latency = zero_cost (speed);
	  alg_out->op[0] = alg_zero;
	  return;
	}
    }

  /* We'll be needing a couple extra algorithm structures now.  */

  alg_in = XALLOCA (struct algorithm);
  best_alg = XALLOCA (struct algorithm);
  best_cost = *cost_limit;

  /* Compute the hash index.  */
  hash_index = (t ^ (unsigned int) mode ^ (speed * 256)) % NUM_ALG_HASH_ENTRIES;

  /* See if we already know what to do for T.  */
  entry_ptr = alg_hash_entry_ptr (hash_index);
  if (entry_ptr->t == t
      && entry_ptr->mode == mode
      && entry_ptr->speed == speed
      && entry_ptr->alg != alg_unknown)
    {
      cache_alg = entry_ptr->alg;

      if (cache_alg == alg_impossible)
	{
	  /* The cache tells us that it's impossible to synthesize
	     multiplication by T within entry_ptr->cost.  */
	  if (!CHEAPER_MULT_COST (&entry_ptr->cost, cost_limit))
	    /* COST_LIMIT is at least as restrictive as the one
	       recorded in the hash table, in which case we have no
	       hope of synthesizing a multiplication.  Just
	       return.  */
	    return;

	  /* If we get here, COST_LIMIT is less restrictive than the
	     one recorded in the hash table, so we may be able to
	     synthesize a multiplication.  Proceed as if we didn't
	     have the cache entry.  */
	}
      else
	{
	  if (CHEAPER_MULT_COST (cost_limit, &entry_ptr->cost))
	    /* The cached algorithm shows that this multiplication
	       requires more cost than COST_LIMIT.  Just return.  This
	       way, we don't clobber this cache entry with
	       alg_impossible but retain useful information.  */
	    return;

	  /* A cache hit jumps straight to the strategy that won last
	     time; CACHE_HIT makes each strategy block exit via `done'
	     instead of falling through to the others.  */
	  cache_hit = true;

	  switch (cache_alg)
	    {
	    case alg_shift:
	      goto do_alg_shift;

	    case alg_add_t_m2:
	    case alg_sub_t_m2:
	      goto do_alg_addsub_t_m2;

	    case alg_add_factor:
	    case alg_sub_factor:
	      goto do_alg_addsub_factor;

	    case alg_add_t2_m:
	      goto do_alg_add_t2_m;

	    case alg_sub_t2_m:
	      goto do_alg_sub_t2_m;

	    default:
	      gcc_unreachable ();
	    }
	}
    }

  /* If we have a group of zero bits at the low-order part of T, try
     multiplying by the remaining bits and then doing a shift.  */

  if ((t & 1) == 0)
    {
    do_alg_shift:
      m = ctz_or_zero (t); /* m = number of low zero bits */
      if (m < maxm)
	{
	  q = t >> m;
	  /* The function expand_shift will choose between a shift and
	     a sequence of additions, so the observed cost is given as
	     MIN (m * add_cost(speed, mode), shift_cost(speed, mode, m)).  */
	  op_cost = m * add_cost (speed, mode);
	  if (shift_cost (speed, mode, m) < op_cost)
	    op_cost = shift_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, q, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }

	  /* See if treating ORIG_T as a signed number yields a better
	     sequence.  Try this sequence only for a negative ORIG_T
	     as it would be useless for a non-negative ORIG_T.  */
	  if ((HOST_WIDE_INT) orig_t < 0)
	    {
	      /* Shift ORIG_T as follows because a right shift of a
		 negative-valued signed type is implementation
		 defined.  */
	      q = ~(~orig_t >> m);
	      /* The function expand_shift will choose between a shift
		 and a sequence of additions, so the observed cost is
		 given as MIN (m * add_cost(speed, mode),
		 shift_cost(speed, mode, m)).  */
	      op_cost = m * add_cost (speed, mode);
	      if (shift_cost (speed, mode, m) < op_cost)
		op_cost = shift_cost (speed, mode, m);
	      new_limit.cost = best_cost.cost - op_cost;
	      new_limit.latency = best_cost.latency - op_cost;
	      synth_mult (alg_in, q, &new_limit, mode);

	      alg_in->cost.cost += op_cost;
	      alg_in->cost.latency += op_cost;
	      if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
		{
		  best_cost = alg_in->cost;
		  std::swap (alg_in, best_alg);
		  best_alg->log[best_alg->ops] = m;
		  best_alg->op[best_alg->ops] = alg_shift;
		}
	    }
	}
      else if (2 * BITS_PER_WORD <= HOST_BITS_PER_WIDE_INT
	       && GET_MODE_BITSIZE (imode) == 2 * BITS_PER_WORD
	       && m >= BITS_PER_WORD
	       && imode == mode)
	{
	  /* Double-word mode with at least a word of low zeros: the shift
	     splits into a word move (cost of zero) plus a narrower shift.  */
	  q = t >> m;
	  int op1_cost = shift_cost (speed, mode, m - BITS_PER_WORD);
	  int op2_cost = zero_cost (speed);
	  op_latency = MAX (op1_cost, op2_cost);
	  op_cost = op1_cost + op2_cost;

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, q, &new_limit, mode);
	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_shift;
	    }
	}
      if (cache_hit)
	goto done;
    }

  /* If we have an odd number, add or subtract one.  */
  if ((t & 1) != 0)
    {
      unsigned HOST_WIDE_INT w;

    do_alg_addsub_t_m2:
      /* W ends up as the lowest set bit above the trailing run of ones.  */
      for (w = 1; (w & t) != 0; w <<= 1)
	;
      /* If T was -1, then W will be zero after the loop.  This is another
	 case where T ends with ...111.  Handling this with (T + 1) and
	 subtract 1 produces slightly better code and results in algorithm
	 selection much faster than treating it like the ...0111 case
	 below.  */
      if (w == 0
	  || (w > 2
	      /* Reject the case where t is 3.
		 Thus we prefer addition in that case.  */
	      && t != 3))
	{
	  /* T ends with ...111.  Multiply by (T + 1) and subtract T.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t + 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}
      else
	{
	  /* T ends with ...01 or ...011.  Multiply by (T - 1) and add T.  */

	  op_cost = add_cost (speed, mode);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, t - 1, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = 0;
	      best_alg->op[best_alg->ops] = alg_add_t_m2;
	    }
	}

      /* We may be able to calculate a * -7, a * -15, a * -31, etc
	 quickly with a - a * n for some appropriate constant n.  */
      m = exact_log2 (-orig_t + 1);
      if (m >= 0 && m < maxm)
	{
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  /* If the target has a cheap shift-and-subtract insn use
	     that in preference to a shift insn followed by a sub insn.
	     Assume that the shift-and-sub is "atomic" with a latency
	     equal to its cost, otherwise assume that on superscalar
	     hardware the shift may be executed concurrently with the
	     earlier steps in the algorithm.  */
	  if (shiftsub1_cost (speed, mode, m) <= op_cost)
	    {
	      op_cost = shiftsub1_cost (speed, mode, m);
	      op_latency = op_cost;
	    }
	  else
	    op_latency = add_cost (speed, mode);

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, (unsigned HOST_WIDE_INT) (-orig_t + 1) >> m,
		      &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t_m2;
	    }
	}

      if (cache_hit)
	goto done;
    }

  /* Look for factors of t of the form
     t = q(2**m +- 1), 2 <= m <= floor(log2(t - 1)).
     If we find such a factor, we can multiply by t using an algorithm that
     multiplies by q, shift the result by m and add/subtract it to itself.

     We search for large factors first and loop down, even if large factors
     are less probable than small; if we find a large factor we will find a
     good sequence quickly, and therefore be able to prune (by decreasing
     COST_LIMIT) the search.  */

 do_alg_addsub_factor:
  for (m = floor_log2 (t - 1); m >= 2; m--)
    {
      unsigned HOST_WIDE_INT d;

      /* Try a factor of the form 2**m + 1.  */
      d = (HOST_WIDE_INT_1U << m) + 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_add_factor))
	{
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftadd_cost (speed, mode, m) <= op_cost)
	    op_cost = shiftadd_cost (speed, mode, m);

	  op_latency = op_cost;


	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  /* The shifted operand feeds the add, so the overall latency
	     cannot drop below this step's own cost.  */
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_factor;
	    }
	  /* Other factors will have been taken care of in the recursion.  */
	  break;
	}

      /* Try a factor of the form 2**m - 1.  */
      d = (HOST_WIDE_INT_1U << m) - 1;
      if (t % d == 0 && t > d && m < maxm
	  && (!cache_hit || cache_alg == alg_sub_factor))
	{
	  op_cost = add_cost (speed, mode) + shift_cost (speed, mode, m);
	  if (shiftsub0_cost (speed, mode, m) <= op_cost)
	    op_cost = shiftsub0_cost (speed, mode, m);

	  op_latency = op_cost;

	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_latency;
	  synth_mult (alg_in, t / d, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_latency;
	  if (alg_in->cost.latency < op_cost)
	    alg_in->cost.latency = op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_factor;
	    }
	  break;
	}
    }
  if (cache_hit)
    goto done;

  /* Try shift-and-add (load effective address) instructions,
     i.e. do a*3, a*5, a*9.  */
  if ((t & 1) != 0)
    {
    do_alg_add_t2_m:
      q = t - 1;
      m = ctz_hwi (q);
      if (q && m < maxm)
	{
	  op_cost = shiftadd_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t - 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_add_t2_m;
	    }
	}
      if (cache_hit)
	goto done;

    do_alg_sub_t2_m:
      q = t + 1;
      m = ctz_hwi (q);
      if (q && m < maxm)
	{
	  op_cost = shiftsub0_cost (speed, mode, m);
	  new_limit.cost = best_cost.cost - op_cost;
	  new_limit.latency = best_cost.latency - op_cost;
	  synth_mult (alg_in, (t + 1) >> m, &new_limit, mode);

	  alg_in->cost.cost += op_cost;
	  alg_in->cost.latency += op_cost;
	  if (CHEAPER_MULT_COST (&alg_in->cost, &best_cost))
	    {
	      best_cost = alg_in->cost;
	      std::swap (alg_in, best_alg);
	      best_alg->log[best_alg->ops] = m;
	      best_alg->op[best_alg->ops] = alg_sub_t2_m;
	    }
	}
      if (cache_hit)
	goto done;
    }

 done:
  /* If best_cost has not decreased, we have not found any algorithm.  */
  if (!CHEAPER_MULT_COST (&best_cost, cost_limit))
    {
      /* We failed to find an algorithm.  Record alg_impossible for
	 this case (that is, <T, MODE, COST_LIMIT>) so that next time
	 we are asked to find an algorithm for T within the same or
	 lower COST_LIMIT, we can immediately return to the
	 caller.  */
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = alg_impossible;
      entry_ptr->cost = *cost_limit;
      return;
    }

  /* Cache the result.  */
  if (!cache_hit)
    {
      entry_ptr->t = t;
      entry_ptr->mode = mode;
      entry_ptr->speed = speed;
      entry_ptr->alg = best_alg->op[best_alg->ops];
      entry_ptr->cost.cost = best_cost.cost;
      entry_ptr->cost.latency = best_cost.latency;
    }

  /* If we are getting a too long sequence for `struct algorithm'
     to record, make this search fail.  */
  if (best_alg->ops == MAX_BITS_PER_WORD)
    return;

  /* Copy the algorithm from temporary space to the space at alg_out.
     We avoid using structure assignment because the majority of
     best_alg is normally undefined, and this is a critical function.  */
  alg_out->ops = best_alg->ops + 1;
  alg_out->cost = best_cost;
  memcpy (alg_out->op, best_alg->op,
	  alg_out->ops * sizeof *alg_out->op);
  memcpy (alg_out->log, best_alg->log,
	  alg_out->ops * sizeof *alg_out->log);
}
3259 :
3260 : /* Find the cheapest way of multiplying a value of mode MODE by VAL.
3261 : Try three variations:
3262 :
3263 : - a shift/add sequence based on VAL itself
3264 : - a shift/add sequence based on -VAL, followed by a negation
3265 : - a shift/add sequence based on VAL - 1, followed by an addition.
3266 :
3267 : Return true if the cheapest of these cost less than MULT_COST,
3268 : describing the algorithm in *ALG and final fixup in *VARIANT. */
3269 :
bool
choose_mult_variant (machine_mode mode, HOST_WIDE_INT val,
		     struct algorithm *alg, enum mult_variant *variant,
		     int mult_cost)
{
  struct algorithm alg2;
  struct mult_cost limit;
  int op_cost;
  bool speed = optimize_insn_for_speed_p ();

  /* Fail quickly for impossible bounds.  */
  if (mult_cost < 0)
    return false;

  /* Ensure that mult_cost provides a reasonable upper bound.
     Any constant multiplication can be performed with less
     than 2 * bits additions.  */
  op_cost = 2 * GET_MODE_UNIT_BITSIZE (mode) * add_cost (speed, mode);
  if (mult_cost > op_cost)
    mult_cost = op_cost;

  /* First variation: synthesize VAL directly.  */
  *variant = basic_variant;
  limit.cost = mult_cost;
  limit.latency = mult_cost;
  synth_mult (alg, val, &limit, mode);

  /* Second variation: synthesize -VAL and negate afterwards.
     This works only if the inverted value actually fits in an
     `unsigned int' */
  if (HOST_BITS_PER_INT >= GET_MODE_UNIT_BITSIZE (mode))
    {
      op_cost = neg_cost (speed, mode);
      /* Budget for the -VAL synthesis is whatever is left of the
	 cheapest result found so far after paying for the negation.  */
      if (MULT_COST_LESS (&alg->cost, mult_cost))
	{
	  limit.cost = alg->cost.cost - op_cost;
	  limit.latency = alg->cost.latency - op_cost;
	}
      else
	{
	  limit.cost = mult_cost - op_cost;
	  limit.latency = mult_cost - op_cost;
	}

      synth_mult (&alg2, -val, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = negate_variant;
    }

  /* Third variation: synthesize VAL - 1 and add OP0 afterwards.
     This proves very useful for division-by-constant.  */
  op_cost = add_cost (speed, mode);
  if (MULT_COST_LESS (&alg->cost, mult_cost))
    {
      limit.cost = alg->cost.cost - op_cost;
      limit.latency = alg->cost.latency - op_cost;
    }
  else
    {
      limit.cost = mult_cost - op_cost;
      limit.latency = mult_cost - op_cost;
    }

  /* Skip VAL - 1 when VAL is HOST_WIDE_INT_MIN and the mode is narrower
     than a host wide int: the unsigned wraparound to HOST_WIDE_INT_MAX
     would then not represent the intended coefficient.  */
  if (val != HOST_WIDE_INT_MIN
      || GET_MODE_UNIT_PRECISION (mode) == HOST_BITS_PER_WIDE_INT)
    {
      synth_mult (&alg2, val - HOST_WIDE_INT_1U, &limit, mode);
      alg2.cost.cost += op_cost;
      alg2.cost.latency += op_cost;
      if (CHEAPER_MULT_COST (&alg2.cost, &alg->cost))
	*alg = alg2, *variant = add_variant;
    }

  return MULT_COST_LESS (&alg->cost, mult_cost);
}
3344 :
3345 : /* A subroutine of expand_mult, used for constant multiplications.
3346 : Multiply OP0 by VAL in mode MODE, storing the result in TARGET if
3347 : convenient. Use the shift/add sequence described by ALG and apply
3348 : the final fixup specified by VARIANT. */
3349 :
static rtx
expand_mult_const (machine_mode mode, rtx op0, HOST_WIDE_INT val,
		   rtx target, const struct algorithm *alg,
		   enum mult_variant variant)
{
  /* VAL_SO_FAR tracks the coefficient that the emitted sequence has
     multiplied OP0 by so far; it feeds the REG_EQUAL notes and the
     final consistency assert against VAL.  */
  unsigned HOST_WIDE_INT val_so_far;
  rtx_insn *insn;
  rtx accum, tem;
  int opno;
  machine_mode nmode;

  /* Avoid referencing memory over and over and invalid sharing
     on SUBREGs.  */
  op0 = force_reg (mode, op0);

  /* ACCUM starts out either as OP0 or as a zero, depending on
     the first operation.  */

  if (alg->op[0] == alg_zero)
    {
      accum = copy_to_mode_reg (mode, CONST0_RTX (mode));
      val_so_far = 0;
    }
  else if (alg->op[0] == alg_m)
    {
      accum = copy_to_mode_reg (mode, op0);
      val_so_far = 1;
    }
  else
    gcc_unreachable ();

  /* Emit one shift/add/sub step per remaining algorithm operation.  */
  for (opno = 1; opno < alg->ops; opno++)
    {
      int log = alg->log[opno];
      /* When not optimizing, reuse ACCUM as the subtarget so fewer
	 pseudos are created; when optimizing, let expand pick.  */
      rtx shift_subtarget = optimize ? 0 : accum;
      rtx add_target
	= (opno == alg->ops - 1 && target != 0 && variant != add_variant
	   && !optimize)
	  ? target : 0;
      rtx accum_target = optimize ? 0 : accum;
      rtx accum_inner;

      switch (alg->op[opno])
	{
	case alg_shift:
	  /* ACCUM <<= LOG.  */
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  /* REG_EQUAL note will be attached to the following insn.  */
	  emit_move_insn (accum, tem);
	  val_so_far <<= log;
	  break;

	case alg_add_t_m2:
	  /* ACCUM += OP0 << LOG.  */
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += HOST_WIDE_INT_1U << log;
	  break;

	case alg_sub_t_m2:
	  /* ACCUM -= OP0 << LOG.  */
	  tem = expand_shift (LSHIFT_EXPR, mode, op0, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far -= HOST_WIDE_INT_1U << log;
	  break;

	case alg_add_t2_m:
	  /* ACCUM = (ACCUM << LOG) + OP0.  */
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) + 1;
	  break;

	case alg_sub_t2_m:
	  /* ACCUM = (ACCUM << LOG) - OP0.  */
	  accum = expand_shift (LSHIFT_EXPR, mode, accum,
				log, shift_subtarget, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, accum, op0),
				 add_target ? add_target : accum_target);
	  val_so_far = (val_so_far << log) - 1;
	  break;

	case alg_add_factor:
	  /* ACCUM += ACCUM << LOG, i.e. multiply by (2**LOG + 1).  */
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_PLUS (mode, accum, tem),
				 add_target ? add_target : accum_target);
	  val_so_far += val_so_far << log;
	  break;

	case alg_sub_factor:
	  /* ACCUM = (ACCUM << LOG) - ACCUM, i.e. multiply by
	     (2**LOG - 1).  */
	  tem = expand_shift (LSHIFT_EXPR, mode, accum, log, NULL_RTX, 0);
	  accum = force_operand (gen_rtx_MINUS (mode, tem, accum),
				 (add_target
				  ? add_target : (optimize ? 0 : tem)));
	  val_so_far = (val_so_far << log) - val_so_far;
	  break;

	default:
	  gcc_unreachable ();
	}

      if (SCALAR_INT_MODE_P (mode))
	{
	  /* Write a REG_EQUAL note on the last insn so that we can cse
	     multiplication sequences.  Note that if ACCUM is a SUBREG,
	     we've set the inner register and must properly indicate that.  */
	  tem = op0, nmode = mode;
	  accum_inner = accum;
	  if (GET_CODE (accum) == SUBREG)
	    {
	      accum_inner = SUBREG_REG (accum);
	      nmode = GET_MODE (accum_inner);
	      tem = gen_lowpart (nmode, op0);
	    }

	  /* Don't add a REG_EQUAL note if tem is a paradoxical SUBREG.
	     In that case, only the low bits of accum would be guaranteed to
	     be equal to the content of the REG_EQUAL note, the upper bits
	     can be anything.  */
	  if (!paradoxical_subreg_p (tem))
	    {
	      insn = get_last_insn ();
	      wide_int wval_so_far
		= wi::uhwi (val_so_far,
			    GET_MODE_PRECISION (as_a <scalar_mode> (nmode)));
	      rtx c = immed_wide_int_const (wval_so_far, nmode);
	      set_dst_reg_note (insn, REG_EQUAL, gen_rtx_MULT (nmode, tem, c),
				accum_inner);
	    }
	}
    }

  /* Apply the final fixup chosen by choose_mult_variant.  */
  if (variant == negate_variant)
    {
      val_so_far = -val_so_far;
      accum = expand_unop (mode, neg_optab, accum, target, 0);
    }
  else if (variant == add_variant)
    {
      val_so_far = val_so_far + 1;
      accum = force_operand (gen_rtx_PLUS (mode, accum, op0), target);
    }

  /* Compare only the bits of val and val_so_far that are significant
     in the result mode, to avoid sign-/zero-extension confusion.  */
  nmode = GET_MODE_INNER (mode);
  val &= GET_MODE_MASK (nmode);
  val_so_far &= GET_MODE_MASK (nmode);
  gcc_assert (val == (HOST_WIDE_INT) val_so_far);

  return accum;
}
3501 :
3502 : /* Perform a multiplication and return an rtx for the result.
3503 : MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3504 : TARGET is a suggestion for where to store the result (an rtx).
3505 :
3506 : We check specially for a constant integer as OP1.
3507 : If you want this check for OP0 as well, then before calling
3508 : you should swap the two operands if OP0 would be constant. */
3509 :
rtx
expand_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
	     int unsignedp, bool no_libcall)
{
  enum mult_variant variant;
  struct algorithm algorithm;
  rtx scalar_op1;
  int max_cost;
  bool speed = optimize_insn_for_speed_p ();
  /* -ftrapv requires the signed multiply optab so overflow traps.  */
  bool do_trapv = flag_trapv && SCALAR_INT_MODE_P (mode) && !unsignedp;

  /* Canonicalize so any constant ends up in OP1.  */
  if (CONSTANT_P (op0))
    std::swap (op0, op1);

  /* For vectors, there are several simplifications that can be made if
     all elements of the vector constant are identical.  */
  scalar_op1 = unwrap_const_vec_duplicate (op1);

  if (INTEGRAL_MODE_P (mode))
    {
      rtx fake_reg;
      HOST_WIDE_INT coeff;
      bool is_neg;
      int mode_bitsize;

      /* Trivial coefficients: 0, 1 and -1.  */
      if (op1 == CONST0_RTX (mode))
	return op1;
      if (op1 == CONST1_RTX (mode))
	return op0;
      if (op1 == CONSTM1_RTX (mode))
	return expand_unop (mode, do_trapv ? negv_optab : neg_optab,
			    op0, target, 0);

      if (do_trapv)
	goto skip_synth;

      /* If mode is integer vector mode, check if the backend supports
	 vector lshift (by scalar or vector) at all.  If not, we can't use
	 synthetized multiply.  */
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT
	  && optab_handler (vashl_optab, mode) == CODE_FOR_nothing
	  && optab_handler (ashl_optab, mode) == CODE_FOR_nothing)
	goto skip_synth;

      /* These are the operations that are potentially turned into
	 a sequence of shifts and additions.  */
      mode_bitsize = GET_MODE_UNIT_BITSIZE (mode);

      /* synth_mult does an `unsigned int' multiply.  As long as the mode is
	 less than or equal in size to `unsigned int' this doesn't matter.
	 If the mode is larger than `unsigned int', then synth_mult works
	 only if the constant value exactly fits in an `unsigned int' without
	 any truncation.  This means that multiplying by negative values does
	 not work; results are off by 2^32 on a 32 bit machine.  */
      if (CONST_INT_P (scalar_op1))
	{
	  coeff = INTVAL (scalar_op1);
	  is_neg = coeff < 0;
	}
#if TARGET_SUPPORTS_WIDE_INT
      else if (CONST_WIDE_INT_P (scalar_op1))
#else
      else if (CONST_DOUBLE_AS_INT_P (scalar_op1))
#endif
	{
	  int shift = wi::exact_log2 (rtx_mode_t (scalar_op1, mode));
	  /* Perfect power of 2 (other than 1, which is handled above).  */
	  if (shift > 0)
	    return expand_shift (LSHIFT_EXPR, mode, op0,
				 shift, target, unsignedp);
	  else
	    goto skip_synth;
	}
      else
	goto skip_synth;

      /* We used to test optimize here, on the grounds that it's better to
	 produce a smaller program when -O is not used.  But this causes
	 such a terrible slowdown sometimes that it seems better to always
	 use synth_mult.  */

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff)
	  && !(is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT))
	return expand_shift (LSHIFT_EXPR, mode, op0,
			     floor_log2 (coeff), target, unsignedp);

      /* A scratch register for pricing the rtl a hardware multiply
	 would use.  */
      fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);

      /* Attempt to handle multiplication of DImode values by negative
	 coefficients, by performing the multiplication by a positive
	 multiplier and then inverting the result.  */
      if (is_neg && mode_bitsize > HOST_BITS_PER_WIDE_INT)
	{
	  /* Its safe to use -coeff even for INT_MIN, as the
	     result is interpreted as an unsigned coefficient.
	     Exclude cost of op0 from max_cost to match the cost
	     calculation of the synth_mult.  */
	  coeff = -(unsigned HOST_WIDE_INT) coeff;
	  max_cost = (set_src_cost (gen_rtx_MULT (mode, fake_reg, op1),
				    mode, speed)
		      - neg_cost (speed, mode));
	  if (max_cost <= 0)
	    goto skip_synth;

	  /* Special case powers of two.  */
	  if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	    {
	      rtx temp = expand_shift (LSHIFT_EXPR, mode, op0,
				       floor_log2 (coeff), target, unsignedp);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }

	  if (choose_mult_variant (mode, coeff, &algorithm, &variant,
				   max_cost))
	    {
	      rtx temp = expand_mult_const (mode, op0, coeff, NULL_RTX,
					    &algorithm, variant);
	      return expand_unop (mode, neg_optab, temp, target, 0);
	    }
	  goto skip_synth;
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, op1), mode, speed);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
	return expand_mult_const (mode, op0, coeff, target,
				  &algorithm, variant);
    }
 skip_synth:

  /* Expand x*2.0 as x+x.  */
  if (CONST_DOUBLE_AS_FLOAT_P (scalar_op1)
      && real_equal (CONST_DOUBLE_REAL_VALUE (scalar_op1), &dconst2))
    {
      op0 = force_reg (GET_MODE (op0), op0);
      return expand_binop (mode, add_optab, op0, op0,
			   target, unsignedp,
			   no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
    }

  /* This used to use umul_optab if unsigned, but for non-widening multiply
     there is no difference between signed and unsigned.  */
  op0 = expand_binop (mode, do_trapv ? smulv_optab : smul_optab,
		      op0, op1, target, unsignedp,
		      no_libcall ? OPTAB_WIDEN : OPTAB_LIB_WIDEN);
  gcc_assert (op0 || no_libcall);
  return op0;
}
3660 :
3661 : /* Return a cost estimate for multiplying a register by the given
3662 : COEFFicient in the given MODE and SPEED. */
3663 :
3664 : int
3665 6769926 : mult_by_coeff_cost (HOST_WIDE_INT coeff, machine_mode mode, bool speed)
3666 : {
3667 6769926 : int max_cost;
3668 6769926 : struct algorithm algorithm;
3669 6769926 : enum mult_variant variant;
3670 :
3671 6769926 : rtx fake_reg = gen_raw_REG (mode, LAST_VIRTUAL_REGISTER + 1);
3672 6769926 : max_cost = set_src_cost (gen_rtx_MULT (mode, fake_reg, fake_reg),
3673 : mode, speed);
3674 6769926 : if (choose_mult_variant (mode, coeff, &algorithm, &variant, max_cost))
3675 6036647 : return algorithm.cost.cost;
3676 : else
3677 : return max_cost;
3678 : }
3679 :
3680 : /* Perform a widening multiplication and return an rtx for the result.
3681 : MODE is mode of value; OP0 and OP1 are what to multiply (rtx's);
3682 : TARGET is a suggestion for where to store the result (an rtx).
3683 : THIS_OPTAB is the optab we should use, it must be either umul_widen_optab
3684 : or smul_widen_optab.
3685 :
3686 : We check specially for a constant integer as OP1, comparing the
3687 : cost of a widening multiply against the cost of a sequence of shifts
3688 : and adds. */
3689 :
rtx
expand_widening_mult (machine_mode mode, rtx op0, rtx op1, rtx target,
		      int unsignedp, optab this_optab)
{
  bool speed = optimize_insn_for_speed_p ();
  rtx cop1;

  /* Only consider a shift/add synthesis when OP1 is a constant whose
     value, once extended to the wide MODE with the optab's signedness,
     is still representable as a host wide int.  */
  if (CONST_INT_P (op1)
      && GET_MODE (op0) != VOIDmode
      && (cop1 = convert_modes (mode, GET_MODE (op0), op1,
				this_optab == umul_widen_optab))
      && CONST_INT_P (cop1)
      && (INTVAL (cop1) >= 0
	  || HWI_COMPUTABLE_MODE_P (mode)))
    {
      HOST_WIDE_INT coeff = INTVAL (cop1);
      int max_cost;
      enum mult_variant variant;
      struct algorithm algorithm;

      /* Multiplying by zero gives zero, with no need to look at OP0.  */
      if (coeff == 0)
	return CONST0_RTX (mode);

      /* Special case powers of two.  */
      if (EXACT_POWER_OF_2_OR_ZERO_P (coeff))
	{
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_shift (LSHIFT_EXPR, mode, op0,
			       floor_log2 (coeff), target, unsignedp);
	}

      /* Exclude cost of op0 from max_cost to match the cost
	 calculation of the synth_mult.  */
      max_cost = mul_widen_cost (speed, mode);
      if (choose_mult_variant (mode, coeff, &algorithm, &variant,
			       max_cost))
	{
	  /* A shift/add sequence on the widened OP0 is cheaper than the
	     widening multiply instruction.  */
	  op0 = convert_to_mode (mode, op0, this_optab == umul_widen_optab);
	  return expand_mult_const (mode, op0, coeff, target,
				    &algorithm, variant);
	}
    }
  /* Fall back to the widening multiply optab (or a library call).  */
  return expand_binop (mode, this_optab, op0, op1, target,
		       unsignedp, OPTAB_LIB_WIDEN);
}
3735 :
3736 : /* Choose a minimal N + 1 bit approximation to 2**K / D that can be used to
3737 : replace division by D, put the least significant N bits of the result in
3738 : *MULTIPLIER_PTR, the value K - N in *POST_SHIFT_PTR, and return the most
3739 : significant bit.
3740 :
3741 : The width of operations is N (should be <= HOST_BITS_PER_WIDE_INT), the
3742 : needed precision is PRECISION (should be <= N).
3743 :
3744 : PRECISION should be as small as possible so this function can choose the
3745 : multiplier more freely. If PRECISION is <= N - 1, the most significant
3746 : bit returned by the function will be zero.
3747 :
3748 : Using this function, x / D is equal to (x*m) / 2**N >> (*POST_SHIFT_PTR),
3749 : where m is the full N + 1 bit multiplier. */
3750 :
unsigned HOST_WIDE_INT
choose_multiplier (unsigned HOST_WIDE_INT d, int n, int precision,
		   unsigned HOST_WIDE_INT *multiplier_ptr,
		   int *post_shift_ptr)
{
  int lgup, post_shift;
  int pow1, pow2;

  /* lgup = ceil(log2(d)) */
  /* Assuming d > 1, we have d >= 2^(lgup-1) + 1 */
  lgup = ceil_log2 (d);

  gcc_assert (lgup <= n);
  gcc_assert (lgup <= precision);

  pow1 = n + lgup;
  pow2 = n + lgup - precision;

  /* mlow = 2^(n + lgup)/d */
  /* Trivially from above we have mlow < 2^(n+1) */
  wide_int val = wi::set_bit_in_zero (pow1, HOST_BITS_PER_DOUBLE_INT);
  wide_int mlow = wi::udiv_trunc (val, d);

  /* mhigh = (2^(n + lgup) + 2^(n + lgup - precision))/d */
  /* From above we have mhigh < 2^(n+1) assuming lgup <= precision */
  /* From precision <= n, the difference between the numerators of mhigh and
     mlow is >= 2^lgup >= d.  Therefore the difference of the quotients in
     the Euclidean division by d is at least 1, so we have mlow < mhigh and
     the exact value of 2^(n + lgup)/d lies in the interval [mlow; mhigh).  */
  val |= wi::set_bit_in_zero (pow2, HOST_BITS_PER_DOUBLE_INT);
  wide_int mhigh = wi::udiv_trunc (val, d);

  /* Reduce to lowest terms.  */
  /* If precision <= n - 1, then the difference between the numerators of
     mhigh and mlow is >= 2^(lgup + 1) >= 2 * 2^lgup >= 2 * d.  Therefore
     the difference of the quotients in the Euclidean division by d is at
     least 2, which means that mhigh and mlow differ by at least one bit
     not in the last place.  The conclusion is that the first iteration of
     the loop below completes and shifts mhigh and mlow by 1 bit, which in
     particular means that mhigh < 2^n, that is to say, the most significant
     bit in the n + 1 bit value is zero.  */
  /* Each iteration drops the common low bit of mlow and mhigh (halving
     both) and decrements the post shift accordingly; stop as soon as the
     halved bounds would coincide, since a multiplier must remain strictly
     inside [mlow; mhigh).  */
  for (post_shift = lgup; post_shift > 0; post_shift--)
    {
      unsigned HOST_WIDE_INT ml_lo = wi::extract_uhwi (mlow, 1,
						       HOST_BITS_PER_WIDE_INT);
      unsigned HOST_WIDE_INT mh_lo = wi::extract_uhwi (mhigh, 1,
						       HOST_BITS_PER_WIDE_INT);
      if (ml_lo >= mh_lo)
	break;

      mlow = wi::uhwi (ml_lo, HOST_BITS_PER_DOUBLE_INT);
      mhigh = wi::uhwi (mh_lo, HOST_BITS_PER_DOUBLE_INT);
    }

  *post_shift_ptr = post_shift;

  /* Return the low N bits of the multiplier in *MULTIPLIER_PTR and the
     (n + 1)-st bit as the function result.  */
  if (n < HOST_BITS_PER_WIDE_INT)
    {
      unsigned HOST_WIDE_INT mask = (HOST_WIDE_INT_1U << n) - 1;
      *multiplier_ptr = mhigh.to_uhwi () & mask;
      return mhigh.to_uhwi () > mask;
    }
  else
    {
      *multiplier_ptr = mhigh.to_uhwi ();
      return wi::extract_uhwi (mhigh, HOST_BITS_PER_WIDE_INT, 1);
    }
}
3819 :
3820 : /* Compute the inverse of X mod 2**N, i.e., find Y such that X * Y is congruent
3821 : to 1 modulo 2**N, assuming that X is odd. Bézout's lemma guarantees that Y
3822 : exists for any given positive N. */
3823 :
3824 : static unsigned HOST_WIDE_INT
3825 45938 : invert_mod2n (unsigned HOST_WIDE_INT x, int n)
3826 : {
3827 45938 : gcc_assert ((x & 1) == 1);
3828 :
3829 : /* The algorithm notes that the choice Y = Z satisfies X*Y == 1 mod 2^3,
3830 : since X is odd. Then each iteration doubles the number of bits of
3831 : significance in Y. */
3832 :
3833 47274 : const unsigned HOST_WIDE_INT mask
3834 : = (n == HOST_BITS_PER_WIDE_INT
3835 45938 : ? HOST_WIDE_INT_M1U
3836 1336 : : (HOST_WIDE_INT_1U << n) - 1);
3837 45938 : unsigned HOST_WIDE_INT y = x;
3838 45938 : int nbit = 3;
3839 :
3840 274262 : while (nbit < n)
3841 : {
3842 228324 : y = y * (2 - x*y) & mask; /* Modulo 2^N */
3843 228324 : nbit *= 2;
3844 : }
3845 :
3846 45938 : return y;
3847 : }
3848 :
3849 : /* Emit code to adjust ADJ_OPERAND after multiplication of wrong signedness
3850 : flavor of OP0 and OP1. ADJ_OPERAND is already the high half of the
3851 : product OP0 x OP1. If UNSIGNEDP is nonzero, adjust the signed product
3852 : to become unsigned, if UNSIGNEDP is zero, adjust the unsigned product to
3853 : become signed.
3854 :
3855 : The result is put in TARGET if that is convenient.
3856 :
3857 : MODE is the mode of operation. */
3858 :
3859 : rtx
3860 0 : expand_mult_highpart_adjust (scalar_int_mode mode, rtx adj_operand, rtx op0,
3861 : rtx op1, rtx target, int unsignedp)
3862 : {
3863 0 : rtx tem;
3864 0 : enum rtx_code adj_code = unsignedp ? PLUS : MINUS;
3865 :
3866 0 : tem = expand_shift (RSHIFT_EXPR, mode, op0,
3867 0 : GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3868 0 : tem = expand_and (mode, tem, op1, NULL_RTX);
3869 0 : adj_operand
3870 0 : = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3871 : adj_operand);
3872 :
3873 0 : tem = expand_shift (RSHIFT_EXPR, mode, op1,
3874 0 : GET_MODE_BITSIZE (mode) - 1, NULL_RTX, 0);
3875 0 : tem = expand_and (mode, tem, op0, NULL_RTX);
3876 0 : target = force_operand (gen_rtx_fmt_ee (adj_code, mode, adj_operand, tem),
3877 : target);
3878 :
3879 0 : return target;
3880 : }
3881 :
3882 : /* Subroutine of expmed_mult_highpart. Return the MODE high part of OP. */
3883 :
3884 : static rtx
3885 20024 : extract_high_half (scalar_int_mode mode, rtx op)
3886 : {
3887 20024 : if (mode == word_mode)
3888 0 : return gen_highpart (mode, op);
3889 :
3890 20024 : scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
3891 :
3892 40048 : op = expand_shift (RSHIFT_EXPR, wider_mode, op,
3893 20024 : GET_MODE_BITSIZE (mode), 0, 1);
3894 20024 : return convert_modes (mode, wider_mode, op, 0);
3895 : }
3896 :
3897 : /* Like expmed_mult_highpart, but only consider using multiplication optab. */
3898 :
rtx
expmed_mult_highpart_optab (scalar_int_mode mode, rtx op0, rtx op1,
			    rtx target, int unsignedp, int max_cost)
{
  const scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
  const bool speed = optimize_insn_for_speed_p ();
  const int size = GET_MODE_BITSIZE (mode);
  optab moptab;
  rtx tem;

  /* Firstly, try using a multiplication insn that only generates the needed
     high part of the product, and in the sign flavor of unsignedp.  */
  if (mul_highpart_cost (speed, mode) < max_cost)
    {
      moptab = unsignedp ? umul_highpart_optab : smul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, op1, target, unsignedp,
			  OPTAB_DIRECT);
      if (tem)
	return tem;
    }

  /* Secondly, same as above, but use sign flavor opposite of unsignedp.
     Need to adjust the result after the multiplication.  The adjustment
     costs two shifts and up to four add/sub operations, hence the extra
     terms in the cost test.  */
  if (size - 1 < BITS_PER_WORD
      && (mul_highpart_cost (speed, mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      moptab = unsignedp ? smul_highpart_optab : umul_highpart_optab;
      tem = expand_binop (mode, moptab, op0, op1, target, !unsignedp,
			  OPTAB_DIRECT);
      if (tem)
	/* We used the wrong signedness.  Adjust the result.  */
	return expand_mult_highpart_adjust (mode, tem, op0, op1, tem,
					    unsignedp);
    }

  /* Try widening multiplication.  */
  moptab = unsignedp ? umul_widen_optab : smul_widen_optab;
  if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && mul_widen_cost (speed, wider_mode) < max_cost)
    {
      tem = expand_binop (wider_mode, moptab, op0, op1, NULL_RTX, unsignedp,
			  OPTAB_WIDEN);
      if (tem)
	return extract_high_half (mode, tem);
    }

  /* Try widening the mode and perform a non-widening multiplication.  */
  if (optab_handler (smul_optab, wider_mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_cost (speed, wider_mode) + shift_cost (speed, mode, size-1)
	  < max_cost))
    {
      rtx_insn *insns;
      rtx wop0, wop1;

      /* We need to widen the operands, for example to ensure the
	 constant multiplier is correctly sign or zero extended.
	 Use a sequence to clean-up any instructions emitted by
	 the conversions if things don't work out.  */
      start_sequence ();
      wop0 = convert_modes (wider_mode, mode, op0, unsignedp);
      wop1 = convert_modes (wider_mode, mode, op1, unsignedp);
      tem = expand_binop (wider_mode, smul_optab, wop0, wop1, 0,
			  unsignedp, OPTAB_WIDEN);
      insns = end_sequence ();

      if (tem)
	{
	  /* Success: commit the queued conversion/multiply insns.  */
	  emit_insn (insns);
	  return extract_high_half (mode, tem);
	}
    }

  /* Try widening multiplication of opposite signedness, and adjust.  */
  moptab = unsignedp ? smul_widen_optab : umul_widen_optab;
  if (convert_optab_handler (moptab, wider_mode, mode) != CODE_FOR_nothing
      && size - 1 < BITS_PER_WORD
      && (mul_widen_cost (speed, wider_mode)
	  + 2 * shift_cost (speed, mode, size-1)
	  + 4 * add_cost (speed, mode) < max_cost))
    {
      tem = expand_binop (wider_mode, moptab, op0, op1, NULL_RTX, !unsignedp,
			  OPTAB_WIDEN);
      if (tem != 0)
	{
	  tem = extract_high_half (mode, tem);
	  /* We used the wrong signedness.  Adjust the result.  */
	  return expand_mult_highpart_adjust (mode, tem, op0, op1, target,
					      unsignedp);
	}
    }

  /* No strategy fit inside MAX_COST.  */
  return 0;
}
3995 :
3996 : /* Emit code to multiply OP0 and OP1 (where OP1 is an integer constant),
3997 : putting the high half of the result in TARGET if that is convenient,
3998 : and return where the result is. If the operation cannot be performed,
3999 : 0 is returned.
4000 :
4001 : MODE is the mode of operation and result.
4002 :
4003 : UNSIGNEDP nonzero means unsigned multiply.
4004 :
4005 : MAX_COST is the total allowed cost for the expanded RTL. */
4006 :
static rtx
expmed_mult_highpart (scalar_int_mode mode, rtx op0, rtx op1,
		      rtx target, int unsignedp, int max_cost)
{
  const bool speed = optimize_insn_for_speed_p ();
  unsigned HOST_WIDE_INT cnst1;
  int extra_cost;
  bool sign_adjust = false;
  enum mult_variant variant;
  struct algorithm alg;
  rtx narrow_op1, tem;

  /* We can't support modes wider than HOST_BITS_PER_INT.  */
  gcc_assert (HWI_COMPUTABLE_MODE_P (mode));

  cnst1 = INTVAL (op1) & GET_MODE_MASK (mode);
  narrow_op1 = gen_int_mode (INTVAL (op1), mode);

  /* We can't optimize modes wider than BITS_PER_WORD.
     ??? We might be able to perform double-word arithmetic if
     mode == word_mode, however all the cost calculations in
     synth_mult etc. assume single-word operations.  */
  scalar_int_mode wider_mode = GET_MODE_WIDER_MODE (mode).require ();
  if (GET_MODE_BITSIZE (wider_mode) > BITS_PER_WORD)
    return expmed_mult_highpart_optab (mode, op0, narrow_op1, target,
				       unsignedp, max_cost);

  /* The synthesized product is extracted with a final shift by the
     mode width minus one; account for that on top of the multiply.  */
  extra_cost = shift_cost (speed, mode, GET_MODE_BITSIZE (mode) - 1);

  /* Check whether we try to multiply by a negative constant.  */
  if (!unsignedp && ((cnst1 >> (GET_MODE_BITSIZE (mode) - 1)) & 1))
    {
      sign_adjust = true;
      extra_cost += add_cost (speed, mode);
    }

  /* See whether shift/add multiplication is cheap enough.  */
  if (choose_mult_variant (wider_mode, cnst1, &alg, &variant,
			   max_cost - extra_cost))
    {
      /* See whether the specialized multiplication optabs are
	 cheaper than the shift/add version.  */
      tem = expmed_mult_highpart_optab (mode, op0, narrow_op1, target,
					unsignedp,
					alg.cost.cost + extra_cost);
      if (tem)
	return tem;

      /* Perform the multiply in the wider mode and extract the high
	 half of the double-width product.  */
      tem = convert_to_mode (wider_mode, op0, unsignedp);
      tem = expand_mult_const (wider_mode, tem, cnst1, 0, &alg, variant);
      tem = extract_high_half (mode, tem);

      /* Adjust result for signedness.  */
      if (sign_adjust)
	tem = force_operand (gen_rtx_MINUS (mode, tem, op0), tem);

      return tem;
    }
  /* Shift/add was too expensive; fall back to the optab strategies with
     the full budget.  */
  return expmed_mult_highpart_optab (mode, op0, narrow_op1, target,
				     unsignedp, max_cost);
}
4068 :
4069 :
/* Expand signed modulus of OP0 by a power of two D in mode MODE.
   D must be positive.  The result follows C truncating-division
   semantics, so it has the same sign as OP0.  */

static rtx
expand_smod_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx result, temp, shift;
  rtx_code_label *label;
  int logd;
  int prec = GET_MODE_PRECISION (mode);

  logd = floor_log2 (d);
  result = gen_reg_rtx (mode);

  /* Avoid conditional branches when they're expensive.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2
      && optimize_insn_for_speed_p ())
    {
      /* SIGNMASK is -1 if OP0 < 0, else 0; RESULT serves as the target
	 scratch here and is overwritten below.  */
      rtx signmask = emit_store_flag (result, LT, op0, const0_rtx,
				      mode, 0, -1);
      if (signmask)
	{
	  /* MASKLOW has the low LOGD bits set, i.e. the bits that
	     select the remainder for a non-negative dividend.  */
	  HOST_WIDE_INT masklow = (HOST_WIDE_INT_1 << logd) - 1;
	  signmask = force_reg (mode, signmask);
	  shift = gen_int_shift_amount (mode, GET_MODE_BITSIZE (mode) - logd);

	  /* Use the rtx_cost of a LSHIFTRT instruction to determine
	     which instruction sequence to use.  If logical right shifts
	     are expensive then use 2 XORs, 2 SUBs and an AND, otherwise
	     use a LSHIFTRT, 1 ADD, 1 SUB and an AND.  */

	  temp = gen_rtx_LSHIFTRT (mode, result, shift);
	  if (optab_handler (lshr_optab, mode) == CODE_FOR_nothing
	      || (set_src_cost (temp, mode, optimize_insn_for_speed_p ())
		  > COSTS_N_INSNS (2)))
	    {
	      /* ((op0 ^ signmask) - signmask) conditionally negates OP0,
		 so the AND picks up the remainder of |OP0|; the trailing
		 xor/sub pair negates it back when OP0 was negative.  */
	      temp = expand_binop (mode, xor_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, xor_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  else
	    {
	      /* Shrink SIGNMASK to its low LOGD bits (MASKLOW if OP0 < 0,
		 0 otherwise), pre-bias OP0 by it, mask, then remove the
		 bias to restore the correct sign.  */
	      signmask = expand_binop (mode, lshr_optab, signmask, shift,
				       NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      signmask = force_reg (mode, signmask);

	      temp = expand_binop (mode, add_optab, op0, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, and_optab, temp,
				   gen_int_mode (masklow, mode),
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	      temp = expand_binop (mode, sub_optab, temp, signmask,
				   NULL_RTX, 1, OPTAB_LIB_WIDEN);
	    }
	  return temp;
	}
    }

  /* Branchy fallback.  Mask contains the mode's signbit and the
     significant bits of the modulus.  By including the signbit in the
     operation, many targets can avoid an explicit compare operation in
     the following comparison against zero.  */
  wide_int mask = wi::mask (logd, false, prec);
  mask = wi::set_bit (mask, prec - 1);

  temp = expand_binop (mode, and_optab, op0,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);

  /* If RESULT >= 0 the masked value already is the remainder; skip the
     sign-correction below.  */
  label = gen_label_rtx ();
  do_cmp_and_jump (result, const0_rtx, GE, mode, label);

  /* Negative case: (result - 1) | ~mask) + 1 sign-extends the LOGD-bit
     remainder to a negative value in the full mode.  */
  temp = expand_binop (mode, sub_optab, result, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);

  mask = wi::mask (logd, true, prec);
  temp = expand_binop (mode, ior_optab, temp,
		       immed_wide_int_const (mask, mode),
		       result, 1, OPTAB_LIB_WIDEN);
  temp = expand_binop (mode, add_optab, temp, const1_rtx, result,
		       0, OPTAB_LIB_WIDEN);
  if (temp != result)
    emit_move_insn (result, temp);
  emit_label (label);
  return result;
}
4165 :
/* Expand signed division of OP0 by a power of two D in mode MODE.
   This routine is only called for positive values of D.

   The emitted code computes truncating (round-toward-zero) division by
   adding D - 1 to negative dividends before the arithmetic right shift
   by log2(D).  Several strategies are tried in decreasing order of
   preference, falling back to a compare-and-branch sequence.  */

static rtx
expand_sdiv_pow2 (scalar_int_mode mode, rtx op0, HOST_WIDE_INT d)
{
  rtx temp;
  rtx_code_label *label;
  int logd;

  logd = floor_log2 (d);

  /* Strategy 1, D == 2 only: add the sign bit (0 or 1) obtained from an
     scc insn, then shift right by one.  */
  if (d == 2
      && BRANCH_COST (optimize_insn_for_speed_p (),
		      false) >= 1)
    {
      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, 1);
      if (temp != NULL_RTX)
	{
	  temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			       0, OPTAB_LIB_WIDEN);
	  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
	}
    }

  /* Strategy 2: use a conditional move to select between OP0 and
     OP0 + D - 1, then shift.  The sequence is built tentatively and
     discarded if emit_conditional_move fails.  */
  if (HAVE_conditional_move
      && BRANCH_COST (optimize_insn_for_speed_p (), false) >= 2)
    {
      rtx temp2;

      start_sequence ();
      temp2 = copy_to_mode_reg (mode, op0);
      temp = expand_binop (mode, add_optab, temp2, gen_int_mode (d - 1, mode),
			   NULL_RTX, 0, OPTAB_LIB_WIDEN);
      temp = force_reg (mode, temp);

      /* Construct "temp2 = (temp2 < 0) ? temp : temp2".  */
      temp2 = emit_conditional_move (temp2, { LT, temp2, const0_rtx, mode },
				     temp, temp2, mode, 0);
      if (temp2)
	{
	  rtx_insn *seq = end_sequence ();
	  emit_insn (seq);
	  return expand_shift (RSHIFT_EXPR, mode, temp2, logd, NULL_RTX, 0);
	}
      /* Cmove failed; throw away the tentative sequence.  */
      end_sequence ();
    }

  /* Strategy 3: materialize -1/0 from the sign with emit_store_flag and
     turn it into the D - 1 bias, either by masking or by an unsigned
     right shift, whichever is cheaper.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (),
		   false) >= 2)
    {
      int ushift = GET_MODE_BITSIZE (mode) - logd;

      temp = gen_reg_rtx (mode);
      temp = emit_store_flag (temp, LT, op0, const0_rtx, mode, 0, -1);
      if (temp != NULL_RTX)
	{
	  if (GET_MODE_BITSIZE (mode) >= BITS_PER_WORD
	      || shift_cost (optimize_insn_for_speed_p (), mode, ushift)
		 > COSTS_N_INSNS (1))
	    temp = expand_binop (mode, and_optab, temp,
				 gen_int_mode (d - 1, mode),
				 NULL_RTX, 0, OPTAB_LIB_WIDEN);
	  else
	    temp = expand_shift (RSHIFT_EXPR, mode, temp,
				 ushift, NULL_RTX, 1);
	  temp = expand_binop (mode, add_optab, temp, op0, NULL_RTX,
			       0, OPTAB_LIB_WIDEN);
	  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
	}
    }

  /* Fallback: branch around the D - 1 adjustment for non-negative
     dividends, then shift.  */
  label = gen_label_rtx ();
  temp = copy_to_mode_reg (mode, op0);
  do_cmp_and_jump (temp, const0_rtx, GE, mode, label);
  expand_inc (temp, gen_int_mode (d - 1, mode));
  emit_label (label);
  return expand_shift (RSHIFT_EXPR, mode, temp, logd, NULL_RTX, 0);
}
4246 :
4247 : /* Emit the code to divide OP0 by OP1, putting the result in TARGET
4248 : if that is convenient, and returning where the result is.
4249 : You may request either the quotient or the remainder as the result;
4250 : specify REM_FLAG nonzero to get the remainder.
4251 :
4252 : CODE is the expression code for which kind of division this is;
4253 : it controls how rounding is done. MODE is the machine mode to use.
4254 : UNSIGNEDP nonzero means do unsigned division. */
4255 :
4256 : /* ??? For CEIL_MOD_EXPR, can compute incorrect remainder with ANDI
4257 : and then correct it by or'ing in missing high bits
4258 : if result of ANDI is nonzero.
4259 : For ROUND_MOD_EXPR, can use ANDI and then sign-extend the result.
4260 : This could optimize to a bfexts instruction.
4261 : But C doesn't use these operations, so their optimizations are
4262 : left for later. */
4263 : /* ??? For modulo, we don't actually need the highpart of the first product,
4264 : the low part will do nicely. And for small divisors, the second multiply
4265 : can also be a low-part only multiply or even be completely left out.
4266 : E.g. to calculate the remainder of a division by 3 with a 32 bit
4267 : multiply, multiply with 0x55555556 and extract the upper two bits;
4268 : the result is exact for inputs up to 0x1fffffff.
4269 : The input range can be reduced by using cross-sum rules.
4270 : For odd divisors >= 3, the following table gives right shift counts
4271 : so that if a number is shifted by an integer multiple of the given
4272 : amount, the remainder stays the same:
4273 : 2, 4, 3, 6, 10, 12, 4, 8, 18, 6, 11, 20, 18, 0, 5, 10, 12, 0, 12, 20,
4274 : 14, 12, 23, 21, 8, 0, 20, 18, 0, 0, 6, 12, 0, 22, 0, 18, 20, 30, 0, 0,
4275 : 0, 8, 0, 11, 12, 10, 36, 0, 30, 0, 0, 12, 0, 0, 0, 0, 44, 12, 24, 0,
4276 : 20, 0, 7, 14, 0, 18, 36, 0, 0, 46, 60, 0, 42, 0, 15, 24, 20, 0, 0, 33,
4277 : 0, 20, 0, 0, 18, 0, 60, 0, 0, 0, 0, 0, 40, 18, 0, 0, 12
4278 :
4279 : Cross-sum rules for even numbers can be derived by leaving as many bits
4280 : to the right alone as the divisor has zeros to the right.
4281 : E.g. if x is an unsigned 32 bit number:
4282 : (x mod 12) == (((x & 1023) + ((x >> 8) & ~3)) * 0x15555558 >> 2 * 3) >> 28
4283 : */
4284 :
4285 : rtx
4286 225501 : expand_divmod (int rem_flag, enum tree_code code, machine_mode mode,
4287 : rtx op0, rtx op1, rtx target, int unsignedp,
4288 : enum optab_methods methods)
4289 : {
4290 225501 : machine_mode compute_mode;
4291 225501 : rtx tquotient;
4292 225501 : rtx quotient = 0, remainder = 0;
4293 225501 : rtx_insn *last;
4294 225501 : rtx_insn *insn;
4295 225501 : optab optab1, optab2;
4296 225501 : int op1_is_constant, op1_is_pow2 = 0;
4297 225501 : int max_cost, extra_cost;
4298 225501 : static HOST_WIDE_INT last_div_const = 0;
4299 225501 : bool speed = optimize_insn_for_speed_p ();
4300 :
4301 225501 : op1_is_constant = CONST_INT_P (op1);
4302 225501 : if (op1_is_constant)
4303 : {
4304 141106 : wide_int ext_op1 = rtx_mode_t (op1, mode);
4305 141106 : op1_is_pow2 = (wi::popcount (ext_op1) == 1
4306 282212 : || (! unsignedp
4307 169748 : && wi::popcount (wi::neg (ext_op1)) == 1));
4308 141106 : }
4309 :
4310 : /*
4311 : This is the structure of expand_divmod:
4312 :
4313 : First comes code to fix up the operands so we can perform the operations
4314 : correctly and efficiently.
4315 :
4316 : Second comes a switch statement with code specific for each rounding mode.
4317 : For some special operands this code emits all RTL for the desired
4318 : operation, for other cases, it generates only a quotient and stores it in
4319 : QUOTIENT. The case for trunc division/remainder might leave quotient = 0,
4320 : to indicate that it has not done anything.
4321 :
4322 : Last comes code that finishes the operation. If QUOTIENT is set and
4323 : REM_FLAG is set, the remainder is computed as OP0 - QUOTIENT * OP1. If
4324 : QUOTIENT is not set, it is computed using trunc rounding.
4325 :
4326 : We try to generate special code for division and remainder when OP1 is a
4327 : constant. If |OP1| = 2**n we can use shifts and some other fast
4328 : operations. For other values of OP1, we compute a carefully selected
4329 : fixed-point approximation m = 1/OP1, and generate code that multiplies OP0
4330 : by m.
4331 :
4332 : In all cases but EXACT_DIV_EXPR, this multiplication requires the upper
4333 : half of the product. Different strategies for generating the product are
4334 : implemented in expmed_mult_highpart.
4335 :
4336 : If what we actually want is the remainder, we generate that by another
4337 : by-constant multiplication and a subtraction. */
4338 :
4339 : /* We shouldn't be called with OP1 == const1_rtx, but some of the
4340 : code below will malfunction if we are, so check here and handle
4341 : the special case if so. */
4342 225501 : if (op1 == const1_rtx)
4343 0 : return rem_flag ? const0_rtx : op0;
4344 :
4345 : /* When dividing by -1, we could get an overflow.
4346 : negv_optab can handle overflows. */
4347 225501 : if (! unsignedp && op1 == constm1_rtx)
4348 : {
4349 0 : if (rem_flag)
4350 0 : return const0_rtx;
4351 0 : return expand_unop (mode, flag_trapv && GET_MODE_CLASS (mode) == MODE_INT
4352 0 : ? negv_optab : neg_optab, op0, target, 0);
4353 : }
4354 :
4355 225501 : if (target
4356 : /* Don't use the function value register as a target
4357 : since we have to read it as well as write it,
4358 : and function-inlining gets confused by this. */
4359 225501 : && ((REG_P (target) && REG_FUNCTION_VALUE_P (target))
4360 : /* Don't clobber an operand while doing a multi-step calculation. */
4361 95694 : || ((rem_flag || op1_is_constant)
4362 76473 : && (reg_mentioned_p (target, op0)
4363 74124 : || (MEM_P (op0) && MEM_P (target))))
4364 92630 : || reg_mentioned_p (target, op1)
4365 92551 : || (MEM_P (op1) && MEM_P (target))))
4366 : target = 0;
4367 :
4368 : /* Get the mode in which to perform this computation. Normally it will
4369 : be MODE, but sometimes we can't do the desired operation in MODE.
4370 : If so, pick a wider mode in which we can do the operation. Convert
4371 : to that mode at the start to avoid repeated conversions.
4372 :
4373 : First see what operations we need. These depend on the expression
4374 : we are evaluating. (We assume that divxx3 insns exist under the
4375 : same conditions that modxx3 insns and that these insns don't normally
4376 : fail. If these assumptions are not correct, we may generate less
4377 : efficient code in some cases.)
4378 :
4379 : Then see if we find a mode in which we can open-code that operation
4380 : (either a division, modulus, or shift). Finally, check for the smallest
4381 : mode for which we can do the operation with a library call. */
4382 :
4383 : /* We might want to refine this now that we have division-by-constant
4384 : optimization. Since expmed_mult_highpart tries so many variants, it is
4385 : not straightforward to generalize this. Maybe we should make an array
4386 : of possible modes in init_expmed? Save this for GCC 2.7. */
4387 :
4388 118074 : optab1 = (op1_is_pow2
4389 225501 : ? (unsignedp ? lshr_optab : ashr_optab)
4390 138228 : : (unsignedp ? udiv_optab : sdiv_optab));
4391 288275 : optab2 = (op1_is_pow2 ? optab1
4392 138228 : : (unsignedp ? udivmod_optab : sdivmod_optab));
4393 :
4394 225501 : if (methods == OPTAB_WIDEN || methods == OPTAB_LIB_WIDEN)
4395 : {
4396 237032 : FOR_EACH_MODE_FROM (compute_mode, mode)
4397 232898 : if (optab_handler (optab1, compute_mode) != CODE_FOR_nothing
4398 232898 : || optab_handler (optab2, compute_mode) != CODE_FOR_nothing)
4399 : break;
4400 :
4401 223820 : if (compute_mode == VOIDmode && methods == OPTAB_LIB_WIDEN)
4402 4134 : FOR_EACH_MODE_FROM (compute_mode, mode)
4403 4134 : if (optab_libfunc (optab1, compute_mode)
4404 4134 : || optab_libfunc (optab2, compute_mode))
4405 : break;
4406 : }
4407 : else
4408 : compute_mode = mode;
4409 :
4410 : /* If we still couldn't find a mode, use MODE, but expand_binop will
4411 : probably die. */
4412 5815 : if (compute_mode == VOIDmode)
4413 0 : compute_mode = mode;
4414 :
4415 225501 : if (target && GET_MODE (target) == compute_mode)
4416 : tquotient = target;
4417 : else
4418 133111 : tquotient = gen_reg_rtx (compute_mode);
4419 :
4420 : #if 0
4421 : /* It should be possible to restrict the precision to GET_MODE_BITSIZE
4422 : (mode), and thereby get better code when OP1 is a constant. Do that
4423 : later. It will require going over all usages of SIZE below. */
4424 : size = GET_MODE_BITSIZE (mode);
4425 : #endif
4426 :
4427 : /* Only deduct something for a REM if the last divide done was
4428 : for a different constant. Then set the constant of the last
4429 : divide. */
4430 225501 : max_cost = (unsignedp
4431 332928 : ? udiv_cost (speed, compute_mode)
4432 118074 : : sdiv_cost (speed, compute_mode));
4433 225501 : if (rem_flag && ! (last_div_const != 0 && op1_is_constant
4434 7630 : && INTVAL (op1) == last_div_const))
4435 52061 : max_cost -= (mul_cost (speed, compute_mode)
4436 52061 : + add_cost (speed, compute_mode));
4437 :
4438 225501 : last_div_const = ! rem_flag && op1_is_constant ? INTVAL (op1) : 0;
4439 :
4440 : /* Now convert to the best mode to use. */
4441 225501 : if (compute_mode != mode)
4442 : {
4443 0 : op0 = convert_modes (compute_mode, mode, op0, unsignedp);
4444 0 : op1 = convert_modes (compute_mode, mode, op1, unsignedp);
4445 :
4446 : /* convert_modes may have placed op1 into a register, so we
4447 : must recompute the following. */
4448 0 : op1_is_constant = CONST_INT_P (op1);
4449 0 : if (op1_is_constant)
4450 : {
4451 0 : wide_int ext_op1 = rtx_mode_t (op1, compute_mode);
4452 0 : op1_is_pow2 = (wi::popcount (ext_op1) == 1
4453 0 : || (! unsignedp
4454 0 : && wi::popcount (wi::neg (ext_op1)) == 1));
4455 0 : }
4456 : else
4457 : op1_is_pow2 = 0;
4458 : }
4459 :
4460 : /* If one of the operands is a volatile MEM, copy it into a register. */
4461 :
4462 225501 : if (MEM_P (op0) && MEM_VOLATILE_P (op0))
4463 0 : op0 = force_reg (compute_mode, op0);
4464 225501 : if (MEM_P (op1) && MEM_VOLATILE_P (op1))
4465 0 : op1 = force_reg (compute_mode, op1);
4466 :
4467 : /* If we need the remainder or if OP1 is constant, we need to
4468 : put OP0 in a register in case it has any queued subexpressions. */
4469 225501 : if (rem_flag || op1_is_constant)
4470 170548 : op0 = force_reg (compute_mode, op0);
4471 :
4472 225501 : last = get_last_insn ();
4473 :
4474 : /* Promote floor rounding to trunc rounding for unsigned operations. */
4475 225501 : if (unsignedp)
4476 : {
4477 107427 : if (code == FLOOR_DIV_EXPR)
4478 : code = TRUNC_DIV_EXPR;
4479 107370 : if (code == FLOOR_MOD_EXPR)
4480 156 : code = TRUNC_MOD_EXPR;
4481 107427 : if (code == EXACT_DIV_EXPR && op1_is_pow2)
4482 4647 : code = TRUNC_DIV_EXPR;
4483 : }
4484 :
4485 225501 : if (op1 != const0_rtx)
4486 225124 : switch (code)
4487 : {
4488 177023 : case TRUNC_MOD_EXPR:
4489 177023 : case TRUNC_DIV_EXPR:
4490 177023 : if (op1_is_constant)
4491 : {
4492 93741 : scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4493 93741 : int size = GET_MODE_BITSIZE (int_mode);
4494 93741 : if (unsignedp)
4495 : {
4496 56990 : unsigned HOST_WIDE_INT mh, ml;
4497 56990 : int pre_shift, post_shift;
4498 56990 : wide_int wd = rtx_mode_t (op1, int_mode);
4499 56990 : unsigned HOST_WIDE_INT d = wd.to_uhwi ();
4500 :
4501 56990 : if (wi::popcount (wd) == 1)
4502 : {
4503 31973 : pre_shift = floor_log2 (d);
4504 31973 : if (rem_flag)
4505 : {
4506 261 : unsigned HOST_WIDE_INT mask
4507 261 : = (HOST_WIDE_INT_1U << pre_shift) - 1;
4508 261 : remainder
4509 261 : = expand_binop (int_mode, and_optab, op0,
4510 261 : gen_int_mode (mask, int_mode),
4511 : remainder, 1, methods);
4512 261 : if (remainder)
4513 261 : return gen_lowpart (mode, remainder);
4514 : }
4515 31712 : quotient = expand_shift (RSHIFT_EXPR, int_mode, op0,
4516 31712 : pre_shift, tquotient, 1);
4517 : }
4518 25017 : else if (size <= HOST_BITS_PER_WIDE_INT)
4519 : {
4520 23440 : if (d >= (HOST_WIDE_INT_1U << (size - 1)))
4521 : {
4522 : /* Most significant bit of divisor is set; emit an scc
4523 : insn. */
4524 155 : quotient = emit_store_flag_force (tquotient, GEU, op0, op1,
4525 : int_mode, 1, 1);
4526 : }
4527 : else
4528 : {
4529 : /* Find a suitable multiplier and right shift count
4530 : instead of directly dividing by D. */
4531 23285 : mh = choose_multiplier (d, size, size,
4532 : &ml, &post_shift);
4533 :
4534 : /* If the suggested multiplier is more than SIZE bits,
4535 : we can do better for even divisors, using an
4536 : initial right shift. */
4537 23285 : if (mh != 0 && (d & 1) == 0)
4538 : {
4539 2232 : pre_shift = ctz_or_zero (d);
4540 2232 : mh = choose_multiplier (d >> pre_shift, size,
4541 : size - pre_shift,
4542 : &ml, &post_shift);
4543 2232 : gcc_assert (!mh);
4544 : }
4545 : else
4546 : pre_shift = 0;
4547 :
4548 2246 : if (mh != 0)
4549 : {
4550 2246 : rtx t1, t2, t3, t4;
4551 :
4552 2341 : if (post_shift - 1 >= BITS_PER_WORD)
4553 0 : goto fail1;
4554 :
4555 2246 : extra_cost
4556 2246 : = (shift_cost (speed, int_mode, post_shift - 1)
4557 2246 : + shift_cost (speed, int_mode, 1)
4558 2246 : + 2 * add_cost (speed, int_mode));
4559 2246 : t1 = expmed_mult_highpart
4560 2246 : (int_mode, op0, gen_int_mode (ml, int_mode),
4561 : NULL_RTX, 1, max_cost - extra_cost);
4562 2246 : if (t1 == 0)
4563 103 : goto fail1;
4564 2143 : t2 = force_operand (gen_rtx_MINUS (int_mode,
4565 : op0, t1),
4566 : NULL_RTX);
4567 2143 : t3 = expand_shift (RSHIFT_EXPR, int_mode,
4568 : t2, 1, NULL_RTX, 1);
4569 2143 : t4 = force_operand (gen_rtx_PLUS (int_mode,
4570 : t1, t3),
4571 : NULL_RTX);
4572 2143 : quotient = expand_shift
4573 2143 : (RSHIFT_EXPR, int_mode, t4,
4574 2143 : post_shift - 1, tquotient, 1);
4575 : }
4576 : else
4577 : {
4578 21039 : rtx t1, t2;
4579 :
4580 22840 : if (pre_shift >= BITS_PER_WORD
4581 21039 : || post_shift >= BITS_PER_WORD)
4582 3 : goto fail1;
4583 :
4584 21036 : t1 = expand_shift
4585 42072 : (RSHIFT_EXPR, int_mode, op0,
4586 21036 : pre_shift, NULL_RTX, 1);
4587 21036 : extra_cost
4588 21036 : = (shift_cost (speed, int_mode, pre_shift)
4589 21036 : + shift_cost (speed, int_mode, post_shift));
4590 21036 : t2 = expmed_mult_highpart
4591 21036 : (int_mode, t1,
4592 21036 : gen_int_mode (ml, int_mode),
4593 : NULL_RTX, 1, max_cost - extra_cost);
4594 21036 : if (t2 == 0)
4595 932 : goto fail1;
4596 20104 : quotient = expand_shift
4597 20104 : (RSHIFT_EXPR, int_mode, t2,
4598 20104 : post_shift, tquotient, 1);
4599 : }
4600 : }
4601 : }
4602 : else /* Too wide mode to use tricky code */
4603 : break;
4604 :
4605 54114 : insn = get_last_insn ();
4606 54114 : if (insn != last)
4607 54114 : set_dst_reg_note (insn, REG_EQUAL,
4608 : gen_rtx_UDIV (int_mode, op0, op1),
4609 : quotient);
4610 55413 : }
4611 : else /* TRUNC_DIV, signed */
4612 : {
4613 36751 : unsigned HOST_WIDE_INT ml;
4614 36751 : int post_shift;
4615 36751 : rtx mlr;
4616 36751 : HOST_WIDE_INT d = INTVAL (op1);
4617 36751 : unsigned HOST_WIDE_INT abs_d;
4618 :
4619 : /* Not prepared to handle division/remainder by
4620 : 0xffffffffffffffff8000000000000000 etc. */
4621 36751 : if (d == HOST_WIDE_INT_MIN && size > HOST_BITS_PER_WIDE_INT)
4622 : break;
4623 :
4624 : /* Since d might be INT_MIN, we have to cast to
4625 : unsigned HOST_WIDE_INT before negating to avoid
4626 : undefined signed overflow. */
4627 36751 : abs_d = (d >= 0
4628 36751 : ? (unsigned HOST_WIDE_INT) d
4629 : : - (unsigned HOST_WIDE_INT) d);
4630 :
4631 : /* n rem d = n rem -d */
4632 36751 : if (rem_flag && d < 0)
4633 : {
4634 141 : d = abs_d;
4635 141 : op1 = gen_int_mode (abs_d, int_mode);
4636 : }
4637 :
4638 36751 : if (d == 1)
4639 : quotient = op0;
4640 36751 : else if (d == -1)
4641 0 : quotient = expand_unop (int_mode, neg_optab, op0,
4642 : tquotient, 0);
4643 36751 : else if (size <= HOST_BITS_PER_WIDE_INT
4644 35350 : && abs_d == HOST_WIDE_INT_1U << (size - 1))
4645 : {
4646 : /* This case is not handled correctly below. */
4647 133 : quotient = emit_store_flag (tquotient, EQ, op0, op1,
4648 : int_mode, 1, 1);
4649 133 : if (quotient == 0)
4650 1316 : goto fail1;
4651 : }
4652 36618 : else if (EXACT_POWER_OF_2_OR_ZERO_P (d)
4653 12713 : && (size <= HOST_BITS_PER_WIDE_INT || d >= 0)
4654 2638 : && (rem_flag
4655 2638 : ? smod_pow2_cheap (speed, int_mode)
4656 10075 : : sdiv_pow2_cheap (speed, int_mode))
4657 : /* We assume that cheap metric is true if the
4658 : optab has an expander for this mode. */
4659 50431 : && ((optab_handler ((rem_flag ? smod_optab
4660 : : sdiv_optab),
4661 : int_mode)
4662 : != CODE_FOR_nothing)
4663 617 : || (optab_handler (sdivmod_optab, int_mode)
4664 : != CODE_FOR_nothing)))
4665 : ;
4666 36007 : else if (EXACT_POWER_OF_2_OR_ZERO_P (abs_d))
4667 : {
4668 12618 : if (rem_flag)
4669 : {
4670 2508 : remainder = expand_smod_pow2 (int_mode, op0, d);
4671 2508 : if (remainder)
4672 2508 : return gen_lowpart (mode, remainder);
4673 : }
4674 :
4675 10110 : if (sdiv_pow2_cheap (speed, int_mode)
4676 10110 : && ((optab_handler (sdiv_optab, int_mode)
4677 : != CODE_FOR_nothing)
4678 10 : || (optab_handler (sdivmod_optab, int_mode)
4679 : != CODE_FOR_nothing)))
4680 7 : quotient = expand_divmod (0, TRUNC_DIV_EXPR,
4681 : int_mode, op0,
4682 7 : gen_int_mode (abs_d,
4683 : int_mode),
4684 : NULL_RTX, 0);
4685 : else
4686 10103 : quotient = expand_sdiv_pow2 (int_mode, op0, abs_d);
4687 :
4688 : /* We have computed OP0 / abs(OP1). If OP1 is negative,
4689 : negate the quotient. */
4690 10110 : if (d < 0)
4691 : {
4692 516 : insn = get_last_insn ();
4693 516 : if (insn != last
4694 516 : && abs_d < (HOST_WIDE_INT_1U
4695 : << (HOST_BITS_PER_WIDE_INT - 1)))
4696 516 : set_dst_reg_note (insn, REG_EQUAL,
4697 516 : gen_rtx_DIV (int_mode, op0,
4698 : gen_int_mode
4699 : (abs_d,
4700 : int_mode)),
4701 : quotient);
4702 :
4703 516 : quotient = expand_unop (int_mode, neg_optab,
4704 : quotient, quotient, 0);
4705 : }
4706 : }
4707 23389 : else if (size <= HOST_BITS_PER_WIDE_INT)
4708 : {
4709 22051 : choose_multiplier (abs_d, size, size - 1,
4710 : &ml, &post_shift);
4711 22051 : if (ml < HOST_WIDE_INT_1U << (size - 1))
4712 : {
4713 16683 : rtx t1, t2, t3;
4714 :
4715 17827 : if (post_shift >= BITS_PER_WORD
4716 16683 : || size - 1 >= BITS_PER_WORD)
4717 251 : goto fail1;
4718 :
4719 16432 : extra_cost = (shift_cost (speed, int_mode, post_shift)
4720 16432 : + shift_cost (speed, int_mode, size - 1)
4721 16432 : + add_cost (speed, int_mode));
4722 16432 : t1 = expmed_mult_highpart
4723 16432 : (int_mode, op0, gen_int_mode (ml, int_mode),
4724 : NULL_RTX, 0, max_cost - extra_cost);
4725 16432 : if (t1 == 0)
4726 830 : goto fail1;
4727 15602 : t2 = expand_shift
4728 31204 : (RSHIFT_EXPR, int_mode, t1,
4729 15602 : post_shift, NULL_RTX, 0);
4730 15602 : t3 = expand_shift
4731 15602 : (RSHIFT_EXPR, int_mode, op0,
4732 15602 : size - 1, NULL_RTX, 0);
4733 15602 : if (d < 0)
4734 197 : quotient
4735 197 : = force_operand (gen_rtx_MINUS (int_mode, t3, t2),
4736 : tquotient);
4737 : else
4738 15405 : quotient
4739 15405 : = force_operand (gen_rtx_MINUS (int_mode, t2, t3),
4740 : tquotient);
4741 : }
4742 : else
4743 : {
4744 5368 : rtx t1, t2, t3, t4;
4745 :
4746 5720 : if (post_shift >= BITS_PER_WORD
4747 5363 : || size - 1 >= BITS_PER_WORD)
4748 25 : goto fail1;
4749 :
4750 5343 : ml |= HOST_WIDE_INT_M1U << (size - 1);
4751 5343 : mlr = gen_int_mode (ml, int_mode);
4752 5343 : extra_cost = (shift_cost (speed, int_mode, post_shift)
4753 5343 : + shift_cost (speed, int_mode, size - 1)
4754 5343 : + 2 * add_cost (speed, int_mode));
4755 5343 : t1 = expmed_mult_highpart (int_mode, op0, mlr,
4756 : NULL_RTX, 0,
4757 : max_cost - extra_cost);
4758 5343 : if (t1 == 0)
4759 210 : goto fail1;
4760 5133 : t2 = force_operand (gen_rtx_PLUS (int_mode, t1, op0),
4761 : NULL_RTX);
4762 5133 : t3 = expand_shift
4763 10266 : (RSHIFT_EXPR, int_mode, t2,
4764 5133 : post_shift, NULL_RTX, 0);
4765 5133 : t4 = expand_shift
4766 5133 : (RSHIFT_EXPR, int_mode, op0,
4767 5133 : size - 1, NULL_RTX, 0);
4768 5133 : if (d < 0)
4769 53 : quotient
4770 53 : = force_operand (gen_rtx_MINUS (int_mode, t4, t3),
4771 : tquotient);
4772 : else
4773 5080 : quotient
4774 5080 : = force_operand (gen_rtx_MINUS (int_mode, t3, t4),
4775 : tquotient);
4776 : }
4777 : }
4778 : else /* Too wide mode to use tricky code */
4779 : break;
4780 :
4781 31589 : insn = get_last_insn ();
4782 31589 : if (insn != last)
4783 30978 : set_dst_reg_note (insn, REG_EQUAL,
4784 : gen_rtx_DIV (int_mode, op0, op1),
4785 : quotient);
4786 : }
4787 : break;
4788 : }
4789 83282 : fail1:
4790 85636 : delete_insns_since (last);
4791 85636 : break;
4792 :
4793 1770 : case FLOOR_DIV_EXPR:
4794 1770 : case FLOOR_MOD_EXPR:
4795 : /* We will come here only for signed operations. */
4796 1770 : if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
4797 : {
4798 976 : scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
4799 976 : int size = GET_MODE_BITSIZE (int_mode);
4800 976 : unsigned HOST_WIDE_INT mh, ml;
4801 976 : int pre_shift, post_shift;
4802 976 : HOST_WIDE_INT d = INTVAL (op1);
4803 :
4804 976 : if (d > 0)
4805 : {
4806 : /* We could just as easily deal with negative constants here,
4807 : but it does not seem worth the trouble for GCC 2.6. */
4808 951 : if (EXACT_POWER_OF_2_OR_ZERO_P (d))
4809 : {
4810 644 : pre_shift = floor_log2 (d);
4811 644 : if (rem_flag)
4812 : {
4813 70 : unsigned HOST_WIDE_INT mask
4814 70 : = (HOST_WIDE_INT_1U << pre_shift) - 1;
4815 70 : remainder = expand_binop
4816 70 : (int_mode, and_optab, op0,
4817 70 : gen_int_mode (mask, int_mode),
4818 : remainder, 0, methods);
4819 70 : if (remainder)
4820 70 : return gen_lowpart (mode, remainder);
4821 : }
4822 574 : quotient = expand_shift
4823 574 : (RSHIFT_EXPR, int_mode, op0,
4824 574 : pre_shift, tquotient, 0);
4825 : }
4826 : else
4827 : {
4828 307 : rtx t1, t2, t3, t4;
4829 :
4830 307 : mh = choose_multiplier (d, size, size - 1,
4831 : &ml, &post_shift);
4832 307 : gcc_assert (!mh);
4833 :
4834 331 : if (post_shift < BITS_PER_WORD
4835 307 : && size - 1 < BITS_PER_WORD)
4836 : {
4837 305 : t1 = expand_shift
4838 305 : (RSHIFT_EXPR, int_mode, op0,
4839 305 : size - 1, NULL_RTX, 0);
4840 305 : t2 = expand_binop (int_mode, xor_optab, op0, t1,
4841 : NULL_RTX, 0, OPTAB_WIDEN);
4842 305 : extra_cost = (shift_cost (speed, int_mode, post_shift)
4843 305 : + shift_cost (speed, int_mode, size - 1)
4844 305 : + 2 * add_cost (speed, int_mode));
4845 305 : t3 = expmed_mult_highpart
4846 305 : (int_mode, t2, gen_int_mode (ml, int_mode),
4847 : NULL_RTX, 1, max_cost - extra_cost);
4848 305 : if (t3 != 0)
4849 : {
4850 274 : t4 = expand_shift
4851 548 : (RSHIFT_EXPR, int_mode, t3,
4852 274 : post_shift, NULL_RTX, 1);
4853 274 : quotient = expand_binop (int_mode, xor_optab,
4854 : t4, t1, tquotient, 0,
4855 : OPTAB_WIDEN);
4856 : }
4857 : }
4858 : }
4859 : }
4860 : else
4861 : {
4862 25 : rtx nsign, t1, t2, t3, t4;
4863 25 : t1 = force_operand (gen_rtx_PLUS (int_mode,
4864 : op0, constm1_rtx), NULL_RTX);
4865 25 : t2 = expand_binop (int_mode, ior_optab, op0, t1, NULL_RTX,
4866 : 0, OPTAB_WIDEN);
4867 50 : nsign = expand_shift (RSHIFT_EXPR, int_mode, t2,
4868 25 : size - 1, NULL_RTX, 0);
4869 25 : t3 = force_operand (gen_rtx_MINUS (int_mode, t1, nsign),
4870 : NULL_RTX);
4871 25 : t4 = expand_divmod (0, TRUNC_DIV_EXPR, int_mode, t3, op1,
4872 : NULL_RTX, 0);
4873 25 : if (t4)
4874 : {
4875 25 : rtx t5;
4876 25 : t5 = expand_unop (int_mode, one_cmpl_optab, nsign,
4877 : NULL_RTX, 0);
4878 25 : quotient = force_operand (gen_rtx_PLUS (int_mode, t4, t5),
4879 : tquotient);
4880 : }
4881 : }
4882 : }
4883 :
4884 906 : if (quotient != 0)
4885 : break;
4886 827 : delete_insns_since (last);
4887 :
4888 : /* Try using an instruction that produces both the quotient and
4889 : remainder, using truncation. We can easily compensate the quotient
4890 : or remainder to get floor rounding, once we have the remainder.
4891 : Notice that we compute also the final remainder value here,
4892 : and return the result right away. */
4893 827 : if (target == 0 || GET_MODE (target) != compute_mode)
4894 125 : target = gen_reg_rtx (compute_mode);
4895 :
4896 827 : if (rem_flag)
4897 : {
4898 329 : remainder
4899 329 : = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4900 329 : quotient = gen_reg_rtx (compute_mode);
4901 : }
4902 : else
4903 : {
4904 498 : quotient
4905 498 : = REG_P (target) ? target : gen_reg_rtx (compute_mode);
4906 498 : remainder = gen_reg_rtx (compute_mode);
4907 : }
4908 :
4909 827 : if (expand_twoval_binop (sdivmod_optab, op0, op1,
4910 : quotient, remainder, 0))
4911 : {
4912 : /* This could be computed with a branch-less sequence.
4913 : Save that for later. */
4914 792 : rtx tem;
4915 792 : rtx_code_label *label = gen_label_rtx ();
4916 792 : do_cmp_and_jump (remainder, const0_rtx, EQ, compute_mode, label);
4917 792 : tem = expand_binop (compute_mode, xor_optab, op0, op1,
4918 : NULL_RTX, 0, OPTAB_WIDEN);
4919 792 : do_cmp_and_jump (tem, const0_rtx, GE, compute_mode, label);
4920 792 : expand_dec (quotient, const1_rtx);
4921 792 : expand_inc (remainder, op1);
4922 792 : emit_label (label);
4923 1281 : return gen_lowpart (mode, rem_flag ? remainder : quotient);
4924 : }
4925 :
4926 : /* No luck with division elimination or divmod. Have to do it
4927 : by conditionally adjusting op0 *and* the result. */
4928 35 : {
4929 35 : rtx_code_label *label1, *label2, *label3, *label4, *label5;
4930 35 : rtx adjusted_op0;
4931 35 : rtx tem;
4932 :
4933 35 : quotient = gen_reg_rtx (compute_mode);
4934 35 : adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
4935 35 : label1 = gen_label_rtx ();
4936 35 : label2 = gen_label_rtx ();
4937 35 : label3 = gen_label_rtx ();
4938 35 : label4 = gen_label_rtx ();
4939 35 : label5 = gen_label_rtx ();
4940 35 : do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
4941 35 : do_cmp_and_jump (adjusted_op0, const0_rtx, LT, compute_mode, label1);
4942 35 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4943 : quotient, 0, methods);
4944 35 : if (tem != quotient)
4945 35 : emit_move_insn (quotient, tem);
4946 35 : emit_jump_insn (targetm.gen_jump (label5));
4947 35 : emit_barrier ();
4948 35 : emit_label (label1);
4949 35 : expand_inc (adjusted_op0, const1_rtx);
4950 35 : emit_jump_insn (targetm.gen_jump (label4));
4951 35 : emit_barrier ();
4952 35 : emit_label (label2);
4953 35 : do_cmp_and_jump (adjusted_op0, const0_rtx, GT, compute_mode, label3);
4954 35 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4955 : quotient, 0, methods);
4956 35 : if (tem != quotient)
4957 35 : emit_move_insn (quotient, tem);
4958 35 : emit_jump_insn (targetm.gen_jump (label5));
4959 35 : emit_barrier ();
4960 35 : emit_label (label3);
4961 35 : expand_dec (adjusted_op0, const1_rtx);
4962 35 : emit_label (label4);
4963 35 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
4964 : quotient, 0, methods);
4965 35 : if (tem != quotient)
4966 35 : emit_move_insn (quotient, tem);
4967 35 : expand_dec (quotient, const1_rtx);
4968 35 : emit_label (label5);
4969 : }
4970 35 : break;
4971 :
4972 383 : case CEIL_DIV_EXPR:
4973 383 : case CEIL_MOD_EXPR:
4974 383 : if (unsignedp)
4975 : {
4976 0 : if (op1_is_constant
4977 0 : && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
4978 0 : && (HWI_COMPUTABLE_MODE_P (compute_mode)
4979 0 : || INTVAL (op1) >= 0))
4980 : {
4981 0 : scalar_int_mode int_mode
4982 0 : = as_a <scalar_int_mode> (compute_mode);
4983 0 : rtx t1, t2, t3;
4984 0 : unsigned HOST_WIDE_INT d = INTVAL (op1);
4985 0 : t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
4986 0 : floor_log2 (d), tquotient, 1);
4987 0 : t2 = expand_binop (int_mode, and_optab, op0,
4988 0 : gen_int_mode (d - 1, int_mode),
4989 : NULL_RTX, 1, methods);
4990 0 : t3 = gen_reg_rtx (int_mode);
4991 0 : t3 = emit_store_flag (t3, NE, t2, const0_rtx, int_mode, 1, 1);
4992 0 : if (t3 == 0)
4993 : {
4994 0 : rtx_code_label *lab;
4995 0 : lab = gen_label_rtx ();
4996 0 : do_cmp_and_jump (t2, const0_rtx, EQ, int_mode, lab);
4997 0 : expand_inc (t1, const1_rtx);
4998 0 : emit_label (lab);
4999 0 : quotient = t1;
5000 : }
5001 : else
5002 0 : quotient = force_operand (gen_rtx_PLUS (int_mode, t1, t3),
5003 : tquotient);
5004 : break;
5005 : }
5006 :
5007 : /* Try using an instruction that produces both the quotient and
5008 : remainder, using truncation. We can easily compensate the
5009 : quotient or remainder to get ceiling rounding, once we have the
5010 : remainder. Notice that we compute also the final remainder
5011 : value here, and return the result right away. */
5012 0 : if (target == 0 || GET_MODE (target) != compute_mode)
5013 0 : target = gen_reg_rtx (compute_mode);
5014 :
5015 0 : if (rem_flag)
5016 : {
5017 0 : remainder = (REG_P (target)
5018 0 : ? target : gen_reg_rtx (compute_mode));
5019 0 : quotient = gen_reg_rtx (compute_mode);
5020 : }
5021 : else
5022 : {
5023 0 : quotient = (REG_P (target)
5024 0 : ? target : gen_reg_rtx (compute_mode));
5025 0 : remainder = gen_reg_rtx (compute_mode);
5026 : }
5027 :
5028 0 : if (expand_twoval_binop (udivmod_optab, op0, op1, quotient,
5029 : remainder, 1))
5030 : {
5031 : /* This could be computed with a branch-less sequence.
5032 : Save that for later. */
5033 0 : rtx_code_label *label = gen_label_rtx ();
5034 0 : do_cmp_and_jump (remainder, const0_rtx, EQ,
5035 : compute_mode, label);
5036 0 : expand_inc (quotient, const1_rtx);
5037 0 : expand_dec (remainder, op1);
5038 0 : emit_label (label);
5039 0 : return gen_lowpart (mode, rem_flag ? remainder : quotient);
5040 : }
5041 :
5042 : /* No luck with division elimination or divmod. Have to do it
5043 : by conditionally adjusting op0 *and* the result. */
5044 0 : {
5045 0 : rtx_code_label *label1, *label2;
5046 0 : rtx adjusted_op0, tem;
5047 :
5048 0 : quotient = gen_reg_rtx (compute_mode);
5049 0 : adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
5050 0 : label1 = gen_label_rtx ();
5051 0 : label2 = gen_label_rtx ();
5052 0 : do_cmp_and_jump (adjusted_op0, const0_rtx, NE,
5053 : compute_mode, label1);
5054 0 : emit_move_insn (quotient, const0_rtx);
5055 0 : emit_jump_insn (targetm.gen_jump (label2));
5056 0 : emit_barrier ();
5057 0 : emit_label (label1);
5058 0 : expand_dec (adjusted_op0, const1_rtx);
5059 0 : tem = expand_binop (compute_mode, udiv_optab, adjusted_op0, op1,
5060 : quotient, 1, methods);
5061 0 : if (tem != quotient)
5062 0 : emit_move_insn (quotient, tem);
5063 0 : expand_inc (quotient, const1_rtx);
5064 0 : emit_label (label2);
5065 : }
5066 : }
5067 : else /* signed */
5068 : {
5069 383 : if (op1_is_constant && EXACT_POWER_OF_2_OR_ZERO_P (INTVAL (op1))
5070 27 : && INTVAL (op1) >= 0)
5071 : {
5072 : /* This is extremely similar to the code for the unsigned case
5073 : above. For 2.7 we should merge these variants, but for
5074 : 2.6.1 I don't want to touch the code for unsigned since that
5075 : get used in C. The signed case will only be used by other
5076 : languages (Ada). */
5077 :
5078 27 : rtx t1, t2, t3;
5079 27 : unsigned HOST_WIDE_INT d = INTVAL (op1);
5080 54 : t1 = expand_shift (RSHIFT_EXPR, compute_mode, op0,
5081 27 : floor_log2 (d), tquotient, 0);
5082 27 : t2 = expand_binop (compute_mode, and_optab, op0,
5083 27 : gen_int_mode (d - 1, compute_mode),
5084 : NULL_RTX, 1, methods);
5085 27 : t3 = gen_reg_rtx (compute_mode);
5086 27 : t3 = emit_store_flag (t3, NE, t2, const0_rtx,
5087 : compute_mode, 1, 1);
5088 27 : if (t3 == 0)
5089 : {
5090 0 : rtx_code_label *lab;
5091 0 : lab = gen_label_rtx ();
5092 0 : do_cmp_and_jump (t2, const0_rtx, EQ, compute_mode, lab);
5093 0 : expand_inc (t1, const1_rtx);
5094 0 : emit_label (lab);
5095 0 : quotient = t1;
5096 : }
5097 : else
5098 27 : quotient = force_operand (gen_rtx_PLUS (compute_mode,
5099 : t1, t3),
5100 : tquotient);
5101 : break;
5102 : }
5103 :
5104 : /* Try using an instruction that produces both the quotient and
5105 : remainder, using truncation. We can easily compensate the
5106 : quotient or remainder to get ceiling rounding, once we have the
5107 : remainder. Notice that we compute also the final remainder
5108 : value here, and return the result right away. */
5109 356 : if (target == 0 || GET_MODE (target) != compute_mode)
5110 15 : target = gen_reg_rtx (compute_mode);
5111 356 : if (rem_flag)
5112 : {
5113 149 : remainder= (REG_P (target)
5114 149 : ? target : gen_reg_rtx (compute_mode));
5115 149 : quotient = gen_reg_rtx (compute_mode);
5116 : }
5117 : else
5118 : {
5119 207 : quotient = (REG_P (target)
5120 207 : ? target : gen_reg_rtx (compute_mode));
5121 207 : remainder = gen_reg_rtx (compute_mode);
5122 : }
5123 :
5124 356 : if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient,
5125 : remainder, 0))
5126 : {
5127 : /* This could be computed with a branch-less sequence.
5128 : Save that for later. */
5129 356 : rtx tem;
5130 356 : rtx_code_label *label = gen_label_rtx ();
5131 356 : do_cmp_and_jump (remainder, const0_rtx, EQ,
5132 : compute_mode, label);
5133 356 : tem = expand_binop (compute_mode, xor_optab, op0, op1,
5134 : NULL_RTX, 0, OPTAB_WIDEN);
5135 356 : do_cmp_and_jump (tem, const0_rtx, LT, compute_mode, label);
5136 356 : expand_inc (quotient, const1_rtx);
5137 356 : expand_dec (remainder, op1);
5138 356 : emit_label (label);
5139 563 : return gen_lowpart (mode, rem_flag ? remainder : quotient);
5140 : }
5141 :
5142 : /* No luck with division elimination or divmod. Have to do it
5143 : by conditionally adjusting op0 *and* the result. */
5144 0 : {
5145 0 : rtx_code_label *label1, *label2, *label3, *label4, *label5;
5146 0 : rtx adjusted_op0;
5147 0 : rtx tem;
5148 :
5149 0 : quotient = gen_reg_rtx (compute_mode);
5150 0 : adjusted_op0 = copy_to_mode_reg (compute_mode, op0);
5151 0 : label1 = gen_label_rtx ();
5152 0 : label2 = gen_label_rtx ();
5153 0 : label3 = gen_label_rtx ();
5154 0 : label4 = gen_label_rtx ();
5155 0 : label5 = gen_label_rtx ();
5156 0 : do_cmp_and_jump (op1, const0_rtx, LT, compute_mode, label2);
5157 0 : do_cmp_and_jump (adjusted_op0, const0_rtx, GT,
5158 : compute_mode, label1);
5159 0 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5160 : quotient, 0, methods);
5161 0 : if (tem != quotient)
5162 0 : emit_move_insn (quotient, tem);
5163 0 : emit_jump_insn (targetm.gen_jump (label5));
5164 0 : emit_barrier ();
5165 0 : emit_label (label1);
5166 0 : expand_dec (adjusted_op0, const1_rtx);
5167 0 : emit_jump_insn (targetm.gen_jump (label4));
5168 0 : emit_barrier ();
5169 0 : emit_label (label2);
5170 0 : do_cmp_and_jump (adjusted_op0, const0_rtx, LT,
5171 : compute_mode, label3);
5172 0 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5173 : quotient, 0, methods);
5174 0 : if (tem != quotient)
5175 0 : emit_move_insn (quotient, tem);
5176 0 : emit_jump_insn (targetm.gen_jump (label5));
5177 0 : emit_barrier ();
5178 0 : emit_label (label3);
5179 0 : expand_inc (adjusted_op0, const1_rtx);
5180 0 : emit_label (label4);
5181 0 : tem = expand_binop (compute_mode, sdiv_optab, adjusted_op0, op1,
5182 : quotient, 0, methods);
5183 0 : if (tem != quotient)
5184 0 : emit_move_insn (quotient, tem);
5185 0 : expand_inc (quotient, const1_rtx);
5186 0 : emit_label (label5);
5187 : }
5188 : }
5189 : break;
5190 :
5191 45946 : case EXACT_DIV_EXPR:
5192 45946 : if (op1_is_constant && HWI_COMPUTABLE_MODE_P (compute_mode))
5193 : {
5194 45938 : scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5195 45938 : int size = GET_MODE_BITSIZE (int_mode);
5196 45938 : HOST_WIDE_INT d = INTVAL (op1);
5197 45938 : unsigned HOST_WIDE_INT ml;
5198 45938 : int pre_shift;
5199 45938 : rtx t1;
5200 :
5201 45938 : pre_shift = ctz_or_zero (d);
5202 45938 : ml = invert_mod2n (d >> pre_shift, size);
5203 45938 : t1 = expand_shift (RSHIFT_EXPR, int_mode, op0,
5204 45938 : pre_shift, NULL_RTX, unsignedp);
5205 45938 : quotient = expand_mult (int_mode, t1, gen_int_mode (ml, int_mode),
5206 : NULL_RTX, 1);
5207 :
5208 45938 : insn = get_last_insn ();
5209 91876 : set_dst_reg_note (insn, REG_EQUAL,
5210 : gen_rtx_fmt_ee (unsignedp ? UDIV : DIV,
5211 : int_mode, op0, op1),
5212 : quotient);
5213 : }
5214 : break;
5215 :
5216 2 : case ROUND_DIV_EXPR:
5217 2 : case ROUND_MOD_EXPR:
5218 2 : if (unsignedp)
5219 : {
5220 0 : scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5221 0 : rtx tem;
5222 0 : rtx_code_label *label;
5223 0 : label = gen_label_rtx ();
5224 0 : quotient = gen_reg_rtx (int_mode);
5225 0 : remainder = gen_reg_rtx (int_mode);
5226 0 : if (expand_twoval_binop (udivmod_optab, op0, op1, quotient, remainder, 1) == 0)
5227 : {
5228 0 : rtx tem;
5229 0 : quotient = expand_binop (int_mode, udiv_optab, op0, op1,
5230 : quotient, 1, methods);
5231 0 : tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 1);
5232 0 : remainder = expand_binop (int_mode, sub_optab, op0, tem,
5233 : remainder, 1, methods);
5234 : }
5235 0 : tem = plus_constant (int_mode, op1, -1);
5236 0 : tem = expand_shift (RSHIFT_EXPR, int_mode, tem, 1, NULL_RTX, 1);
5237 0 : do_cmp_and_jump (remainder, tem, LEU, int_mode, label);
5238 0 : expand_inc (quotient, const1_rtx);
5239 0 : expand_dec (remainder, op1);
5240 0 : emit_label (label);
5241 : }
5242 : else
5243 : {
5244 2 : scalar_int_mode int_mode = as_a <scalar_int_mode> (compute_mode);
5245 2 : int size = GET_MODE_BITSIZE (int_mode);
5246 2 : rtx abs_rem, abs_op1, tem, mask;
5247 2 : rtx_code_label *label;
5248 2 : label = gen_label_rtx ();
5249 2 : quotient = gen_reg_rtx (int_mode);
5250 2 : remainder = gen_reg_rtx (int_mode);
5251 2 : if (expand_twoval_binop (sdivmod_optab, op0, op1, quotient, remainder, 0) == 0)
5252 : {
5253 0 : rtx tem;
5254 0 : quotient = expand_binop (int_mode, sdiv_optab, op0, op1,
5255 : quotient, 0, methods);
5256 0 : tem = expand_mult (int_mode, quotient, op1, NULL_RTX, 0);
5257 0 : remainder = expand_binop (int_mode, sub_optab, op0, tem,
5258 : remainder, 0, methods);
5259 : }
5260 2 : abs_rem = expand_abs (int_mode, remainder, NULL_RTX, 1, 0);
5261 2 : abs_op1 = expand_abs (int_mode, op1, NULL_RTX, 1, 0);
5262 2 : tem = expand_shift (LSHIFT_EXPR, int_mode, abs_rem,
5263 : 1, NULL_RTX, 1);
5264 2 : do_cmp_and_jump (tem, abs_op1, LTU, int_mode, label);
5265 2 : tem = expand_binop (int_mode, xor_optab, op0, op1,
5266 : NULL_RTX, 0, OPTAB_WIDEN);
5267 4 : mask = expand_shift (RSHIFT_EXPR, int_mode, tem,
5268 2 : size - 1, NULL_RTX, 0);
5269 2 : tem = expand_binop (int_mode, xor_optab, mask, const1_rtx,
5270 : NULL_RTX, 0, OPTAB_WIDEN);
5271 2 : tem = expand_binop (int_mode, sub_optab, tem, mask,
5272 : NULL_RTX, 0, OPTAB_WIDEN);
5273 2 : expand_inc (quotient, tem);
5274 2 : tem = expand_binop (int_mode, xor_optab, mask, op1,
5275 : NULL_RTX, 0, OPTAB_WIDEN);
5276 2 : tem = expand_binop (int_mode, sub_optab, tem, mask,
5277 : NULL_RTX, 0, OPTAB_WIDEN);
5278 2 : expand_dec (remainder, tem);
5279 2 : emit_label (label);
5280 : }
5281 3 : return gen_lowpart (mode, rem_flag ? remainder : quotient);
5282 :
5283 0 : default:
5284 0 : gcc_unreachable ();
5285 : }
5286 :
5287 220254 : if (quotient == 0)
5288 : {
5289 89547 : if (target && GET_MODE (target) != compute_mode)
5290 48859 : target = 0;
5291 :
5292 89547 : if (rem_flag)
5293 : {
5294 : /* Try to produce the remainder without producing the quotient.
5295 : If we seem to have a divmod pattern that does not require widening,
5296 : don't try widening here. We should really have a WIDEN argument
5297 : to expand_twoval_binop, since what we'd really like to do here is
5298 : 1) try a mod insn in compute_mode
5299 : 2) try a divmod insn in compute_mode
5300 : 3) try a div insn in compute_mode and multiply-subtract to get
5301 : remainder
5302 : 4) try the same things with widening allowed. */
5303 31543 : remainder
5304 33007 : = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5305 : op0, op1, target,
5306 : unsignedp,
5307 31543 : ((optab_handler (optab2, compute_mode)
5308 : != CODE_FOR_nothing)
5309 : ? OPTAB_DIRECT : OPTAB_WIDEN));
5310 31543 : if (remainder == 0)
5311 : {
5312 : /* No luck there. Can we do remainder and divide at once
5313 : without a library call? */
5314 31336 : remainder = gen_reg_rtx (compute_mode);
5315 46384 : if (! expand_twoval_binop ((unsignedp
5316 : ? udivmod_optab
5317 : : sdivmod_optab),
5318 : op0, op1,
5319 : NULL_RTX, remainder, unsignedp))
5320 : remainder = 0;
5321 : }
5322 :
5323 30079 : if (remainder)
5324 30286 : return gen_lowpart (mode, remainder);
5325 : }
5326 :
5327 : /* Produce the quotient. Try a quotient insn, but not a library call.
5328 : If we have a divmod in this mode, use it in preference to widening
5329 : the div (for this test we assume it will not fail). Note that optab2
5330 : is set to the one of the two optabs that the call below will use. */
5331 59261 : quotient
5332 64412 : = sign_expand_binop (compute_mode, udiv_optab, sdiv_optab,
5333 : op0, op1, rem_flag ? NULL_RTX : target,
5334 : unsignedp,
5335 59261 : ((optab_handler (optab2, compute_mode)
5336 : != CODE_FOR_nothing)
5337 : ? OPTAB_DIRECT : OPTAB_WIDEN));
5338 :
5339 59261 : if (quotient == 0)
5340 : {
5341 : /* No luck there. Try a quotient-and-remainder insn,
5342 : keeping the quotient alone. */
5343 58873 : quotient = gen_reg_rtx (compute_mode);
5344 81319 : if (! expand_twoval_binop (unsignedp ? udivmod_optab : sdivmod_optab,
5345 : op0, op1,
5346 : quotient, NULL_RTX, unsignedp))
5347 : {
5348 3507 : quotient = 0;
5349 3507 : if (! rem_flag)
5350 : /* Still no luck. If we are not computing the remainder,
5351 : use a library call for the quotient. */
5352 2272 : quotient = sign_expand_binop (compute_mode,
5353 : udiv_optab, sdiv_optab,
5354 : op0, op1, target,
5355 : unsignedp, methods);
5356 : }
5357 : }
5358 : }
5359 :
5360 189991 : if (rem_flag)
5361 : {
5362 21797 : if (target && GET_MODE (target) != compute_mode)
5363 14500 : target = 0;
5364 :
5365 21797 : if (quotient == 0)
5366 : {
5367 : /* No divide instruction either. Use library for remainder. */
5368 1235 : remainder = sign_expand_binop (compute_mode, umod_optab, smod_optab,
5369 : op0, op1, target,
5370 : unsignedp, methods);
5371 : /* No remainder function. Try a quotient-and-remainder
5372 : function, keeping the remainder. */
5373 1235 : if (!remainder
5374 0 : && (methods == OPTAB_LIB || methods == OPTAB_LIB_WIDEN))
5375 : {
5376 0 : remainder = gen_reg_rtx (compute_mode);
5377 0 : if (!expand_twoval_binop_libfunc
5378 0 : (unsignedp ? udivmod_optab : sdivmod_optab,
5379 : op0, op1,
5380 : NULL_RTX, remainder,
5381 : unsignedp ? UMOD : MOD))
5382 0 : remainder = NULL_RTX;
5383 : }
5384 : }
5385 : else
5386 : {
5387 : /* We divided. Now finish doing X - Y * (X / Y). */
5388 20562 : remainder = expand_mult (compute_mode, quotient, op1,
5389 : NULL_RTX, unsignedp);
5390 20562 : remainder = expand_binop (compute_mode, sub_optab, op0,
5391 : remainder, target, unsignedp,
5392 : methods);
5393 : }
5394 : }
5395 :
5396 191226 : if (methods != OPTAB_LIB_WIDEN
5397 1681 : && (rem_flag ? remainder : quotient) == NULL_RTX)
5398 : return NULL_RTX;
5399 :
5400 360655 : return gen_lowpart (mode, rem_flag ? remainder : quotient);
5401 : }
5402 :
5403 : /* Return a tree node with data type TYPE, describing the value of X.
5404 : Usually this is an VAR_DECL, if there is no obvious better choice.
5405 : X may be an expression, however we only support those expressions
5406 : generated by loop.c. */
5407 :
5408 : tree
5409 671817 : make_tree (tree type, rtx x)
5410 : {
5411 671817 : tree t;
5412 :
5413 671817 : switch (GET_CODE (x))
5414 : {
5415 22348 : case CONST_INT:
5416 22348 : case CONST_WIDE_INT:
5417 22348 : t = wide_int_to_tree (type, rtx_mode_t (x, TYPE_MODE (type)));
5418 22348 : return t;
5419 :
5420 0 : case CONST_DOUBLE:
5421 0 : STATIC_ASSERT (HOST_BITS_PER_WIDE_INT * 2 <= MAX_BITSIZE_MODE_ANY_INT);
5422 0 : if (TARGET_SUPPORTS_WIDE_INT == 0 && GET_MODE (x) == VOIDmode)
5423 : t = wide_int_to_tree (type,
5424 : wide_int::from_array (&CONST_DOUBLE_LOW (x), 2,
5425 : HOST_BITS_PER_WIDE_INT * 2));
5426 : else
5427 0 : t = build_real (type, *CONST_DOUBLE_REAL_VALUE (x));
5428 :
5429 0 : return t;
5430 :
5431 0 : case CONST_VECTOR:
5432 0 : {
5433 0 : unsigned int npatterns = CONST_VECTOR_NPATTERNS (x);
5434 0 : unsigned int nelts_per_pattern = CONST_VECTOR_NELTS_PER_PATTERN (x);
5435 0 : tree itype = TREE_TYPE (type);
5436 :
5437 : /* Build a tree with vector elements. */
5438 0 : tree_vector_builder elts (type, npatterns, nelts_per_pattern);
5439 0 : unsigned int count = elts.encoded_nelts ();
5440 0 : for (unsigned int i = 0; i < count; ++i)
5441 : {
5442 0 : rtx elt = CONST_VECTOR_ELT (x, i);
5443 0 : elts.quick_push (make_tree (itype, elt));
5444 : }
5445 :
5446 0 : return elts.build ();
5447 0 : }
5448 :
5449 0 : case PLUS:
5450 0 : return fold_build2 (PLUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5451 : make_tree (type, XEXP (x, 1)));
5452 :
5453 0 : case MINUS:
5454 0 : return fold_build2 (MINUS_EXPR, type, make_tree (type, XEXP (x, 0)),
5455 : make_tree (type, XEXP (x, 1)));
5456 :
5457 0 : case NEG:
5458 0 : return fold_build1 (NEGATE_EXPR, type, make_tree (type, XEXP (x, 0)));
5459 :
5460 0 : case MULT:
5461 0 : return fold_build2 (MULT_EXPR, type, make_tree (type, XEXP (x, 0)),
5462 : make_tree (type, XEXP (x, 1)));
5463 :
5464 0 : case ASHIFT:
5465 0 : return fold_build2 (LSHIFT_EXPR, type, make_tree (type, XEXP (x, 0)),
5466 : make_tree (type, XEXP (x, 1)));
5467 :
5468 0 : case LSHIFTRT:
5469 0 : t = unsigned_type_for (type);
5470 0 : return fold_convert (type, build2 (RSHIFT_EXPR, t,
5471 : make_tree (t, XEXP (x, 0)),
5472 : make_tree (type, XEXP (x, 1))));
5473 :
5474 0 : case ASHIFTRT:
5475 0 : t = signed_type_for (type);
5476 0 : return fold_convert (type, build2 (RSHIFT_EXPR, t,
5477 : make_tree (t, XEXP (x, 0)),
5478 : make_tree (type, XEXP (x, 1))));
5479 :
5480 0 : case DIV:
5481 0 : if (TREE_CODE (type) != REAL_TYPE)
5482 0 : t = signed_type_for (type);
5483 : else
5484 : t = type;
5485 :
5486 0 : return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5487 : make_tree (t, XEXP (x, 0)),
5488 : make_tree (t, XEXP (x, 1))));
5489 0 : case UDIV:
5490 0 : t = unsigned_type_for (type);
5491 0 : return fold_convert (type, build2 (TRUNC_DIV_EXPR, t,
5492 : make_tree (t, XEXP (x, 0)),
5493 : make_tree (t, XEXP (x, 1))));
5494 :
5495 0 : case SIGN_EXTEND:
5496 0 : case ZERO_EXTEND:
5497 0 : t = lang_hooks.types.type_for_mode (GET_MODE (XEXP (x, 0)),
5498 : GET_CODE (x) == ZERO_EXTEND);
5499 0 : return fold_convert (type, make_tree (t, XEXP (x, 0)));
5500 :
5501 0 : case CONST:
5502 0 : return make_tree (type, XEXP (x, 0));
5503 :
5504 0 : case SYMBOL_REF:
5505 0 : t = SYMBOL_REF_DECL (x);
5506 0 : if (t)
5507 0 : return fold_convert (type, build_fold_addr_expr (t));
5508 : /* fall through. */
5509 :
5510 649469 : default:
5511 649469 : if (CONST_POLY_INT_P (x))
5512 : return wide_int_to_tree (t, const_poly_int_value (x));
5513 :
5514 649469 : t = build_decl (RTL_LOCATION (x), VAR_DECL, NULL_TREE, type);
5515 :
5516 : /* If TYPE is a POINTER_TYPE, we might need to convert X from
5517 : address mode to pointer mode. */
5518 649469 : if (POINTER_TYPE_P (type))
5519 806996 : x = convert_memory_address_addr_space
5520 403498 : (SCALAR_INT_TYPE_MODE (type), x, TYPE_ADDR_SPACE (TREE_TYPE (type)));
5521 :
5522 : /* Note that we do *not* use SET_DECL_RTL here, because we do not
5523 : want set_decl_rtl to go adjusting REG_ATTRS for this temporary. */
5524 649469 : t->decl_with_rtl.rtl = x;
5525 :
5526 649469 : return t;
5527 : }
5528 : }
5529 :
5530 : /* Compute the logical-and of OP0 and OP1, storing it in TARGET
5531 : and returning TARGET.
5532 :
5533 : If TARGET is 0, a pseudo-register or constant is returned. */
5534 :
5535 : rtx
5536 73390 : expand_and (machine_mode mode, rtx op0, rtx op1, rtx target)
5537 : {
5538 73390 : rtx tem = 0;
5539 :
5540 73390 : if (GET_MODE (op0) == VOIDmode && GET_MODE (op1) == VOIDmode)
5541 81 : tem = simplify_binary_operation (AND, mode, op0, op1);
5542 81 : if (tem == 0)
5543 73309 : tem = expand_binop (mode, and_optab, op0, op1, target, 0, OPTAB_LIB_WIDEN);
5544 :
5545 73390 : if (target == 0)
5546 : target = tem;
5547 44242 : else if (tem != target)
5548 13 : emit_move_insn (target, tem);
5549 73390 : return target;
5550 : }
5551 :
/* Helper function for emit_store_flag.  Try to emit the cstore pattern
   ICODE for comparison CODE of X and Y (compared in MODE, with operands
   prepared for COMPARE_MODE), storing the flag value into TARGET (or a
   new pseudo of TARGET_MODE if TARGET is null) and normalizing it to
   NORMALIZEP.  Return the result rtx, or NULL_RTX (after deleting any
   partially emitted insns) if the operands could not be prepared or the
   pattern failed to expand.  */
rtx
emit_cstore (rtx target, enum insn_code icode, enum rtx_code code,
	     machine_mode mode, machine_mode compare_mode,
	     int unsignedp, rtx x, rtx y, int normalizep,
	     machine_mode target_mode)
{
  class expand_operand ops[4];
  rtx op0, comparison, subtarget;
  rtx_insn *last;
  /* Mode in which the backend produces the flag value.  */
  scalar_int_mode result_mode = targetm.cstore_mode (icode);
  scalar_int_mode int_target_mode;

  /* Remember the insn stream position so we can roll back on failure.  */
  last = get_last_insn ();
  x = prepare_operand (icode, x, 2, mode, compare_mode, unsignedp);
  y = prepare_operand (icode, y, 3, mode, compare_mode, unsignedp);
  if (!x || !y)
    {
      delete_insns_since (last);
      return NULL_RTX;
    }

  if (target_mode == VOIDmode)
    int_target_mode = result_mode;
  else
    int_target_mode = as_a <scalar_int_mode> (target_mode);
  if (!target)
    target = gen_reg_rtx (int_target_mode);

  comparison = gen_rtx_fmt_ee (code, result_mode, x, y);

  /* When optimizing, let the expander pick the output so the raw flag
     value stays available for combining; otherwise write TARGET.  */
  create_output_operand (&ops[0], optimize ? NULL_RTX : target, result_mode);
  create_fixed_operand (&ops[1], comparison);
  create_fixed_operand (&ops[2], x);
  create_fixed_operand (&ops[3], y);
  if (!maybe_expand_insn (icode, 4, ops))
    {
      delete_insns_since (last);
      return NULL_RTX;
    }
  subtarget = ops[0].value;

  /* If we are converting to a wider mode, first convert to
     INT_TARGET_MODE, then normalize.  This produces better combining
     opportunities on machines that have a SIGN_EXTRACT when we are
     testing a single bit.  This mostly benefits the 68k.

     If STORE_FLAG_VALUE does not have the sign bit set when
     interpreted in MODE, we can do this conversion as unsigned, which
     is usually more efficient.  */
  if (GET_MODE_PRECISION (int_target_mode) > GET_MODE_PRECISION (result_mode))
    {
      gcc_assert (GET_MODE_PRECISION (result_mode) != 1
		  || STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1);

      /* NOTE: deliberately shadows the UNSIGNEDP parameter; this one
	 only controls the widening conversion below.  */
      bool unsignedp = (STORE_FLAG_VALUE >= 0);
      convert_move (target, subtarget, unsignedp);

      op0 = target;
      result_mode = int_target_mode;
    }
  else
    op0 = subtarget;

  /* If we want to keep subexpressions around, don't reuse our last
     target.  */
  if (optimize)
    subtarget = 0;

  /* Now normalize to the proper value in MODE.  Sometimes we don't
     have to do anything.  */
  if (normalizep == 0 || normalizep == STORE_FLAG_VALUE)
    ;
  /* STORE_FLAG_VALUE might be the most negative number, so write
     the comparison this way to avoid a compiler-time warning.  */
  else if (- normalizep == STORE_FLAG_VALUE)
    op0 = expand_unop (result_mode, neg_optab, op0, subtarget, 0);

  /* We don't want to use STORE_FLAG_VALUE < 0 below since this makes
     it hard to use a value of just the sign bit due to ANSI integer
     constant typing rules.  */
  else if (val_signbit_known_set_p (result_mode, STORE_FLAG_VALUE))
    op0 = expand_shift (RSHIFT_EXPR, result_mode, op0,
			GET_MODE_BITSIZE (result_mode) - 1, subtarget,
			normalizep == 1);
  else
    {
      gcc_assert (STORE_FLAG_VALUE & 1);

      /* Isolate the low bit, then negate for NORMALIZEP == -1.  */
      op0 = expand_and (result_mode, op0, const1_rtx, subtarget);
      if (normalizep == -1)
	op0 = expand_unop (result_mode, neg_optab, op0, op0, 0);
    }

  /* If we were converting to a smaller mode, do the conversion now.  */
  if (int_target_mode != result_mode)
    {
      convert_move (target, op0, 0);
      return target;
    }
  else
    return op0;
}
5655 :
5656 :
/* A subroutine of emit_store_flag only including "tricks" that do not
   need a recursive call.  These are kept separate to avoid infinite
   loops.

   Try to store in TARGET the result of comparing OP0 with OP1 using
   code CODE in mode MODE (operands of VOIDmode take the mode of OP0),
   normalized to NORMALIZEP.  Returns the result rtx, or 0 if no
   non-recursive strategy applied.  */

static rtx
emit_store_flag_1 (rtx target, enum rtx_code code, rtx op0, rtx op1,
		   machine_mode mode, int unsignedp, int normalizep,
		   machine_mode target_mode)
{
  rtx subtarget;
  enum insn_code icode;
  machine_mode compare_mode;
  enum mode_class mclass;

  if (unsignedp)
    code = unsigned_condition (code);

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */

  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  if (CONST_SCALAR_INT_P (op1))
    canonicalize_comparison (mode, &code, &op1);

  /* For some comparisons with 1 and -1, we can convert this to
     comparisons with zero.  This will often produce more opportunities for
     store-flag insns.  */

  switch (code)
    {
    case LT:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = LE;
      break;
    case LE:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = LT;
      break;
    case GE:
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = GT;
      break;
    case GT:
      if (op1 == constm1_rtx)
	op1 = const0_rtx, code = GE;
      break;
    case GEU:
      /* unsigned >= 1 is simply != 0.  */
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = NE;
      break;
    case LTU:
      /* unsigned < 1 is simply == 0.  */
      if (op1 == const1_rtx)
	op1 = const0_rtx, code = EQ;
      break;
    default:
      break;
    }

  /* If this is A < 0 or A >= 0, we can do this by taking the ones
     complement of A (for GE) and shifting the sign bit to the low bit.  */
  scalar_int_mode int_mode;
  if (op1 == const0_rtx && (code == LT || code == GE)
      && is_int_mode (mode, &int_mode)
      && (normalizep || STORE_FLAG_VALUE == 1
	  || val_signbit_p (int_mode, STORE_FLAG_VALUE)))
    {
      scalar_int_mode int_target_mode;
      subtarget = target;

      if (!target)
	int_target_mode = int_mode;
      else
	{
	  /* If the result is to be wider than OP0, it is best to convert it
	     first.  If it is to be narrower, it is *incorrect* to convert it
	     first.  */
	  int_target_mode = as_a <scalar_int_mode> (target_mode);
	  if (GET_MODE_SIZE (int_target_mode) > GET_MODE_SIZE (int_mode))
	    {
	      op0 = convert_modes (int_target_mode, int_mode, op0, 0);
	      int_mode = int_target_mode;
	    }
	}

      /* Don't write into TARGET prematurely if we still have to narrow
	 the result afterwards.  */
      if (int_target_mode != int_mode)
	subtarget = 0;

      /* For GE, complement A so the sign bit becomes the LT flag.  */
      if (code == GE)
	op0 = expand_unop (int_mode, one_cmpl_optab, op0,
			   ((STORE_FLAG_VALUE == 1 || normalizep)
			    ? 0 : subtarget), 0);

      if (STORE_FLAG_VALUE == 1 || normalizep)
	/* If we are supposed to produce a 0/1 value, we want to do
	   a logical shift from the sign bit to the low-order bit; for
	   a -1/0 value, we do an arithmetic shift.  */
	op0 = expand_shift (RSHIFT_EXPR, int_mode, op0,
			    GET_MODE_BITSIZE (int_mode) - 1,
			    subtarget, normalizep != -1);

      if (int_mode != int_target_mode)
	op0 = convert_modes (int_target_mode, int_mode, op0, 0);

      return op0;
    }

  /* Next try expanding this via the backend's cstore<mode>4.  Walk from
     MODE through wider modes until some mode has a cstore pattern.  */
  mclass = GET_MODE_CLASS (mode);
  FOR_EACH_WIDER_MODE_FROM (compare_mode, mode)
    {
      machine_mode optab_mode = mclass == MODE_CC ? CCmode : compare_mode;
      icode = optab_handler (cstore_optab, optab_mode);
      if (icode != CODE_FOR_nothing)
	{
	  do_pending_stack_adjust ();
	  rtx tem = emit_cstore (target, icode, code, mode, compare_mode,
				 unsignedp, op0, op1, normalizep, target_mode);
	  if (tem)
	    return tem;

	  /* For floats, also try the swapped comparison, since some
	     patterns only handle one operand order.  */
	  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
	    {
	      enum rtx_code scode = swap_condition (code);

	      tem = emit_cstore (target, icode, scode, mode, compare_mode,
				 unsignedp, op1, op0, normalizep, target_mode);
	      if (tem)
		return tem;
	    }
	  break;
	}
    }

  /* If we are comparing a double-word integer with zero or -1, we can
     convert the comparison into one involving a single word.  */
  if (is_int_mode (mode, &int_mode)
      && GET_MODE_BITSIZE (int_mode) == BITS_PER_WORD * 2
      && (!MEM_P (op0) || ! MEM_VOLATILE_P (op0)))
    {
      rtx tem;
      if ((code == EQ || code == NE)
	  && (op1 == const0_rtx || op1 == constm1_rtx))
	{
	  rtx op00, op01;

	  /* Do a logical OR or AND of the two words and compare the
	     result.  */
	  op00 = force_subreg (word_mode, op0, int_mode, 0);
	  op01 = force_subreg (word_mode, op0, int_mode, UNITS_PER_WORD);
	  tem = expand_binop (word_mode,
			      op1 == const0_rtx ? ior_optab : and_optab,
			      op00, op01, NULL_RTX, unsignedp,
			      OPTAB_DIRECT);

	  if (tem != 0)
	    tem = emit_store_flag (NULL_RTX, code, tem, op1, word_mode,
				   unsignedp, normalizep);
	}
      else if ((code == LT || code == GE) && op1 == const0_rtx)
	{
	  rtx op0h;

	  /* If testing the sign bit, can just test on high word.  */
	  op0h = force_highpart_subreg (word_mode, op0, int_mode);
	  tem = emit_store_flag (NULL_RTX, code, op0h, op1, word_mode,
				 unsignedp, normalizep);
	}
      else
	tem = NULL_RTX;

      if (tem)
	{
	  if (target_mode == VOIDmode || GET_MODE (tem) == target_mode)
	    return tem;
	  if (!target)
	    target = gen_reg_rtx (target_mode);

	  /* Widen the word-mode flag into TARGET_MODE; sign- vs
	     zero-extension depends on the flag value being produced.  */
	  convert_move (target, tem,
			!val_signbit_known_set_p (word_mode,
						  (normalizep ? normalizep
						   : STORE_FLAG_VALUE)));
	  return target;
	}
    }

  return 0;
}
5852 :
/* Subroutine of emit_store_flag that handles cases in which the operands
   are scalar integers.  SUBTARGET is the target to use for temporary
   operations and TRUEVAL is the value to store when the condition is
   true.  All other arguments are as for emit_store_flag.

   The function tries, in order: reducing EQ/NE against a nonzero
   constant to a comparison with zero, the reverse comparison combined
   with a cheap PLUS/XOR fixup, and finally sign-bit tricks for
   comparisons against zero (LE, GT, EQ, NE).  Returns the flag value
   or 0 on failure; on failure all emitted insns are deleted.  */

rtx
emit_store_flag_int (rtx target, rtx subtarget, enum rtx_code code, rtx op0,
		     rtx op1, scalar_int_mode mode, int unsignedp,
		     int normalizep, rtx trueval)
{
  machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  /* Remember the insn stream position so any partially emitted attempt
     can be rolled back with delete_insns_since.  */
  rtx_insn *last = get_last_insn ();

  /* If this is an equality comparison of integers, we can try to exclusive-or
     (or subtract) the two operands and use a recursive call to try the
     comparison with zero.  Don't do any of these cases if branches are
     very cheap.  */

  if ((code == EQ || code == NE) && op1 != const0_rtx)
    {
      rtx tem = expand_binop (mode, xor_optab, op0, op1, subtarget, 1,
			      OPTAB_WIDEN);

      if (tem == 0)
	tem = expand_binop (mode, sub_optab, op0, op1, subtarget, 1,
			    OPTAB_WIDEN);
      if (tem != 0)
	tem = emit_store_flag (target, code, tem, const0_rtx,
			       mode, unsignedp, normalizep);
      if (tem != 0)
	return tem;

      /* The attempt failed; discard whatever insns it emitted.  */
      delete_insns_since (last);
    }

  /* For integer comparisons, try the reverse comparison.  However, for
     small X and if we'd have anyway to extend, implementing "X != 0"
     as "-(int)X >> 31" is still cheaper than inverting "(int)X == 0".  */
  rtx_code rcode = reverse_condition (code);
  if (can_compare_p (rcode, mode, ccp_store_flag)
      && ! (optab_handler (cstore_optab, mode) == CODE_FOR_nothing
	    && code == NE
	    && GET_MODE_SIZE (mode) < UNITS_PER_WORD
	    && op1 == const0_rtx))
    {
      /* An ADD fixup is needed exactly when the reversed flag value and
	 the requested normalization have opposite signs.  */
      int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
		      || (STORE_FLAG_VALUE == -1 && normalizep == 1));

      /* Again, for the reverse comparison, use either an addition or a XOR.  */
      if (want_add
	  && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
		       optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, add_optab, tem,
				gen_int_mode (normalizep, target_mode),
				target, 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}
      else if (!want_add
	       && rtx_cost (trueval, mode, XOR, 1,
			    optimize_insn_for_speed_p ()) == 0)
	{
	  rtx tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	  if (tem != 0)
	    tem = expand_binop (target_mode, xor_optab, tem, trueval, target,
				INTVAL (trueval) >= 0, OPTAB_WIDEN);
	  if (tem != 0)
	    return tem;
	}

      delete_insns_since (last);
    }

  /* Some other cases we can do are EQ, NE, LE, and GT comparisons with
     the constant zero.  Reject all other comparisons at this point.  Only
     do LE and GT if branches are expensive since they are expensive on
     2-operand machines.  */

  if (op1 != const0_rtx
      || (code != EQ && code != NE
	  && (BRANCH_COST (optimize_insn_for_speed_p (),
			   false) <= 1 || (code != LE && code != GT))))
    return 0;

  /* Try to put the result of the comparison in the sign bit.  Assume we can't
     do the necessary operation below.  */

  rtx tem = 0;

  /* To see if A <= 0, compute (A | (A - 1)).  A <= 0 iff that result has
     the sign bit set.  */

  if (code == LE)
    {
      /* This is destructive, so SUBTARGET can't be OP0.  */
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = expand_binop (mode, sub_optab, op0, const1_rtx, subtarget, 0,
			  OPTAB_WIDEN);
      if (tem)
	tem = expand_binop (mode, ior_optab, op0, tem, subtarget, 0,
			    OPTAB_WIDEN);
    }

  /* To see if A > 0, compute (((signed) A) << BITS) - A, where BITS is the
     number of bits in the mode of OP0, minus one.  */

  if (code == GT)
    {
      if (rtx_equal_p (subtarget, op0))
	subtarget = 0;

      tem = maybe_expand_shift (RSHIFT_EXPR, mode, op0,
				GET_MODE_BITSIZE (mode) - 1,
				subtarget, 0);
      if (tem)
	tem = expand_binop (mode, sub_optab, tem, op0, subtarget, 0,
			    OPTAB_WIDEN);
    }

  if (code == EQ || code == NE)
    {
      /* For EQ or NE, one way to do the comparison is to apply an operation
	 that converts the operand into a positive number if it is nonzero
	 or zero if it was originally zero.  Then, for EQ, we subtract 1 and
	 for NE we negate.  This puts the result in the sign bit.  Then we
	 normalize with a shift, if needed.

	 Two operations that can do the above actions are ABS and FFS, so try
	 them.  If that doesn't work, and MODE is smaller than a full word,
	 we can use zero-extension to the wider mode (an unsigned conversion)
	 as the operation.  */

      /* Note that ABS doesn't yield a positive number for INT_MIN, but
	 that is compensated by the subsequent overflow when subtracting
	 one / negating.  */

      if (optab_handler (abs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, abs_optab, op0, subtarget, 1);
      else if (optab_handler (ffs_optab, mode) != CODE_FOR_nothing)
	tem = expand_unop (mode, ffs_optab, op0, subtarget, 1);
      else if (GET_MODE_SIZE (mode) < UNITS_PER_WORD)
	{
	  /* The remaining arithmetic below must happen in word_mode.  */
	  tem = convert_modes (word_mode, mode, op0, 1);
	  mode = word_mode;
	}

      if (tem != 0)
	{
	  if (code == EQ)
	    tem = expand_binop (mode, sub_optab, tem, const1_rtx, subtarget,
				0, OPTAB_WIDEN);
	  else
	    tem = expand_unop (mode, neg_optab, tem, subtarget, 0);
	}

      /* If we couldn't do it that way, for NE we can "or" the two's complement
	 of the value with itself.  For EQ, we take the one's complement of
	 that "or", which is an extra insn, so we only handle EQ if branches
	 are expensive.  */

      if (tem == 0
	  && (code == NE
	      || BRANCH_COST (optimize_insn_for_speed_p (),
			      false) > 1))
	{
	  if (rtx_equal_p (subtarget, op0))
	    subtarget = 0;

	  tem = expand_unop (mode, neg_optab, op0, subtarget, 0);
	  tem = expand_binop (mode, ior_optab, tem, op0, subtarget, 0,
			      OPTAB_WIDEN);

	  if (tem && code == EQ)
	    tem = expand_unop (mode, one_cmpl_optab, tem, subtarget, 0);
	}
    }

  /* The tricks above leave the answer in the sign bit; a final shift
     moves it to the low bit (normalizep == 1, logical shift) or
     smears it over the whole word (normalizep == -1, arithmetic
     shift).  */
  if (tem && normalizep)
    tem = maybe_expand_shift (RSHIFT_EXPR, mode, tem,
			      GET_MODE_BITSIZE (mode) - 1,
			      subtarget, normalizep == 1);

  if (tem)
    {
      if (!target)
	;
      else if (GET_MODE (tem) != target_mode)
	{
	  /* Widen/narrow the result into the caller-requested mode.  */
	  convert_move (target, tem, 0);
	  tem = target;
	}
      else if (!subtarget)
	{
	  emit_move_insn (target, tem);
	  tem = target;
	}
    }
  else
    delete_insns_since (last);

  return tem;
}
6062 :
/* Emit a store-flags instruction for comparison CODE on OP0 and OP1
   and storing in TARGET.  Normally return TARGET.
   Return 0 if that cannot be done.

   MODE is the mode to use for OP0 and OP1 should they be CONST_INTs.  If
   it is VOIDmode, they cannot both be CONST_INT.

   UNSIGNEDP is for the case where we have to widen the operands
   to perform the operation.  It says to use zero-extension.

   NORMALIZEP is 1 if we should convert the result to be either zero
   or one.  Normalize is -1 if we should convert the result to be
   either zero or -1.  If NORMALIZEP is zero, the result will be left
   "raw" out of the scc insn.  */

rtx
emit_store_flag (rtx target, enum rtx_code code, rtx op0, rtx op1,
		 machine_mode mode, int unsignedp, int normalizep)
{
  machine_mode target_mode = target ? GET_MODE (target) : VOIDmode;
  enum rtx_code rcode;
  rtx subtarget;
  rtx tem, trueval;
  rtx_insn *last;

  /* If we compare constants, we shouldn't use a store-flag operation,
     but a constant load.  We can get there via the vanilla route that
     usually generates a compare-branch sequence, but will in this case
     fold the comparison to a constant, and thus elide the branch.  */
  if (CONSTANT_P (op0) && CONSTANT_P (op1))
    return NULL_RTX;

  /* First try the direct scc expansion.  */
  tem = emit_store_flag_1 (target, code, op0, op1, mode, unsignedp, normalizep,
			   target_mode);
  if (tem)
    return tem;

  /* If we reached here, we can't do this with a scc insn, however there
     are some comparisons that can be done in other ways.  Don't do any
     of these cases if branches are very cheap.  */
  if (BRANCH_COST (optimize_insn_for_speed_p (), false) == 0)
    return 0;

  /* See what we need to return.  We can only return a 1, -1, or the
     sign bit.  */

  if (normalizep == 0)
    {
      if (STORE_FLAG_VALUE == 1 || STORE_FLAG_VALUE == -1)
	normalizep = STORE_FLAG_VALUE;

      else if (val_signbit_p (mode, STORE_FLAG_VALUE))
	;
      else
	return 0;
    }

  last = get_last_insn ();

  /* If optimizing, use different pseudo registers for each insn, instead
     of reusing the same pseudo.  This leads to better CSE, but slows
     down the compiler, since there are more pseudos.  */
  subtarget = (!optimize
	       && (target_mode == mode)) ? target : NULL_RTX;
  trueval = GEN_INT (normalizep ? normalizep : STORE_FLAG_VALUE);

  /* For floating-point comparisons, try the reverse comparison or try
     changing the "orderedness" of the comparison.  */
  if (GET_MODE_CLASS (mode) == MODE_FLOAT)
    {
      enum rtx_code first_code;
      bool and_them;

      /* Reversing is only valid when it cannot change the outcome for
	 NaN operands (or when NaNs don't matter for this mode).  */
      rcode = reverse_condition_maybe_unordered (code);
      if (can_compare_p (rcode, mode, ccp_store_flag)
	  && (code == ORDERED || code == UNORDERED
	      || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	      || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
	{
	  int want_add = ((STORE_FLAG_VALUE == 1 && normalizep == -1)
			  || (STORE_FLAG_VALUE == -1 && normalizep == 1));

	  /* For the reverse comparison, use either an addition or a XOR.  */
	  if (want_add
	      && rtx_cost (GEN_INT (normalizep), mode, PLUS, 1,
			   optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       STORE_FLAG_VALUE, target_mode);
	      if (tem)
		return expand_binop (target_mode, add_optab, tem,
				     gen_int_mode (normalizep, target_mode),
				     target, 0, OPTAB_WIDEN);
	    }
	  else if (!want_add
		   && rtx_cost (trueval, mode, XOR, 1,
				optimize_insn_for_speed_p ()) == 0)
	    {
	      tem = emit_store_flag_1 (subtarget, rcode, op0, op1, mode, 0,
				       normalizep, target_mode);
	      if (tem)
		return expand_binop (target_mode, xor_optab, tem, trueval,
				     target, INTVAL (trueval) >= 0,
				     OPTAB_WIDEN);
	    }
	}

      delete_insns_since (last);

      /* Cannot split ORDERED and UNORDERED, only try the above trick.  */
      if (code == ORDERED || code == UNORDERED)
	return 0;

      /* Split e.g. LE into UNORDERED/ORDERED plus an ordered comparison;
	 AND_THEM says how the two halves combine.  */
      and_them = split_comparison (code, mode, &first_code, &code);

      /* If there are no NaNs, the first comparison should always fall through.
	 Effectively change the comparison to the other one.  */
      if (!HONOR_NANS (mode))
	{
	  gcc_assert (first_code == (and_them ? ORDERED : UNORDERED));
	  return emit_store_flag_1 (target, code, op0, op1, mode, 0, normalizep,
				    target_mode);
	}

      if (!HAVE_conditional_move)
	return 0;

      /* Do not turn a trapping comparison into a non-trapping one.  */
      if ((code != EQ && code != NE && code != UNEQ && code != LTGT)
	  && flag_trapping_math)
	return 0;

      /* Try using a setcc instruction for ORDERED/UNORDERED, followed by a
	 conditional move.  */
      tem = emit_store_flag_1 (subtarget, first_code, op0, op1, mode, 0,
			       normalizep, target_mode);
      if (tem == 0)
	return 0;

      if (and_them)
	tem = emit_conditional_move (target, { code, op0, op1, mode },
				     tem, const0_rtx, GET_MODE (tem), 0);
      else
	tem = emit_conditional_move (target, { code, op0, op1, mode },
				     trueval, tem, GET_MODE (tem), 0);

      if (tem == 0)
	delete_insns_since (last);
      return tem;
    }

  /* The remaining tricks only apply to integer comparisons.  */

  scalar_int_mode int_mode;
  if (is_int_mode (mode, &int_mode))
    return emit_store_flag_int (target, subtarget, code, op0, op1, int_mode,
				unsignedp, normalizep, trueval);

  return 0;
}
6223 :
/* Like emit_store_flag, but always succeeds.  If emit_store_flag cannot
   synthesize the flag value, fall back to an explicit
   move/compare/jump/move sequence.  Returns the register holding the
   result (normally TARGET, or a fresh pseudo if TARGET is unusable).  */

rtx
emit_store_flag_force (rtx target, enum rtx_code code, rtx op0, rtx op1,
		       machine_mode mode, int unsignedp, int normalizep)
{
  rtx tem;
  rtx_code_label *label;
  rtx trueval, falseval;

  /* First see if emit_store_flag can do the job.  */
  tem = emit_store_flag (target, code, op0, op1, mode, unsignedp, normalizep);
  if (tem != 0)
    return tem;

  /* If one operand is constant, make it the second one.  Only do this
     if the other operand is not constant as well.  */
  if (swap_commutative_operands_p (op0, op1))
    {
      std::swap (op0, op1);
      code = swap_condition (code);
    }

  if (mode == VOIDmode)
    mode = GET_MODE (op0);

  if (!target)
    target = gen_reg_rtx (word_mode);

  /* If this failed, we have to do this with set/compare/jump/set code.
     For foo != 0, if foo is in OP0, just replace it with 1 if nonzero.  */
  trueval = normalizep ? GEN_INT (normalizep) : const1_rtx;
  if (code == NE
      && GET_MODE_CLASS (mode) == MODE_INT
      && REG_P (target)
      && op0 == target
      && op1 == const0_rtx)
    {
      /* TARGET already holds OP0, so only overwrite it when nonzero.  */
      label = gen_label_rtx ();
      do_compare_rtx_and_jump (target, const0_rtx, EQ, unsignedp, mode,
			       NULL_RTX, NULL, label,
			       profile_probability::uninitialized ());
      emit_move_insn (target, trueval);
      emit_label (label);
      return target;
    }

  /* The sequence below clobbers TARGET before reading OP0/OP1, so it
     must not overlap either operand.  */
  if (!REG_P (target)
      || reg_mentioned_p (target, op0) || reg_mentioned_p (target, op1))
    target = gen_reg_rtx (GET_MODE (target));

  /* Jump in the right direction if the target cannot implement CODE
     but can jump on its reverse condition.  */
  falseval = const0_rtx;
  if (! can_compare_p (code, mode, ccp_jump)
      && (! FLOAT_MODE_P (mode)
	  || code == ORDERED || code == UNORDERED
	  || (! HONOR_NANS (mode) && (code == LTGT || code == UNEQ))
	  || (! HONOR_SNANS (mode) && (code == EQ || code == NE))))
    {
      enum rtx_code rcode;
      if (FLOAT_MODE_P (mode))
	rcode = reverse_condition_maybe_unordered (code);
      else
	rcode = reverse_condition (code);

      /* Canonicalize to UNORDERED for the libcall.  */
      if (can_compare_p (rcode, mode, ccp_jump)
	  || (code == ORDERED && ! can_compare_p (ORDERED, mode, ccp_jump)))
	{
	  /* Use the reversed jump; swap which value is stored
	     unconditionally and which after the branch.  */
	  falseval = trueval;
	  trueval = const0_rtx;
	  code = rcode;
	}
    }

  emit_move_insn (target, trueval);
  label = gen_label_rtx ();
  do_compare_rtx_and_jump (op0, op1, code, unsignedp, mode, NULL_RTX, NULL,
			   label, profile_probability::uninitialized ());

  emit_move_insn (target, falseval);
  emit_label (label);

  return target;
}
6310 :
/* Expand a vector (left) rotate of MODE of X by an immediate AMT as a vector
   permute operation.  Emit code to put the result in DST if successfull and
   return it.  Otherwise return NULL.  This is intended to implement vector
   rotates by byte amounts using vector permutes when the target does not offer
   native vector rotate operations.  */
rtx
expand_rotate_as_vec_perm (machine_mode mode, rtx dst, rtx x, rtx amt)
{
  rtx amt_unwrap = unwrap_const_vec_duplicate (amt);
  /* For now handle only rotate by the same integer constant in all lanes.
     In principle rotates by any constant vector are representable through
     permutes as long as the individual rotate amounts are multiples of
     BITS_PER_UNIT.  */
  if (!CONST_INT_P (amt_unwrap))
    return NULL_RTX;

  /* Only whole-byte rotates can be expressed as byte permutes.  */
  int rotamnt = INTVAL (amt_unwrap);
  if (rotamnt % BITS_PER_UNIT != 0)
    return NULL_RTX;
  machine_mode qimode;
  if (!qimode_for_vec_perm (mode).exists (&qimode))
    return NULL_RTX;

  /* Build the byte-selector vector: within each NUNITS-byte element,
     select bytes rotated by ROT_BYTES.  The direction of the byte
     rotation depends on endianness.  Three pattern copies (npatterns
     argument 3) let variable-length vectors extrapolate the series.  */
  vec_perm_builder builder;
  unsigned nunits = GET_MODE_SIZE (GET_MODE_INNER (mode));
  poly_uint64 total_units = GET_MODE_SIZE (mode);
  builder.new_vector (total_units, nunits, 3);
  unsigned rot_bytes = rotamnt / BITS_PER_UNIT;
  unsigned rot_to_perm = BYTES_BIG_ENDIAN ? rot_bytes : nunits - rot_bytes;
  for (unsigned j = 0; j < 3 * nunits; j += nunits)
    for (unsigned i = 0; i < nunits; i++)
      builder.quick_push ((rot_to_perm + i) % nunits + j);

  /* Perform the permute on the byte-vector view of the operands.  */
  rtx perm_src = lowpart_subreg (qimode, x, mode);
  rtx perm_dst = lowpart_subreg (qimode, dst, mode);
  rtx res
    = expand_vec_perm_const (qimode, perm_src, perm_src, builder,
			     qimode, perm_dst);
  if (!res)
    return NULL_RTX;
  /* expand_vec_perm_const may have placed the result elsewhere; copy it
     into DST in the original mode if so.  */
  if (!rtx_equal_p (res, perm_dst))
    emit_move_insn (dst, lowpart_subreg (mode, res, qimode));
  return dst;
}
6355 :
6356 : /* Helper function for canonicalize_cmp_for_target. Swap between inclusive
6357 : and exclusive ranges in order to create an equivalent comparison. See
6358 : canonicalize_cmp_for_target for the possible cases. */
6359 :
6360 : static enum rtx_code
6361 47 : equivalent_cmp_code (enum rtx_code code)
6362 : {
6363 47 : switch (code)
6364 : {
6365 : case GT:
6366 : return GE;
6367 0 : case GE:
6368 0 : return GT;
6369 0 : case LT:
6370 0 : return LE;
6371 0 : case LE:
6372 0 : return LT;
6373 2 : case GTU:
6374 2 : return GEU;
6375 0 : case GEU:
6376 0 : return GTU;
6377 1 : case LTU:
6378 1 : return LEU;
6379 2 : case LEU:
6380 2 : return LTU;
6381 :
6382 0 : default:
6383 0 : return code;
6384 : }
6385 : }
6386 :
6387 : /* Choose the more appropriate immediate in scalar integer comparisons.  The
6388 : purpose of this is to end up with an immediate which can be loaded into a
6389 : register in fewer moves, if possible.
6390 :
6391 : For each integer comparison there exists an equivalent choice:
6392 : i) a > b or a >= b + 1
6393 : ii) a <= b or a < b + 1
6394 : iii) a >= b or a > b - 1
6395 : iv) a < b or a <= b - 1
6396 :
6397 : MODE is the mode of the first operand.
6398 : CODE points to the comparison code.
6399 : IMM points to the rtx containing the immediate. *IMM must satisfy
6400 : CONST_SCALAR_INT_P on entry and continues to satisfy CONST_SCALAR_INT_P
6401 : on exit. */
6402 :
void
canonicalize_comparison (machine_mode mode, enum rtx_code *code, rtx *imm)
{
  /* Only scalar integer comparisons have the +/-1 equivalences above.  */
  if (!SCALAR_INT_MODE_P (mode))
    return;

  int to_add = 0;
  /* The adjustment must be done in the signedness of the comparison so
     overflow detection below is meaningful.  */
  enum signop sgn = unsigned_condition_p (*code) ? UNSIGNED : SIGNED;

  /* Extract the immediate value from the rtx.  */
  wide_int imm_val = rtx_mode_t (*imm, mode);

  /* GT/LE flip with B + 1; GE/LT flip with B - 1 (cases i-iv in the
     comment above).  Equality codes have no equivalent form.  */
  if (*code == GT || *code == GTU || *code == LE || *code == LEU)
    to_add = 1;
  else if (*code == GE || *code == GEU || *code == LT || *code == LTU)
    to_add = -1;
  else
    return;

  /* Check for overflow/underflow in the case of signed values and
     wrapping around in the case of unsigned values.  If any occur
     cancel the optimization.  */
  wi::overflow_type overflow = wi::OVF_NONE;
  wide_int imm_modif;

  if (to_add == 1)
    imm_modif = wi::add (imm_val, 1, sgn, &overflow);
  else
    imm_modif = wi::sub (imm_val, 1, sgn, &overflow);

  if (overflow)
    return;

  rtx new_imm = immed_wide_int_const (imm_modif, mode);

  /* Compare the cost of materializing each constant in a comparison.  */
  int old_cost = rtx_cost (*imm, mode, COMPARE, 0, true);
  int new_cost = rtx_cost (new_imm, mode, COMPARE, 0, true);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, ";; cmp: %s, old cst: ",
	       GET_RTX_NAME (*code));
      print_rtl (dump_file, *imm);
      fprintf (dump_file, " new cst: ");
      print_rtl (dump_file, new_imm);
      fprintf (dump_file, "\n");
      fprintf (dump_file, ";; old cst cost: %d, new cst cost: %d\n",
	       old_cost, new_cost);
    }

  /* Update the immediate and the code.  */
  if (old_cost > new_cost)
    {
      *code = equivalent_cmp_code (*code);
      *imm = new_imm;
    }
}
6460 :
6461 :
6462 :
6463 : /* Perform possibly multi-word comparison and conditional jump to LABEL
6464 : if ARG1 OP ARG2 true where ARG1 and ARG2 are of mode MODE. This is
6465 : now a thin wrapper around do_compare_rtx_and_jump. */
6466 :
6467 : static void
6468 2407 : do_cmp_and_jump (rtx arg1, rtx arg2, enum rtx_code op, machine_mode mode,
6469 : rtx_code_label *label)
6470 : {
6471 2407 : int unsignedp = (op == LTU || op == LEU || op == GTU || op == GEU);
6472 2407 : do_compare_rtx_and_jump (arg1, arg2, op, unsignedp, mode, NULL_RTX,
6473 : NULL, label, profile_probability::uninitialized ());
6474 2407 : }
|