Line data Source code
1 : /* Operations with very long integers.
2 : Copyright (C) 2012-2026 Free Software Foundation, Inc.
3 : Contributed by Kenneth Zadeck <zadeck@naturalbridge.com>
4 :
5 : This file is part of GCC.
6 :
7 : GCC is free software; you can redistribute it and/or modify it
8 : under the terms of the GNU General Public License as published by the
9 : Free Software Foundation; either version 3, or (at your option) any
10 : later version.
11 :
12 : GCC is distributed in the hope that it will be useful, but WITHOUT
13 : ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 : FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 : for more details.
16 :
17 : You should have received a copy of the GNU General Public License
18 : along with GCC; see the file COPYING3. If not see
19 : <http://www.gnu.org/licenses/>. */
20 :
21 : #include "config.h"
22 : #include "system.h"
23 : #include "coretypes.h"
24 : #include "tm.h"
25 : #include "tree.h"
26 : #include "selftest.h"
27 :
28 :
29 : #define HOST_BITS_PER_HALF_WIDE_INT 32
30 : #if HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_LONG
31 : # define HOST_HALF_WIDE_INT long
32 : #elif HOST_BITS_PER_HALF_WIDE_INT == HOST_BITS_PER_INT
33 : # define HOST_HALF_WIDE_INT int
34 : #else
35 : #error Please add support for HOST_HALF_WIDE_INT
36 : #endif
37 :
38 : #define W_TYPE_SIZE HOST_BITS_PER_WIDE_INT
39 : /* Do not include longlong.h when compiler is clang-based. See PR61146. */
40 : #if GCC_VERSION >= 3000 && (W_TYPE_SIZE == 32 || defined (__SIZEOF_INT128__)) && !defined(__clang__)
41 : typedef unsigned HOST_HALF_WIDE_INT UHWtype;
42 : typedef unsigned HOST_WIDE_INT UWtype;
43 : typedef unsigned int UQItype __attribute__ ((mode (QI)));
44 : typedef unsigned int USItype __attribute__ ((mode (SI)));
45 : typedef unsigned int UDItype __attribute__ ((mode (DI)));
46 : #if W_TYPE_SIZE == 32
47 : typedef unsigned int UDWtype __attribute__ ((mode (DI)));
48 : #else
49 : typedef unsigned int UDWtype __attribute__ ((mode (TI)));
50 : #endif
51 : #include "longlong.h"
52 : #endif
53 :
/* A canonical single-block zero.  NOTE(review): its uses are not visible
   in this chunk -- presumably a ready-made zero operand for routines
   later in the file; confirm against the remainder of wide-int.cc.  */
static const HOST_WIDE_INT zeros[1] = {};
55 :
56 : /*
57 : * Internal utilities.
58 : */
59 :
60 : /* Quantities to deal with values that hold half of a wide int. Used
61 : in multiply and divide. */
62 : #define HALF_INT_MASK ((HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1)
63 :
64 : #define BLOCK_OF(TARGET) ((TARGET) / HOST_BITS_PER_WIDE_INT)
65 : #define BLOCKS_NEEDED(PREC) (PREC ? CEIL (PREC, HOST_BITS_PER_WIDE_INT) : 1)
66 : #define SIGN_MASK(X) ((HOST_WIDE_INT) (X) < 0 ? -1 : 0)
67 :
68 : /* Return the value a VAL[I] if I < LEN, otherwise, return 0 or -1
69 : based on the top existing bit of VAL. */
70 :
71 : static unsigned HOST_WIDE_INT
72 8954904822 : safe_uhwi (const HOST_WIDE_INT *val, unsigned int len, unsigned int i)
73 : {
74 8954904822 : return i < len ? val[i] : val[len - 1] < 0 ? HOST_WIDE_INT_M1 : 0;
75 : }
76 :
/* Convert the integer in VAL to canonical form, returning its new length.
   LEN is the number of blocks currently in VAL and PRECISION is the number
   of bits in the integer it represents.

   This function only changes the representation, not the value.  */
static unsigned int
canonize (HOST_WIDE_INT *val, unsigned int len, unsigned int precision)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT top;
  int i;

  /* Blocks beyond what the precision can hold carry no information.  */
  if (len > blocks_needed)
    len = blocks_needed;

  /* A single block is always canonical.  */
  if (len == 1)
    return len;

  top = val[len - 1];
  /* The top block may contain stray bits above the precision;
     sign-extend from the precision's top bit so TOP reads correctly
     (and store the cleaned block back).  */
  if (len * HOST_BITS_PER_WIDE_INT > precision)
    val[len - 1] = top = sext_hwi (top, precision % HOST_BITS_PER_WIDE_INT);
  /* If the top block is neither all zeros nor all ones it cannot be
     redundant, so the representation is already minimal.  */
  if (top != 0 && top != HOST_WIDE_INT_M1)
    return len;

  /* At this point we know that the top is either 0 or -1.  Find the
     first block that is not a copy of this.  */
  for (i = len - 2; i >= 0; i--)
    {
      HOST_WIDE_INT x = val[i];
      if (x != top)
	{
	  /* Block I's own sign bit already implies the dropped
	     extension blocks.  */
	  if (SIGN_MASK (x) == top)
	    return i + 1;

	  /* We need an extra block because the top bit block i does
	     not match the extension.  */
	  return i + 2;
	}
    }

  /* The number is 0 or -1.  */
  return 1;
}
120 :
121 : /* VAL[0] is the unsigned result of an operation. Canonize it by adding
122 : another 0 block if needed, and return number of blocks needed. */
123 :
124 : static inline unsigned int
125 892314313 : canonize_uhwi (HOST_WIDE_INT *val, unsigned int precision)
126 : {
127 1110713 : if (val[0] < 0 && precision > HOST_BITS_PER_WIDE_INT)
128 : {
129 25249 : val[1] = 0;
130 25249 : return 2;
131 : }
132 : return 1;
133 : }
134 :
135 : /*
136 : * Conversion routines in and out of wide_int.
137 : */
138 :
139 : /* Copy XLEN elements from XVAL to VAL. If NEED_CANON, canonize the
140 : result for an integer with precision PRECISION. Return the length
141 : of VAL (after any canonization). */
142 : unsigned int
143 190403222 : wi::from_array (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
144 : unsigned int xlen, unsigned int precision, bool need_canon)
145 : {
146 755895664 : for (unsigned i = 0; i < xlen; i++)
147 565492442 : val[i] = xval[i];
148 190403222 : return need_canon ? canonize (val, xlen, precision) : xlen;
149 : }
150 :
/* Construct a wide int from a buffer of length LEN.  BUFFER will be
   read according to byte endianness and word endianness of the target.
   Only the lower BUFFER_LEN bytes of the result are set; the remaining
   high bytes are cleared.  */
wide_int
wi::from_buffer (const unsigned char *buffer, unsigned int buffer_len)
{
  unsigned int precision = buffer_len * BITS_PER_UNIT;
  wide_int result = wide_int::create (precision);
  unsigned int words = buffer_len / UNITS_PER_WORD;

  /* We have to clear all the bits ourself, as we merely or in values
     below.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  HOST_WIDE_INT *val = result.write_val (0);
  for (unsigned int i = 0; i < len; ++i)
    val[i] = 0;

  for (unsigned int byte = 0; byte < buffer_len; byte++)
    {
      unsigned int offset;
      unsigned int index;
      unsigned int bitpos = byte * BITS_PER_UNIT;
      unsigned HOST_WIDE_INT value;

      if (buffer_len > UNITS_PER_WORD)
	{
	  /* Multi-word value: locate the source byte by undoing the
	     target's word order, then its byte order within the word.  */
	  unsigned int word = byte / UNITS_PER_WORD;

	  if (WORDS_BIG_ENDIAN)
	    word = (words - 1) - word;

	  offset = word * UNITS_PER_WORD;

	  if (BYTES_BIG_ENDIAN)
	    offset += (UNITS_PER_WORD - 1) - (byte % UNITS_PER_WORD);
	  else
	    offset += byte % UNITS_PER_WORD;
	}
      else
	/* Sub-word value: only byte order within the buffer matters.  */
	offset = BYTES_BIG_ENDIAN ? (buffer_len - 1) - byte : byte;

      value = (unsigned HOST_WIDE_INT) buffer[offset];

      /* OR the byte into its little-endian bit position in VAL.  */
      index = bitpos / HOST_BITS_PER_WIDE_INT;
      val[index] |= value << (bitpos % HOST_BITS_PER_WIDE_INT);
    }

  result.set_len (canonize (val, len, precision));

  return result;
}
203 :
/* Sets RESULT from X, the sign is taken according to SGN.  */
void
wi::to_mpz (const wide_int_ref &x, mpz_t result, signop sgn)
{
  int len = x.get_len ();
  const HOST_WIDE_INT *v = x.get_val ();
  /* Number of representation bits in the top block above the precision
     (negative when the stored blocks do not fill the precision).  */
  int excess = len * HOST_BITS_PER_WIDE_INT - x.get_precision ();

  if (wi::neg_p (x, sgn))
    {
      /* We use ones complement to avoid -x80..0 edge case that -
	 won't work on.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len; i++)
	t[i] = ~v[i];
      /* Strip the undefined bits above the precision in the top block.  */
      if (excess > 0)
	t[len - 1] = (unsigned HOST_WIDE_INT) t[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
      /* Complement inside GMP to recover the negative value.  */
      mpz_com (result, result);
    }
  else if (excess > 0)
    {
      /* Nonnegative, but the top block carries bits beyond the
	 precision; copy and trim before importing.  */
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len);
      for (int i = 0; i < len - 1; i++)
	t[i] = v[i];
      t[len - 1] = (unsigned HOST_WIDE_INT) v[len - 1] << excess >> excess;
      mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else if (excess < 0 && wi::neg_p (x))
    {
      /* Fewer stored blocks than the precision needs, and the top
	 stored bit is set, so the implicit extension blocks are all
	 ones: write them out, then trim the last one down to the
	 precision before importing as a magnitude.  */
      int extra = CEIL (-excess, HOST_BITS_PER_WIDE_INT);
      HOST_WIDE_INT *t = XALLOCAVEC (HOST_WIDE_INT, len + extra);
      for (int i = 0; i < len; i++)
	t[i] = v[i];
      for (int i = 0; i < extra; i++)
	t[len + i] = -1;
      excess = (-excess) % HOST_BITS_PER_WIDE_INT;
      if (excess)
	t[len + extra - 1] = (HOST_WIDE_INT_1U << excess) - 1;
      mpz_import (result, len + extra, -1, sizeof (HOST_WIDE_INT), 0, 0, t);
    }
  else
    /* The stored blocks are exactly the value; import directly.  */
    mpz_import (result, len, -1, sizeof (HOST_WIDE_INT), 0, 0, v);
}
248 :
/* Returns X converted to TYPE.  If WRAP is true, then out-of-range
   values of VAL will be wrapped; otherwise, they will be set to the
   appropriate minimum or maximum TYPE bound.  */
wide_int
wi::from_mpz (const_tree type, mpz_t x, bool wrap)
{
  size_t count, numb;
  unsigned int prec = TYPE_PRECISION (type);
  wide_int res = wide_int::create (prec);

  if (!wrap)
    {
      /* Saturate X in place to TYPE's representable range.  */
      mpz_t min, max;

      mpz_init (min);
      mpz_init (max);
      get_type_static_bounds (type, min, max);

      if (mpz_cmp (x, min) < 0)
	mpz_set (x, min);
      else if (mpz_cmp (x, max) > 0)
	mpz_set (x, max);

      mpz_clear (min);
      mpz_clear (max);
    }

  /* Determine the number of unsigned HOST_WIDE_INTs that are required
     for representing the absolute value.  The code to calculate count is
     extracted from the GMP manual, section "Integer Import and Export":
     http://gmplib.org/manual/Integer-Import-and-Export.html */
  numb = CHAR_BIT * sizeof (HOST_WIDE_INT);
  count = CEIL (mpz_sizeinbase (x, 2), numb);
  HOST_WIDE_INT *val = res.write_val (0);
  /* Read the absolute value.

     Write directly to the wide_int storage if possible, otherwise leave
     GMP to allocate the memory for us.  It might be slightly more efficient
     to use mpz_tdiv_r_2exp for the latter case, but the situation is
     pathological and it seems safer to operate on the original mpz value
     in all cases.  */
  void *valres = mpz_export (count <= WIDE_INT_MAX_INL_ELTS ? val : 0,
			     &count, -1, sizeof (HOST_WIDE_INT), 0, 0, x);
  /* mpz_export writes no limbs for zero; represent zero explicitly.  */
  if (count < 1)
    {
      val[0] = 0;
      count = 1;
    }
  count = MIN (count, BLOCKS_NEEDED (prec));
  if (valres != val)
    {
      /* GMP allocated the buffer (COUNT was too large for inline
	 storage); copy back what fits and release it.  */
      memcpy (val, valres, count * sizeof (HOST_WIDE_INT));
      free (valres);
    }
  /* Zero-extend the absolute value to PREC bits.  */
  if (count < BLOCKS_NEEDED (prec) && val[count - 1] < 0)
    val[count++] = 0;
  else
    count = canonize (val, count, prec);
  res.set_len (count);

  /* mpz_export produced the magnitude; restore the sign.  */
  if (mpz_sgn (x) < 0)
    res = -res;

  return res;
}
315 :
316 : /*
317 : * Largest and smallest values in a mode.
318 : */
319 :
320 : /* Return the largest SGNed number that is representable in PRECISION bits.
321 :
322 : TODO: There is still code from the double_int era that trys to
323 : make up for the fact that double int's could not represent the
324 : min and max values of all types. This code should be removed
325 : because the min and max values can always be represented in
326 : wide_ints and int-csts. */
327 : wide_int
328 4747725476 : wi::max_value (unsigned int precision, signop sgn)
329 : {
330 4747725476 : gcc_checking_assert (precision != 0);
331 4747725476 : if (sgn == UNSIGNED)
332 : /* The unsigned max is just all ones. */
333 3479139773 : return shwi (-1, precision);
334 : else
335 : /* The signed max is all ones except the top bit. This must be
336 : explicitly represented. */
337 1268585703 : return mask (precision - 1, false, precision);
338 : }
339 :
340 : /* Return the largest SGNed number that is representable in PRECISION bits. */
341 : wide_int
342 6377281226 : wi::min_value (unsigned int precision, signop sgn)
343 : {
344 6377281226 : gcc_checking_assert (precision != 0);
345 6377281226 : if (sgn == UNSIGNED)
346 3786264366 : return uhwi (0, precision);
347 : else
348 : /* The signed min is all zeros except the top bit. This must be
349 : explicitly represented. */
350 2591016860 : return wi::set_bit_in_zero (precision - 1, precision);
351 : }
352 :
353 : /*
354 : * Public utilities.
355 : */
356 :
/* Convert the number represented by XVAL, XLEN and XPRECISION, which has
   signedness SGN, to an integer that has PRECISION bits.  Store the blocks
   in VAL and return the number of blocks used.

   This function can handle both extension (PRECISION > XPRECISION)
   and truncation (PRECISION < XPRECISION).  */
unsigned int
wi::force_to_size (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, signop sgn)
{
  unsigned int blocks_needed = BLOCKS_NEEDED (precision);
  /* Copy at most as many blocks as the target precision can hold.  */
  unsigned int len = blocks_needed < xlen ? blocks_needed : xlen;
  for (unsigned i = 0; i < len; i++)
    val[i] = xval[i];

  if (precision > xprecision)
    {
      unsigned int small_xprecision = xprecision % HOST_BITS_PER_WIDE_INT;

      /* Expanding.  */
      if (sgn == UNSIGNED)
	{
	  /* The source's partial top block is present: clear its bits
	     above XPRECISION so the value zero-extends.  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	  else if (val[len - 1] < 0)
	    {
	      /* The compressed form dropped implicit all-ones blocks;
		 write them back out...  */
	      while (len < BLOCKS_NEEDED (xprecision))
		val[len++] = -1;
	      /* ...then either mask the partial top block, or append
		 an explicit zero block so the unsigned value does not
		 read as negative.  */
	      if (small_xprecision)
		val[len - 1] = zext_hwi (val[len - 1], small_xprecision);
	      else
		val[len++] = 0;
	    }
	}
      else
	{
	  /* Sign extension only needs the partial top block's sign bit
	     propagated; the implicit blocks already do the rest.  */
	  if (small_xprecision && len == BLOCKS_NEEDED (xprecision))
	    val[len - 1] = sext_hwi (val[len - 1], small_xprecision);
	}
    }
  len = canonize (val, len, precision);

  return len;
}
402 :
403 : /* This function hides the fact that we cannot rely on the bits beyond
404 : the precision. This issue comes up in the relational comparisions
405 : where we do allow comparisons of values of different precisions. */
406 : static inline HOST_WIDE_INT
407 268110940 : selt (const HOST_WIDE_INT *a, unsigned int len,
408 : unsigned int blocks_needed, unsigned int small_prec,
409 : unsigned int index, signop sgn)
410 : {
411 268110940 : HOST_WIDE_INT val;
412 268110940 : if (index < len)
413 214773550 : val = a[index];
414 53337390 : else if (index < blocks_needed || sgn == SIGNED)
415 : /* Signed or within the precision. */
416 53337390 : val = SIGN_MASK (a[len - 1]);
417 : else
418 : /* Unsigned extension beyond the precision. */
419 : val = 0;
420 :
421 268110940 : if (small_prec && index == blocks_needed - 1)
422 749288 : return (sgn == SIGNED
423 749288 : ? sext_hwi (val, small_prec)
424 290688 : : zext_hwi (val, small_prec));
425 : else
426 : return val;
427 : }
428 :
429 : /* Find the highest bit represented in a wide int. This will in
430 : general have the same value as the sign bit. */
431 : static inline HOST_WIDE_INT
432 643635118 : top_bit_of (const HOST_WIDE_INT *a, unsigned int len, unsigned int prec)
433 : {
434 643635118 : int excess = len * HOST_BITS_PER_WIDE_INT - prec;
435 643635118 : unsigned HOST_WIDE_INT val = a[len - 1];
436 643635118 : if (excess > 0)
437 38765 : val <<= excess;
438 643635118 : return val >> (HOST_BITS_PER_WIDE_INT - 1);
439 : }
440 :
441 : /*
442 : * Comparisons, note that only equality is an operator. The other
443 : * comparisons cannot be operators since they are inherently signed or
444 : * unsigned and C++ has no such operators.
445 : */
446 :
447 : /* Return true if OP0 == OP1. */
448 : bool
449 2598874 : wi::eq_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
450 : const HOST_WIDE_INT *op1, unsigned int op1len,
451 : unsigned int prec)
452 : {
453 2598874 : int l0 = op0len - 1;
454 2598874 : unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
455 :
456 2598874 : if (op0len != op1len)
457 : return false;
458 :
459 1034730 : if (op0len == BLOCKS_NEEDED (prec) && small_prec)
460 : {
461 : /* It does not matter if we zext or sext here, we just have to
462 : do both the same way. */
463 24493 : if (zext_hwi (op0 [l0], small_prec) != zext_hwi (op1 [l0], small_prec))
464 : return false;
465 19974 : l0--;
466 : }
467 :
468 3130130 : while (l0 >= 0)
469 2132914 : if (op0[l0] != op1[l0])
470 : return false;
471 : else
472 2099919 : l0--;
473 :
474 : return true;
475 : }
476 :
477 : /* Return true if OP0 < OP1 using signed comparisons. */
478 : bool
479 27857880 : wi::lts_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
480 : unsigned int precision,
481 : const HOST_WIDE_INT *op1, unsigned int op1len)
482 : {
483 27857880 : HOST_WIDE_INT s0, s1;
484 27857880 : unsigned HOST_WIDE_INT u0, u1;
485 27857880 : unsigned int blocks_needed = BLOCKS_NEEDED (precision);
486 27857880 : unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
487 27857880 : int l = MAX (op0len - 1, op1len - 1);
488 :
489 : /* Only the top block is compared as signed. The rest are unsigned
490 : comparisons. */
491 27857880 : s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
492 27857880 : s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
493 27857880 : if (s0 < s1)
494 : return true;
495 26905767 : if (s0 > s1)
496 : return false;
497 :
498 23838024 : l--;
499 30421685 : while (l >= 0)
500 : {
501 24101371 : u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
502 24101371 : u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
503 :
504 24101371 : if (u0 < u1)
505 : return true;
506 8012775 : if (u0 > u1)
507 : return false;
508 6583661 : l--;
509 : }
510 :
511 : return false;
512 : }
513 :
514 : /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
515 : signed compares. */
516 : int
517 2548068 : wi::cmps_large (const HOST_WIDE_INT *op0, unsigned int op0len,
518 : unsigned int precision,
519 : const HOST_WIDE_INT *op1, unsigned int op1len)
520 : {
521 2548068 : HOST_WIDE_INT s0, s1;
522 2548068 : unsigned HOST_WIDE_INT u0, u1;
523 2548068 : unsigned int blocks_needed = BLOCKS_NEEDED (precision);
524 2548068 : unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
525 2548068 : int l = MAX (op0len - 1, op1len - 1);
526 :
527 : /* Only the top block is compared as signed. The rest are unsigned
528 : comparisons. */
529 2548068 : s0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
530 2548068 : s1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
531 2548068 : if (s0 < s1)
532 : return -1;
533 1388488 : if (s0 > s1)
534 : return 1;
535 :
536 1272244 : l--;
537 2069366 : while (l >= 0)
538 : {
539 1501119 : u0 = selt (op0, op0len, blocks_needed, small_prec, l, SIGNED);
540 1501119 : u1 = selt (op1, op1len, blocks_needed, small_prec, l, SIGNED);
541 :
542 1501119 : if (u0 < u1)
543 : return -1;
544 841076 : if (u0 > u1)
545 : return 1;
546 797122 : l--;
547 : }
548 :
549 : return 0;
550 : }
551 :
552 : /* Return true if OP0 < OP1 using unsigned comparisons. */
553 : bool
554 41839681 : wi::ltu_p_large (const HOST_WIDE_INT *op0, unsigned int op0len,
555 : unsigned int precision,
556 : const HOST_WIDE_INT *op1, unsigned int op1len)
557 : {
558 41839681 : unsigned HOST_WIDE_INT x0;
559 41839681 : unsigned HOST_WIDE_INT x1;
560 41839681 : unsigned int blocks_needed = BLOCKS_NEEDED (precision);
561 41839681 : unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
562 41839681 : int l = MAX (op0len - 1, op1len - 1);
563 :
564 63742205 : while (l >= 0)
565 : {
566 61471346 : x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
567 61471346 : x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
568 61471346 : if (x0 < x1)
569 : return true;
570 50547862 : if (x0 > x1)
571 : return false;
572 21902524 : l--;
573 : }
574 :
575 : return false;
576 : }
577 :
578 : /* Returns -1 if OP0 < OP1, 0 if OP0 == OP1 and 1 if OP0 > OP1 using
579 : unsigned compares. */
580 : int
581 9919224 : wi::cmpu_large (const HOST_WIDE_INT *op0, unsigned int op0len,
582 : unsigned int precision,
583 : const HOST_WIDE_INT *op1, unsigned int op1len)
584 : {
585 9919224 : unsigned HOST_WIDE_INT x0;
586 9919224 : unsigned HOST_WIDE_INT x1;
587 9919224 : unsigned int blocks_needed = BLOCKS_NEEDED (precision);
588 9919224 : unsigned int small_prec = precision & (HOST_BITS_PER_WIDE_INT - 1);
589 9919224 : int l = MAX (op0len - 1, op1len - 1);
590 :
591 17278796 : while (l >= 0)
592 : {
593 16575686 : x0 = selt (op0, op0len, blocks_needed, small_prec, l, UNSIGNED);
594 16575686 : x1 = selt (op1, op1len, blocks_needed, small_prec, l, UNSIGNED);
595 16575686 : if (x0 < x1)
596 : return -1;
597 8914493 : if (x0 > x1)
598 : return 1;
599 7359572 : l--;
600 : }
601 :
602 : return 0;
603 : }
604 :
605 : /*
606 : * Extension.
607 : */
608 :
609 : /* Sign-extend the number represented by XVAL and XLEN into VAL,
610 : starting at OFFSET. Return the number of blocks in VAL. Both XVAL
611 : and VAL have PRECISION bits. */
612 : unsigned int
613 4919643 : wi::sext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
614 : unsigned int xlen, unsigned int precision, unsigned int offset)
615 : {
616 4919643 : unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
617 : /* Extending beyond the precision is a no-op. If we have only stored
618 : OFFSET bits or fewer, the rest are already signs. */
619 4919643 : if (offset >= precision || len >= xlen)
620 : {
621 10337739 : for (unsigned i = 0; i < xlen; ++i)
622 5760445 : val[i] = xval[i];
623 : return xlen;
624 : }
625 342349 : unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
626 1179515 : for (unsigned int i = 0; i < len; i++)
627 837166 : val[i] = xval[i];
628 342349 : if (suboffset > 0)
629 : {
630 24410 : val[len] = sext_hwi (xval[len], suboffset);
631 24410 : len += 1;
632 : }
633 342349 : return canonize (val, len, precision);
634 : }
635 :
/* Zero-extend the number represented by XVAL and XLEN into VAL,
   starting at OFFSET.  Return the number of blocks in VAL.  Both XVAL
   and VAL have PRECISION bits.  */
unsigned int
wi::zext_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		unsigned int xlen, unsigned int precision, unsigned int offset)
{
  unsigned int len = offset / HOST_BITS_PER_WIDE_INT;
  /* Extending beyond the precision is a no-op.  If we have only stored
     OFFSET bits or fewer, and the upper stored bit is zero, then there
     is nothing to do.  */
  if (offset >= precision || (len >= xlen && xval[xlen - 1] >= 0))
    {
      for (unsigned i = 0; i < xlen; ++i)
	val[i] = xval[i];
      return xlen;
    }
  unsigned int suboffset = offset % HOST_BITS_PER_WIDE_INT;
  /* Copy the blocks below OFFSET; positions at or above XLEN are the
     implicit sign-extension blocks, necessarily all ones here (the
     all-zeros case was handled by the early return above).  */
  for (unsigned int i = 0; i < len; i++)
    val[i] = i < xlen ? xval[i] : -1;
  if (suboffset > 0)
    /* Keep only the low SUBOFFSET bits of the block OFFSET splits.  */
    val[len] = zext_hwi (len < xlen ? xval[len] : -1, suboffset);
  else
    /* OFFSET is block-aligned: the next block is entirely zero.  */
    val[len] = 0;
  return canonize (val, len + 1, precision);
}
662 :
663 : /*
664 : * Masking, inserting, shifting, rotating.
665 : */
666 :
/* Insert WIDTH bits from Y into X starting at START.  */
wide_int
wi::insert (const wide_int &x, const wide_int &y, unsigned int start,
	    unsigned int width)
{
  wide_int result;
  wide_int mask;
  wide_int tmp;

  unsigned int precision = x.get_precision ();
  /* Inserting entirely above X's precision changes nothing.  */
  if (start >= precision)
    return x;

  gcc_checking_assert (precision >= width);

  /* Clip the field so it ends at the precision.  */
  if (start + width >= precision)
    width = precision - start;

  /* Splice: RESULT = ((Y << START) & MASK) | (X & ~MASK), where MASK
     covers the destination field.  */
  mask = wi::shifted_mask (start, width, false, precision);
  tmp = wi::lshift (wide_int::from (y, precision, UNSIGNED), start);
  result = tmp & mask;

  tmp = wi::bit_and_not (x, mask);
  result = result | tmp;

  return result;
}
694 :
/* Copy the number represented by XVAL and XLEN into VAL, setting bit BIT.
   Return the number of blocks in VAL.  Both XVAL and VAL have PRECISION
   bits.  */
unsigned int
wi::set_bit_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int precision, unsigned int bit)
{
  unsigned int block = bit / HOST_BITS_PER_WIDE_INT;
  unsigned int subbit = bit % HOST_BITS_PER_WIDE_INT;

  if (block + 1 >= xlen)
    {
      /* The operation either affects the last current block or needs
	 a new block.  */
      unsigned int len = block + 1;
      /* safe_uhwi materializes the implicit sign-extension blocks
	 beyond XLEN.  */
      for (unsigned int i = 0; i < len; i++)
	val[i] = safe_uhwi (xval, xlen, i);
      val[block] |= HOST_WIDE_INT_1U << subbit;

      /* If the bit we just set is at the msb of the block, make sure
	 that any higher bits are zeros.  */
      if (bit + 1 < precision && subbit == HOST_BITS_PER_WIDE_INT - 1)
	{
	  val[len++] = 0;
	  return len;
	}
      return canonize (val, len, precision);
    }
  else
    {
      /* BIT falls inside an interior stored block: copy and OR.  */
      for (unsigned int i = 0; i < xlen; i++)
	val[i] = xval[i];
      val[block] |= HOST_WIDE_INT_1U << subbit;
      return canonize (val, xlen, precision);
    }
}
731 :
/* Byte swap the integer represented by XVAL and XLEN into VAL.  Return
   the number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::bswap_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		 unsigned int xlen, unsigned int precision)
{
  unsigned int s, len = BLOCKS_NEEDED (precision);

  /* This is not a well defined operation if the precision is not a
     multiple of 8.  */
  gcc_assert ((precision & 0x7) == 0);

  /* Clear the result; bytes are OR'ed into it below.  */
  memset (val, 0, sizeof (unsigned HOST_WIDE_INT) * len);

  /* Only swap the bytes that are not the padding.  */
  for (s = 0; s < precision; s += 8)
    {
      /* The byte at bit position S moves to the mirrored position D.  */
      unsigned int d = precision - s - 8;
      unsigned HOST_WIDE_INT byte;

      unsigned int block = s / HOST_BITS_PER_WIDE_INT;
      unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);

      /* safe_uhwi supplies the implicit extension blocks above XLEN.  */
      byte = (safe_uhwi (xval, xlen, block) >> offset) & 0xff;

      block = d / HOST_BITS_PER_WIDE_INT;
      offset = d & (HOST_BITS_PER_WIDE_INT - 1);

      val[block] |= byte << offset;
    }

  return canonize (val, len, precision);
}
765 :
766 : /* Bitreverse the integer represented by XVAL and LEN into VAL. Return
767 : the number of blocks in VAL. Both XVAL and VAL have PRECISION bits. */
768 : unsigned int
769 0 : wi::bitreverse_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
770 : unsigned int len, unsigned int precision)
771 : {
772 0 : unsigned int i, s;
773 :
774 0 : for (i = 0; i < len; i++)
775 0 : val[i] = 0;
776 :
777 0 : for (s = 0; s < precision; s++)
778 : {
779 0 : unsigned int block = s / HOST_BITS_PER_WIDE_INT;
780 0 : unsigned int offset = s & (HOST_BITS_PER_WIDE_INT - 1);
781 0 : if (((safe_uhwi (xval, len, block) >> offset) & 1) != 0)
782 : {
783 0 : unsigned int d = (precision - 1) - s;
784 0 : block = d / HOST_BITS_PER_WIDE_INT;
785 0 : offset = d & (HOST_BITS_PER_WIDE_INT - 1);
786 0 : val[block] |= HOST_WIDE_INT_1U << offset;
787 : }
788 : }
789 :
790 0 : return canonize (val, len, precision);
791 : }
792 :
/* Fill VAL with a mask where the lower WIDTH bits are ones and the bits
   above that up to PREC are zeros.  The result is inverted if NEGATE
   is true.  Return the number of blocks in VAL.  */
unsigned int
wi::mask (HOST_WIDE_INT *val, unsigned int width, bool negate,
	  unsigned int prec)
{
  if (width >= prec)
    {
      /* All bits set (all clear if negated): one block is canonical.  */
      val[0] = negate ? 0 : -1;
      return 1;
    }
  else if (width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Whole blocks of ones below the boundary.  */
  unsigned int i = 0;
  while (i < width / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? 0 : -1;

  unsigned int shift = width & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* The block that WIDTH splits: ones below the boundary only.  */
      HOST_WIDE_INT last = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~last : last;
    }
  else
    /* WIDTH is block-aligned: append an explicit zero block so the
       all-ones mask does not read as -1.  */
    val[i++] = negate ? -1 : 0;

  return i;
}
826 :
/* Fill VAL with a mask where the lower START bits are zeros, the next WIDTH
   bits are ones, and the bits above that up to PREC are zeros.  The result
   is inverted if NEGATE is true.  Return the number of blocks in VAL.  */
unsigned int
wi::shifted_mask (HOST_WIDE_INT *val, unsigned int start, unsigned int width,
		  bool negate, unsigned int prec)
{
  /* An empty field, or one entirely above the precision, is zero
     (all ones if negated).  */
  if (start >= prec || width == 0)
    {
      val[0] = negate ? -1 : 0;
      return 1;
    }

  /* Clip the field to the precision.  */
  if (width > prec - start)
    width = prec - start;
  unsigned int end = start + width;

  /* Whole blocks of zeros below START.  */
  unsigned int i = 0;
  while (i < start / HOST_BITS_PER_WIDE_INT)
    val[i++] = negate ? -1 : 0;

  unsigned int shift = start & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift)
    {
      /* BLOCK holds ones below START within this block.  */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      shift += width;
      if (shift < HOST_BITS_PER_WIDE_INT)
	{
	  /* case 000111000: the whole field fits in this block.  */
	  block = (HOST_WIDE_INT_1U << shift) - block - 1;
	  val[i++] = negate ? ~block : block;
	  return i;
	}
      else
	/* ...111000: the field runs past this block's top.  */
	val[i++] = negate ? block : ~block;
    }

  if (end >= prec)
    {
      /* The field reaches the precision: if START was block-aligned no
	 block has been emitted for it yet, so emit one of all ones.  */
      if (!shift)
	val[i++] = negate ? 0 : -1;
      return i;
    }

  /* Whole blocks of ones strictly inside the field.  */
  while (i < end / HOST_BITS_PER_WIDE_INT)
    /* 1111111 */
    val[i++] = negate ? 0 : -1;

  shift = end & (HOST_BITS_PER_WIDE_INT - 1);
  if (shift != 0)
    {
      /* 000011111: the block that END splits.  */
      HOST_WIDE_INT block = (HOST_WIDE_INT_1U << shift) - 1;
      val[i++] = negate ? ~block : block;
    }
  else
    /* END is block-aligned: explicit zero block keeps the value from
       reading as negative.  */
    val[i++] = negate ? -1 : 0;

  return i;
}
888 :
889 : /*
890 : * logical operations.
891 : */
892 :
/* Set VAL to OP0 & OP1.  Return the number of blocks used.  */
unsigned int
wi::and_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec)
{
  int l0 = op0len - 1;
  int l1 = op1len - 1;
  bool need_canon = true;

  unsigned int len = MAX (op0len, op1len);
  if (l0 > l1)
    {
      /* OP1 is shorter; its blocks above OP1LEN are implicitly its
	 sign mask.  */
      HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
      if (op1mask == 0)
	{
	  /* AND with implicit zeros kills OP0's high blocks.  */
	  l0 = l1;
	  len = l1 + 1;
	}
      else
	{
	  /* AND with implicit all-ones copies OP0's high blocks
	     through; the result keeps OP0's canonical shape.  */
	  need_canon = false;
	  while (l0 > l1)
	    {
	      val[l0] = op0[l0];
	      l0--;
	    }
	}
    }
  else if (l1 > l0)
    {
      /* Symmetric case: OP0 is the shorter operand.  */
      HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
      if (op0mask == 0)
	len = l0 + 1;
      else
	{
	  need_canon = false;
	  while (l1 > l0)
	    {
	      val[l1] = op1[l1];
	      l1--;
	    }
	}
    }

  /* AND the blocks both operands store explicitly.  */
  while (l0 >= 0)
    {
      val[l0] = op0[l0] & op1[l0];
      l0--;
    }

  if (need_canon)
    len = canonize (val, len, prec);

  return len;
}
949 :
950 : /* Set VAL to OP0 & ~OP1. Return the number of blocks used. */
951 : unsigned int
952 92444453 : wi::and_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
953 : unsigned int op0len, const HOST_WIDE_INT *op1,
954 : unsigned int op1len, unsigned int prec)
955 : {
956 92444453 : wide_int result;
957 92444453 : int l0 = op0len - 1;
958 92444453 : int l1 = op1len - 1;
959 92444453 : bool need_canon = true;
960 :
961 92444453 : unsigned int len = MAX (op0len, op1len);
962 92444453 : if (l0 > l1)
963 : {
964 13658456 : HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
965 13658456 : if (op1mask != 0)
966 : {
967 92444453 : l0 = l1;
968 92444453 : len = l1 + 1;
969 : }
970 : else
971 : {
972 26765174 : need_canon = false;
973 26765174 : while (l0 > l1)
974 : {
975 13426135 : val[l0] = op0[l0];
976 13426135 : l0--;
977 : }
978 : }
979 : }
980 78785997 : else if (l1 > l0)
981 : {
982 77465691 : HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
983 77465691 : if (op0mask == 0)
984 : len = l0 + 1;
985 : else
986 : {
987 138132 : need_canon = false;
988 138132 : while (l1 > l0)
989 : {
990 70387 : val[l1] = ~op1[l1];
991 70387 : l1--;
992 : }
993 : }
994 : }
995 :
996 186331437 : while (l0 >= 0)
997 : {
998 93886984 : val[l0] = op0[l0] & ~op1[l0];
999 93886984 : l0--;
1000 : }
1001 :
1002 92444453 : if (need_canon)
1003 79037669 : len = canonize (val, len, prec);
1004 :
1005 92444453 : return len;
1006 92444453 : }
1007 :
1008 : /* Set VAL to OP0 | OP1. Return the number of blocks used. */
1009 : unsigned int
1010 177042813 : wi::or_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1011 : unsigned int op0len, const HOST_WIDE_INT *op1,
1012 : unsigned int op1len, unsigned int prec)
1013 : {
1014 177042813 : wide_int result;
1015 177042813 : int l0 = op0len - 1;
1016 177042813 : int l1 = op1len - 1;
1017 177042813 : bool need_canon = true;
1018 :
1019 177042813 : unsigned int len = MAX (op0len, op1len);
1020 177042813 : if (l0 > l1)
1021 : {
1022 65383957 : HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1023 65383957 : if (op1mask != 0)
1024 : {
1025 177042813 : l0 = l1;
1026 177042813 : len = l1 + 1;
1027 : }
1028 : else
1029 : {
1030 130662339 : need_canon = false;
1031 130662339 : while (l0 > l1)
1032 : {
1033 65629512 : val[l0] = op0[l0];
1034 65629512 : l0--;
1035 : }
1036 : }
1037 : }
1038 111658856 : else if (l1 > l0)
1039 : {
1040 65492241 : HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1041 65492241 : if (op0mask != 0)
1042 : len = l0 + 1;
1043 : else
1044 : {
1045 123908015 : need_canon = false;
1046 123908015 : while (l1 > l0)
1047 : {
1048 62134222 : val[l1] = op1[l1];
1049 62134222 : l1--;
1050 : }
1051 : }
1052 : }
1053 :
1054 400742397 : while (l0 >= 0)
1055 : {
1056 223699584 : val[l0] = op0[l0] | op1[l0];
1057 223699584 : l0--;
1058 : }
1059 :
1060 177042813 : if (need_canon)
1061 50236193 : len = canonize (val, len, prec);
1062 :
1063 177042813 : return len;
1064 177042813 : }
1065 :
1066 : /* Set VAL to OP0 | ~OP1. Return the number of blocks used. */
1067 : unsigned int
1068 0 : wi::or_not_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1069 : unsigned int op0len, const HOST_WIDE_INT *op1,
1070 : unsigned int op1len, unsigned int prec)
1071 : {
1072 0 : wide_int result;
1073 0 : int l0 = op0len - 1;
1074 0 : int l1 = op1len - 1;
1075 0 : bool need_canon = true;
1076 :
1077 0 : unsigned int len = MAX (op0len, op1len);
1078 0 : if (l0 > l1)
1079 : {
1080 0 : HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1081 0 : if (op1mask == 0)
1082 : {
1083 0 : l0 = l1;
1084 0 : len = l1 + 1;
1085 : }
1086 : else
1087 : {
1088 0 : need_canon = false;
1089 0 : while (l0 > l1)
1090 : {
1091 0 : val[l0] = op0[l0];
1092 0 : l0--;
1093 : }
1094 : }
1095 : }
1096 0 : else if (l1 > l0)
1097 : {
1098 0 : HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1099 0 : if (op0mask != 0)
1100 : len = l0 + 1;
1101 : else
1102 : {
1103 0 : need_canon = false;
1104 0 : while (l1 > l0)
1105 : {
1106 0 : val[l1] = ~op1[l1];
1107 0 : l1--;
1108 : }
1109 : }
1110 : }
1111 :
1112 0 : while (l0 >= 0)
1113 : {
1114 0 : val[l0] = op0[l0] | ~op1[l0];
1115 0 : l0--;
1116 : }
1117 :
1118 0 : if (need_canon)
1119 0 : len = canonize (val, len, prec);
1120 :
1121 0 : return len;
1122 0 : }
1123 :
1124 : /* Set VAL to OP0 ^ OP1. Return the number of blocks used. */
1125 : unsigned int
1126 34429923 : wi::xor_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
1127 : unsigned int op0len, const HOST_WIDE_INT *op1,
1128 : unsigned int op1len, unsigned int prec)
1129 : {
1130 34429923 : wide_int result;
1131 34429923 : int l0 = op0len - 1;
1132 34429923 : int l1 = op1len - 1;
1133 :
1134 34429923 : unsigned int len = MAX (op0len, op1len);
1135 34429923 : if (l0 > l1)
1136 : {
1137 7326463 : HOST_WIDE_INT op1mask = -top_bit_of (op1, op1len, prec);
1138 14689293 : while (l0 > l1)
1139 : {
1140 7362830 : val[l0] = op0[l0] ^ op1mask;
1141 7362830 : l0--;
1142 : }
1143 : }
1144 :
1145 34429923 : if (l1 > l0)
1146 : {
1147 21444242 : HOST_WIDE_INT op0mask = -top_bit_of (op0, op0len, prec);
1148 43102746 : while (l1 > l0)
1149 : {
1150 21658504 : val[l1] = op0mask ^ op1[l1];
1151 21658504 : l1--;
1152 : }
1153 : }
1154 :
1155 74743646 : while (l0 >= 0)
1156 : {
1157 40313723 : val[l0] = op0[l0] ^ op1[l0];
1158 40313723 : l0--;
1159 : }
1160 :
1161 34429923 : return canonize (val, len, prec);
1162 34429923 : }
1163 :
1164 : /*
1165 : * math
1166 : */
1167 :
/* Set VAL to OP0 + OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::add_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  unsigned HOST_WIDE_INT carry = 0;
  unsigned HOST_WIDE_INT old_carry = 0;
  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* MASK0/MASK1 stand in for each operand's implicit blocks above its
     explicit length: the sign bit smeared across a whole block.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);
  /* Add all of the explicitly defined elements.  */

  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT) op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT) op1[i] : mask1;
      x = o0 + o1 + carry;
      val[i] = x;
      old_carry = carry;
      /* Detect unsigned wraparound: with an incoming carry the sum can
	 equal O0 only when it wrapped.  */
      carry = carry == 0 ? x < o0 : x <= o0;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room below the precision for one more block; emit the
	 sum of the implicit sign blocks plus the final carry.  */
      val[len] = mask0 + mask1 + carry;
      len++;
      if (overflow)
	*overflow
	  = (sgn == UNSIGNED && carry) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves bit PREC-1 into the HWI's sign position.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow iff both operands' top bits agree but the
	     result's top bit (at PREC-1) differs from them.  */
	  unsigned HOST_WIDE_INT x = (val[len - 1] ^ o0) & (val[len - 1] ^ o1);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_UNDERFLOW;
	      else if (o0 < (unsigned HOST_WIDE_INT) val[len - 1])
		*overflow = wi::OVF_OVERFLOW;
	      else
		*overflow = wi::OVF_NONE;
	    }
	  else
	    *overflow = wi::OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_carry)
	    *overflow = (x <= o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  else
	    *overflow = (x < o0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
1240 :
/* Subroutines of the multiplication and division operations.  Unpack
   the first IN_LEN HOST_WIDE_INTs in INPUT into 2 * IN_LEN
   HOST_HALF_WIDE_INTs of RESULT.  The rest of RESULT is filled by
   uncompressing the top bit of INPUT[IN_LEN - 1].  */
static void
wi_unpack (unsigned HOST_HALF_WIDE_INT *result, const HOST_WIDE_INT *input,
	   unsigned int in_len, unsigned int out_len,
	   unsigned int prec, signop sgn)
{
  unsigned int i;
  unsigned int j = 0;
  /* Number of significant bits in the top block, or 0 if PREC is an
     exact multiple of the block size.  */
  unsigned int small_prec = prec & (HOST_BITS_PER_WIDE_INT - 1);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  HOST_WIDE_INT mask;

  /* MASK is the half-block used to extend the value beyond the input:
     all-ones for a negative signed value, zero otherwise.  */
  if (sgn == SIGNED)
    {
      mask = -top_bit_of ((const HOST_WIDE_INT *) input, in_len, prec);
      mask &= HALF_INT_MASK;
    }
  else
    mask = 0;

  /* Split every full block but the top one into low and high halves.  */
  for (i = 0; i < blocks_needed - 1; i++)
    {
      HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
      result[j++] = x;
      result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;
    }

  /* The top block: first reduce it to exactly PREC bits, extending
     per SGN.  */
  HOST_WIDE_INT x = safe_uhwi (input, in_len, i);
  if (small_prec)
    {
      if (sgn == SIGNED)
	x = sext_hwi (x, small_prec);
      else
	x = zext_hwi (x, small_prec);
    }
  result[j++] = x;
  result[j++] = x >> HOST_BITS_PER_HALF_WIDE_INT;

  /* Smear the sign bit.  */
  while (j < out_len)
    result[j++] = mask;
}
1286 :
1287 : /* The inverse of wi_unpack. IN_LEN is the number of input
1288 : blocks and PRECISION is the precision of the result. Return the
1289 : number of blocks in the canonicalized result. */
1290 : static unsigned int
1291 36053594 : wi_pack (HOST_WIDE_INT *result,
1292 : const unsigned HOST_HALF_WIDE_INT *input,
1293 : unsigned int in_len, unsigned int precision)
1294 : {
1295 36053594 : unsigned int i = 0;
1296 36053594 : unsigned int j = 0;
1297 36053594 : unsigned int blocks_needed = BLOCKS_NEEDED (precision);
1298 :
1299 110955862 : while (i + 1 < in_len)
1300 : {
1301 74902268 : result[j++] = ((unsigned HOST_WIDE_INT) input[i]
1302 74902268 : | ((unsigned HOST_WIDE_INT) input[i + 1]
1303 74902268 : << HOST_BITS_PER_HALF_WIDE_INT));
1304 74902268 : i += 2;
1305 : }
1306 :
1307 : /* Handle the case where in_len is odd. For this we zero extend. */
1308 36053594 : if (in_len & 1)
1309 2899502 : result[j++] = (unsigned HOST_WIDE_INT) input[i];
1310 33154092 : else if (j < blocks_needed)
1311 1842440 : result[j++] = 0;
1312 36053594 : return canonize (result, j, precision);
1313 : }
1314 :
/* Multiply Op1 by Op2.  If HIGH is set, only the upper half of the
   result is returned.

   If HIGH is not set, throw away the upper half after the check is
   made to see if it overflows.  Unfortunately there is no better way
   to check for overflow than to do this.  If OVERFLOW is nonnull,
   record in *OVERFLOW whether the result overflowed.  SGN controls
   the signedness and is used to check overflow or if HIGH is set.

   NOTE: Overflow type for signed overflow is not yet implemented.  */
unsigned int
wi::mul_internal (HOST_WIDE_INT *val, const HOST_WIDE_INT *op1val,
		  unsigned int op1len, const HOST_WIDE_INT *op2val,
		  unsigned int op2len, unsigned int prec, signop sgn,
		  wi::overflow_type *overflow, bool high)
{
  unsigned HOST_WIDE_INT o0, o1, k, t;
  unsigned int i;
  unsigned int j;

  /* If the top level routine did not really pass in an overflow, then
     just make sure that we never attempt to set it.  */
  bool needs_overflow = (overflow != 0);
  if (needs_overflow)
    *overflow = wi::OVF_NONE;

  wide_int_ref op1 = wi::storage_ref (op1val, op1len, prec);
  wide_int_ref op2 = wi::storage_ref (op2val, op2len, prec);

  /* This is a surprisingly common case, so do it first.  */
  if (op1 == 0 || op2 == 0)
    {
      val[0] = 0;
      return 1;
    }

#ifdef umul_ppmm
  if (sgn == UNSIGNED)
    {
      /* If the inputs are single HWIs and the output has room for at
	 least two HWIs, we can use umul_ppmm directly.  */
      if (prec >= HOST_BITS_PER_WIDE_INT * 2
	  && wi::fits_uhwi_p (op1)
	  && wi::fits_uhwi_p (op2))
	{
	  /* This case never overflows.  */
	  if (high)
	    {
	      val[0] = 0;
	      return 1;
	    }
	  umul_ppmm (val[1], val[0], op1.ulow (), op2.ulow ());
	  /* If the top block came out negative, add a zero block so
	     the canonical (sign-extended) value stays nonnegative.  */
	  if (val[1] < 0 && prec > HOST_BITS_PER_WIDE_INT * 2)
	    {
	      val[2] = 0;
	      return 3;
	    }
	  return 1 + (val[1] != 0 || val[0] < 0);
	}
      /* Likewise if the output is a full single HWI, except that the
	 upper HWI of the result is only used for determining overflow.
	 (We handle this case inline when overflow isn't needed.)  */
      else if (prec == HOST_BITS_PER_WIDE_INT)
	{
	  unsigned HOST_WIDE_INT upper;
	  umul_ppmm (upper, val[0], op1.ulow (), op2.ulow ());
	  if (needs_overflow)
	    /* Unsigned overflow can only be +OVERFLOW.  */
	    *overflow = (upper != 0) ? wi::OVF_OVERFLOW : wi::OVF_NONE;
	  if (high)
	    val[0] = upper;
	  return 1;
	}
    }
#endif

  /* Handle multiplications by 1.  */
  if (op1 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op2, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op2len; i++)
	val[i] = op2val[i];
      return op2len;
    }
  if (op2 == 1)
    {
      if (high)
	{
	  val[0] = wi::neg_p (op1, sgn) ? -1 : 0;
	  return 1;
	}
      for (i = 0; i < op1len; i++)
	val[i] = op1val[i];
      return op1len;
    }

  /* If we need to check for overflow, we can only do half wide
     multiplies quickly because we need to look at the top bits to
     check for the overflow.  */
  if ((high || needs_overflow)
      && (prec <= HOST_BITS_PER_HALF_WIDE_INT))
    {
      unsigned HOST_WIDE_INT r;

      if (sgn == SIGNED)
	{
	  o0 = op1.to_shwi ();
	  o1 = op2.to_shwi ();
	}
      else
	{
	  o0 = op1.to_uhwi ();
	  o1 = op2.to_uhwi ();
	}

      /* The full product fits in one HWI, so overflow and the high
	 part can be read directly off R.  */
      r = o0 * o1;
      if (needs_overflow)
	{
	  if (sgn == SIGNED)
	    {
	      if ((HOST_WIDE_INT) r != sext_hwi (r, prec))
		/* FIXME: Signed overflow type is not implemented yet.  */
		*overflow = OVF_UNKNOWN;
	    }
	  else
	    {
	      if ((r >> prec) != 0)
		/* Unsigned overflow can only be +OVERFLOW.  */
		*overflow = OVF_OVERFLOW;
	    }
	}
      val[0] = high ? r >> prec : r;
      return 1;
    }

  /* The sizes here are scaled to support a 2x WIDE_INT_MAX_INL_PRECISION by 2x
     WIDE_INT_MAX_INL_PRECISION yielding a 4x WIDE_INT_MAX_INL_PRECISION
     result.  */

  unsigned HOST_HALF_WIDE_INT
    ubuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    vbuf[4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  /* The '2' in 'R' is because we are internally doing a full
     multiply.  */
  unsigned HOST_HALF_WIDE_INT
    rbuf[2 * 4 * WIDE_INT_MAX_INL_PRECISION / HOST_BITS_PER_HALF_WIDE_INT];
  const HOST_WIDE_INT mask
    = (HOST_WIDE_INT_1 << HOST_BITS_PER_HALF_WIDE_INT) - 1;
  unsigned HOST_HALF_WIDE_INT *u = ubuf;
  unsigned HOST_HALF_WIDE_INT *v = vbuf;
  unsigned HOST_HALF_WIDE_INT *r = rbuf;

  /* When only the low part is wanted, clamp the working precision to
     the number of blocks the product can actually occupy.  */
  if (!high)
    prec = MIN ((op1len + op2len + 1) * HOST_BITS_PER_WIDE_INT, prec);
  unsigned int blocks_needed = BLOCKS_NEEDED (prec);
  unsigned int half_blocks_needed = blocks_needed * 2;
  /* Fall back to heap-sized scratch buffers for very wide values.  */
  if (UNLIKELY (prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
	= XALLOCAVEC (unsigned HOST_HALF_WIDE_INT, 4 * half_blocks_needed);
      u = buf;
      v = u + half_blocks_needed;
      r = v + half_blocks_needed;
    }

  /* We do unsigned mul and then correct it.  */
  wi_unpack (u, op1val, op1len, half_blocks_needed, prec, UNSIGNED);
  wi_unpack (v, op2val, op2len, half_blocks_needed, prec, UNSIGNED);

  /* The 2 is for a full mult.  */
  memset (r, 0, half_blocks_needed * 2
	  * HOST_BITS_PER_HALF_WIDE_INT / CHAR_BIT);

  /* Schoolbook multiply on half-word digits, accumulating into R with
     carry K.  */
  for (j = 0; j < half_blocks_needed; j++)
    {
      k = 0;
      for (i = 0; i < half_blocks_needed; i++)
	{
	  t = ((unsigned HOST_WIDE_INT)u[i] * (unsigned HOST_WIDE_INT)v[j]
	       + r[i + j] + k);
	  r[i + j] = t & HALF_INT_MASK;
	  k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	}
      r[j + half_blocks_needed] = k;
    }

  unsigned int shift;
  if ((high || needs_overflow) && (shift = prec % HOST_BITS_PER_WIDE_INT) != 0)
    {
      /* The high or needs_overflow code assumes that the high bits
	 only appear from r[half_blocks_needed] up to
	 r[half_blocks_needed * 2 - 1].  If prec is not a multiple
	 of HOST_BITS_PER_WIDE_INT, shift the bits above prec up
	 to make that code simple.  */
      if (shift == HOST_BITS_PER_HALF_WIDE_INT)
	memmove (&r[half_blocks_needed], &r[half_blocks_needed - 1],
		 sizeof (r[0]) * half_blocks_needed);
      else
	{
	  unsigned int skip = shift < HOST_BITS_PER_HALF_WIDE_INT;
	  if (!skip)
	    shift -= HOST_BITS_PER_HALF_WIDE_INT;
	  for (i = 2 * half_blocks_needed - 1; i >= half_blocks_needed; i--)
	    r[i] = ((r[i - skip] << (-shift % HOST_BITS_PER_HALF_WIDE_INT))
		    | (r[i - skip - 1] >> shift));
	}
    }

  /* We did unsigned math above.  For signed we must adjust the
     product (assuming we need to see that).  */
  if (sgn == SIGNED && (high || needs_overflow))
    {
      /* Subtract the other operand from the high half for each
	 negative operand, with borrow B.  */
      unsigned HOST_WIDE_INT b;
      if (wi::neg_p (op1))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)v[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
      if (wi::neg_p (op2))
	{
	  b = 0;
	  for (i = 0; i < half_blocks_needed; i++)
	    {
	      t = (unsigned HOST_WIDE_INT)r[i + half_blocks_needed]
		- (unsigned HOST_WIDE_INT)u[i] - b;
	      r[i + half_blocks_needed] = t & HALF_INT_MASK;
	      b = t >> (HOST_BITS_PER_WIDE_INT - 1);
	    }
	}
    }

  if (needs_overflow)
    {
      HOST_WIDE_INT top;

      /* For unsigned, overflow is true if any of the top bits are set.
	 For signed, overflow is true if any of the top bits are not equal
	 to the sign bit.  */
      if (sgn == UNSIGNED)
	top = 0;
      else
	{
	  top = r[half_blocks_needed - 1
		  - ((-prec % HOST_BITS_PER_WIDE_INT)
		     >= HOST_BITS_PER_HALF_WIDE_INT)];
	  top = SIGN_MASK (((unsigned HOST_WIDE_INT) top)
			   << (HOST_BITS_PER_WIDE_INT / 2
			       + (-prec % HOST_BITS_PER_HALF_WIDE_INT)));
	  top &= mask;
	}

      unsigned int end = half_blocks_needed * 2;
      shift = prec % HOST_BITS_PER_WIDE_INT;
      if (shift)
	{
	  /* For overflow checking only look at the first prec bits
	     starting with r[half_blocks_needed].  */
	  if (shift <= HOST_BITS_PER_HALF_WIDE_INT)
	    --end;
	  shift %= HOST_BITS_PER_HALF_WIDE_INT;
	  if (shift)
	    {
	      /* Force the bits above PREC in the top examined
		 half-block to match TOP so they don't trip the
		 comparison below.  */
	      if (top)
		r[end - 1] |= ((~(unsigned HOST_HALF_WIDE_INT) 0) << shift);
	      else
		r[end - 1] &= (((unsigned HOST_HALF_WIDE_INT) 1) << shift) - 1;
	    }
	}
      for (i = half_blocks_needed; i < end; i++)
	if (((HOST_WIDE_INT)(r[i] & mask)) != top)
	  /* FIXME: Signed overflow type is not implemented yet.  */
	  *overflow = (sgn == UNSIGNED) ? wi::OVF_OVERFLOW : wi::OVF_UNKNOWN;
    }

  /* For a high multiply, the wanted half starts at
     r[half_blocks_needed].  */
  int r_offset = high ? half_blocks_needed : 0;
  return wi_pack (val, &r[r_offset], half_blocks_needed, prec);
}
1603 :
1604 : /* Compute the population count of X. */
1605 : int
1606 145227341 : wi::popcount (const wide_int_ref &x)
1607 : {
1608 145227341 : unsigned int i;
1609 145227341 : int count;
1610 :
1611 : /* The high order block is special if it is the last block and the
1612 : precision is not an even multiple of HOST_BITS_PER_WIDE_INT. We
1613 : have to clear out any ones above the precision before doing
1614 : popcount on this block. */
1615 145227341 : count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;
1616 145227341 : unsigned int stop = x.len;
1617 145227341 : if (count < 0)
1618 : {
1619 63879207 : count = popcount_hwi (x.uhigh () << -count);
1620 63879207 : stop -= 1;
1621 : }
1622 : else
1623 : {
1624 81348134 : if (x.sign_mask () >= 0)
1625 66841496 : count = 0;
1626 : }
1627 :
1628 227076959 : for (i = 0; i < stop; ++i)
1629 81849618 : count += popcount_hwi (x.val[i]);
1630 :
1631 145227341 : return count;
1632 : }
1633 :
/* Set VAL to OP0 - OP1.  If OVERFLOW is nonnull, record in *OVERFLOW
   whether the result overflows when OP0 and OP1 are treated as having
   signedness SGN.  Return the number of blocks in VAL.  */
unsigned int
wi::sub_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *op0,
	       unsigned int op0len, const HOST_WIDE_INT *op1,
	       unsigned int op1len, unsigned int prec,
	       signop sgn, wi::overflow_type *overflow)
{
  unsigned HOST_WIDE_INT o0 = 0;
  unsigned HOST_WIDE_INT o1 = 0;
  unsigned HOST_WIDE_INT x = 0;
  /* We implement subtraction as an in place negate and add.  Negation
     is just inversion and add 1, so we can do the add of 1 by just
     starting the borrow in of the first element at 1.  */
  unsigned HOST_WIDE_INT borrow = 0;
  unsigned HOST_WIDE_INT old_borrow = 0;

  unsigned HOST_WIDE_INT mask0, mask1;
  unsigned int i;

  unsigned int len = MAX (op0len, op1len);
  /* MASK0/MASK1 stand in for each operand's implicit blocks above its
     explicit length: the sign bit smeared across a whole block.  */
  mask0 = -top_bit_of (op0, op0len, prec);
  mask1 = -top_bit_of (op1, op1len, prec);

  /* Subtract all of the explicitly defined elements.  */
  for (i = 0; i < len; i++)
    {
      o0 = i < op0len ? (unsigned HOST_WIDE_INT)op0[i] : mask0;
      o1 = i < op1len ? (unsigned HOST_WIDE_INT)op1[i] : mask1;
      x = o0 - o1 - borrow;
      val[i] = x;
      old_borrow = borrow;
      /* A borrow propagates when the minuend digit is smaller than
	 (or, with an incoming borrow, not larger than) the
	 subtrahend.  */
      borrow = borrow == 0 ? o0 < o1 : o0 <= o1;
    }

  if (len * HOST_BITS_PER_WIDE_INT < prec)
    {
      /* There is room below the precision for one more block; emit the
	 difference of the implicit sign blocks minus the borrow.  */
      val[len] = mask0 - mask1 - borrow;
      len++;
      if (overflow)
	*overflow = (sgn == UNSIGNED && borrow) ? OVF_UNDERFLOW : OVF_NONE;
    }
  else if (overflow)
    {
      /* SHIFT moves bit PREC-1 into the HWI's sign position.  */
      unsigned int shift = -prec % HOST_BITS_PER_WIDE_INT;
      if (sgn == SIGNED)
	{
	  /* Signed overflow iff the operands' top bits differ and the
	     result's top bit (at PREC-1) differs from OP0's.  */
	  unsigned HOST_WIDE_INT x = (o0 ^ o1) & (val[len - 1] ^ o0);
	  if ((HOST_WIDE_INT) (x << shift) < 0)
	    {
	      if (o0 > o1)
		*overflow = OVF_UNDERFLOW;
	      else if (o0 < o1)
		*overflow = OVF_OVERFLOW;
	      else
		*overflow = OVF_NONE;
	    }
	  else
	    *overflow = OVF_NONE;
	}
      else
	{
	  /* Put the MSB of X and O0 and in the top of the HWI.  */
	  x <<= shift;
	  o0 <<= shift;
	  if (old_borrow)
	    *overflow = (x >= o0) ? OVF_UNDERFLOW : OVF_NONE;
	  else
	    *overflow = (x > o0) ? OVF_UNDERFLOW : OVF_NONE;
	}
    }

  return canonize (val, len, prec);
}
1709 :
1710 :
1711 : /*
1712 : * Division and Mod
1713 : */
1714 :
/* Compute B_QUOTIENT and B_REMAINDER from B_DIVIDEND/B_DIVISOR.  The
   algorithm is a small modification of the algorithm in Hacker's
   Delight by Warren, which itself is a small modification of Knuth's
   algorithm.  M is the number of significant elements of U however
   there needs to be at least one extra element of B_DIVIDEND
   allocated, N is the number of elements of B_DIVISOR.
   Return new value for N.  */
static int
divmod_internal_2 (unsigned HOST_HALF_WIDE_INT *b_quotient,
		   unsigned HOST_HALF_WIDE_INT *b_remainder,
		   unsigned HOST_HALF_WIDE_INT *b_dividend,
		   unsigned HOST_HALF_WIDE_INT *b_divisor,
		   int m, int n)
{
  /* The "digits" are a HOST_HALF_WIDE_INT which the size of half of a
     HOST_WIDE_INT and stored in the lower bits of each word.  This
     algorithm should work properly on both 32 and 64 bit
     machines.  */
  unsigned HOST_WIDE_INT b = HOST_WIDE_INT_1U << HOST_BITS_PER_HALF_WIDE_INT;
  unsigned HOST_WIDE_INT qhat;	/* Estimate of quotient digit.  */
  unsigned HOST_WIDE_INT rhat;	/* A remainder.  */
  unsigned HOST_WIDE_INT p;	/* Product of two digits.  */
  HOST_WIDE_INT t, k;
  int i, j, s;

  /* Single digit divisor.  */
  if (n == 1)
    {
      /* Short division: carry the running remainder K through the
	 dividend digits from most to least significant.  */
      k = 0;
      for (j = m - 1; j >= 0; j--)
	{
	  b_quotient[j] = (k * b + b_dividend[j])/b_divisor[0];
	  k = ((k * b + b_dividend[j])
	       - ((unsigned HOST_WIDE_INT)b_quotient[j]
		  * (unsigned HOST_WIDE_INT)b_divisor[0]));
	}
      b_remainder[0] = k;
      return 1;
    }

  /* Knuth step D1: S is the shift that normalizes the divisor so its
     top digit has its high bit set.  */
  s = clz_hwi (b_divisor[n-1]) - HOST_BITS_PER_HALF_WIDE_INT; /* CHECK clz */

  if (s)
    {
      /* Normalize B_DIVIDEND and B_DIVISOR.  Unlike the published
	 algorithm, we can overwrite b_dividend and b_divisor, so we do
	 that.  */
      for (i = n - 1; i > 0; i--)
	b_divisor[i] = (b_divisor[i] << s)
	  | (b_divisor[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_divisor[0] = b_divisor[0] << s;

      b_dividend[m] = b_dividend[m-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s);
      for (i = m - 1; i > 0; i--)
	b_dividend[i] = (b_dividend[i] << s)
	  | (b_dividend[i-1] >> (HOST_BITS_PER_HALF_WIDE_INT - s));
      b_dividend[0] = b_dividend[0] << s;
    }

  /* Main loop.  */
  for (j = m - n; j >= 0; j--)
    {
      /* Step D3: estimate the quotient digit QHAT from the top two
	 dividend digits, then correct an estimate that is too large.  */
      qhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) / b_divisor[n-1];
      rhat = (b_dividend[j+n] * b + b_dividend[j+n-1]) - qhat * b_divisor[n-1];
    again:
      if (qhat >= b || qhat * b_divisor[n-2] > b * rhat + b_dividend[j+n-2])
	{
	  qhat -= 1;
	  rhat += b_divisor[n-1];
	  if (rhat < b)
	    goto again;
	}

      /* Multiply and subtract.  */
      /* Step D4: subtract QHAT * divisor from the dividend window,
	 propagating the borrow in K.  */
      k = 0;
      for (i = 0; i < n; i++)
	{
	  p = qhat * b_divisor[i];
	  t = b_dividend[i+j] - k - (p & HALF_INT_MASK);
	  b_dividend[i + j] = t;
	  k = ((p >> HOST_BITS_PER_HALF_WIDE_INT)
	       - (t >> HOST_BITS_PER_HALF_WIDE_INT));
	}
      t = b_dividend[j+n] - k;
      b_dividend[j+n] = t;

      b_quotient[j] = qhat;
      if (t < 0)
	{
	  /* Step D6: QHAT was one too large after all; decrement it
	     and add the divisor back in.  */
	  b_quotient[j] -= 1;
	  k = 0;
	  for (i = 0; i < n; i++)
	    {
	      t = (HOST_WIDE_INT)b_dividend[i+j] + b_divisor[i] + k;
	      b_dividend[i+j] = t;
	      k = t >> HOST_BITS_PER_HALF_WIDE_INT;
	    }
	  b_dividend[j+n] += k;
	}
    }
  /* If N > M, the main loop was skipped, quotient will be 0 and
     we can't copy more than M half-limbs into the remainder, as they
     aren't present in b_dividend (which has only M significant
     half-limbs).  */
  n = MIN (n, m);
  /* Step D8: undo the normalization shift to recover the remainder.  */
  if (s)
    for (i = 0; i < n; i++)
      b_remainder[i] = (b_dividend[i] >> s)
	| (b_dividend[i+1] << (HOST_BITS_PER_HALF_WIDE_INT - s));
  else
    for (i = 0; i < n; i++)
      b_remainder[i] = b_dividend[i];
  return n;
}
1828 :
1829 :
/* Divide DIVIDEND by DIVISOR, which have signedness SGN, and truncate
   the result.  If QUOTIENT is nonnull, store the value of the quotient
   there and return the number of blocks in it.  The return value is
   not defined otherwise.  If REMAINDER is nonnull, store the value
   of the remainder there and store the number of blocks in
   *REMAINDER_LEN.  If OFLOW is not null, store in *OFLOW whether
   the division overflowed.  */
unsigned int
wi::divmod_internal (HOST_WIDE_INT *quotient, unsigned int *remainder_len,
		     HOST_WIDE_INT *remainder,
		     const HOST_WIDE_INT *dividend_val,
		     unsigned int dividend_len, unsigned int dividend_prec,
		     const HOST_WIDE_INT *divisor_val, unsigned int divisor_len,
		     unsigned int divisor_prec, signop sgn,
		     wi::overflow_type *oflow)
{
  unsigned int m, n;
  bool dividend_neg = false;
  bool divisor_neg = false;
  bool overflow = false;
  wide_int neg_dividend, neg_divisor;

  /* Non-owning views over the operands; cheap to rebind when we
     negate below.  */
  wide_int_ref dividend = wi::storage_ref (dividend_val, dividend_len,
					   dividend_prec);
  wide_int_ref divisor = wi::storage_ref (divisor_val, divisor_len,
					  divisor_prec);
  if (divisor == 0)
    overflow = true;

  /* The smallest signed number / -1 causes overflow.  The dividend_len
     check is for speed rather than correctness.  */
  if (sgn == SIGNED
      && dividend_len == BLOCKS_NEEDED (dividend_prec)
      && divisor == -1
      && wi::only_sign_bit_p (dividend))
    overflow = true;

  /* Handle the overflow cases.  Viewed as unsigned value, the quotient of
     (signed min / -1) has the same representation as the original dividend.
     We have traditionally made division by zero act as division by one,
     so there too we use the original dividend.  */
  if (overflow)
    {
      if (remainder)
	{
	  *remainder_len = 1;
	  remainder[0] = 0;
	}
      if (oflow)
	*oflow = OVF_OVERFLOW;
      if (quotient)
	for (unsigned int i = 0; i < dividend_len; ++i)
	  quotient[i] = dividend_val[i];
      return dividend_len;
    }

  if (oflow)
    *oflow = OVF_NONE;

  /* Do it on the host if you can.  */
  if (sgn == SIGNED
      && wi::fits_shwi_p (dividend)
      && wi::fits_shwi_p (divisor))
    {
      HOST_WIDE_INT o0 = dividend.to_shwi ();
      HOST_WIDE_INT o1 = divisor.to_shwi ();

      /* HOST_WIDE_INT_MIN / -1 would trap in host arithmetic; handle it
	 explicitly.  It is only reachable when the values fit in a HWI
	 but the precision is wider (hence the assert and 2-block
	 result).  */
      if (o0 == HOST_WIDE_INT_MIN && o1 == -1)
	{
	  gcc_checking_assert (dividend_prec > HOST_BITS_PER_WIDE_INT);
	  if (quotient)
	    {
	      quotient[0] = HOST_WIDE_INT_MIN;
	      quotient[1] = 0;
	    }
	  if (remainder)
	    {
	      remainder[0] = 0;
	      *remainder_len = 1;
	    }
	  return 2;
	}
      else
	{
	  if (quotient)
	    quotient[0] = o0 / o1;
	  if (remainder)
	    {
	      remainder[0] = o0 % o1;
	      *remainder_len = 1;
	    }
	  return 1;
	}
    }

  if (sgn == UNSIGNED
      && wi::fits_uhwi_p (dividend)
      && wi::fits_uhwi_p (divisor))
    {
      unsigned HOST_WIDE_INT o0 = dividend.to_uhwi ();
      unsigned HOST_WIDE_INT o1 = divisor.to_uhwi ();
      unsigned int quotient_len = 1;

      if (quotient)
	{
	  quotient[0] = o0 / o1;
	  quotient_len = canonize_uhwi (quotient, dividend_prec);
	}
      if (remainder)
	{
	  remainder[0] = o0 % o1;
	  *remainder_len = canonize_uhwi (remainder, dividend_prec);
	}
      return quotient_len;
    }

  /* Make the divisor and dividend positive and remember what we
     did.  */
  if (sgn == SIGNED)
    {
      if (wi::neg_p (dividend))
	{
	  neg_dividend = -dividend;
	  dividend = neg_dividend;
	  dividend_neg = true;
	}
      if (wi::neg_p (divisor))
	{
	  neg_divisor = -divisor;
	  divisor = neg_divisor;
	  divisor_neg = true;
	}
    }

  /* Working buffers in half-HWI limbs for the schoolbook division in
     divmod_internal_2.  The dividend buffer has one extra limb for the
     normalization shift.  */
  unsigned HOST_HALF_WIDE_INT
    b_quotient_buf[4 * WIDE_INT_MAX_INL_PRECISION
		   / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_remainder_buf[4 * WIDE_INT_MAX_INL_PRECISION
		    / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT
    b_dividend_buf[(4 * WIDE_INT_MAX_INL_PRECISION
		    / HOST_BITS_PER_HALF_WIDE_INT) + 1];
  unsigned HOST_HALF_WIDE_INT
    b_divisor_buf[4 * WIDE_INT_MAX_INL_PRECISION
		  / HOST_BITS_PER_HALF_WIDE_INT];
  unsigned HOST_HALF_WIDE_INT *b_quotient = b_quotient_buf;
  unsigned HOST_HALF_WIDE_INT *b_remainder = b_remainder_buf;
  unsigned HOST_HALF_WIDE_INT *b_dividend = b_dividend_buf;
  unsigned HOST_HALF_WIDE_INT *b_divisor = b_divisor_buf;

  /* Clamp the precisions to what the (positive) values actually need,
     so the unpack below does minimal work.  */
  if (sgn == SIGNED || dividend_val[dividend_len - 1] >= 0)
    dividend_prec = MIN ((dividend_len + 1) * HOST_BITS_PER_WIDE_INT,
			 dividend_prec);
  if (sgn == SIGNED || divisor_val[divisor_len - 1] >= 0)
    divisor_prec = MIN (divisor_len * HOST_BITS_PER_WIDE_INT, divisor_prec);
  unsigned int dividend_blocks_needed = 2 * BLOCKS_NEEDED (dividend_prec);
  unsigned int divisor_blocks_needed = 2 * BLOCKS_NEEDED (divisor_prec);
  /* For precisions beyond the inline limit, switch to a single
     alloca'd buffer carved into the four arrays.  */
  if (UNLIKELY (dividend_prec > WIDE_INT_MAX_INL_PRECISION)
      || UNLIKELY (divisor_prec > WIDE_INT_MAX_INL_PRECISION))
    {
      unsigned HOST_HALF_WIDE_INT *buf
	= XALLOCAVEC (unsigned HOST_HALF_WIDE_INT,
		      3 * dividend_blocks_needed + 1
		      + divisor_blocks_needed);
      b_quotient = buf;
      b_remainder = b_quotient + dividend_blocks_needed;
      b_dividend = b_remainder + dividend_blocks_needed;
      b_divisor = b_dividend + dividend_blocks_needed + 1;
      memset (b_quotient, 0,
	      dividend_blocks_needed * sizeof (HOST_HALF_WIDE_INT));
    }
  wi_unpack (b_dividend, dividend.get_val (), dividend.get_len (),
	     dividend_blocks_needed, dividend_prec, UNSIGNED);
  wi_unpack (b_divisor, divisor.get_val (), divisor.get_len (),
	     divisor_blocks_needed, divisor_prec, UNSIGNED);

  /* M and N are the significant limb counts of dividend and divisor.  */
  m = dividend_blocks_needed;
  b_dividend[m] = 0;
  while (m > 1 && b_dividend[m - 1] == 0)
    m--;

  n = divisor_blocks_needed;
  while (n > 1 && b_divisor[n - 1] == 0)
    n--;

  if (b_quotient == b_quotient_buf)
    memset (b_quotient_buf, 0, sizeof (b_quotient_buf));

  n = divmod_internal_2 (b_quotient, b_remainder, b_dividend, b_divisor, m, n);

  unsigned int quotient_len = 0;
  if (quotient)
    {
      quotient_len = wi_pack (quotient, b_quotient, m, dividend_prec);
      /* The quotient is neg if exactly one of the divisor or dividend is
	 neg.  */
      if (dividend_neg != divisor_neg)
	quotient_len = wi::sub_large (quotient, zeros, 1, quotient,
				      quotient_len, dividend_prec,
				      UNSIGNED, 0);
    }

  if (remainder)
    {
      *remainder_len = wi_pack (remainder, b_remainder, n, dividend_prec);
      /* The remainder is always the same sign as the dividend.  */
      if (dividend_neg)
	*remainder_len = wi::sub_large (remainder, zeros, 1, remainder,
					*remainder_len, dividend_prec,
					UNSIGNED, 0);
    }

  return quotient_len;
}
2045 :
2046 : /*
2047 : * Shifting, rotating and extraction.
2048 : */
2049 :
/* Left shift XVAL by SHIFT and store the result in VAL.  Return the
   number of blocks in VAL.  Both XVAL and VAL have PRECISION bits.  */
unsigned int
wi::lshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		  unsigned int xlen, unsigned int precision,
		  unsigned int shift)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* The whole-block shift fills with zeros.  */
  unsigned int len = BLOCKS_NEEDED (precision);
  /* XLEN + SKIP + 1 allows for the extra carry-out block; never write
     more blocks than PRECISION requires.  */
  len = MIN (xlen + skip + 1, len);
  for (unsigned int i = 0; i < skip; ++i)
    val[i] = 0;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = skip; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i - skip);
  else
    {
      /* The first unfilled output block is a left shift of the first
	 block in XVAL.  The other output blocks contain bits from two
	 consecutive input blocks.  */
      unsigned HOST_WIDE_INT carry = 0;
      for (unsigned int i = skip; i < len; ++i)
	{
	  unsigned HOST_WIDE_INT x = safe_uhwi (xval, xlen, i - skip);
	  val[i] = (x << small_shift) | carry;
	  /* -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT equals
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT, written so the shift
	     count stays in range (SMALL_SHIFT is nonzero here).  */
	  carry = x >> (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
  return canonize (val, len, precision);
}
2086 :
/* Right shift XVAL by SHIFT and store the result in VAL.  LEN is the
   number of blocks in VAL.  The input has XPRECISION bits and the
   output has XPRECISION - SHIFT bits.  */
static void
rshift_large_common (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		     unsigned int xlen, unsigned int shift, unsigned int len)
{
  /* Split the shift into a whole-block shift and a subblock shift.  */
  unsigned int skip = shift / HOST_BITS_PER_WIDE_INT;
  unsigned int small_shift = shift % HOST_BITS_PER_WIDE_INT;

  /* It's easier to handle the simple block case specially.  */
  if (small_shift == 0)
    for (unsigned int i = 0; i < len; ++i)
      val[i] = safe_uhwi (xval, xlen, i + skip);
  else
    {
      /* Each output block but the last is a combination of two input blocks.
	 The last block is a right shift of the last block in XVAL.  */
      unsigned HOST_WIDE_INT curr = safe_uhwi (xval, xlen, skip);
      for (unsigned int i = 0; i < len; ++i)
	{
	  val[i] = curr >> small_shift;
	  curr = safe_uhwi (xval, xlen, i + skip + 1);
	  /* -SMALL_SHIFT % HOST_BITS_PER_WIDE_INT is
	     HOST_BITS_PER_WIDE_INT - SMALL_SHIFT with an always-valid
	     shift count (SMALL_SHIFT is nonzero on this path).  */
	  val[i] |= curr << (-small_shift % HOST_BITS_PER_WIDE_INT);
	}
    }
}
2115 :
/* Logically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::lrshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = blocks_needed;
  /* When the top explicit block is nonnegative, the implicit upper
     blocks are zero, so XLEN output blocks suffice.  */
  if (len > xlen && xval[xlen - 1] >= 0)
    len = xlen;

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Zero-extend it to wider precisions.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = zext_hwi (val[len - 1], small_prec);
      else if (val[len - 1] < 0)
	{
	  /* Add a new block with a zero.  */
	  val[len++] = 0;
	  return len;
	}
    }
  return canonize (val, len, precision);
}
2149 :
/* Arithmetically right shift XVAL by SHIFT and store the result in VAL.
   Return the number of blocks in VAL.  XVAL has XPRECISION bits and
   VAL has PRECISION bits.  */
unsigned int
wi::arshift_large (HOST_WIDE_INT *val, const HOST_WIDE_INT *xval,
		   unsigned int xlen, unsigned int xprecision,
		   unsigned int precision, unsigned int shift)
{
  /* Work out how many blocks are needed to store the significant bits
     (excluding the upper zeros or signs).  */
  unsigned int blocks_needed = BLOCKS_NEEDED (xprecision - shift);
  unsigned int len = MIN (xlen, blocks_needed);

  rshift_large_common (val, xval, xlen, shift, len);

  /* The value we just created has precision XPRECISION - SHIFT.
     Sign-extend it to wider types.  */
  if (precision > xprecision - shift && len == blocks_needed)
    {
      unsigned int small_prec = (xprecision - shift) % HOST_BITS_PER_WIDE_INT;
      if (small_prec)
	val[len - 1] = sext_hwi (val[len - 1], small_prec);
    }
  return canonize (val, len, precision);
}
2175 :
/* Return the number of leading (upper) zeros in X.  */
int
wi::clz (const wide_int_ref &x)
{
  if (x.sign_mask () < 0)
    /* The upper bit is set, so there are no leading zeros.  */
    return 0;

  /* Calculate how many bits there above the highest represented block.
     Negative when the top block contains bits beyond the precision.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  if (count < 0)
    /* The upper -COUNT bits of HIGH are not part of the value.
       Clear them out.  */
    high = (high << -count) >> -count;

  /* We don't need to look below HIGH.  Either HIGH is nonzero,
     or the top bit of the block below is nonzero; clz_hwi is
     HOST_BITS_PER_WIDE_INT in the latter case.  */
  return count + clz_hwi (high);
}
2198 :
/* Return the number of redundant sign bits in X.  (That is, the number
   of bits immediately below the sign bit that have the same value as
   the sign bit.)  */
int
wi::clrsb (const wide_int_ref &x)
{
  /* Calculate how many bits there above the highest represented block.  */
  int count = x.precision - x.len * HOST_BITS_PER_WIDE_INT;

  unsigned HOST_WIDE_INT high = x.uhigh ();
  unsigned HOST_WIDE_INT mask = -1;
  if (count < 0)
    {
      /* The upper -COUNT bits of HIGH are not part of the value.
	 Clear them from both MASK and HIGH.  */
      mask >>= -count;
      high &= mask;
    }

  /* If the top bit is 1, count the number of leading 1s.  If the top
     bit is zero, count the number of leading zeros.  XORing with MASK
     flips a negative value to its complement so clz_hwi counts the
     sign-copies either way.  */
  if (high > mask / 2)
    high ^= mask;

  /* There are no sign bits below the top block, so we don't need to look
     beyond HIGH.  Note that clz_hwi is HOST_BITS_PER_WIDE_INT when
     HIGH is 0.  The - 1 excludes the sign bit itself.  */
  return count + clz_hwi (high) - 1;
}
2228 :
2229 : /* Return the number of trailing (lower) zeros in X. */
2230 : int
2231 503001283 : wi::ctz (const wide_int_ref &x)
2232 : {
2233 503001283 : if (x.len == 1 && x.ulow () == 0)
2234 42223495 : return x.precision;
2235 :
2236 : /* Having dealt with the zero case, there must be a block with a
2237 : nonzero bit. We don't care about the bits above the first 1. */
2238 : unsigned int i = 0;
2239 460894695 : while (x.val[i] == 0)
2240 116907 : ++i;
2241 460777788 : return i * HOST_BITS_PER_WIDE_INT + ctz_hwi (x.val[i]);
2242 : }
2243 :
/* If X is an exact power of 2, return the base-2 logarithm, otherwise
   return -1.  */
int
wi::exact_log2 (const wide_int_ref &x)
{
  /* Reject cases where there are implicit -1 blocks above HIGH.  */
  if (x.len * HOST_BITS_PER_WIDE_INT < x.precision && x.sign_mask () < 0)
    return -1;

  /* Set CRUX to the index of the entry that should be nonzero.
     If the top block is zero then the next lowest block (if any)
     must have the high bit set.  */
  unsigned int crux = x.len - 1;
  if (crux > 0 && x.val[crux] == 0)
    crux -= 1;

  /* Check that all lower blocks are zero.  */
  for (unsigned int i = 0; i < crux; ++i)
    if (x.val[i] != 0)
      return -1;

  /* Get a zero-extended form of block CRUX.  Bits above the precision
     are not part of the value and must be ignored.  */
  unsigned HOST_WIDE_INT hwi = x.val[crux];
  if ((crux + 1) * HOST_BITS_PER_WIDE_INT > x.precision)
    hwi = zext_hwi (hwi, x.precision % HOST_BITS_PER_WIDE_INT);

  /* Now it's down to whether HWI is a power of 2.  */
  int res = ::exact_log2 (hwi);
  if (res >= 0)
    res += crux * HOST_BITS_PER_WIDE_INT;
  return res;
}
2276 :
2277 : /* Return the base-2 logarithm of X, rounding down. Return -1 if X is 0. */
2278 : int
2279 10626123 : wi::floor_log2 (const wide_int_ref &x)
2280 : {
2281 10626123 : return x.precision - 1 - clz (x);
2282 : }
2283 :
2284 : /* Return the index of the first (lowest) set bit in X, counting from 1.
2285 : Return 0 if X is 0. */
2286 : int
2287 491 : wi::ffs (const wide_int_ref &x)
2288 : {
2289 491 : return eq_p (x, 0) ? 0 : ctz (x) + 1;
2290 : }
2291 :
2292 : /* Return true if sign-extending X to have precision PRECISION would give
2293 : the minimum signed value at that precision. */
2294 : bool
2295 36535021 : wi::only_sign_bit_p (const wide_int_ref &x, unsigned int precision)
2296 : {
2297 36535021 : return ctz (x) + 1 == int (precision);
2298 : }
2299 :
2300 : /* Return true if X represents the minimum signed value. */
2301 : bool
2302 34389433 : wi::only_sign_bit_p (const wide_int_ref &x)
2303 : {
2304 34389433 : return only_sign_bit_p (x, x.precision);
2305 : }
2306 :
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   down to the previous value that has no bits set outside MASK.
   This rounding wraps for signed values if VAL is negative and
   the top bit of MASK is clear.

   For example, round_down_for_mask (6, 0xf1) would give 1 and
   round_down_for_mask (24, 0xf1) would give 17.  */

wide_int
wi::round_down_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    /* VAL is already a valid value; nothing to round.  */
    return val;

  /* Get a mask that includes the top bit in EXTRA_BITS and is all 1s
     below that bit.  */
  unsigned int precision = val.get_precision ();
  wide_int lower_mask = wi::mask (precision - wi::clz (extra_bits),
				  false, precision);

  /* Clear the bits that aren't in MASK, but ensure that all bits
     in MASK below the top cleared bit are set.  */
  return (val & mask) | (mask & lower_mask);
}
2333 :
/* Return VAL if VAL has no bits set outside MASK.  Otherwise round VAL
   up to the next value that has no bits set outside MASK.  The rounding
   wraps if there are no suitable values greater than VAL.

   For example, round_up_for_mask (6, 0xf1) would give 16 and
   round_up_for_mask (24, 0xf1) would give 32.  */

wide_int
wi::round_up_for_mask (const wide_int &val, const wide_int &mask)
{
  /* Get the bits in VAL that are outside the mask.  */
  wide_int extra_bits = wi::bit_and_not (val, mask);
  if (extra_bits == 0)
    /* VAL is already a valid value; nothing to round.  */
    return val;

  /* Get a mask that is all 1s above the top bit in EXTRA_BITS.  */
  unsigned int precision = val.get_precision ();
  wide_int upper_mask = wi::mask (precision - wi::clz (extra_bits),
				  true, precision);

  /* Get the bits of the mask that are above the top bit in EXTRA_BITS.  */
  upper_mask &= mask;

  /* Conceptually we need to:

     - clear bits of VAL outside UPPER_MASK
     - add the lowest bit in UPPER_MASK to VAL (or add 0 if UPPER_MASK is 0)
     - propagate the carry through the bits of VAL in UPPER_MASK

     If (~VAL & UPPER_MASK) is nonzero, the carry eventually
     reaches that bit and the process leaves all lower bits clear.
     If (~VAL & UPPER_MASK) is zero then the result is also zero.  */
  wide_int tmp = wi::bit_and_not (upper_mask, val);

  /* -TMP isolates the lowest set bit of TMP, so the AND keeps exactly
     the carried-into bit and the UPPER_MASK bits above it.  */
  return (val | tmp) & -tmp;
}
2370 :
/* Compute the modular multiplicative inverse of A modulo B
   using extended Euclid's algorithm.  Assumes A and B are coprime,
   and that A and B have the same precision.  */
wide_int
wi::mod_inv (const wide_int &a, const wide_int &b)
{
  /* Verify the assumption.  */
  gcc_checking_assert (wi::eq_p (wi::gcd (a, b), 1));

  /* Work at one bit wider than the inputs so the signed intermediate
     coefficients (X0, X1) cannot overflow.  */
  unsigned int p = a.get_precision () + 1;
  gcc_checking_assert (b.get_precision () + 1 == p);
  wide_int c = wide_int::from (a, p, UNSIGNED);
  wide_int d = wide_int::from (b, p, UNSIGNED);
  wide_int x0 = wide_int::from (0, p, UNSIGNED);
  wide_int x1 = wide_int::from (1, p, UNSIGNED);

  if (wi::eq_p (b, 1))
    return wide_int::from (1, p, UNSIGNED);

  /* Standard extended-Euclid iteration: maintain C > D and the
     Bezout coefficient of A in X1.  */
  while (wi::gt_p (c, 1, UNSIGNED))
    {
      wide_int t = d;
      wide_int q = wi::divmod_trunc (c, d, UNSIGNED, &d);
      c = t;
      wide_int s = x0;
      x0 = wi::sub (x1, wi::mul (q, x0));
      x1 = s;
    }
  /* Normalize a negative coefficient into the range [0, B).  */
  if (wi::lt_p (x1, 0, SIGNED))
    x1 += d;
  return x1;
}
2403 :
2404 : /*
2405 : * Private utilities.
2406 : */
2407 :
/* GTY (garbage collector / precompiled-header) hooks for widest_int.
   These are deliberate no-ops: the visible code walks nothing.  */
void gt_ggc_mx (widest_int *) { }
void gt_pch_nx (widest_int *, void (*) (void *, void *), void *) { }
void gt_pch_nx (widest_int *) { }
2411 :
2412 : template void wide_int::dump () const;
2413 : template void generic_wide_int <wide_int_ref_storage <false> >::dump () const;
2414 : template void generic_wide_int <wide_int_ref_storage <true> >::dump () const;
2415 : template void offset_int::dump () const;
2416 : template void widest_int::dump () const;
2417 :
2418 : /* We could add all the above ::dump variants here, but wide_int and
2419 : widest_int should handle the common cases. Besides, you can always
2420 : call the dump method directly. */
2421 :
/* Print REF to stderr via its dump method; for use from a debugger.  */
DEBUG_FUNCTION void
debug (const wide_int &ref)
{
  ref.dump ();
}
2427 :
2428 : DEBUG_FUNCTION void
2429 0 : debug (const wide_int *ptr)
2430 : {
2431 0 : if (ptr)
2432 0 : debug (*ptr);
2433 : else
2434 0 : fprintf (stderr, "<nil>\n");
2435 0 : }
2436 :
/* Print REF to stderr via its dump method; for use from a debugger.  */
DEBUG_FUNCTION void
debug (const widest_int &ref)
{
  ref.dump ();
}
2442 :
2443 : DEBUG_FUNCTION void
2444 0 : debug (const widest_int *ptr)
2445 : {
2446 0 : if (ptr)
2447 0 : debug (*ptr);
2448 : else
2449 0 : fprintf (stderr, "<nil>\n");
2450 0 : }
2451 :
2452 : #if CHECKING_P
2453 :
2454 : namespace selftest {
2455 :
2456 : /* Selftests for wide ints. We run these multiple times, once per type. */
2457 :
2458 : /* Helper function for building a test value. */
2459 :
2460 : template <class VALUE_TYPE>
2461 : static VALUE_TYPE
2462 : from_int (int i);
2463 :
2464 : /* Specializations of the fixture for each wide-int type. */
2465 :
2466 : /* Specialization for VALUE_TYPE == wide_int. */
2467 :
/* Build a 32-bit wide_int test value from I.  */
template <>
wide_int
from_int (int i)
{
  return wi::shwi (i, 32);
}
2474 :
2475 : /* Specialization for VALUE_TYPE == offset_int. */
2476 :
/* Build an offset_int test value from I.  */
template <>
offset_int
from_int (int i)
{
  return offset_int (i);
}
2483 :
2484 : /* Specialization for VALUE_TYPE == widest_int. */
2485 :
/* Build a widest_int test value from I.  */
template <>
widest_int
from_int (int i)
{
  return widest_int (i);
}
2492 :
2493 : /* Verify that print_dec (WI, ..., SGN) gives the expected string
2494 : representation (using base 10). */
2495 :
/* Verify that print_dec (WI, ..., SGN) gives the expected string
   representation EXPECTED (using base 10).  Falls back to an alloca'd
   buffer when the fixed-size one would be too small.  */
static void
assert_deceq (const char *expected, const wide_int_ref &wi, signop sgn)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_dec_buf_size (wi, sgn, &len))
    p = XALLOCAVEC (char, len);
  print_dec (wi, p, sgn);
  ASSERT_STREQ (expected, p);
}
2506 :
2507 : /* Likewise for base 16. */
2508 :
/* Likewise for base 16: verify print_hex (WI, ...) yields EXPECTED.  */
static void
assert_hexeq (const char *expected, const wide_int_ref &wi)
{
  char buf[WIDE_INT_PRINT_BUFFER_SIZE], *p = buf;
  unsigned len;
  if (print_hex_buf_size (wi, &len))
    p = XALLOCAVEC (char, len);
  print_hex (wi, p);
  ASSERT_STREQ (expected, p);
}
2519 :
2520 : /* Test cases. */
2521 :
2522 : /* Verify that print_dec and print_hex work for VALUE_TYPE. */
2523 :
/* Verify that print_dec and print_hex work for VALUE_TYPE, including
   values wider than one HWI and (when supported) wider than 128 bits.  */
template <class VALUE_TYPE>
static void
test_printing ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (42);
  assert_deceq ("42", a, SIGNED);
  assert_hexeq ("0x2a", a);
  assert_hexeq ("0x1fffffffffffffffff", wi::shwi (-1, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask (64, false, 69));
  assert_hexeq ("0xffffffffffffffff", wi::mask <widest_int> (64, false));
  if (WIDE_INT_MAX_INL_PRECISION > 128)
    {
      assert_hexeq ("0x20000000000000000fffffffffffffffe",
		    wi::lshift (1, 129) + wi::lshift (1, 64) - 2);
      assert_hexeq ("0x200000000000004000123456789abcdef",
		    wi::lshift (1, 129) + wi::lshift (1, 74)
		    + wi::lshift (0x1234567, 32) + 0x89abcdef);
    }
}
2543 :
2544 : /* Verify that various operations work correctly for VALUE_TYPE,
2545 : unary and binary, using both function syntax, and
2546 : overloaded-operators. */
2547 :
/* Verify that various operations work correctly for VALUE_TYPE,
   unary and binary, using both function syntax, and
   overloaded-operators.  */
template <class VALUE_TYPE>
static void
test_ops ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* Using functions.  */
  assert_deceq ("-7", wi::neg (a), SIGNED);
  assert_deceq ("10", wi::add (a, b), SIGNED);
  assert_deceq ("4", wi::sub (a, b), SIGNED);
  assert_deceq ("-4", wi::sub (b, a), SIGNED);
  assert_deceq ("21", wi::mul (a, b), SIGNED);

  /* Using operators; must agree with the function forms above.  */
  assert_deceq ("-7", -a, SIGNED);
  assert_deceq ("10", a + b, SIGNED);
  assert_deceq ("4", a - b, SIGNED);
  assert_deceq ("-4", b - a, SIGNED);
  assert_deceq ("21", a * b, SIGNED);
}
2569 :
2570 : /* Verify that various comparisons work correctly for VALUE_TYPE. */
2571 :
/* Verify that various comparisons work correctly for VALUE_TYPE,
   covering every relational predicate plus three-way comparison.  */
template <class VALUE_TYPE>
static void
test_comparisons ()
{
  VALUE_TYPE a = from_int<VALUE_TYPE> (7);
  VALUE_TYPE b = from_int<VALUE_TYPE> (3);

  /* == */
  ASSERT_TRUE (wi::eq_p (a, a));
  ASSERT_FALSE (wi::eq_p (a, b));

  /* != */
  ASSERT_TRUE (wi::ne_p (a, b));
  ASSERT_FALSE (wi::ne_p (a, a));

  /* < */
  ASSERT_FALSE (wi::lts_p (a, a));
  ASSERT_FALSE (wi::lts_p (a, b));
  ASSERT_TRUE (wi::lts_p (b, a));

  /* <= */
  ASSERT_TRUE (wi::les_p (a, a));
  ASSERT_FALSE (wi::les_p (a, b));
  ASSERT_TRUE (wi::les_p (b, a));

  /* > */
  ASSERT_FALSE (wi::gts_p (a, a));
  ASSERT_TRUE (wi::gts_p (a, b));
  ASSERT_FALSE (wi::gts_p (b, a));

  /* >= */
  ASSERT_TRUE (wi::ges_p (a, a));
  ASSERT_TRUE (wi::ges_p (a, b));
  ASSERT_FALSE (wi::ges_p (b, a));

  /* comparison */
  ASSERT_EQ (-1, wi::cmps (b, a));
  ASSERT_EQ (0, wi::cmps (a, a));
  ASSERT_EQ (1, wi::cmps (a, b));
}
2612 :
2613 : /* Run all of the selftests, using the given VALUE_TYPE. */
2614 :
/* Run all of the selftests, using the given VALUE_TYPE.  */
template <class VALUE_TYPE>
static void run_all_wide_int_tests ()
{
  test_printing <VALUE_TYPE> ();
  test_ops <VALUE_TYPE> ();
  test_comparisons <VALUE_TYPE> ();
}
2622 :
2623 : /* Test overflow conditions. */
2624 :
/* Test overflow conditions for add and sub near the unsigned maximum,
   at precisions around HWI and double-HWI boundaries.  OFFSET 0 puts
   the operation exactly at the wrap point.  */
static void
test_overflow ()
{
  static int precs[] = { 31, 32, 33, 63, 64, 65, 127, 128 };
  static int offsets[] = { 16, 1, 0 };
  for (unsigned int i = 0; i < ARRAY_SIZE (precs); ++i)
    for (unsigned int j = 0; j < ARRAY_SIZE (offsets); ++j)
      {
	int prec = precs[i];
	int offset = offsets[j];
	wi::overflow_type overflow;
	wide_int sum, diff;

	/* (max - offset) + 1 overflows exactly when offset == 0.  */
	sum = wi::add (wi::max_value (prec, UNSIGNED) - offset, 1,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	/* Same, with the operands swapped.  */
	sum = wi::add (1, wi::max_value (prec, UNSIGNED) - offset,
		       UNSIGNED, &overflow);
	ASSERT_EQ (sum, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset == 0);

	/* (max - offset) - max underflows whenever offset != 0.  */
	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED),
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, -offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset != 0);

	/* (max - offset) - (max - 1) underflows whenever offset > 1.  */
	diff = wi::sub (wi::max_value (prec, UNSIGNED) - offset,
			wi::max_value (prec, UNSIGNED) - 1,
			UNSIGNED, &overflow);
	ASSERT_EQ (diff, 1 - offset);
	ASSERT_EQ (overflow != wi::OVF_NONE, offset > 1);
      }
}
2661 :
2662 : /* Test the round_{down,up}_for_mask functions. */
2663 :
/* Test the round_{down,up}_for_mask functions, pairing each down-test
   with the matching up-test for the same VAL/MASK.  */
static void
test_round_for_mask ()
{
  unsigned int prec = 18;
  /* 17 already fits 0xf1, so both directions are identity.  */
  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (17, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (17, wi::round_up_for_mask (wi::shwi (17, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (1, wi::round_down_for_mask (wi::shwi (6, prec),
					 wi::shwi (0xf1, prec)));
  ASSERT_EQ (16, wi::round_up_for_mask (wi::shwi (6, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (17, wi::round_down_for_mask (wi::shwi (24, prec),
					  wi::shwi (0xf1, prec)));
  ASSERT_EQ (32, wi::round_up_for_mask (wi::shwi (24, prec),
					wi::shwi (0xf1, prec)));

  ASSERT_EQ (0x011, wi::round_down_for_mask (wi::shwi (0x22, prec),
					     wi::shwi (0x111, prec)));
  ASSERT_EQ (0x100, wi::round_up_for_mask (wi::shwi (0x22, prec),
					   wi::shwi (0x111, prec)));

  ASSERT_EQ (100, wi::round_down_for_mask (wi::shwi (101, prec),
					   wi::shwi (0xfc, prec)));
  ASSERT_EQ (104, wi::round_up_for_mask (wi::shwi (101, prec),
					 wi::shwi (0xfc, prec)));

  ASSERT_EQ (0x2bc, wi::round_down_for_mask (wi::shwi (0x2c2, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0x800, wi::round_up_for_mask (wi::shwi (0x2c2, prec),
					   wi::shwi (0xabc, prec)));

  /* Rounding up past the largest maskable value wraps to zero.  */
  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0xabd, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0xabd, prec),
				       wi::shwi (0xabc, prec)));

  ASSERT_EQ (0xabc, wi::round_down_for_mask (wi::shwi (0x1000, prec),
					     wi::shwi (0xabc, prec)));
  ASSERT_EQ (0, wi::round_up_for_mask (wi::shwi (0x1000, prec),
				       wi::shwi (0xabc, prec)));
}
2708 :
2709 : /* Run all of the selftests within this file, for all value types. */
2710 :
/* Run all of the selftests within this file, for all value types.  */
void
wide_int_cc_tests ()
{
  run_all_wide_int_tests <wide_int> ();
  run_all_wide_int_tests <offset_int> ();
  run_all_wide_int_tests <widest_int> ();
  test_overflow ();
  test_round_for_mask ();
  /* A full-width shifted_mask must agree with a plain mask.  */
  ASSERT_EQ (wi::mask (128, false, 128),
	     wi::shifted_mask (0, 128, false, 128));
  ASSERT_EQ (wi::mask (128, true, 128),
	     wi::shifted_mask (0, 128, true, 128));
  ASSERT_EQ (wi::multiple_of_p (from_int <widest_int> (1),
				from_int <widest_int> (-128), UNSIGNED),
	     false);
}
2727 :
2728 : } // namespace selftest
2729 : #endif /* CHECKING_P */
|