From d5293e2a8db54245553e01ad5d791b7492ad6101 Mon Sep 17 00:00:00 2001
From: Wilco Dijkstra <wdijkstr@arm.com>
Date: Mon, 24 Oct 2022 15:14:14 +0100
Subject: [PATCH 060/157] [Backport][SME] Improve immediate expansion
[PR106583]
Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a096036589d82175a0f729c2dab73c9a527d075d

Improve expansion of immediates that can be created from a bitmask
immediate and 2 MOVKs.  Simplify, refactor and improve the efficiency
of the bitmask checks.  Move the various immediate-handling functions
together to avoid forward declarations.

This reduces the number of 4-instruction immediates in SPECINT/FP by 10-15%.
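
As a hand-written illustration (not verbatim compiler output): the
constant 0x1234cccccccc5678 from the new test becomes the valid
bitmask immediate 0xcccccccccccccccc once its top and bottom 16-bit
lanes are masked out, so it can now be built with one MOV plus two
MOVKs instead of a 4-instruction MOVZ/MOVK sequence:

	mov	x0, #0xcccccccccccccccc	// repeating bitmask immediate
	movk	x0, #0x1234, lsl #48	// patch bits [63:48]
	movk	x0, #0x5678		// patch bits [15:0]
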
gcc/
	PR target/106583
	* config/aarch64/aarch64.cc (aarch64_internal_mov_immediate):
	Add support for a bitmask immediate with 2 MOVKs.
	(aarch64_check_bitmask): New function, factored out of
	aarch64_internal_mov_immediate.
	(aarch64_bitmask_imm): Simplify replication of small modes.
	Split function into 64-bit only version for efficiency.
	(aarch64_move_imm): Move near other immediate functions.
	(aarch64_uimm12_shift): Likewise.
	(aarch64_clamp_to_uimm12_shift): Likewise.
	(aarch64_movk_shift): Likewise.
	(aarch64_replicate_bitmask_imm): Likewise.
	(aarch64_and_split_imm1): Likewise.
	(aarch64_and_split_imm2): Likewise.
	(aarch64_and_bitmask_imm): Likewise.
	(aarch64_movw_imm): Likewise.

gcc/testsuite/
	PR target/106583
	* gcc.target/aarch64/pr106583.c: New test.
---
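
For readers outside the GCC tree, the rewritten 64-bit bitmask check
can be exercised standalone.  The sketch below is adapted from this
patch, with HOST_WIDE_INT and clz_hwi replaced by uint64_t and the
GCC/Clang count-leading-zeros builtins (assumptions of this
illustration, not code from the patch):

#include <assert.h>
#include <stdint.h>

/* Multipliers for repeating bitmasks of width 32, 16, 8, 4 and 2
   (as in the patch).  */
static const uint64_t bitmask_imm_mul[] = {
  0x0000000100000001ull, 0x0001000100010001ull, 0x0101010101010101ull,
  0x1111111111111111ull, 0x5555555555555555ull,
};

/* Standalone version of the patched aarch64_bitmask_imm.  */
static int
is_bitmask_imm (uint64_t val)
{
  /* Adding the lowest set bit clears a single run of ones; if a power
     of two (or zero) remains, VAL had at most one run.  All-zeroes and
     all-ones are rejected.  */
  uint64_t tmp = val + (val & -val);
  if (tmp == (tmp & -tmp))
    return val + 1 > 1;

  /* Invert if VAL starts with a one bit, so we only ever need to
     search for runs of ones.  */
  if (val & 1)
    val = ~val;

  /* Strip the first run of ones; nothing left means a single
     (rotated) run, which is valid.  */
  uint64_t first_one = val & -val;
  tmp = val & (val + first_one);
  if (tmp == 0)
    return 1;

  /* Distance in bit positions between the first two runs.  */
  uint64_t next_one = tmp & -tmp;
  int bits = __builtin_clzll (first_one) - __builtin_clzll (next_one);
  uint64_t mask = val ^ tmp;

  /* The distance must be a power of 2 and the run must fit in it.  */
  if ((mask >> bits) != 0 || bits != (bits & -bits))
    return 0;

  /* The run must repeat exactly 64/bits times.  */
  return val == mask * bitmask_imm_mul[__builtin_clz (bits) - 26];
}

int
main (void)
{
  assert (is_bitmask_imm (0xccccccccccccccccull));  /* repeating 1100 */
  assert (is_bitmask_imm (0x00ff00ff00ff00ffull));
  assert (!is_bitmask_imm (0));                     /* all zeroes */
  assert (!is_bitmask_imm (~(uint64_t) 0));         /* all ones */
  assert (!is_bitmask_imm (0x1234cccccccc5678ull)); /* needs 2 MOVKs */
  return 0;
}

Note also that the new test below expects exactly one mov and two movk
per function (7 mov and 14 movk across the seven functions), matching
the 3-instruction expansion.
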
gcc/config/aarch64/aarch64.cc | 485 +++++++++++---------
gcc/testsuite/gcc.target/aarch64/pr106583.c | 41 ++
2 files changed, 301 insertions(+), 225 deletions(-)
create mode 100644 gcc/testsuite/gcc.target/aarch64/pr106583.c
diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
index b4b646fa0..cf7736994 100644
--- a/gcc/config/aarch64/aarch64.cc
+++ b/gcc/config/aarch64/aarch64.cc
@@ -305,7 +305,6 @@ static bool aarch64_builtin_support_vector_misalignment (machine_mode mode,
static machine_mode aarch64_simd_container_mode (scalar_mode, poly_int64);
static bool aarch64_print_address_internal (FILE*, machine_mode, rtx,
aarch64_addr_query_type);
-static HOST_WIDE_INT aarch64_clamp_to_uimm12_shift (HOST_WIDE_INT val);
/* The processor for which instructions should be scheduled. */
enum aarch64_processor aarch64_tune = cortexa53;
@@ -5756,6 +5755,143 @@ aarch64_output_sve_vector_inc_dec (const char *operands, rtx x)
factor, nelts_per_vq);
}
+/* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2. */
+
+static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
+ {
+ 0x0000000100000001ull,
+ 0x0001000100010001ull,
+ 0x0101010101010101ull,
+ 0x1111111111111111ull,
+ 0x5555555555555555ull,
+ };
+
+
+
+/* Return true if 64-bit VAL is a valid bitmask immediate. */
+static bool
+aarch64_bitmask_imm (unsigned HOST_WIDE_INT val)
+{
+ unsigned HOST_WIDE_INT tmp, mask, first_one, next_one;
+ int bits;
+
+ /* Check for a single sequence of one bits and return quickly if so.
+ The special cases of all ones and all zeroes returns false. */
+ tmp = val + (val & -val);
+
+ if (tmp == (tmp & -tmp))
+ return (val + 1) > 1;
+
+ /* Invert if the immediate doesn't start with a zero bit - this means we
+ only need to search for sequences of one bits. */
+ if (val & 1)
+ val = ~val;
+
+ /* Find the first set bit and set tmp to val with the first sequence of one
+ bits removed. Return success if there is a single sequence of ones. */
+ first_one = val & -val;
+ tmp = val & (val + first_one);
+
+ if (tmp == 0)
+ return true;
+
+ /* Find the next set bit and compute the difference in bit position. */
+ next_one = tmp & -tmp;
+ bits = clz_hwi (first_one) - clz_hwi (next_one);
+ mask = val ^ tmp;
+
+ /* Check the bit position difference is a power of 2, and that the first
+ sequence of one bits fits within 'bits' bits. */
+ if ((mask >> bits) != 0 || bits != (bits & -bits))
+ return false;
+
+ /* Check the sequence of one bits is repeated 64/bits times. */
+ return val == mask * bitmask_imm_mul[__builtin_clz (bits) - 26];
+}
+
+
+/* Return true if VAL is a valid bitmask immediate for MODE. */
+bool
+aarch64_bitmask_imm (HOST_WIDE_INT val_in, machine_mode mode)
+{
+ if (mode == DImode)
+ return aarch64_bitmask_imm (val_in);
+
+ unsigned HOST_WIDE_INT val = val_in;
+
+ if (mode == SImode)
+ return aarch64_bitmask_imm ((val & 0xffffffff) | (val << 32));
+
+ /* Replicate small immediates to fit 64 bits. */
+ int size = GET_MODE_UNIT_PRECISION (mode);
+ val &= (HOST_WIDE_INT_1U << size) - 1;
+ val *= bitmask_imm_mul[__builtin_clz (size) - 26];
+
+ return aarch64_bitmask_imm (val);
+}
+
+
+/* Return true if the immediate VAL can be a bitfield immediate
+ by changing the given MASK bits in VAL to zeroes, ones or bits
+ from the other half of VAL. Return the new immediate in VAL2. */
+static inline bool
+aarch64_check_bitmask (unsigned HOST_WIDE_INT val,
+ unsigned HOST_WIDE_INT &val2,
+ unsigned HOST_WIDE_INT mask)
+{
+ val2 = val & ~mask;
+ if (val2 != val && aarch64_bitmask_imm (val2))
+ return true;
+ val2 = val | mask;
+ if (val2 != val && aarch64_bitmask_imm (val2))
+ return true;
+ val = val & ~mask;
+ val2 = val | (((val >> 32) | (val << 32)) & mask);
+ if (val2 != val && aarch64_bitmask_imm (val2))
+ return true;
+ val2 = val | (((val >> 16) | (val << 48)) & mask);
+ if (val2 != val && aarch64_bitmask_imm (val2))
+ return true;
+ return false;
+}
+
+
+/* Return true if val is an immediate that can be loaded into a
+ register by a MOVZ instruction. */
+static bool
+aarch64_movw_imm (HOST_WIDE_INT val, scalar_int_mode mode)
+{
+ if (GET_MODE_SIZE (mode) > 4)
+ {
+ if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
+ return 1;
+ }
+ else
+ {
+ /* Ignore sign extension. */
+ val &= (HOST_WIDE_INT) 0xffffffff;
+ }
+ return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
+}
+
+
+/* Return true if VAL is an immediate that can be loaded into a
+ register in a single instruction. */
+bool
+aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
+{
+ scalar_int_mode int_mode;
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
+ return false;
+
+ if (aarch64_movw_imm (val, int_mode) || aarch64_movw_imm (~val, int_mode))
+ return 1;
+ return aarch64_bitmask_imm (val, int_mode);
+}
+
+
static int
aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
scalar_int_mode mode)
@@ -5786,7 +5922,7 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
/* Check if we have to emit a second instruction by checking to see
- if any of the upper 32 bits of the original DI mode value is set. */
+ if any of the upper 32 bits of the original DI mode value is set. */
if (val == val2)
return 1;
@@ -5822,36 +5958,43 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
- if (zero_match != 2 && one_match != 2)
+ if (zero_match < 2 && one_match < 2)
{
/* Try emitting a bitmask immediate with a movk replacing 16 bits.
For a 64-bit bitmask try whether changing 16 bits to all ones or
zeroes creates a valid bitmask. To check any repeated bitmask,
try using 16 bits from the other 32-bit half of val. */
- for (i = 0; i < 64; i += 16, mask <<= 16)
- {
- val2 = val & ~mask;
- if (val2 != val && aarch64_bitmask_imm (val2, mode))
- break;
- val2 = val | mask;
- if (val2 != val && aarch64_bitmask_imm (val2, mode))
- break;
- val2 = val2 & ~mask;
- val2 = val2 | (((val2 >> 32) | (val2 << 32)) & mask);
- if (val2 != val && aarch64_bitmask_imm (val2, mode))
- break;
- }
- if (i != 64)
- {
- if (generate)
+ for (i = 0; i < 64; i += 16)
+ if (aarch64_check_bitmask (val, val2, mask << i))
+ {
+ if (generate)
+ {
+ emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ }
+ return 2;
+ }
+ }
+
+ /* Try a bitmask plus 2 movk to generate the immediate in 3 instructions. */
+ if (zero_match + one_match == 0)
+ {
+ for (i = 0; i < 48; i += 16)
+ for (int j = i + 16; j < 64; j += 16)
+ if (aarch64_check_bitmask (val, val2, (mask << i) | (mask << j)))
{
- emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
- emit_insn (gen_insv_immdi (dest, GEN_INT (i),
- GEN_INT ((val >> i) & 0xffff)));
+ if (generate)
+ {
+ emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+ GEN_INT ((val >> i) & 0xffff)));
+ emit_insn (gen_insv_immdi (dest, GEN_INT (j),
+ GEN_INT ((val >> j) & 0xffff)));
+ }
+ return 3;
}
- return 2;
- }
}
/* Generate 2-4 instructions, skipping 16 bits of all zeroes or ones which
@@ -5898,6 +6041,99 @@ aarch64_mov128_immediate (rtx imm)
}
+/* Return true if val can be encoded as a 12-bit unsigned immediate with
+ a left shift of 0 or 12 bits. */
+bool
+aarch64_uimm12_shift (HOST_WIDE_INT val)
+{
+ return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
+ || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
+ );
+}
+
+/* Returns the nearest value to VAL that will fit as a 12-bit unsigned immediate
+ that can be created with a left shift of 0 or 12. */
+static HOST_WIDE_INT
+aarch64_clamp_to_uimm12_shift (HOST_WIDE_INT val)
+{
+ /* Check to see if the value fits in 24 bits, as that is the maximum we can
+ handle correctly. */
+ gcc_assert ((val & 0xffffff) == val);
+
+ if (((val & 0xfff) << 0) == val)
+ return val;
+
+ return val & (0xfff << 12);
+}
+
+
+/* Test whether:
+
+ X = (X & AND_VAL) | IOR_VAL;
+
+ can be implemented using:
+
+ MOVK X, #(IOR_VAL >> shift), LSL #shift
+
+ Return the shift if so, otherwise return -1. */
+int
+aarch64_movk_shift (const wide_int_ref &and_val,
+ const wide_int_ref &ior_val)
+{
+ unsigned int precision = and_val.get_precision ();
+ unsigned HOST_WIDE_INT mask = 0xffff;
+ for (unsigned int shift = 0; shift < precision; shift += 16)
+ {
+ if (and_val == ~mask && (ior_val & mask) == ior_val)
+ return shift;
+ mask <<= 16;
+ }
+ return -1;
+}
+
+/* Create mask of ones, covering the lowest to highest bits set in VAL_IN.
+ Assumed precondition: VAL_IN Is not zero. */
+
+unsigned HOST_WIDE_INT
+aarch64_and_split_imm1 (HOST_WIDE_INT val_in)
+{
+ int lowest_bit_set = ctz_hwi (val_in);
+ int highest_bit_set = floor_log2 (val_in);
+ gcc_assert (val_in != 0);
+
+ return ((HOST_WIDE_INT_UC (2) << highest_bit_set) -
+ (HOST_WIDE_INT_1U << lowest_bit_set));
+}
+
+/* Create constant where bits outside of lowest bit set to highest bit set
+ are set to 1. */
+
+unsigned HOST_WIDE_INT
+aarch64_and_split_imm2 (HOST_WIDE_INT val_in)
+{
+ return val_in | ~aarch64_and_split_imm1 (val_in);
+}
+
+/* Return true if VAL_IN is a valid 'and' bitmask immediate. */
+
+bool
+aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode)
+{
+ scalar_int_mode int_mode;
+ if (!is_a <scalar_int_mode> (mode, &int_mode))
+ return false;
+
+ if (aarch64_bitmask_imm (val_in, int_mode))
+ return false;
+
+ if (aarch64_move_imm (val_in, int_mode))
+ return false;
+
+ unsigned HOST_WIDE_INT imm2 = aarch64_and_split_imm2 (val_in);
+
+ return aarch64_bitmask_imm (imm2, int_mode);
+}
+
/* Return the number of temporary registers that aarch64_add_offset_1
would need to add OFFSET to a register. */
@@ -10379,207 +10615,6 @@ aarch64_tls_referenced_p (rtx x)
}
-/* Return true if val can be encoded as a 12-bit unsigned immediate with
- a left shift of 0 or 12 bits. */
-bool
-aarch64_uimm12_shift (HOST_WIDE_INT val)
-{
- return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
- || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
- );
-}
-
-/* Returns the nearest value to VAL that will fit as a 12-bit unsigned immediate
- that can be created with a left shift of 0 or 12. */
-static HOST_WIDE_INT
-aarch64_clamp_to_uimm12_shift (HOST_WIDE_INT val)
-{
- /* Check to see if the value fits in 24 bits, as that is the maximum we can
- handle correctly. */
- gcc_assert ((val & 0xffffff) == val);
-
- if (((val & 0xfff) << 0) == val)
- return val;
-
- return val & (0xfff << 12);
-}
-
-/* Return true if val is an immediate that can be loaded into a
- register by a MOVZ instruction. */
-static bool
-aarch64_movw_imm (HOST_WIDE_INT val, scalar_int_mode mode)
-{
- if (GET_MODE_SIZE (mode) > 4)
- {
- if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
- || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
- return 1;
- }
- else
- {
- /* Ignore sign extension. */
- val &= (HOST_WIDE_INT) 0xffffffff;
- }
- return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
- || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
-}
-
-/* Test whether:
-
- X = (X & AND_VAL) | IOR_VAL;
-
- can be implemented using:
-
- MOVK X, #(IOR_VAL >> shift), LSL #shift
-
- Return the shift if so, otherwise return -1. */
-int
-aarch64_movk_shift (const wide_int_ref &and_val,
- const wide_int_ref &ior_val)
-{
- unsigned int precision = and_val.get_precision ();
- unsigned HOST_WIDE_INT mask = 0xffff;
- for (unsigned int shift = 0; shift < precision; shift += 16)
- {
- if (and_val == ~mask && (ior_val & mask) == ior_val)
- return shift;
- mask <<= 16;
- }
- return -1;
-}
-
-/* VAL is a value with the inner mode of MODE. Replicate it to fill a
- 64-bit (DImode) integer. */
-
-static unsigned HOST_WIDE_INT
-aarch64_replicate_bitmask_imm (unsigned HOST_WIDE_INT val, machine_mode mode)
-{
- unsigned int size = GET_MODE_UNIT_PRECISION (mode);
- while (size < 64)
- {
- val &= (HOST_WIDE_INT_1U << size) - 1;
- val |= val << size;
- size *= 2;
- }
- return val;
-}
-
-/* Multipliers for repeating bitmasks of width 32, 16, 8, 4, and 2. */
-
-static const unsigned HOST_WIDE_INT bitmask_imm_mul[] =
- {
- 0x0000000100000001ull,
- 0x0001000100010001ull,
- 0x0101010101010101ull,
- 0x1111111111111111ull,
- 0x5555555555555555ull,
- };
-
-
-/* Return true if val is a valid bitmask immediate. */
-
-bool
-aarch64_bitmask_imm (HOST_WIDE_INT val_in, machine_mode mode)
-{
- unsigned HOST_WIDE_INT val, tmp, mask, first_one, next_one;
- int bits;
-
- /* Check for a single sequence of one bits and return quickly if so.
- The special cases of all ones and all zeroes returns false. */
- val = aarch64_replicate_bitmask_imm (val_in, mode);
- tmp = val + (val & -val);
-
- if (tmp == (tmp & -tmp))
- return (val + 1) > 1;
-
- /* Replicate 32-bit immediates so we can treat them as 64-bit. */
- if (mode == SImode)
- val = (val << 32) | (val & 0xffffffff);
-
- /* Invert if the immediate doesn't start with a zero bit - this means we
- only need to search for sequences of one bits. */
- if (val & 1)
- val = ~val;
-
- /* Find the first set bit and set tmp to val with the first sequence of one
- bits removed. Return success if there is a single sequence of ones. */
- first_one = val & -val;
- tmp = val & (val + first_one);
-
- if (tmp == 0)
- return true;
-
- /* Find the next set bit and compute the difference in bit position. */
- next_one = tmp & -tmp;
- bits = clz_hwi (first_one) - clz_hwi (next_one);
- mask = val ^ tmp;
-
- /* Check the bit position difference is a power of 2, and that the first
- sequence of one bits fits within 'bits' bits. */
- if ((mask >> bits) != 0 || bits != (bits & -bits))
- return false;
-
- /* Check the sequence of one bits is repeated 64/bits times. */
- return val == mask * bitmask_imm_mul[__builtin_clz (bits) - 26];
-}
-
-/* Create mask of ones, covering the lowest to highest bits set in VAL_IN.
- Assumed precondition: VAL_IN Is not zero. */
-
-unsigned HOST_WIDE_INT
-aarch64_and_split_imm1 (HOST_WIDE_INT val_in)
-{
- int lowest_bit_set = ctz_hwi (val_in);
- int highest_bit_set = floor_log2 (val_in);
- gcc_assert (val_in != 0);
-
- return ((HOST_WIDE_INT_UC (2) << highest_bit_set) -
- (HOST_WIDE_INT_1U << lowest_bit_set));
-}
-
-/* Create constant where bits outside of lowest bit set to highest bit set
- are set to 1. */
-
-unsigned HOST_WIDE_INT
-aarch64_and_split_imm2 (HOST_WIDE_INT val_in)
-{
- return val_in | ~aarch64_and_split_imm1 (val_in);
-}
-
-/* Return true if VAL_IN is a valid 'and' bitmask immediate. */
-
-bool
-aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode)
-{
- scalar_int_mode int_mode;
- if (!is_a <scalar_int_mode> (mode, &int_mode))
- return false;
-
- if (aarch64_bitmask_imm (val_in, int_mode))
- return false;
-
- if (aarch64_move_imm (val_in, int_mode))
- return false;
-
- unsigned HOST_WIDE_INT imm2 = aarch64_and_split_imm2 (val_in);
-
- return aarch64_bitmask_imm (imm2, int_mode);
-}
-
-/* Return true if val is an immediate that can be loaded into a
- register in a single instruction. */
-bool
-aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
-{
- scalar_int_mode int_mode;
- if (!is_a <scalar_int_mode> (mode, &int_mode))
- return false;
-
- if (aarch64_movw_imm (val, int_mode) || aarch64_movw_imm (~val, int_mode))
- return 1;
- return aarch64_bitmask_imm (val, int_mode);
-}
-
static bool
aarch64_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
diff --git a/gcc/testsuite/gcc.target/aarch64/pr106583.c b/gcc/testsuite/gcc.target/aarch64/pr106583.c
new file mode 100644
index 000000000..0f9315808
--- /dev/null
+++ b/gcc/testsuite/gcc.target/aarch64/pr106583.c
@@ -0,0 +1,41 @@
+/* { dg-do assemble } */
+/* { dg-options "-O2 --save-temps" } */
+
+long f1 (void)
+{
+ return 0x7efefefefefefeff;
+}
+
+long f2 (void)
+{
+ return 0x12345678aaaaaaaa;
+}
+
+long f3 (void)
+{
+ return 0x1234cccccccc5678;
+}
+
+long f4 (void)
+{
+ return 0x7777123456787777;
+}
+
+long f5 (void)
+{
+ return 0x5555555512345678;
+}
+
+long f6 (void)
+{
+ return 0x1234bbbb5678bbbb;
+}
+
+long f7 (void)
+{
+ return 0x4444123444445678;
+}
+
+
+/* { dg-final { scan-assembler-times {\tmovk\t} 14 } } */
+/* { dg-final { scan-assembler-times {\tmov\t} 7 } } */
--
2.33.0