Diffstat (limited to '0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch')
-rw-r--r-- 0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch | 410
1 file changed, 410 insertions(+), 0 deletions(-)
diff --git a/0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch b/0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch
new file mode 100644
index 0000000..dc06698
--- /dev/null
+++ b/0160-Backport-SME-AArch64-Cleanup-move-immediate-code.patch
@@ -0,0 +1,410 @@
+From d76be4acadc0641cc8e795cd6b8a1c3c83b4fdb2 Mon Sep 17 00:00:00 2001
+From: Wilco Dijkstra <wilco.dijkstra@arm.com>
+Date: Mon, 5 Dec 2022 10:49:25 +0000
+Subject: [PATCH 061/157] [Backport][SME] AArch64: Cleanup move immediate code
+
+Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=ba1536dac780f3f92c5eab999fda6931f6247fc1
+
+Simplify, refactor and improve various move immediate functions.
+Allow 32-bit MOVN/I as a valid 64-bit immediate which removes special
+cases in aarch64_internal_mov_immediate. Add a new constraint so the movdi
+pattern only needs a single alternative for move immediate.
+
+gcc/
+ * config/aarch64/aarch64.cc (aarch64_bitmask_imm): Use unsigned type.
+ (aarch64_is_mov_xn_imm): New function.
+ (aarch64_move_imm): Refactor, assert mode is SImode or DImode.
+ (aarch64_internal_mov_immediate): Assert mode is SImode or DImode.
+ Simplify special cases.
+ (aarch64_uimm12_shift): Simplify code.
+ (aarch64_clamp_to_uimm12_shift): Likewise.
+ (aarch64_movw_imm): Rename to aarch64_is_movz.
+ (aarch64_float_const_rtx_p): Pass either SImode or DImode to
+ aarch64_internal_mov_immediate.
+ (aarch64_rtx_costs): Likewise.
+ * config/aarch64/aarch64.md (movdi_aarch64): Merge 'N' and 'M'
+ constraints into single 'O'.
+ (mov<mode>_aarch64): Likewise.
+ * config/aarch64/aarch64-protos.h (aarch64_move_imm): Use unsigned.
+ (aarch64_bitmask_imm): Likewise.
+ (aarch64_uimm12_shift): Likewise.
+ (aarch64_is_mov_xn_imm): New prototype.
+ * config/aarch64/constraints.md: Add 'O' for 32/64-bit immediates,
+ limit 'N' to 64-bit only moves.
+---
+ gcc/config/aarch64/aarch64-protos.h | 7 +-
+ gcc/config/aarch64/aarch64.cc | 158 ++++++++++++----------------
+ gcc/config/aarch64/aarch64.md | 17 ++-
+ gcc/config/aarch64/constraints.md | 5 +
+ 4 files changed, 85 insertions(+), 102 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-protos.h b/gcc/config/aarch64/aarch64-protos.h
+index 97984f3ab..3ff1a0163 100644
+--- a/gcc/config/aarch64/aarch64-protos.h
++++ b/gcc/config/aarch64/aarch64-protos.h
+@@ -755,7 +755,7 @@ void aarch64_post_cfi_startproc (void);
+ poly_int64 aarch64_initial_elimination_offset (unsigned, unsigned);
+ int aarch64_get_condition_code (rtx);
+ bool aarch64_address_valid_for_prefetch_p (rtx, bool);
+-bool aarch64_bitmask_imm (HOST_WIDE_INT val, machine_mode);
++bool aarch64_bitmask_imm (unsigned HOST_WIDE_INT val, machine_mode);
+ unsigned HOST_WIDE_INT aarch64_and_split_imm1 (HOST_WIDE_INT val_in);
+ unsigned HOST_WIDE_INT aarch64_and_split_imm2 (HOST_WIDE_INT val_in);
+ bool aarch64_and_bitmask_imm (unsigned HOST_WIDE_INT val_in, machine_mode mode);
+@@ -793,7 +793,7 @@ bool aarch64_masks_and_shift_for_bfi_p (scalar_int_mode, unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT,
+ unsigned HOST_WIDE_INT);
+ bool aarch64_zero_extend_const_eq (machine_mode, rtx, machine_mode, rtx);
+-bool aarch64_move_imm (HOST_WIDE_INT, machine_mode);
++bool aarch64_move_imm (unsigned HOST_WIDE_INT, machine_mode);
+ machine_mode aarch64_sve_int_mode (machine_mode);
+ opt_machine_mode aarch64_sve_pred_mode (unsigned int);
+ machine_mode aarch64_sve_pred_mode (machine_mode);
+@@ -843,8 +843,9 @@ bool aarch64_sve_float_arith_immediate_p (rtx, bool);
+ bool aarch64_sve_float_mul_immediate_p (rtx);
+ bool aarch64_split_dimode_const_store (rtx, rtx);
+ bool aarch64_symbolic_address_p (rtx);
+-bool aarch64_uimm12_shift (HOST_WIDE_INT);
++bool aarch64_uimm12_shift (unsigned HOST_WIDE_INT);
+ int aarch64_movk_shift (const wide_int_ref &, const wide_int_ref &);
++bool aarch64_is_mov_xn_imm (unsigned HOST_WIDE_INT);
+ bool aarch64_use_return_insn_p (void);
+ const char *aarch64_output_casesi (rtx *);
+
+diff --git a/gcc/config/aarch64/aarch64.cc b/gcc/config/aarch64/aarch64.cc
+index cf7736994..acb659f53 100644
+--- a/gcc/config/aarch64/aarch64.cc
++++ b/gcc/config/aarch64/aarch64.cc
+@@ -5812,12 +5812,10 @@ aarch64_bitmask_imm (unsigned HOST_WIDE_INT val)
+
+ /* Return true if VAL is a valid bitmask immediate for MODE. */
+ bool
+-aarch64_bitmask_imm (HOST_WIDE_INT val_in, machine_mode mode)
++aarch64_bitmask_imm (unsigned HOST_WIDE_INT val, machine_mode mode)
+ {
+ if (mode == DImode)
+- return aarch64_bitmask_imm (val_in);
+-
+- unsigned HOST_WIDE_INT val = val_in;
++ return aarch64_bitmask_imm (val);
+
+ if (mode == SImode)
+ return aarch64_bitmask_imm ((val & 0xffffffff) | (val << 32));
+@@ -5856,51 +5854,55 @@ aarch64_check_bitmask (unsigned HOST_WIDE_INT val,
+ }
+
+
+-/* Return true if val is an immediate that can be loaded into a
+- register by a MOVZ instruction. */
+-static bool
+-aarch64_movw_imm (HOST_WIDE_INT val, scalar_int_mode mode)
++/* Return true if VAL is a valid MOVZ immediate. */
++static inline bool
++aarch64_is_movz (unsigned HOST_WIDE_INT val)
+ {
+- if (GET_MODE_SIZE (mode) > 4)
+- {
+- if ((val & (((HOST_WIDE_INT) 0xffff) << 32)) == val
+- || (val & (((HOST_WIDE_INT) 0xffff) << 48)) == val)
+- return 1;
+- }
+- else
+- {
+- /* Ignore sign extension. */
+- val &= (HOST_WIDE_INT) 0xffffffff;
+- }
+- return ((val & (((HOST_WIDE_INT) 0xffff) << 0)) == val
+- || (val & (((HOST_WIDE_INT) 0xffff) << 16)) == val);
++ return (val >> (ctz_hwi (val) & 48)) < 65536;
+ }
+
+
+-/* Return true if VAL is an immediate that can be loaded into a
+- register in a single instruction. */
++/* Return true if immediate VAL can be created by a 64-bit MOVI/MOVN/MOVZ. */
+ bool
+-aarch64_move_imm (HOST_WIDE_INT val, machine_mode mode)
++aarch64_is_mov_xn_imm (unsigned HOST_WIDE_INT val)
+ {
+- scalar_int_mode int_mode;
+- if (!is_a <scalar_int_mode> (mode, &int_mode))
+- return false;
++ return aarch64_is_movz (val) || aarch64_is_movz (~val)
++ || aarch64_bitmask_imm (val);
++}
+
+- if (aarch64_movw_imm (val, int_mode) || aarch64_movw_imm (~val, int_mode))
+- return 1;
+- return aarch64_bitmask_imm (val, int_mode);
++
++/* Return true if VAL is an immediate that can be created by a single
++ MOV instruction. */
++bool
++aarch64_move_imm (unsigned HOST_WIDE_INT val, machine_mode mode)
++{
++ gcc_assert (mode == SImode || mode == DImode);
++
++ if (val < 65536)
++ return true;
++
++ unsigned HOST_WIDE_INT mask =
++ (val >> 32) == 0 || mode == SImode ? 0xffffffff : HOST_WIDE_INT_M1U;
++
++ if (aarch64_is_movz (val & mask) || aarch64_is_movz (~val & mask))
++ return true;
++
++ val = (val & mask) | ((val << 32) & ~mask);
++ return aarch64_bitmask_imm (val);
+ }
+
+
+ static int
+ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+- scalar_int_mode mode)
++ machine_mode mode)
+ {
+ int i;
+ unsigned HOST_WIDE_INT val, val2, mask;
+ int one_match, zero_match;
+ int num_insns;
+
++ gcc_assert (mode == SImode || mode == DImode);
++
+ val = INTVAL (imm);
+
+ if (aarch64_move_imm (val, mode))
+@@ -5910,31 +5912,6 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+ return 1;
+ }
+
+- /* Check to see if the low 32 bits are either 0xffffXXXX or 0xXXXXffff
+- (with XXXX non-zero). In that case check to see if the move can be done in
+- a smaller mode. */
+- val2 = val & 0xffffffff;
+- if (mode == DImode
+- && aarch64_move_imm (val2, SImode)
+- && (((val >> 32) & 0xffff) == 0 || (val >> 48) == 0))
+- {
+- if (generate)
+- emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
+-
+- /* Check if we have to emit a second instruction by checking to see
+- if any of the upper 32 bits of the original DI mode value is set. */
+- if (val == val2)
+- return 1;
+-
+- i = (val >> 48) ? 48 : 32;
+-
+- if (generate)
+- emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+- GEN_INT ((val >> i) & 0xffff)));
+-
+- return 2;
+- }
+-
+ if ((val >> 32) == 0 || mode == SImode)
+ {
+ if (generate)
+@@ -5958,24 +5935,31 @@ aarch64_internal_mov_immediate (rtx dest, rtx imm, bool generate,
+ one_match = ((~val & mask) == 0) + ((~val & (mask << 16)) == 0) +
+ ((~val & (mask << 32)) == 0) + ((~val & (mask << 48)) == 0);
+
++ /* Try a bitmask immediate and a movk to generate the immediate
++ in 2 instructions. */
++
+ if (zero_match < 2 && one_match < 2)
+ {
+- /* Try emitting a bitmask immediate with a movk replacing 16 bits.
+- For a 64-bit bitmask try whether changing 16 bits to all ones or
+- zeroes creates a valid bitmask. To check any repeated bitmask,
+- try using 16 bits from the other 32-bit half of val. */
+-
+ for (i = 0; i < 64; i += 16)
+- if (aarch64_check_bitmask (val, val2, mask << i))
+- {
+- if (generate)
+- {
+- emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
+- emit_insn (gen_insv_immdi (dest, GEN_INT (i),
+- GEN_INT ((val >> i) & 0xffff)));
+- }
+- return 2;
+- }
++ {
++ if (aarch64_check_bitmask (val, val2, mask << i))
++ break;
++
++ val2 = val & ~(mask << i);
++ if ((val2 >> 32) == 0 && aarch64_move_imm (val2, DImode))
++ break;
++ }
++
++ if (i != 64)
++ {
++ if (generate)
++ {
++ emit_insn (gen_rtx_SET (dest, GEN_INT (val2)));
++ emit_insn (gen_insv_immdi (dest, GEN_INT (i),
++ GEN_INT ((val >> i) & 0xffff)));
++ }
++ return 2;
++ }
+ }
+
+ /* Try a bitmask plus 2 movk to generate the immediate in 3 instructions. */
+@@ -6044,26 +6028,24 @@ aarch64_mov128_immediate (rtx imm)
+ /* Return true if val can be encoded as a 12-bit unsigned immediate with
+ a left shift of 0 or 12 bits. */
+ bool
+-aarch64_uimm12_shift (HOST_WIDE_INT val)
++aarch64_uimm12_shift (unsigned HOST_WIDE_INT val)
+ {
+- return ((val & (((HOST_WIDE_INT) 0xfff) << 0)) == val
+- || (val & (((HOST_WIDE_INT) 0xfff) << 12)) == val
+- );
++ return val < 4096 || (val & 0xfff000) == val;
+ }
+
+ /* Returns the nearest value to VAL that will fit as a 12-bit unsigned immediate
+ that can be created with a left shift of 0 or 12. */
+ static HOST_WIDE_INT
+-aarch64_clamp_to_uimm12_shift (HOST_WIDE_INT val)
++aarch64_clamp_to_uimm12_shift (unsigned HOST_WIDE_INT val)
+ {
+ /* Check to see if the value fits in 24 bits, as that is the maximum we can
+ handle correctly. */
+- gcc_assert ((val & 0xffffff) == val);
++ gcc_assert (val < 0x1000000);
+
+- if (((val & 0xfff) << 0) == val)
++ if (val < 4096)
+ return val;
+
+- return val & (0xfff << 12);
++ return val & 0xfff000;
+ }
+
+
+@@ -7211,8 +7193,7 @@ aarch64_expand_mov_immediate (rtx dest, rtx imm)
+ return;
+ }
+
+- aarch64_internal_mov_immediate (dest, imm, true,
+- as_a <scalar_int_mode> (mode));
++ aarch64_internal_mov_immediate (dest, imm, true, mode);
+ }
+
+ /* Return the MEM rtx that provides the canary value that should be used
+@@ -11410,9 +11391,7 @@ aarch64_float_const_rtx_p (rtx x)
+ && SCALAR_FLOAT_MODE_P (mode)
+ && aarch64_reinterpret_float_as_int (x, &ival))
+ {
+- scalar_int_mode imode = (mode == HFmode
+- ? SImode
+- : int_mode_for_mode (mode).require ());
++ machine_mode imode = known_eq (GET_MODE_SIZE (mode), 8) ? DImode : SImode;
+ int num_instr = aarch64_internal_mov_immediate
+ (NULL_RTX, gen_int_mode (ival, imode), false, imode);
+ return num_instr < 3;
+@@ -14049,10 +14028,10 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+ proportionally expensive to the number of instructions
+ required to build that constant. This is true whether we
+ are compiling for SPEED or otherwise. */
+- if (!is_a <scalar_int_mode> (mode, &int_mode))
+- int_mode = word_mode;
++ machine_mode imode = known_le (GET_MODE_SIZE (mode), 4)
++ ? SImode : DImode;
+ *cost = COSTS_N_INSNS (aarch64_internal_mov_immediate
+- (NULL_RTX, x, false, int_mode));
++ (NULL_RTX, x, false, imode));
+ }
+ return true;
+
+@@ -14068,9 +14047,8 @@ aarch64_rtx_costs (rtx x, machine_mode mode, int outer ATTRIBUTE_UNUSED,
+ bool succeed = aarch64_reinterpret_float_as_int (x, &ival);
+ gcc_assert (succeed);
+
+- scalar_int_mode imode = (mode == HFmode
+- ? SImode
+- : int_mode_for_mode (mode).require ());
++ machine_mode imode = known_eq (GET_MODE_SIZE (mode), 8)
++ ? DImode : SImode;
+ int ncost = aarch64_internal_mov_immediate
+ (NULL_RTX, gen_int_mode (ival, imode), false, imode);
+ *cost += COSTS_N_INSNS (ncost);
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index 7454a5c77..ea94152bf 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -1288,16 +1288,15 @@
+ )
+
+ (define_insn_and_split "*movdi_aarch64"
+- [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,r,r, r,w, m,m, r, r, r, w,r,w, w")
+- (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,N,M,n,Usv,m,m,rZ,w,Usw,Usa,Ush,rZ,w,w,Dd"))]
++ [(set (match_operand:DI 0 "nonimmediate_operand" "=r,k,r,r,r,r, r,w, m,m, r, r, r, w,r,w, w")
++ (match_operand:DI 1 "aarch64_mov_operand" " r,r,k,O,n,Usv,m,m,rZ,w,Usw,Usa,Ush,rZ,w,w,Dd"))]
+ "(register_operand (operands[0], DImode)
+ || aarch64_reg_or_zero (operands[1], DImode))"
+ "@
+ mov\\t%x0, %x1
+ mov\\t%0, %x1
+ mov\\t%x0, %1
+- mov\\t%x0, %1
+- mov\\t%w0, %1
++ * return aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? \"mov\\t%x0, %1\" : \"mov\\t%w0, %1\";
+ #
+ * return aarch64_output_sve_cnt_immediate (\"cnt\", \"%x0\", operands[1]);
+ ldr\\t%x0, %1
+@@ -1319,11 +1318,11 @@
+ DONE;
+ }"
+ ;; The "mov_imm" type for CNTD is just a placeholder.
+- [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,mov_imm,mov_imm,mov_imm,
++ [(set_attr "type" "mov_reg,mov_reg,mov_reg,mov_imm,mov_imm,mov_imm,
+ load_8,load_8,store_8,store_8,load_8,adr,adr,f_mcr,f_mrc,
+ fmov,neon_move")
+- (set_attr "arch" "*,*,*,*,*,*,sve,*,fp,*,fp,*,*,*,fp,fp,fp,simd")
+- (set_attr "length" "4,4,4,4,4,*, 4,4, 4,4, 4,8,4,4, 4, 4, 4, 4")]
++ (set_attr "arch" "*,*,*,*,*,sve,*,fp,*,fp,*,*,*,fp,fp,fp,simd")
++ (set_attr "length" "4,4,4,4,*, 4,4, 4,4, 4,8,4,4, 4, 4, 4, 4")]
+ )
+
+ (define_insn "insv_imm<mode>"
+@@ -1487,7 +1486,7 @@
+
+ (define_insn "*mov<mode>_aarch64"
+ [(set (match_operand:DFD 0 "nonimmediate_operand" "=w, w ,?r,w,w ,w ,w,m,r,m ,r,r")
+- (match_operand:DFD 1 "general_operand" "Y , ?rY, w,w,Ufc,Uvi,m,w,m,rY,r,N"))]
++ (match_operand:DFD 1 "general_operand" "Y , ?rY, w,w,Ufc,Uvi,m,w,m,rY,r,O"))]
+ "TARGET_FLOAT && (register_operand (operands[0], <MODE>mode)
+ || aarch64_reg_or_fp_zero (operands[1], <MODE>mode))"
+ "@
+@@ -1502,7 +1501,7 @@
+ ldr\\t%x0, %1
+ str\\t%x1, %0
+ mov\\t%x0, %x1
+- mov\\t%x0, %1"
++ * return aarch64_is_mov_xn_imm (INTVAL (operands[1])) ? \"mov\\t%x0, %1\" : \"mov\\t%w0, %1\";"
+ [(set_attr "type" "neon_move,f_mcr,f_mrc,fmov,fconstd,neon_move,\
+ f_loadd,f_stored,load_8,store_8,mov_reg,\
+ fconstd")
+diff --git a/gcc/config/aarch64/constraints.md b/gcc/config/aarch64/constraints.md
+index ee7587cca..750a42fb1 100644
+--- a/gcc/config/aarch64/constraints.md
++++ b/gcc/config/aarch64/constraints.md
+@@ -106,6 +106,11 @@
+
+ (define_constraint "N"
+ "A constant that can be used with a 64-bit MOV immediate operation."
++ (and (match_code "const_int")
++ (match_test "aarch64_is_mov_xn_imm (ival)")))
++
++(define_constraint "O"
++ "A constant that can be used with a 32 or 64-bit MOV immediate operation."
+ (and (match_code "const_int")
+ (match_test "aarch64_move_imm (ival, DImode)")))
+
+--
+2.33.0
+
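
For readers unfamiliar with the encoding tricks used above, the following minimal
standalone C sketch re-expresses the checks this patch introduces
(aarch64_is_movz, aarch64_is_mov_xn_imm, the refactored aarch64_move_imm, and
the simplified aarch64_uimm12_shift). It is not GCC source: the names ctz64,
is_movz, is_mov_xn_imm, move_imm and uimm12_shift are local to the sketch, it
assumes the GCC/Clang __builtin_ctzll builtin, and the logical (bitmask)
immediate leg of the real code is deliberately omitted.

#include <stdint.h>
#include <stdio.h>

/* Trailing-zero count with ctz64 (0) == 64, matching GCC's ctz_hwi.  */
static int
ctz64 (uint64_t x)
{
  return x ? __builtin_ctzll (x) : 64;
}

/* Sketch of aarch64_is_movz: VAL is a MOVZ immediate if it is a 16-bit
   value placed at bit 0, 16, 32 or 48.  "ctz64 (val) & 48" rounds the
   trailing-zero count down to a multiple of 16, so the shifted result
   fits in 16 bits exactly when VAL == imm16 << (16 * k).  */
static int
is_movz (uint64_t val)
{
  return (val >> (ctz64 (val) & 48)) < 65536;
}

/* Sketch of aarch64_is_mov_xn_imm without the bitmask-immediate leg:
   a 64-bit MOVZ builds imm16 << (16 * k), and MOVN builds its
   complement.  The real function also accepts ORR-style logical
   immediates via aarch64_bitmask_imm.  */
static int
is_mov_xn_imm (uint64_t val)
{
  return is_movz (val) || is_movz (~val);
}

/* Sketch of the refactored aarch64_move_imm.  The key simplification
   from the patch: when the upper 32 bits are zero (or the mode is
   SImode), test MOVZ/MOVN on the low 32 bits only, because a 32-bit
   MOV to a W register zero-extends into the X register.  The final
   bitmask-immediate test on the replicated value is omitted here.  */
static int
move_imm (uint64_t val, int is_simode)
{
  if (val < 65536)
    return 1;

  uint64_t mask
    = ((val >> 32) == 0 || is_simode) ? 0xffffffffu : ~(uint64_t) 0;

  return is_movz (val & mask) || is_movz (~val & mask);
}

/* Sketch of the simplified aarch64_uimm12_shift: a 12-bit ADD/SUB
   immediate, optionally shifted left by 12.  */
static int
uimm12_shift (uint64_t val)
{
  return val < 4096 || (val & 0xfff000) == val;
}

int
main (void)
{
  /* MOVN x0, #0xedcb materialises 0xffffffffffff1234.  */
  printf ("%d\n", is_mov_xn_imm (0xffffffffffff1234ull));
  /* The case the patch stops treating specially: 0xffffefff in the low
     half with a zero high half is one 32-bit MOVN (movn w0, #0x1000).  */
  printf ("%d\n", move_imm (0x00000000ffffefffull, 0));
  /* 0x123000 == 0x123 << 12 fits the shifted ADD/SUB immediate form.  */
  printf ("%d\n", uimm12_shift (0x123000));
  return 0;
}

Built with any hosted C compiler, the program prints 1 for all three cases,
matching what the patched constraint 'O' and aarch64_move_imm accept.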