author    CoprDistGit <infra@openeuler.org>  2025-02-28 10:03:49 +0000
committer CoprDistGit <infra@openeuler.org>  2025-02-28 10:03:49 +0000
commit    73127104a245052cd5cf29cdaaca3e5c32c70348 (patch)
tree      8e28b63e478c43c252f18b49836dff7313affe54 /0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
parent    49d3feaf4665cdb07576fc1a2382a4d82a612d35 (diff)

automatic import of gcc (openeuler24.03_LTS_SP1)
Diffstat (limited to '0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch')
-rw-r--r--  0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch  453

1 files changed, 453 insertions, 0 deletions
diff --git a/0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch b/0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
new file mode 100644
index 0000000..f935069
--- /dev/null
+++ b/0125-Backport-SME-aarch64-Remove-redundant-TARGET_-checks.patch
@@ -0,0 +1,453 @@
+From 77a86d955dd1c9cd8c7fc35e6caf0cb707799129 Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Thu, 29 Sep 2022 11:32:57 +0100
+Subject: [PATCH 026/157] [Backport][SME] aarch64: Remove redundant TARGET_*
+ checks
+
+Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=a31641840af2c40cf36036fa472df34d4a4402c3
+
+After previous patches, it's possible to remove TARGET_*
+options that are redundant due to (IMO) obvious dependencies.
+
+gcc/
+ * config/aarch64/aarch64.h (TARGET_CRYPTO, TARGET_SHA3, TARGET_SM4)
+ (TARGET_DOTPROD): Don't depend on TARGET_SIMD.
+ (TARGET_AES, TARGET_SHA2): Likewise. Remove TARGET_CRYPTO test.
+ (TARGET_FP_F16INST): Don't depend on TARGET_FLOAT.
+ (TARGET_SVE2, TARGET_SVE_F32MM, TARGET_SVE_F64MM): Don't depend
+ on TARGET_SVE.
+ (TARGET_SVE2_AES, TARGET_SVE2_BITPERM, TARGET_SVE2_SHA3)
+ (TARGET_SVE2_SM4): Don't depend on TARGET_SVE2.
+ (TARGET_F32MM, TARGET_F64MM): Delete.
+ * config/aarch64/aarch64-c.cc (aarch64_update_cpp_builtins): Guard
+ float macros with just TARGET_FLOAT rather than TARGET_FLOAT
+ || TARGET_SIMD.
+ * config/aarch64/aarch64-simd.md (copysign<mode>3): Depend
+ only on TARGET_SIMD, rather than TARGET_FLOAT && TARGET_SIMD.
+ (aarch64_crypto_aes<aes_op>v16qi): Depend only on TARGET_AES,
+ rather than TARGET_SIMD && TARGET_AES.
+ (aarch64_crypto_aes<aesmc_op>v16qi): Likewise.
+ (*aarch64_crypto_aese_fused): Likewise.
+ (*aarch64_crypto_aesd_fused): Likewise.
+ (aarch64_crypto_pmulldi): Likewise.
+ (aarch64_crypto_pmullv2di): Likewise.
+ (aarch64_crypto_sha1hsi): Likewise TARGET_SHA2.
+ (aarch64_crypto_sha1hv4si): Likewise.
+ (aarch64_be_crypto_sha1hv4si): Likewise.
+ (aarch64_crypto_sha1su1v4si): Likewise.
+ (aarch64_crypto_sha1<sha1_op>v4si): Likewise.
+ (aarch64_crypto_sha1su0v4si): Likewise.
+ (aarch64_crypto_sha256h<sha256_op>v4si): Likewise.
+ (aarch64_crypto_sha256su0v4si): Likewise.
+ (aarch64_crypto_sha256su1v4si): Likewise.
+ (aarch64_crypto_sha512h<sha512_op>qv2di): Likewise TARGET_SHA3.
+ (aarch64_crypto_sha512su0qv2di): Likewise.
+ (aarch64_crypto_sha512su1qv2di, eor3q<mode>4): Likewise.
+ (aarch64_rax1qv2di, aarch64_xarqv2di, bcaxq<mode>4): Likewise.
+ (aarch64_sm3ss1qv4si): Likewise TARGET_SM4.
+ (aarch64_sm3tt<sm3tt_op>qv4si): Likewise.
+ (aarch64_sm3partw<sm3part_op>qv4si): Likewise.
+ (aarch64_sm4eqv4si, aarch64_sm4ekeyqv4si): Likewise.
+ * config/aarch64/aarch64.md (<FLOATUORS:optab>dihf2)
+ (copysign<GPF:mode>3, copysign<GPF:mode>3_insn)
+ (xorsign<mode>3): Remove redundant TARGET_FLOAT condition.
+---
+ gcc/config/aarch64/aarch64-c.cc | 2 +-
+ gcc/config/aarch64/aarch64-simd.md | 56 +++++++++++++++---------------
+ gcc/config/aarch64/aarch64.h | 30 ++++++++--------
+ gcc/config/aarch64/aarch64.md | 8 ++---
+ 4 files changed, 47 insertions(+), 49 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-c.cc b/gcc/config/aarch64/aarch64-c.cc
+index 18c9b975b..2dfe2b8f8 100644
+--- a/gcc/config/aarch64/aarch64-c.cc
++++ b/gcc/config/aarch64/aarch64-c.cc
+@@ -92,7 +92,7 @@ aarch64_update_cpp_builtins (cpp_reader *pfile)
+
+ aarch64_def_or_undef (TARGET_FLOAT, "__ARM_FEATURE_FMA", pfile);
+
+- if (TARGET_FLOAT || TARGET_SIMD)
++ if (TARGET_FLOAT)
+ {
+ builtin_define_with_int_value ("__ARM_FP", 0x0E);
+ builtin_define ("__ARM_FP16_FORMAT_IEEE");
+diff --git a/gcc/config/aarch64/aarch64-simd.md b/gcc/config/aarch64/aarch64-simd.md
+index de92802f5..a47b39281 100644
+--- a/gcc/config/aarch64/aarch64-simd.md
++++ b/gcc/config/aarch64/aarch64-simd.md
+@@ -693,7 +693,7 @@
+ [(match_operand:VHSDF 0 "register_operand")
+ (match_operand:VHSDF 1 "register_operand")
+ (match_operand:VHSDF 2 "register_operand")]
+- "TARGET_FLOAT && TARGET_SIMD"
++ "TARGET_SIMD"
+ {
+ rtx v_bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
+ int bits = GET_MODE_UNIT_BITSIZE (<MODE>mode) - 1;
+@@ -8352,7 +8352,7 @@
+ (match_operand:V16QI 1 "register_operand" "%0")
+ (match_operand:V16QI 2 "register_operand" "w"))]
+ CRYPTO_AES))]
+- "TARGET_SIMD && TARGET_AES"
++ "TARGET_AES"
+ "aes<aes_op>\\t%0.16b, %2.16b"
+ [(set_attr "type" "crypto_aese")]
+ )
+@@ -8361,7 +8361,7 @@
+ [(set (match_operand:V16QI 0 "register_operand" "=w")
+ (unspec:V16QI [(match_operand:V16QI 1 "register_operand" "w")]
+ CRYPTO_AESMC))]
+- "TARGET_SIMD && TARGET_AES"
++ "TARGET_AES"
+ "aes<aesmc_op>\\t%0.16b, %1.16b"
+ [(set_attr "type" "crypto_aesmc")]
+ )
+@@ -8380,7 +8380,7 @@
+ (match_operand:V16QI 2 "register_operand" "w"))]
+ UNSPEC_AESE)]
+ UNSPEC_AESMC))]
+- "TARGET_SIMD && TARGET_AES
++ "TARGET_AES
+ && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
+ "aese\\t%0.16b, %2.16b\;aesmc\\t%0.16b, %0.16b"
+ [(set_attr "type" "crypto_aese")
+@@ -8401,7 +8401,7 @@
+ (match_operand:V16QI 2 "register_operand" "w"))]
+ UNSPEC_AESD)]
+ UNSPEC_AESIMC))]
+- "TARGET_SIMD && TARGET_AES
++ "TARGET_AES
+ && aarch64_fusion_enabled_p (AARCH64_FUSE_AES_AESMC)"
+ "aesd\\t%0.16b, %2.16b\;aesimc\\t%0.16b, %0.16b"
+ [(set_attr "type" "crypto_aese")
+@@ -8415,7 +8415,7 @@
+ (unspec:SI [(match_operand:SI 1
+ "register_operand" "w")]
+ UNSPEC_SHA1H))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha1h\\t%s0, %s1"
+ [(set_attr "type" "crypto_sha1_fast")]
+ )
+@@ -8425,7 +8425,7 @@
+ (unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
+ (parallel [(const_int 0)]))]
+ UNSPEC_SHA1H))]
+- "TARGET_SIMD && TARGET_SHA2 && !BYTES_BIG_ENDIAN"
++ "TARGET_SHA2 && !BYTES_BIG_ENDIAN"
+ "sha1h\\t%s0, %s1"
+ [(set_attr "type" "crypto_sha1_fast")]
+ )
+@@ -8435,7 +8435,7 @@
+ (unspec:SI [(vec_select:SI (match_operand:V4SI 1 "register_operand" "w")
+ (parallel [(const_int 3)]))]
+ UNSPEC_SHA1H))]
+- "TARGET_SIMD && TARGET_SHA2 && BYTES_BIG_ENDIAN"
++ "TARGET_SHA2 && BYTES_BIG_ENDIAN"
+ "sha1h\\t%s0, %s1"
+ [(set_attr "type" "crypto_sha1_fast")]
+ )
+@@ -8445,7 +8445,7 @@
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA1SU1))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha1su1\\t%0.4s, %2.4s"
+ [(set_attr "type" "crypto_sha1_fast")]
+ )
+@@ -8456,7 +8456,7 @@
+ (match_operand:SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA1))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha1<sha1_op>\\t%q0, %s2, %3.4s"
+ [(set_attr "type" "crypto_sha1_slow")]
+ )
+@@ -8467,7 +8467,7 @@
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA1SU0))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha1su0\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sha1_xor")]
+ )
+@@ -8480,7 +8480,7 @@
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SHA256))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha256h<sha256_op>\\t%q0, %q2, %3.4s"
+ [(set_attr "type" "crypto_sha256_slow")]
+ )
+@@ -8490,7 +8490,7 @@
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SHA256SU0))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha256su0\\t%0.4s, %2.4s"
+ [(set_attr "type" "crypto_sha256_fast")]
+ )
+@@ -8501,7 +8501,7 @@
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SHA256SU1))]
+- "TARGET_SIMD && TARGET_SHA2"
++ "TARGET_SHA2"
+ "sha256su1\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sha256_slow")]
+ )
+@@ -8514,7 +8514,7 @@
+ (match_operand:V2DI 2 "register_operand" "w")
+ (match_operand:V2DI 3 "register_operand" "w")]
+ CRYPTO_SHA512))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "sha512h<sha512_op>\\t%q0, %q2, %3.2d"
+ [(set_attr "type" "crypto_sha512")]
+ )
+@@ -8524,7 +8524,7 @@
+ (unspec:V2DI [(match_operand:V2DI 1 "register_operand" "0")
+ (match_operand:V2DI 2 "register_operand" "w")]
+ UNSPEC_SHA512SU0))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "sha512su0\\t%0.2d, %2.2d"
+ [(set_attr "type" "crypto_sha512")]
+ )
+@@ -8535,7 +8535,7 @@
+ (match_operand:V2DI 2 "register_operand" "w")
+ (match_operand:V2DI 3 "register_operand" "w")]
+ UNSPEC_SHA512SU1))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "sha512su1\\t%0.2d, %2.2d, %3.2d"
+ [(set_attr "type" "crypto_sha512")]
+ )
+@@ -8549,7 +8549,7 @@
+ (match_operand:VQ_I 2 "register_operand" "w")
+ (match_operand:VQ_I 3 "register_operand" "w"))
+ (match_operand:VQ_I 1 "register_operand" "w")))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "eor3\\t%0.16b, %1.16b, %2.16b, %3.16b"
+ [(set_attr "type" "crypto_sha3")]
+ )
+@@ -8561,7 +8561,7 @@
+ (match_operand:V2DI 2 "register_operand" "w")
+ (const_int 1))
+ (match_operand:V2DI 1 "register_operand" "w")))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "rax1\\t%0.2d, %1.2d, %2.2d"
+ [(set_attr "type" "crypto_sha3")]
+ )
+@@ -8573,7 +8573,7 @@
+ (match_operand:V2DI 1 "register_operand" "%w")
+ (match_operand:V2DI 2 "register_operand" "w"))
+ (match_operand:SI 3 "aarch64_simd_shift_imm_di" "Usd")))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "xar\\t%0.2d, %1.2d, %2.2d, %3"
+ [(set_attr "type" "crypto_sha3")]
+ )
+@@ -8585,7 +8585,7 @@
+ (not:VQ_I (match_operand:VQ_I 3 "register_operand" "w"))
+ (match_operand:VQ_I 2 "register_operand" "w"))
+ (match_operand:VQ_I 1 "register_operand" "w")))]
+- "TARGET_SIMD && TARGET_SHA3"
++ "TARGET_SHA3"
+ "bcax\\t%0.16b, %1.16b, %2.16b, %3.16b"
+ [(set_attr "type" "crypto_sha3")]
+ )
+@@ -8598,7 +8598,7 @@
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ UNSPEC_SM3SS1))]
+- "TARGET_SIMD && TARGET_SM4"
++ "TARGET_SM4"
+ "sm3ss1\\t%0.4s, %1.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sm3")]
+ )
+@@ -8611,7 +8611,7 @@
+ (match_operand:V4SI 3 "register_operand" "w")
+ (match_operand:SI 4 "aarch64_imm2" "Ui2")]
+ CRYPTO_SM3TT))]
+- "TARGET_SIMD && TARGET_SM4"
++ "TARGET_SM4"
+ "sm3tt<sm3tt_op>\\t%0.4s, %2.4s, %3.4s[%4]"
+ [(set_attr "type" "crypto_sm3")]
+ )
+@@ -8622,7 +8622,7 @@
+ (match_operand:V4SI 2 "register_operand" "w")
+ (match_operand:V4SI 3 "register_operand" "w")]
+ CRYPTO_SM3PART))]
+- "TARGET_SIMD && TARGET_SM4"
++ "TARGET_SM4"
+ "sm3partw<sm3part_op>\\t%0.4s, %2.4s, %3.4s"
+ [(set_attr "type" "crypto_sm3")]
+ )
+@@ -8634,7 +8634,7 @@
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "0")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SM4E))]
+- "TARGET_SIMD && TARGET_SM4"
++ "TARGET_SM4"
+ "sm4e\\t%0.4s, %2.4s"
+ [(set_attr "type" "crypto_sm4")]
+ )
+@@ -8644,7 +8644,7 @@
+ (unspec:V4SI [(match_operand:V4SI 1 "register_operand" "w")
+ (match_operand:V4SI 2 "register_operand" "w")]
+ UNSPEC_SM4EKEY))]
+- "TARGET_SIMD && TARGET_SM4"
++ "TARGET_SM4"
+ "sm4ekey\\t%0.4s, %1.4s, %2.4s"
+ [(set_attr "type" "crypto_sm4")]
+ )
+@@ -9230,7 +9230,7 @@
+ (unspec:TI [(match_operand:DI 1 "register_operand" "w")
+ (match_operand:DI 2 "register_operand" "w")]
+ UNSPEC_PMULL))]
+- "TARGET_SIMD && TARGET_AES"
++ "TARGET_AES"
+ "pmull\\t%0.1q, %1.1d, %2.1d"
+ [(set_attr "type" "crypto_pmull")]
+ )
+@@ -9240,7 +9240,7 @@
+ (unspec:TI [(match_operand:V2DI 1 "register_operand" "w")
+ (match_operand:V2DI 2 "register_operand" "w")]
+ UNSPEC_PMULL2))]
+- "TARGET_SIMD && TARGET_AES"
++ "TARGET_AES"
+ "pmull2\\t%0.1q, %1.2d, %2.2d"
+ [(set_attr "type" "crypto_pmull")]
+ )
+diff --git a/gcc/config/aarch64/aarch64.h b/gcc/config/aarch64/aarch64.h
+index 521031efe..2a9d2d031 100644
+--- a/gcc/config/aarch64/aarch64.h
++++ b/gcc/config/aarch64/aarch64.h
+@@ -222,19 +222,19 @@ enum class aarch64_feature : unsigned char {
+ #define AARCH64_ISA_LS64 (aarch64_isa_flags & AARCH64_FL_LS64)
+
+ /* Crypto is an optional extension to AdvSIMD. */
+-#define TARGET_CRYPTO (TARGET_SIMD && AARCH64_ISA_CRYPTO)
++#define TARGET_CRYPTO (AARCH64_ISA_CRYPTO)
+
+ /* SHA2 is an optional extension to AdvSIMD. */
+-#define TARGET_SHA2 ((TARGET_SIMD && AARCH64_ISA_SHA2) || TARGET_CRYPTO)
++#define TARGET_SHA2 (AARCH64_ISA_SHA2)
+
+ /* SHA3 is an optional extension to AdvSIMD. */
+-#define TARGET_SHA3 (TARGET_SIMD && AARCH64_ISA_SHA3)
++#define TARGET_SHA3 (AARCH64_ISA_SHA3)
+
+ /* AES is an optional extension to AdvSIMD. */
+-#define TARGET_AES ((TARGET_SIMD && AARCH64_ISA_AES) || TARGET_CRYPTO)
++#define TARGET_AES (AARCH64_ISA_AES)
+
+ /* SM is an optional extension to AdvSIMD. */
+-#define TARGET_SM4 (TARGET_SIMD && AARCH64_ISA_SM4)
++#define TARGET_SM4 (AARCH64_ISA_SM4)
+
+ /* FP16FML is an optional extension to AdvSIMD. */
+ #define TARGET_F16FML (TARGET_SIMD && AARCH64_ISA_F16FML && TARGET_FP_F16INST)
+@@ -246,29 +246,29 @@ enum class aarch64_feature : unsigned char {
+ #define TARGET_LSE (AARCH64_ISA_LSE)
+
+ /* ARMv8.2-A FP16 support that can be enabled through the +fp16 extension. */
+-#define TARGET_FP_F16INST (TARGET_FLOAT && AARCH64_ISA_F16)
++#define TARGET_FP_F16INST (AARCH64_ISA_F16)
+ #define TARGET_SIMD_F16INST (TARGET_SIMD && AARCH64_ISA_F16)
+
+ /* Dot Product is an optional extension to AdvSIMD enabled through +dotprod. */
+-#define TARGET_DOTPROD (TARGET_SIMD && AARCH64_ISA_DOTPROD)
++#define TARGET_DOTPROD (AARCH64_ISA_DOTPROD)
+
+ /* SVE instructions, enabled through +sve. */
+ #define TARGET_SVE (AARCH64_ISA_SVE)
+
+ /* SVE2 instructions, enabled through +sve2. */
+-#define TARGET_SVE2 (TARGET_SVE && AARCH64_ISA_SVE2)
++#define TARGET_SVE2 (AARCH64_ISA_SVE2)
+
+ /* SVE2 AES instructions, enabled through +sve2-aes. */
+-#define TARGET_SVE2_AES (TARGET_SVE2 && AARCH64_ISA_SVE2_AES)
++#define TARGET_SVE2_AES (AARCH64_ISA_SVE2_AES)
+
+ /* SVE2 BITPERM instructions, enabled through +sve2-bitperm. */
+-#define TARGET_SVE2_BITPERM (TARGET_SVE2 && AARCH64_ISA_SVE2_BITPERM)
++#define TARGET_SVE2_BITPERM (AARCH64_ISA_SVE2_BITPERM)
+
+ /* SVE2 SHA3 instructions, enabled through +sve2-sha3. */
+-#define TARGET_SVE2_SHA3 (TARGET_SVE2 && AARCH64_ISA_SVE2_SHA3)
++#define TARGET_SVE2_SHA3 (AARCH64_ISA_SVE2_SHA3)
+
+ /* SVE2 SM4 instructions, enabled through +sve2-sm4. */
+-#define TARGET_SVE2_SM4 (TARGET_SVE2 && AARCH64_ISA_SVE2_SM4)
++#define TARGET_SVE2_SM4 (AARCH64_ISA_SVE2_SM4)
+
+ /* ARMv8.3-A features. */
+ #define TARGET_ARMV8_3 (AARCH64_ISA_V8_3A)
+@@ -296,12 +296,10 @@ enum class aarch64_feature : unsigned char {
+ #define TARGET_SVE_I8MM (TARGET_SVE && AARCH64_ISA_I8MM)
+
+ /* F32MM instructions are enabled through +f32mm. */
+-#define TARGET_F32MM (AARCH64_ISA_F32MM)
+-#define TARGET_SVE_F32MM (TARGET_SVE && AARCH64_ISA_F32MM)
++#define TARGET_SVE_F32MM (AARCH64_ISA_F32MM)
+
+ /* F64MM instructions are enabled through +f64mm. */
+-#define TARGET_F64MM (AARCH64_ISA_F64MM)
+-#define TARGET_SVE_F64MM (TARGET_SVE && AARCH64_ISA_F64MM)
++#define TARGET_SVE_F64MM (AARCH64_ISA_F64MM)
+
+ /* BF16 instructions are enabled through +bf16. */
+ #define TARGET_BF16_FP (AARCH64_ISA_BF16)
+diff --git a/gcc/config/aarch64/aarch64.md b/gcc/config/aarch64/aarch64.md
+index c0c64a798..7ee26284d 100644
+--- a/gcc/config/aarch64/aarch64.md
++++ b/gcc/config/aarch64/aarch64.md
+@@ -6417,7 +6417,7 @@
+ (define_expand "<optab>dihf2"
+ [(set (match_operand:HF 0 "register_operand")
+ (FLOATUORS:HF (match_operand:DI 1 "register_operand")))]
+- "TARGET_FLOAT && (TARGET_FP_F16INST || TARGET_SIMD)"
++ "TARGET_FP_F16INST || TARGET_SIMD"
+ {
+ if (TARGET_FP_F16INST)
+ emit_insn (gen_aarch64_fp16_<optab>dihf2 (operands[0], operands[1]));
+@@ -6676,7 +6676,7 @@
+ [(match_operand:GPF 0 "register_operand")
+ (match_operand:GPF 1 "register_operand")
+ (match_operand:GPF 2 "register_operand")]
+- "TARGET_FLOAT && TARGET_SIMD"
++ "TARGET_SIMD"
+ {
+ rtx bitmask = gen_reg_rtx (<V_INT_EQUIV>mode);
+ emit_move_insn (bitmask, GEN_INT (HOST_WIDE_INT_M1U
+@@ -6693,7 +6693,7 @@
+ (match_operand:GPF 2 "register_operand" "w,w,0,0")
+ (match_operand:<V_INT_EQUIV> 3 "register_operand" "0,w,w,X")]
+ UNSPEC_COPYSIGN))]
+- "TARGET_FLOAT && TARGET_SIMD"
++ "TARGET_SIMD"
+ "@
+ bsl\\t%0.<Vbtype>, %2.<Vbtype>, %1.<Vbtype>
+ bit\\t%0.<Vbtype>, %2.<Vbtype>, %3.<Vbtype>
+@@ -6714,7 +6714,7 @@
+ [(match_operand:GPF 0 "register_operand")
+ (match_operand:GPF 1 "register_operand")
+ (match_operand:GPF 2 "register_operand")]
+- "TARGET_FLOAT && TARGET_SIMD"
++ "TARGET_SIMD"
+ {
+
+ machine_mode imode = <V_INT_EQUIV>mode;
+--
+2.33.0
+
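
The simplification in the patch above relies on the earlier patches in the
series resolving feature dependencies once, when the command-line options are
processed: if enabling +sha2 (for example) also sets the SIMD bit in
aarch64_isa_flags at option-handling time, then each TARGET_* macro only needs
to test its own feature bit.  A minimal standalone sketch of that pattern
follows; the FL_* names, enable_sha2 and the plain isa_flags variable are
illustrative stand-ins, not GCC's real option machinery.

/* Sketch: why the "TARGET_SIMD &&" half of the old test became redundant.  */
#include <stdio.h>

#define FL_SIMD (1u << 0)
#define FL_SHA2 (1u << 1)

static unsigned int isa_flags;

static void
enable_sha2 (void)
{
  /* The prerequisite is added once, at option-processing time...  */
  isa_flags |= FL_SHA2 | FL_SIMD;
}

/* ...so every later use can test the single feature bit, matching the
   new "#define TARGET_SHA2 (AARCH64_ISA_SHA2)" style in aarch64.h.  */
#define TARGET_SHA2 ((isa_flags & FL_SHA2) != 0)
#define TARGET_SIMD ((isa_flags & FL_SIMD) != 0)

int
main (void)
{
  enable_sha2 ();
  /* Both print 1: SHA2 cannot be enabled without SIMD, so the old
     combined check "TARGET_SIMD && TARGET_SHA2" added nothing.  */
  printf ("TARGET_SIMD = %d, TARGET_SHA2 = %d\n", TARGET_SIMD, TARGET_SHA2);
  return 0;
}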