Diffstat (limited to '0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch')
-rw-r--r-- | 0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch | 72 |
1 file changed, 72 insertions, 0 deletions
diff --git a/0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch b/0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch
new file mode 100644
index 0000000..eefe7d8
--- /dev/null
+++ b/0208-Backport-SME-aarch64-Add-a-VNx1TI-mode.patch
@@ -0,0 +1,72 @@
+From 8310c0df319a86bc2f63b8d3198dd1c394827bac Mon Sep 17 00:00:00 2001
+From: Richard Sandiford <richard.sandiford@arm.com>
+Date: Tue, 5 Dec 2023 10:11:27 +0000
+Subject: [PATCH 109/157] [Backport][SME] aarch64: Add a VNx1TI mode
+
+Reference: https://gcc.gnu.org/git/?p=gcc.git;a=commit;h=80fc055cf00fee4b1f9f19f77c8880b12226e086
+
+Although TI isn't really a native SVE element mode, it's convenient
+for SME if we define VNx1TI anyway, so that it can be used to
+distinguish .Q ZA operations from others.  It's purely an RTL
+convenience and isn't (yet) a valid storage mode.
+
+gcc/
+	* config/aarch64/aarch64-modes.def: Add VNx1TI.
+---
+ gcc/config/aarch64/aarch64-modes.def | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+diff --git a/gcc/config/aarch64/aarch64-modes.def b/gcc/config/aarch64/aarch64-modes.def
+index 8f399225a..8fa66fdb3 100644
+--- a/gcc/config/aarch64/aarch64-modes.def
++++ b/gcc/config/aarch64/aarch64-modes.def
+@@ -146,7 +146,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
+    for 8-bit, 16-bit, 32-bit and 64-bit elements respectively.  It isn't
+    strictly necessary to set the alignment here, since the default would
+    be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer.  */
+-#define SVE_MODES(NVECS, VB, VH, VS, VD) \
++#define SVE_MODES(NVECS, VB, VH, VS, VD, VT) \
+   VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
+   VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
+ \
+@@ -154,6 +154,7 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
+   ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
+   ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
+   ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
++  ADJUST_NUNITS (VT##TI, exact_div (aarch64_sve_vg * NVECS, 2)); \
+   ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
+   ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
+   ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
+@@ -163,17 +164,23 @@ ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)
+   ADJUST_ALIGNMENT (VH##HI, 16); \
+   ADJUST_ALIGNMENT (VS##SI, 16); \
+   ADJUST_ALIGNMENT (VD##DI, 16); \
++  ADJUST_ALIGNMENT (VT##TI, 16); \
+   ADJUST_ALIGNMENT (VH##BF, 16); \
+   ADJUST_ALIGNMENT (VH##HF, 16); \
+   ADJUST_ALIGNMENT (VS##SF, 16); \
+   ADJUST_ALIGNMENT (VD##DF, 16);
+ 
+-/* Give SVE vectors the names normally used for 256-bit vectors.
+-   The actual number depends on command-line flags.  */
+-SVE_MODES (1, VNx16, VNx8, VNx4, VNx2)
+-SVE_MODES (2, VNx32, VNx16, VNx8, VNx4)
+-SVE_MODES (3, VNx48, VNx24, VNx12, VNx6)
+-SVE_MODES (4, VNx64, VNx32, VNx16, VNx8)
++/* Give SVE vectors names of the form VNxX, where X describes what is
++   stored in each 128-bit unit.  The actual size of the mode depends
++   on command-line flags.
++
++   VNx1TI isn't really a native SVE mode, but it can be useful in some
++   limited situations.  */
++VECTOR_MODE_WITH_PREFIX (VNx, INT, TI, 1, 1);
++SVE_MODES (1, VNx16, VNx8, VNx4, VNx2, VNx1)
++SVE_MODES (2, VNx32, VNx16, VNx8, VNx4, VNx2)
++SVE_MODES (3, VNx48, VNx24, VNx12, VNx6, VNx3)
++SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)
+ 
+ /* Partial SVE vectors:
+ 
+-- 
+2.33.0
+
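A note on the nunits arithmetic in the patch above: aarch64_sve_vg is GCC's
(possibly runtime-variable) count of 64-bit granules in one SVE vector, so a
single vector holds vg 64-bit (DI) elements and hence vg / 2 128-bit (TI)
elements; exact_div asserts that the division is exact, which holds because
the SVE vector length is always a multiple of 128 bits, making vg even.  The
standalone C program below replays that arithmetic for a few fixed vector
lengths.  It is only an illustrative sketch, not GCC code, and the helper
name vnx1ti_nunits is invented for the example.

#include <assert.h>
#include <stdio.h>

/* Hypothetical helper (not part of GCC): mirrors the patch's
   ADJUST_NUNITS (VT##TI, exact_div (aarch64_sve_vg * NVECS, 2))
   for a fixed SVE vector length of VL_BITS bits.  */
static unsigned
vnx1ti_nunits (unsigned vl_bits, unsigned nvecs)
{
  unsigned vg = vl_bits / 64;	  /* aarch64_sve_vg for this length.  */
  assert ((vg * nvecs) % 2 == 0); /* What exact_div () guarantees.  */
  return vg * nvecs / 2;	  /* Number of 128-bit TI units.  */
}

int
main (void)
{
  /* Architected SVE lengths are multiples of 128 bits, so vg is even
     and VNx1TI always contains a whole number of TI units.  */
  for (unsigned vl = 128; vl <= 2048; vl *= 2)
    printf ("VL=%4u bits: VNx1TI covers %u TI unit(s)\n",
	    vl, vnx1ti_nunits (vl, 1));
  return 0;
}

At the minimum vector length of 128 bits, vg == 2 and VNx1TI holds exactly
one TI value, which is what lets it stand for a single .Q element per
128-bit unit in the SME ZA operations the commit message mentions.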