From 2fb6e0cf79df2056e9750e29669c4633555e74b8 Mon Sep 17 00:00:00 2001
From: CoprDistGit
Date: Tue, 15 Oct 2024 06:24:53 +0000
Subject: automatic import of glibc

---
 ...xmmN-with-vpxor-to-clear-a-vector-registe.patch | 43 ++++++++++++++++++++++
 1 file changed, 43 insertions(+)
 create mode 100644 x86_64-Use-xmmN-with-vpxor-to-clear-a-vector-registe.patch

diff --git a/x86_64-Use-xmmN-with-vpxor-to-clear-a-vector-registe.patch b/x86_64-Use-xmmN-with-vpxor-to-clear-a-vector-registe.patch
new file mode 100644
index 0000000..a308561
--- /dev/null
+++ b/x86_64-Use-xmmN-with-vpxor-to-clear-a-vector-registe.patch
@@ -0,0 +1,43 @@
+From a35a59036ebae3efcdf5e8167610e0656fca9770 Mon Sep 17 00:00:00 2001
+From: "H.J. Lu"
+Date: Thu, 11 Jun 2020 12:41:18 -0700
+Subject: [PATCH] x86_64: Use %xmmN with vpxor to clear a vector register
+
+Since "vpxor %xmmN, %xmmN, %xmmN" clears the whole vector register, use
+%xmmN, instead of %ymmN, with vpxor to clear a vector register.
+---
+ sysdeps/x86_64/multiarch/strcmp-avx2.S  | 4 ++--
+ sysdeps/x86_64/multiarch/strrchr-avx2.S | 2 +-
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/sysdeps/x86_64/multiarch/strcmp-avx2.S b/sysdeps/x86_64/multiarch/strcmp-avx2.S
+index 48d03a9f46..5f88a68262 100644
+--- a/sysdeps/x86_64/multiarch/strcmp-avx2.S
++++ b/sysdeps/x86_64/multiarch/strcmp-avx2.S
+@@ -91,8 +91,8 @@ ENTRY (STRCMP)
+ # endif
+ 	movl	%edi, %eax
+ 	xorl	%edx, %edx
+-	/* Make %ymm7 all zeros in this function.  */
+-	vpxor	%ymm7, %ymm7, %ymm7
++	/* Make %xmm7 (%ymm7) all zeros in this function.  */
++	vpxor	%xmm7, %xmm7, %xmm7
+ 	orl	%esi, %eax
+ 	andl	$(PAGE_SIZE - 1), %eax
+ 	cmpl	$(PAGE_SIZE - (VEC_SIZE * 4)), %eax
+diff --git a/sysdeps/x86_64/multiarch/strrchr-avx2.S b/sysdeps/x86_64/multiarch/strrchr-avx2.S
+index 23077b4c45..146bdd51d0 100644
+--- a/sysdeps/x86_64/multiarch/strrchr-avx2.S
++++ b/sysdeps/x86_64/multiarch/strrchr-avx2.S
+@@ -44,7 +44,7 @@ ENTRY (STRRCHR)
+ 	movl	%edi, %ecx
+ 	/* Broadcast CHAR to YMM4.  */
+ 	VPBROADCAST	%xmm4, %ymm4
+-	vpxor	%ymm0, %ymm0, %ymm0
++	vpxor	%xmm0, %xmm0, %xmm0
+ 
+ 	/* Check if we may cross page boundary with one vector load.  */
+ 	andl	$(2 * VEC_SIZE - 1), %ecx
+-- 
+2.19.1
+
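
Why the xmm form suffices: any VEX-encoded instruction that writes an
xmm register zero-extends the result into the upper bits of the
corresponding ymm register, so "vpxor %xmm7, %xmm7, %xmm7" leaves all
256 bits of %ymm7 cleared, exactly as the ymm form did. Below is a
minimal standalone check of that behavior; it is not part of the patch
itself, and it assumes GCC-style inline asm, an AVX2-capable CPU, and
a hypothetical file name check.c (build with: gcc -mavx2 check.c).

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t out[4];

	__asm__ volatile (
		/* Set all 256 bits of %ymm0 to 1 so the upper lane is dirty.  */
		"vpcmpeqd %%ymm0, %%ymm0, %%ymm0\n\t"
		/* Clear with the xmm form; VEX zero-extends bits 128..255.  */
		"vpxor %%xmm0, %%xmm0, %%xmm0\n\t"
		/* Store the full 256-bit register so we can inspect it.  */
		"vmovdqu %%ymm0, %0"
		: "=m" (out)
		:
		: "xmm0");

	/* Expected output: 0 0 0 0, i.e. the upper 128 bits are zero too.  */
	printf("%llx %llx %llx %llx\n",
	       (unsigned long long) out[0], (unsigned long long) out[1],
	       (unsigned long long) out[2], (unsigned long long) out[3]);
	return 0;
}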