Diffstat (limited to 'malloc-hugepage-0007-malloc-Enable-huge-page-support-on-main-arena.patch')
-rw-r--r-- | malloc-hugepage-0007-malloc-Enable-huge-page-support-on-main-arena.patch | 86
1 files changed, 86 insertions, 0 deletions
diff --git a/malloc-hugepage-0007-malloc-Enable-huge-page-support-on-main-arena.patch b/malloc-hugepage-0007-malloc-Enable-huge-page-support-on-main-arena.patch
new file mode 100644
index 0000000..fa5c9db
--- /dev/null
+++ b/malloc-hugepage-0007-malloc-Enable-huge-page-support-on-main-arena.patch
@@ -0,0 +1,86 @@
+From 0f982c182760bd7689769ee7590df592d0a132c0 Mon Sep 17 00:00:00 2001
+From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
+Date: Mon, 30 Aug 2021 14:01:00 -0300
+Subject: [PATCH 7/7] malloc: Enable huge page support on main arena
+
+This patch adds huge page support to main arena allocation,
+enabled with the tunable glibc.malloc.hugetlb=2.  It essentially
+disables the __glibc_morecore() sbrk() call (similar to what memory
+tagging does when sbrk() is not supported) and falls back to the
+default page size if the huge page allocation fails.
+
+Checked on x86_64-linux-gnu.
+
+Reviewed-by: DJ Delorie <dj@redhat.com>
+---
+ malloc/arena.c    |  4 ++++
+ malloc/malloc.c   | 12 ++++++++++--
+ malloc/morecore.c |  4 ----
+ 3 files changed, 14 insertions(+), 6 deletions(-)
+
+diff --git a/malloc/arena.c b/malloc/arena.c
+index e1852f8597..3ed4ef3f05 100644
+--- a/malloc/arena.c
++++ b/malloc/arena.c
+@@ -364,6 +364,10 @@ ptmalloc_init (void)
+ # endif
+   TUNABLE_GET (mxfast, size_t, TUNABLE_CALLBACK (set_mxfast));
+   TUNABLE_GET (hugetlb, size_t, TUNABLE_CALLBACK (set_hugetlb));
++  if (mp_.hp_pagesize > 0)
++    /* Force mmap for main arena instead of sbrk, so hugepages are explicitly
++       used.  */
++    __always_fail_morecore = true;
+ #else
+   if (__glibc_likely (_environ != NULL))
+     {
+diff --git a/malloc/malloc.c b/malloc/malloc.c
+index 32050be4cc..b67f2c84ee 100644
+--- a/malloc/malloc.c
++++ b/malloc/malloc.c
+@@ -2741,8 +2741,16 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
+          segregated mmap region.
+        */
+
+-      char *mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
+-                                            MMAP_AS_MORECORE_SIZE, 0, av);
++      char *mbrk = MAP_FAILED;
++#if HAVE_TUNABLES
++      if (mp_.hp_pagesize > 0)
++        mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
++                                        mp_.hp_pagesize, mp_.hp_pagesize,
++                                        mp_.hp_flags, av);
++#endif
++      if (mbrk == MAP_FAILED)
++        mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, pagesize,
++                                        MMAP_AS_MORECORE_SIZE, 0, av);
+       if (mbrk != MAP_FAILED)
+         {
+           /* We do not need, and cannot use, another sbrk call to find end */
+diff --git a/malloc/morecore.c b/malloc/morecore.c
+index 8168ef158c..004cd3ead4 100644
+--- a/malloc/morecore.c
++++ b/malloc/morecore.c
+@@ -15,9 +15,7 @@
+    License along with the GNU C Library; if not, see
+    <https://www.gnu.org/licenses/>.  */
+
+-#if defined(SHARED) || defined(USE_MTAG)
+ static bool __always_fail_morecore = false;
+-#endif
+
+ /* Allocate INCREMENT more bytes of data space,
+    and return the start of data space, or NULL on errors.
+@@ -25,10 +23,8 @@ static bool __always_fail_morecore = false;
+ void *
+ __glibc_morecore (ptrdiff_t increment)
+ {
+-#if defined(SHARED) || defined(USE_MTAG)
+   if (__always_fail_morecore)
+     return NULL;
+-#endif
+
+   void *result = (void *) __sbrk (increment);
+   if (result == (void *) -1)
+-- 
+2.33.0
+
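
The patch above has the main arena grow through a hugetlb-backed mmap once glibc.malloc.hugetlb=2 is set: __always_fail_morecore makes the sbrk() path fail, sysmalloc() first tries sysmalloc_mmap_fallback() with mp_.hp_pagesize/mp_.hp_flags, and only then retries with the default page size. The following is a minimal sketch, not part of the patch, of how that path could be exercised. It assumes a glibc built with this series and hugetlb pages reserved on the system; the request size, iteration count, and inspection hints are illustrative choices, not anything the patch mandates.

/* Sketch: grow the main arena so the hugetlb mmap path is taken.
   Reserve huge pages first, e.g.:
       echo 64 > /proc/sys/vm/nr_hugepages
   Build and run with the tunable from the commit message:
       gcc -O2 demo.c -o demo
       GLIBC_TUNABLES=glibc.malloc.hugetlb=2 ./demo  */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int
main (void)
{
  /* Many allocations below the mmap threshold force the main arena to
     grow.  With hugetlb=2 that growth goes through the hugetlb mmap
     fallback added above; if the hugetlb mapping cannot be satisfied,
     a default-page-size mmap is used instead.  */
  enum { NALLOC = 512, CHUNK = 64 * 1024 };
  static char *blocks[NALLOC];

  for (int i = 0; i < NALLOC; i++)
    {
      blocks[i] = malloc (CHUNK);
      if (blocks[i] == NULL)
        {
          perror ("malloc");
          return EXIT_FAILURE;
        }
      memset (blocks[i], 0x55, CHUNK);  /* Touch the memory so it is faulted in.  */
    }

  printf ("pid %ld: inspect /proc/%ld/smaps for hugetlb-backed heap mappings\n",
          (long) getpid (), (long) getpid ());
  pause ();  /* Keep the process alive for inspection; stop with Ctrl-C.  */

  return EXIT_SUCCESS;
}

While the process is paused, the arena extensions should show up in /proc/<pid>/smaps as MAP_HUGETLB mappings when the reservation succeeded, and as ordinary anonymous mappings when the fallback in sysmalloc() was taken.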