summaryrefslogtreecommitdiff
path: root/backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch
diff options
context:
space:
mode:
Diffstat (limited to 'backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch')
-rw-r--r--backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch135
1 files changed, 135 insertions, 0 deletions
diff --git a/backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch b/backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch
new file mode 100644
index 0000000..365aeca
--- /dev/null
+++ b/backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch
@@ -0,0 +1,135 @@
+From 718fdd87b1b98ef88e883a37d9c18867256fa5a4 Mon Sep 17 00:00:00 2001
+From: Rongwei Wang <rongwei.wang@linux.alibaba.com>
+Date: Fri, 10 Dec 2021 20:39:10 +0800
+Subject: [PATCH] elf: Properly align PT_LOAD segments [BZ #28676]
+
+When PT_LOAD segment alignment > the page size, allocate enough space to
+ensure that the segment can be properly aligned. This change makes it
+simple for code segments to use huge pages.
+
+This fixes [BZ #28676].
+
+Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
+Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
+---
+ elf/dl-load.c | 2 ++
+ elf/dl-load.h | 3 ++-
+ elf/dl-map-segments.h | 50 +++++++++++++++++++++++++++++++++++++++----
+ 3 files changed, 50 insertions(+), 5 deletions(-)
+
+diff --git a/elf/dl-load.c b/elf/dl-load.c
+index bf8957e73c..721593135e 100644
+--- a/elf/dl-load.c
++++ b/elf/dl-load.c
+@@ -1,5 +1,6 @@
+ /* Map in a shared object's segments from the file.
+ Copyright (C) 1995-2021 Free Software Foundation, Inc.
++ Copyright The GNU Toolchain Authors.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+@@ -1150,6 +1151,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
+ c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
+ c->dataend = ph->p_vaddr + ph->p_filesz;
+ c->allocend = ph->p_vaddr + ph->p_memsz;
++ c->mapalign = ph->p_align;
+ c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
+
+ /* Determine whether there is a gap between the last segment
+diff --git a/elf/dl-load.h b/elf/dl-load.h
+index e329d49a81..e6dabcb336 100644
+--- a/elf/dl-load.h
++++ b/elf/dl-load.h
+@@ -1,5 +1,6 @@
+ /* Map in a shared object's segments from the file.
+ Copyright (C) 1995-2021 Free Software Foundation, Inc.
++ Copyright The GNU Toolchain Authors.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+@@ -74,7 +75,7 @@ ELF_PREFERRED_ADDRESS_DATA;
+ Its details have been expanded out and converted. */
+ struct loadcmd
+ {
+- ElfW(Addr) mapstart, mapend, dataend, allocend;
++ ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
+ ElfW(Off) mapoff;
+ int prot; /* PROT_* bits. */
+ };
+diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
+index f9fb110ee3..70a4c40695 100644
+--- a/elf/dl-map-segments.h
++++ b/elf/dl-map-segments.h
+@@ -1,5 +1,6 @@
+ /* Map in a shared object's segments. Generic version.
+ Copyright (C) 1995-2021 Free Software Foundation, Inc.
++ Copyright The GNU Toolchain Authors.
+ This file is part of the GNU C Library.
+
+ The GNU C Library is free software; you can redistribute it and/or
+@@ -18,6 +19,50 @@
+
+ #include <dl-load.h>
+
++/* Map a segment and align it properly. */
++
++static __always_inline ElfW(Addr)
++_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
++ const size_t maplength, int fd)
++{
++ if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
++ return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
++ MAP_COPY|MAP_FILE, fd, c->mapoff);
++
++ /* If the segment alignment > the page size, allocate enough space to
++ ensure that the segment can be properly aligned. */
++ ElfW(Addr) maplen = (maplength >= c->mapalign
++ ? (maplength + c->mapalign)
++ : (2 * c->mapalign));
++ ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
++ PROT_NONE,
++ MAP_ANONYMOUS|MAP_PRIVATE,
++ -1, 0);
++ if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
++ return map_start;
++
++ ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
++ map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
++ maplength, c->prot,
++ MAP_COPY|MAP_FILE|MAP_FIXED,
++ fd, c->mapoff);
++ if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
++ __munmap ((void *) map_start, maplen);
++ else
++ {
++ /* Unmap the unused regions. */
++ ElfW(Addr) delta = map_start_aligned - map_start;
++ if (delta)
++ __munmap ((void *) map_start, delta);
++ ElfW(Addr) map_end = map_start_aligned + maplength;
++ delta = map_start + maplen - map_end;
++ if (delta)
++ __munmap ((void *) map_end, delta);
++ }
++
++ return map_start_aligned;
++}
++
+ /* This implementation assumes (as does the corresponding implementation
+ of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
+ are always laid out with all segments contiguous (or with gaps
+@@ -53,10 +98,7 @@ _dl_map_segments (struct link_map *l, int fd,
+ - MAP_BASE_ADDR (l));
+
+ /* Remember which part of the address space this object uses. */
+- l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
+- c->prot,
+- MAP_COPY|MAP_FILE,
+- fd, c->mapoff);
++ l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
+ if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
+ return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
+
+--
+2.43.0
+