path: root/backport-elf-Properly-align-PT_LOAD-segments-BZ-28676.patch
blob: 365aeca10b3dc6cb7bfc30fc5b19277f8e13c42d
From 718fdd87b1b98ef88e883a37d9c18867256fa5a4 Mon Sep 17 00:00:00 2001
From: Rongwei Wang <rongwei.wang@linux.alibaba.com>
Date: Fri, 10 Dec 2021 20:39:10 +0800
Subject: [PATCH] elf: Properly align PT_LOAD segments [BZ #28676]

When the PT_LOAD segment alignment is larger than the page size, allocate
enough space to ensure that the segment can be properly aligned.  This
change makes it simple and practical for code segments to use huge pages.

This fixes [BZ #28676].

Signed-off-by: Xu Yu <xuyu@linux.alibaba.com>
Signed-off-by: Rongwei Wang <rongwei.wang@linux.alibaba.com>
---
 elf/dl-load.c         |  2 ++
 elf/dl-load.h         |  3 ++-
 elf/dl-map-segments.h | 50 +++++++++++++++++++++++++++++++++++++++----
 3 files changed, 50 insertions(+), 5 deletions(-)
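
Backport note (not part of the upstream patch): the new _dl_map_segment
helper below works by over-reserving an anonymous PROT_NONE region,
picking the first properly aligned address inside it, and trimming the
unused head and tail with munmap.  The standalone C sketch that follows
illustrates that over-reserve-and-trim idea in isolation; the helper name
map_aligned_region and its interface are invented for illustration and do
not appear in glibc.

#define _DEFAULT_SOURCE         /* for MAP_ANONYMOUS */
#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

/* Return an address aligned to ALIGN (a power of two larger than the
   page size) at which LENGTH bytes can be mapped, or MAP_FAILED.  */
static void *
map_aligned_region (size_t length, size_t align)
{
  /* Reserve enough so that an aligned block of LENGTH bytes must fit,
     mirroring the maplen computation in _dl_map_segment.  */
  size_t maplen = length >= align ? length + align : 2 * align;
  void *start = mmap (NULL, maplen, PROT_NONE,
                      MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  if (start == MAP_FAILED)
    return MAP_FAILED;

  /* Round the reservation's start up to the requested alignment.  */
  uintptr_t aligned = ((uintptr_t) start + align - 1)
                      & ~((uintptr_t) align - 1);
  uintptr_t end = aligned + length;

  /* Unmap the unused head and tail of the reservation.  */
  if (aligned > (uintptr_t) start)
    munmap (start, aligned - (uintptr_t) start);
  if ((uintptr_t) start + maplen > end)
    munmap ((void *) end, (uintptr_t) start + maplen - end);

  return (void *) aligned;
}

In the actual patch, the aligned address obtained this way is then
overwritten with a MAP_COPY|MAP_FILE|MAP_FIXED mapping of the object
file, so the segment's file contents land exactly on the aligned start.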

diff --git a/elf/dl-load.c b/elf/dl-load.c
index bf8957e73c..721593135e 100644
--- a/elf/dl-load.c
+++ b/elf/dl-load.c
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -1150,6 +1151,7 @@ _dl_map_object_from_fd (const char *name, const char *origname, int fd,
 	  c->mapend = ALIGN_UP (ph->p_vaddr + ph->p_filesz, GLRO(dl_pagesize));
 	  c->dataend = ph->p_vaddr + ph->p_filesz;
 	  c->allocend = ph->p_vaddr + ph->p_memsz;
+	  c->mapalign = ph->p_align;
 	  c->mapoff = ALIGN_DOWN (ph->p_offset, GLRO(dl_pagesize));
 
 	  /* Determine whether there is a gap between the last segment
diff --git a/elf/dl-load.h b/elf/dl-load.h
index e329d49a81..e6dabcb336 100644
--- a/elf/dl-load.h
+++ b/elf/dl-load.h
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments from the file.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -74,7 +75,7 @@ ELF_PREFERRED_ADDRESS_DATA;
    Its details have been expanded out and converted.  */
 struct loadcmd
 {
-  ElfW(Addr) mapstart, mapend, dataend, allocend;
+  ElfW(Addr) mapstart, mapend, dataend, allocend, mapalign;
   ElfW(Off) mapoff;
   int prot;                             /* PROT_* bits.  */
 };
diff --git a/elf/dl-map-segments.h b/elf/dl-map-segments.h
index f9fb110ee3..70a4c40695 100644
--- a/elf/dl-map-segments.h
+++ b/elf/dl-map-segments.h
@@ -1,5 +1,6 @@
 /* Map in a shared object's segments.  Generic version.
    Copyright (C) 1995-2021 Free Software Foundation, Inc.
+   Copyright The GNU Toolchain Authors.
    This file is part of the GNU C Library.
 
    The GNU C Library is free software; you can redistribute it and/or
@@ -18,6 +19,50 @@
 
 #include <dl-load.h>
 
+/* Map a segment and align it properly.  */
+
+static __always_inline ElfW(Addr)
+_dl_map_segment (const struct loadcmd *c, ElfW(Addr) mappref,
+		 const size_t maplength, int fd)
+{
+  if (__glibc_likely (c->mapalign <= GLRO(dl_pagesize)))
+    return (ElfW(Addr)) __mmap ((void *) mappref, maplength, c->prot,
+				MAP_COPY|MAP_FILE, fd, c->mapoff);
+
+  /* If the segment alignment > the page size, allocate enough space to
+     ensure that the segment can be properly aligned.  */
+  ElfW(Addr) maplen = (maplength >= c->mapalign
+		       ? (maplength + c->mapalign)
+		       : (2 * c->mapalign));
+  ElfW(Addr) map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplen,
+					      PROT_NONE,
+					      MAP_ANONYMOUS|MAP_PRIVATE,
+					      -1, 0);
+  if (__glibc_unlikely ((void *) map_start == MAP_FAILED))
+    return map_start;
+
+  ElfW(Addr) map_start_aligned = ALIGN_UP (map_start, c->mapalign);
+  map_start_aligned = (ElfW(Addr)) __mmap ((void *) map_start_aligned,
+					   maplength, c->prot,
+					   MAP_COPY|MAP_FILE|MAP_FIXED,
+					   fd, c->mapoff);
+  if (__glibc_unlikely ((void *) map_start_aligned == MAP_FAILED))
+    __munmap ((void *) map_start, maplen);
+  else
+    {
+      /* Unmap the unused regions.  */
+      ElfW(Addr) delta = map_start_aligned - map_start;
+      if (delta)
+	__munmap ((void *) map_start, delta);
+      ElfW(Addr) map_end = map_start_aligned + maplength;
+      delta = map_start + maplen - map_end;
+      if (delta)
+	__munmap ((void *) map_end, delta);
+    }
+
+  return map_start_aligned;
+}
+
 /* This implementation assumes (as does the corresponding implementation
    of _dl_unmap_segments, in dl-unmap-segments.h) that shared objects
    are always laid out with all segments contiguous (or with gaps
@@ -53,10 +98,7 @@ _dl_map_segments (struct link_map *l, int fd,
            - MAP_BASE_ADDR (l));
 
       /* Remember which part of the address space this object uses.  */
-      l->l_map_start = (ElfW(Addr)) __mmap ((void *) mappref, maplength,
-                                            c->prot,
-                                            MAP_COPY|MAP_FILE,
-                                            fd, c->mapoff);
+      l->l_map_start = _dl_map_segment (c, mappref, maplength, fd);
       if (__glibc_unlikely ((void *) l->l_map_start == MAP_FAILED))
         return DL_MAP_SEGMENTS_ERROR_MAP_SEGMENT;
 
-- 
2.43.0