From 1b296563c92c896f072343b5f19286e8b5942b9e Mon Sep 17 00:00:00 2001
From: hubin <hubin73@huawei.com>
Date: Fri, 28 Apr 2023 11:30:47 +0800
Subject: [PATCH] elf/ld.so: Consider possible holes between PT_LOAD segments
 when mmapping the reserved area

When mmapping the reserved area, holes between PT_LOAD segments are not
taken into account, so the reserved virtual address space can be too
small to hold all sections of the PT_LOAD segments.  Since the address
space mapping of subsequent segments is based on the reserved address
space and each mmap uses the MAP_FIXED flag, every such mapping
overwrites part of the reservation; if the reservation is too small,
these MAP_FIXED mappings conflict with other address space.  E.g. in
some cases the address space of the .bss section (last data segment)
conflicts with the address space of the heap (or another mapping) and
the program dumps core.
---
 elf/dl-map-segments-hugepage.h | 10 +++++++++-
 1 file changed, 9 insertions(+), 1 deletion(-)

diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
index b99f726f..218e93a0 100644
--- a/elf/dl-map-segments-hugepage.h
+++ b/elf/dl-map-segments-hugepage.h
@@ -47,10 +47,18 @@ __mmap_reserved_area(const struct loadcmd loadcmds[], size_t nloadcmds,
const struct loadcmd * c = loadcmds;
*maparealen = 0;
+ /*
+ * Consider maybe-existing hole between PT_LOAD segments
+ */
while (c < &loadcmds[nloadcmds])
{
- *maparealen += ALIGN_UP((c->mapend > c->allocend ? c->mapend : c->allocend), SIZE_2MB) -
+ // c is not the last loadcmd
+ if (c + 1 < &loadcmds[nloadcmds]) {
+ *maparealen += ALIGN_UP((c + 1)->mapstart - c->mapstart, SIZE_2MB);
+ } else {
+ *maparealen += ALIGN_UP((c->mapend > c->allocend ? c->mapend : c->allocend), SIZE_2MB) -
ALIGN_DOWN(c->mapstart, SIZE_2MB);
+ }
c++;
}
--
2.38.1
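
For context, here is a standalone C sketch (not part of the patch or of
glibc; the struct layout, the helper macros and the example segment
addresses are assumptions that merely mirror the names used in the hunk
above).  It contrasts the old and the patched reservation-size
computations for two PT_LOAD segments separated by a 2 MiB hole:

/* Hypothetical, self-contained illustration of the reservation-size fix.  */
#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

#define SIZE_2MB        (2UL * 1024 * 1024)
#define ALIGN_DOWN(v,a) ((v) & ~((a) - 1))
#define ALIGN_UP(v,a)   ALIGN_DOWN ((v) + (a) - 1, (a))

/* Assumed stand-in for glibc's internal struct loadcmd.  */
struct loadcmd
{
  uintptr_t mapstart, mapend, allocend;
};

/* Old computation: sums each segment's own aligned size and ignores any
   hole between consecutive segments.  */
static size_t
reserved_len_old (const struct loadcmd *c, size_t n)
{
  size_t len = 0;
  for (size_t i = 0; i < n; i++)
    len += ALIGN_UP (c[i].mapend > c[i].allocend ? c[i].mapend : c[i].allocend,
                     SIZE_2MB)
           - ALIGN_DOWN (c[i].mapstart, SIZE_2MB);
  return len;
}

/* Patched computation: for every segment except the last, count the
   distance up to the next segment's mapstart, so holes stay inside the
   reservation.  */
static size_t
reserved_len_new (const struct loadcmd *c, size_t n)
{
  size_t len = 0;
  for (size_t i = 0; i < n; i++)
    {
      if (i + 1 < n)
        len += ALIGN_UP (c[i + 1].mapstart - c[i].mapstart, SIZE_2MB);
      else
        len += ALIGN_UP (c[i].mapend > c[i].allocend
                         ? c[i].mapend : c[i].allocend, SIZE_2MB)
               - ALIGN_DOWN (c[i].mapstart, SIZE_2MB);
    }
  return len;
}

int
main (void)
{
  /* Two made-up segments with a 2 MiB hole between them: the first ends
     at 0x200000, the second starts at 0x400000 and is mapped (MAP_FIXED)
     up to 0x800000 after alignment.  */
  struct loadcmd segs[2] = {
    { .mapstart = 0x000000, .mapend = 0x200000, .allocend = 0x200000 },
    { .mapstart = 0x400000, .mapend = 0x600000, .allocend = 0x700000 },
  };
  printf ("old reservation: %zu MiB\n", reserved_len_old (segs, 2) >> 20);
  printf ("new reservation: %zu MiB\n", reserved_len_new (segs, 2) >> 20);
  return 0;
}

With these example numbers the old computation reserves 6 MiB while the
patched one reserves 8 MiB; only the latter covers the last segment's
MAP_FIXED mapping, which extends to 0x800000, so nothing outside the
reservation gets overwritten.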