Diffstat (limited to 'elf-ld.so-Consider-maybe-existing-hole-between-PT_LO.patch')
-rw-r--r--  elf-ld.so-Consider-maybe-existing-hole-between-PT_LO.patch | 47
1 file changed, 47 insertions, 0 deletions
diff --git a/elf-ld.so-Consider-maybe-existing-hole-between-PT_LO.patch b/elf-ld.so-Consider-maybe-existing-hole-between-PT_LO.patch
new file mode 100644
index 0000000..9ce5870
--- /dev/null
+++ b/elf-ld.so-Consider-maybe-existing-hole-between-PT_LO.patch
@@ -0,0 +1,47 @@
+From 1b296563c92c896f072343b5f19286e8b5942b9e Mon Sep 17 00:00:00 2001
+From: hubin <hubin73@huawei.com>
+Date: Fri, 28 Apr 2023 11:30:47 +0800
+Subject: [PATCH] elf/ld.so: Consider maybe-existing hole between PT_LOAD
+ segments when mmapping the reserved area
+
+When the reserved area is mmapped without considering an existing hole
+between PT_LOAD segments, the reserved virtual address space can be too
+small to contain all the sections of the PT_LOAD segments. Since the
+address space mapping of subsequent segments is based on the reserved
+address space, each mmap carries the MAP_FIXED flag and overwrites part
+of the reserved range; if the reserved space is too small, these
+MAP_FIXED mappings conflict with other address space. For example, in
+some cases the address space of the .bss section (the last data segment)
+conflicts with the address space of the heap (or other mappings) and the
+program dumps core.
+---
+ elf/dl-map-segments-hugepage.h | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
+index b99f726f..218e93a0 100644
+--- a/elf/dl-map-segments-hugepage.h
++++ b/elf/dl-map-segments-hugepage.h
+@@ -47,10 +47,18 @@ __mmap_reserved_area(const struct loadcmd loadcmds[], size_t nloadcmds,
+ const struct loadcmd * c = loadcmds;
+ *maparealen = 0;
+
++ /*
++ * Consider maybe-existing hole between PT_LOAD segments
++ */
+ while (c < &loadcmds[nloadcmds])
+ {
+- *maparealen += ALIGN_UP((c->mapend > c->allocend ? c->mapend : c->allocend), SIZE_2MB) -
++ // c is not the last loadcmd
++ if (c + 1 < &loadcmds[nloadcmds]) {
++ *maparealen += ALIGN_UP((c + 1)->mapstart - c->mapstart, SIZE_2MB);
++ } else {
++ *maparealen += ALIGN_UP((c->mapend > c->allocend ? c->mapend : c->allocend), SIZE_2MB) -
+ ALIGN_DOWN(c->mapstart, SIZE_2MB);
++ }
+ c++;
+ }
+
+--
+2.38.1
+
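As a quick illustration of the size computation described above, here is a
minimal standalone C sketch, assuming a simplified loadcmd structure that
carries only the mapstart/mapend/allocend fields referenced in the hunk;
SIZE_2MB, ALIGN_UP and ALIGN_DOWN are stand-ins for the macros used in
elf/dl-map-segments-hugepage.h, and the real glibc struct loadcmd has
additional members.

#include <stddef.h>
#include <stdint.h>

#define SIZE_2MB ((uint64_t) 2 * 1024 * 1024)
#define ALIGN_DOWN(v, a) ((v) & ~((a) - 1))
#define ALIGN_UP(v, a)   ALIGN_DOWN ((v) + (a) - 1, (a))

/* Simplified stand-in for the loadcmd fields used by the hunk above.  */
struct loadcmd
{
  uint64_t mapstart;   /* aligned start of the segment mapping         */
  uint64_t mapend;     /* end of the file-backed part of the segment   */
  uint64_t allocend;   /* end of the segment including zero-fill/.bss  */
};

/* Size of the area to reserve, as computed after the patch: every
   segment but the last spans up to the next segment's mapstart, so any
   hole between adjacent PT_LOAD segments is counted; the last segment
   spans up to its 2 MiB aligned allocation end.  */
static uint64_t
reserved_area_len (const struct loadcmd loadcmds[], size_t nloadcmds)
{
  uint64_t len = 0;
  for (size_t i = 0; i < nloadcmds; i++)
    {
      const struct loadcmd *c = &loadcmds[i];
      if (i + 1 < nloadcmds)
        /* Not the last segment: include the gap up to the next one.  */
        len += ALIGN_UP (loadcmds[i + 1].mapstart - c->mapstart, SIZE_2MB);
      else
        len += ALIGN_UP (c->mapend > c->allocend ? c->mapend : c->allocend,
                         SIZE_2MB)
               - ALIGN_DOWN (c->mapstart, SIZE_2MB);
    }
  return len;
}

Because every non-final segment is measured up to the next segment's
mapstart, the reserved range covers any hole between adjacent PT_LOAD
segments, so the later MAP_FIXED mappings stay inside the reservation
instead of clobbering unrelated mappings such as the heap.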