From 9145e7cc47b0a43620273ec6f3d1bf016618eb62 Mon Sep 17 00:00:00 2001
From: chenziyang <chenziyang4@huawei.com>
Date: Sat, 4 Mar 2023 17:13:57 +0800
Subject: [PATCH 1/2] elf/ld.so: add MAP_NORESERVE flag to the initial mmap of the 2MB contiguous VA

If the environment only has 50MB of hugepage resources while the RX
PT_LOAD segment requires 25MB and the RW PT_LOAD segment requires 100MB
of memory, then before this patch ld.so would bail out of the hugepage
mapping path, because it tried to reserve the full 125MB of hugepages up
front. With this change the initial reservation is made with
MAP_NORESERVE, so ld.so can still map the RX PT_LOAD segment into 2MB
hugepages.

Signed-off-by: Chen Ziyang <chenziyang4@huawei.com>
---
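Note (not part of the commit message): below is a minimal standalone
sketch of the reservation pattern this hunk changes, assuming a 2MB
hugepage size and the same flag combination. RESERVE_LEN and SHIFT_2MB
are illustrative names only (the macro in this tree is spelled
SHFIT_2MB); without MAP_NORESERVE the program needs enough preallocated
2MB hugepages to cover the whole range, and on a kernel without 2MB
hugepage support the mmap fails in either case.

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

#ifndef MAP_HUGE_SHIFT
# define MAP_HUGE_SHIFT 26
#endif

#define SHIFT_2MB   21                /* log2 (2MB) hugepage size encoding */
#define RESERVE_LEN (125UL << 20)     /* 125MB of contiguous VA, as in the example above */

int
main (void)
{
  /* Without MAP_NORESERVE the kernel reserves hugepages for the whole
     range at mmap time, so the call fails when fewer than 125MB of
     hugepages are free.  With MAP_NORESERVE only the virtual address
     range is claimed; hugepages are committed later, per segment.  */
  void *va = mmap (NULL, RESERVE_LEN, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB | MAP_NORESERVE
                   | (SHIFT_2MB << MAP_HUGE_SHIFT),
                   -1, 0);
  if (va == MAP_FAILED)
    {
      perror ("mmap (MAP_HUGETLB|MAP_NORESERVE)");
      return EXIT_FAILURE;
    }
  printf ("reserved %lu bytes of 2MB-hugepage VA at %p\n",
          (unsigned long) RESERVE_LEN, va);
  munmap (va, RESERVE_LEN);
  return EXIT_SUCCESS;
}
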
elf/dl-map-segments-hugepage.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/elf/dl-map-segments-hugepage.h b/elf/dl-map-segments-hugepage.h
index 5ea89a36..6ab5f939 100644
--- a/elf/dl-map-segments-hugepage.h
+++ b/elf/dl-map-segments-hugepage.h
@@ -59,7 +59,7 @@ __mmap_reserved_area(const struct loadcmd loadcmds[], size_t nloadcmds,
* This va space can not be munmap in case of multi thread dlopen concurrently
*/
void *map_area_start = __mmap(0, *maparealen, PROT_NONE,
- MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|(SHFIT_2MB << MAP_HUGE_SHIFT), -1, 0);
+ MAP_PRIVATE|MAP_ANONYMOUS|MAP_HUGETLB|MAP_NORESERVE|(SHFIT_2MB << MAP_HUGE_SHIFT), -1, 0);
if (__glibc_unlikely (map_area_start == MAP_FAILED))
return MAP_FAILED;
--
2.33.0