From 6cc3ccc67e0dda654fc839377af2818a296f0007 Mon Sep 17 00:00:00 2001
From: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Date: Mon, 16 Aug 2021 11:14:20 -0300
Subject: [PATCH 3/7] malloc: Move mmap logic to its own function

So it can be used with different pagesize and flags.
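For instance, a later patch in this series could request huge pages by
passing a different page size and extra mmap flags, along the lines of:

   sysmalloc_mmap (nb, HUGE_PAGE_SIZE, MAP_HUGETLB, av);

where HUGE_PAGE_SIZE stands in for whatever page size the caller
selects and MAP_HUGETLB is the Linux flag for hugetlb mappings; the
call above is illustrative only, not part of this patch.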
Reviewed-by: DJ Delorie <dj@redhat.com>
---
malloc/malloc.c | 164 ++++++++++++++++++++++++++----------------------
1 file changed, 88 insertions(+), 76 deletions(-)
diff --git a/malloc/malloc.c b/malloc/malloc.c
index 57db4dd9a5..6b6ec53db1 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -2412,6 +2412,85 @@ do_check_malloc_state (mstate av)
be extended or replaced.
*/

+static void *
+sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
+{
+ long int size;
+
+ /*
+ Round up size to nearest page. For mmapped chunks, the overhead is one
+ SIZE_SZ unit larger than for normal chunks, because there is no
+ following chunk whose prev_size field could be used.
+
+ See the front_misalign handling below, for glibc there is no need for
+   further alignments unless we have high alignment.
+ */
+ if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+ size = ALIGN_UP (nb + SIZE_SZ, pagesize);
+ else
+ size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+
+ /* Don't try if size wraps around 0. */
+ if ((unsigned long) (size) <= (unsigned long) (nb))
+ return MAP_FAILED;
+
+ char *mm = (char *) MMAP (0, size,
+ mtag_mmap_flags | PROT_READ | PROT_WRITE,
+ extra_flags);
+ if (mm == MAP_FAILED)
+ return mm;
+
+ madvise_thp (mm, size);
+
+ /*
+ The offset to the start of the mmapped region is stored in the prev_size
+ field of the chunk. This allows us to adjust returned start address to
+ meet alignment requirements here and in memalign(), and still be able to
+ compute proper address argument for later munmap in free() and realloc().
+ */
+
+ INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
+
+ if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
+ {
+ /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
+ MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
+ aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
+ assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
+ front_misalign = 0;
+ }
+ else
+ front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
+
+ mchunkptr p; /* the allocated/returned chunk */
+
+ if (front_misalign > 0)
+ {
+ ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
+ p = (mchunkptr) (mm + correction);
+ set_prev_size (p, correction);
+ set_head (p, (size - correction) | IS_MMAPPED);
+ }
+ else
+ {
+ p = (mchunkptr) mm;
+ set_prev_size (p, 0);
+ set_head (p, size | IS_MMAPPED);
+ }
+
+ /* update statistics */
+ int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
+ atomic_max (&mp_.max_n_mmaps, new);
+
+ unsigned long sum;
+ sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
+ atomic_max (&mp_.max_mmapped_mem, sum);
+
+ check_chunk (av, p);
+
+ return chunk2mem (p);
+}
+
static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
@@ -2449,81 +2528,10 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
|| ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
{
- char *mm; /* return value from mmap call*/
-
- try_mmap:
- /*
- Round up size to nearest page. For mmapped chunks, the overhead
- is one SIZE_SZ unit larger than for normal chunks, because there
- is no following chunk whose prev_size field could be used.
-
- See the front_misalign handling below, for glibc there is no
- need for further alignments unless we have have high alignment.
- */
- if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
- size = ALIGN_UP (nb + SIZE_SZ, pagesize);
- else
- size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
+ char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
+ if (mm != MAP_FAILED)
+ return mm;
tried_mmap = true;
-
- /* Don't try if size wraps around 0 */
- if ((unsigned long) (size) > (unsigned long) (nb))
- {
- mm = (char *) (MMAP (0, size,
- mtag_mmap_flags | PROT_READ | PROT_WRITE, 0));
-
- if (mm != MAP_FAILED)
- {
- madvise_thp (mm, size);
-
- /*
- The offset to the start of the mmapped region is stored
- in the prev_size field of the chunk. This allows us to adjust
- returned start address to meet alignment requirements here
- and in memalign(), and still be able to compute proper
- address argument for later munmap in free() and realloc().
- */
-
- if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
- {
- /* For glibc, chunk2mem increases the address by
- CHUNK_HDR_SZ and MALLOC_ALIGN_MASK is
- CHUNK_HDR_SZ-1. Each mmap'ed area is page
- aligned and therefore definitely
- MALLOC_ALIGN_MASK-aligned. */
- assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
- front_misalign = 0;
- }
- else
- front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
- if (front_misalign > 0)
- {
- correction = MALLOC_ALIGNMENT - front_misalign;
- p = (mchunkptr) (mm + correction);
- set_prev_size (p, correction);
- set_head (p, (size - correction) | IS_MMAPPED);
- }
- else
- {
- p = (mchunkptr) mm;
- set_prev_size (p, 0);
- set_head (p, size | IS_MMAPPED);
- }
-
- /* update statistics */
-
- int new = atomic_exchange_and_add (&mp_.n_mmaps, 1) + 1;
- atomic_max (&mp_.max_n_mmaps, new);
-
- unsigned long sum;
- sum = atomic_exchange_and_add (&mp_.mmapped_mem, size) + size;
- atomic_max (&mp_.max_mmapped_mem, sum);
-
- check_chunk (av, p);
-
- return chunk2mem (p);
- }
- }
}
/* There are no usable arenas and mmap also failed. */
@@ -2600,8 +2608,12 @@ sysmalloc (INTERNAL_SIZE_T nb, mstate av)
}
}
else if (!tried_mmap)
- /* We can at least try to use to mmap memory. */
- goto try_mmap;
+ {
+	  /* We can at least try to mmap memory.  */
+ char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
+ if (mm != MAP_FAILED)
+ return mm;
+ }
}
else /* av == main_arena */
--
2.33.0