# malloc & sysmalloc

{% hint style="success" %}
Learn & practice AWS Hacking:<img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** us on **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) and [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) github repos.

</details>
{% endhint %}

## Allocation Order Summary <a href="#libc_malloc" id="libc_malloc"></a>

(No checks are explained in this summary and some cases have been omitted for brevity)

1. `__libc_malloc` tries to get a chunk from the tcache, if not it calls `_int_malloc`
2. `_int_malloc`:
   1. Tries to generate the arena if there isn't any
   2. If there is any fast bin chunk of the correct size, use it
      1. Fill the tcache with other fast chunks
   3. If there is any small bin chunk of the correct size, use it
      1. Fill the tcache with other chunks of that size
   4. If the requested size isn't for small bins, consolidate the fast bins into the unsorted bin
   5. Check the unsorted bin, use the first chunk with enough space
      1. If the found chunk is bigger, divide it to return a part and add the remainder back to the unsorted bin
      2. If a chunk is of the same size as the size requested, use it to fill the tcache instead of returning it (until the tcache is full, then return the next one)
      3. For each chunk of smaller size checked, put it in its respective small or large bin
   6. Check the large bin at the index of the requested size
      1. Start looking from the first chunk that is bigger than the requested size, if any is found return it and add the remainder to the unsorted bin
   7. Check the large bins from the next indexes until the end
      1. From the next bigger index check for any chunk, divide the first found chunk to use it for the requested size and add the remainder to the unsorted bin
   8. If nothing is found in the previous bins, get a chunk from the top chunk
   9. If the top chunk wasn't big enough, enlarge it with `sysmalloc`

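This ordering can be observed from userland. A minimal sketch (assuming a glibc with tcache enabled, i.e. >= 2.26): freeing a chunk and requesting the same size again returns the same address, because `__libc_malloc` serves it from the tcache before ever reaching `_int_malloc`.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *a = malloc(0x40);   // on a fresh heap this is carved from the top chunk
    printf("first  malloc(0x40): %p\n", a);
    free(a);                  // the chunk is stored in the tcache bin for this size
    void *b = malloc(0x40);   // __libc_malloc pops it straight from the tcache
    printf("second malloc(0x40): %p (reused: %s)\n", b, a == b ? "yes" : "no");
    return 0;
}
```
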
## \_\_libc\_malloc <a href="#libc_malloc" id="libc_malloc"></a>

The `malloc` function actually calls `__libc_malloc`. This function will check the tcache to see if there is any available chunk of the desired size. If there is, it'll use it; if not, it'll check whether this is a single-threaded process and in that case it'll call `_int_malloc` in the main arena, and if not it'll call `_int_malloc` in the thread's arena.

<details>

<summary>__libc_malloc code</summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c

#if IS_IN (libc)
void *
__libc_malloc (size_t bytes)
{
  mstate ar_ptr;
  void *victim;

  _Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
                  "PTRDIFF_MAX is not more than half of SIZE_MAX");

  if (!__malloc_initialized)
    ptmalloc_init ();
#if USE_TCACHE
  /* int_free also calls request2size, be careful to not pad twice.  */
  size_t tbytes = checked_request2size (bytes);
  if (tbytes == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
  size_t tc_idx = csize2tidx (tbytes);

  MAYBE_INIT_TCACHE ();

  DIAG_PUSH_NEEDS_COMMENT;
  if (tc_idx < mp_.tcache_bins
      && tcache != NULL
      && tcache->counts[tc_idx] > 0)
    {
      victim = tcache_get (tc_idx);
      return tag_new_usable (victim);
    }
  DIAG_POP_NEEDS_COMMENT;
#endif

  if (SINGLE_THREAD_P)
    {
      victim = tag_new_usable (_int_malloc (&main_arena, bytes));
      assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
              &main_arena == arena_for_chunk (mem2chunk (victim)));
      return victim;
    }

  arena_get (ar_ptr, bytes);

  victim = _int_malloc (ar_ptr, bytes);
  /* Retry with another arena only if we were able to find a usable arena
     before.  */
  if (!victim && ar_ptr != NULL)
    {
      LIBC_PROBE (memory_malloc_retry, 1, bytes);
      ar_ptr = arena_get_retry (ar_ptr, bytes);
      victim = _int_malloc (ar_ptr, bytes);
    }

  if (ar_ptr != NULL)
    __libc_lock_unlock (ar_ptr->mutex);

  victim = tag_new_usable (victim);

  assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
          ar_ptr == arena_for_chunk (mem2chunk (victim)));
  return victim;
}
```

</details>

Note how it'll always tag the returned pointer with `tag_new_usable`, from the code:

```c
void *tag_new_usable (void *ptr)

Allocate a new random color and use it to color the user region of
a chunk; this may include data from the subsequent chunk's header
if tagging is sufficiently fine grained.  Returns PTR suitably
recolored for accessing the memory there.
```

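To reason about which tcache bin a request ends up in, it helps to replicate the size normalization in userland. A minimal sketch of the 64-bit math (assuming `SIZE_SZ` = 8, `MALLOC_ALIGNMENT` = 16 and `MINSIZE` = 32, the usual x86-64 glibc configuration; these helpers mirror the macros in `malloc.c` but are re-implementations for illustration, not the real ones):

```c
#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ           8UL
#define MALLOC_ALIGNMENT  16UL
#define MALLOC_ALIGN_MASK (MALLOC_ALIGNMENT - 1)
#define MINSIZE           32UL

/* Rough equivalent of request2size: user bytes -> normalized chunk size. */
static size_t request2size(size_t req) {
    size_t sz = (req + SIZE_SZ + MALLOC_ALIGN_MASK) & ~MALLOC_ALIGN_MASK;
    return sz < MINSIZE ? MINSIZE : sz;
}

/* Rough equivalent of csize2tidx: chunk size -> tcache bin index. */
static size_t csize2tidx(size_t csize) {
    return (csize - MINSIZE + MALLOC_ALIGNMENT - 1) / MALLOC_ALIGNMENT;
}

int main(void) {
    for (size_t req = 0; req <= 0x80; req += 0x20)
        printf("malloc(%#zx) -> chunk size %#zx -> tcache idx %zu\n",
               req, request2size(req), csize2tidx(request2size(req)));
    return 0;
}
```
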
## \_int\_malloc <a href="#int_malloc" id="int_malloc"></a>

This is the function that allocates memory using the other bins and the top chunk.

* Start

It starts by defining some variables and getting the real size the requested memory space needs to have:

<details>

<summary>_int_malloc start</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3847
static void *
_int_malloc (mstate av, size_t bytes)
{
  INTERNAL_SIZE_T nb;               /* normalized request size */
  unsigned int idx;                 /* associated bin index */
  mbinptr bin;                      /* associated bin */

  mchunkptr victim;                 /* inspected/selected chunk */
  INTERNAL_SIZE_T size;             /* its size */
  int victim_index;                 /* its bin index */

  mchunkptr remainder;              /* remainder from a split */
  unsigned long remainder_size;     /* its size */

  unsigned int block;               /* bit map traverser */
  unsigned int bit;                 /* bit map traverser */
  unsigned int map;                 /* current word of binmap */

  mchunkptr fwd;                    /* misc temp for linking */
  mchunkptr bck;                    /* misc temp for linking */

#if USE_TCACHE
  size_t tcache_unsorted_count;     /* count of unsorted chunks processed */
#endif

  /*
     Convert request size to internal form by adding SIZE_SZ bytes
     overhead plus possibly more to obtain necessary alignment and/or
     to obtain a size of at least MINSIZE, the smallest allocatable
     size.  Also, checked_request2size returns false for request sizes
     that are so large that they wrap around zero when padded and
     aligned.
   */

  nb = checked_request2size (bytes);
  if (nb == 0)
    {
      __set_errno (ENOMEM);
      return NULL;
    }
```

</details>

### Arena

In the unlikely event that there aren't usable arenas, it uses `sysmalloc` to get a chunk from `mmap`:

<details>

<summary>_int_malloc not arena</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3885C3-L3893C6
  /* There are no usable arenas.  Fall back to sysmalloc to get a chunk from
     mmap.  */
  if (__glibc_unlikely (av == NULL))
    {
      void *p = sysmalloc (nb, av);
      if (p != NULL)
        alloc_perturb (p, bytes);
      return p;
    }
```

</details>

### Fast Bin

If the needed size is inside the Fast Bin sizes, try to use a chunk from the fast bins. Basically, based on the size, it'll find the fast bin index where valid chunks should be located, and if any, it'll return one of those.\
Moreover, if tcache is enabled, it'll **fill the tcache bin of that size with fast bins**.

While performing these actions, some security checks are executed here:

* If the chunk is misaligned: `malloc(): unaligned fastbin chunk detected 2`
* If the forward chunk is misaligned: `malloc(): unaligned fastbin chunk detected`
* If the returned chunk has a size that isn't correct because of its index in the fast bin: `malloc(): memory corruption (fast)`
* If any chunk used to fill the tcache is misaligned: `malloc(): unaligned fastbin chunk detected 3`

<details>

<summary>_int_malloc fast bin</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
  /*
     If the size qualifies as a fastbin, first check corresponding bin.
     This code is safe to execute even if av is not yet initialized, so we
     can try it without checking, which saves some time on this fast path.
   */

#define REMOVE_FB(fb, victim, pp)                       \
  do                                                    \
    {                                                   \
      victim = pp;                                      \
      if (victim == NULL)                               \
        break;                                          \
      pp = REVEAL_PTR (victim->fd);                                     \
      if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp)))       \
        malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
    }                                                   \
  while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
         != victim);                                    \

  if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
    {
      idx = fastbin_index (nb);
      mfastbinptr *fb = &fastbin (av, idx);
      mchunkptr pp;
      victim = *fb;

      if (victim != NULL)
        {
          if (__glibc_unlikely (misaligned_chunk (victim)))
            malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");

          if (SINGLE_THREAD_P)
            *fb = REVEAL_PTR (victim->fd);
          else
            REMOVE_FB (fb, pp, victim);
          if (__glibc_likely (victim != NULL))
            {
              size_t victim_idx = fastbin_index (chunksize (victim));
              if (__builtin_expect (victim_idx != idx, 0))
                malloc_printerr ("malloc(): memory corruption (fast)");
              check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
              /* While we're here, if we see other chunks of the same size,
                 stash them in the tcache.  */
              size_t tc_idx = csize2tidx (nb);
              if (tcache != NULL && tc_idx < mp_.tcache_bins)
                {
                  mchunkptr tc_victim;

                  /* While bin not empty and tcache not full, copy chunks.  */
                  while (tcache->counts[tc_idx] < mp_.tcache_count
                         && (tc_victim = *fb) != NULL)
                    {
                      if (__glibc_unlikely (misaligned_chunk (tc_victim)))
                        malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
                      if (SINGLE_THREAD_P)
                        *fb = REVEAL_PTR (tc_victim->fd);
                      else
                        {
                          REMOVE_FB (fb, pp, tc_victim);
                          if (__glibc_unlikely (tc_victim == NULL))
                            break;
                        }
                      tcache_put (tc_victim, tc_idx);
                    }
                }
#endif
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
    }
```

</details>

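A minimal sketch of the stashing behaviour (assuming an x86-64 glibc with the default `tcache_count` of 7, compiled without aggressive optimizations): freeing more than 7 chunks of one size overflows into the fast bin, and the next `malloc` of that size returns one fast chunk while moving the remaining ones into the tcache.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *p[9], *drain[7];
    for (int i = 0; i < 9; i++)
        p[i] = malloc(0x20);
    /* The first 7 frees fill the 0x30 tcache bin; the last 2 go to the fast bin. */
    for (int i = 0; i < 9; i++)
        free(p[i]);
    /* Drain the tcache so the next request has to fall through to _int_malloc. */
    for (int i = 0; i < 7; i++)
        drain[i] = malloc(0x20);
    /* Served from the fast bin head (p[8]); the other fast chunk (p[7]) is
       stashed into the now-empty tcache while we're there. */
    void *q = malloc(0x20);
    printf("from fast bin: %p (p[8] was %p), drained first: %p\n",
           q, p[8], drain[0]);
    return 0;
}
```
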
### Small Bin

As indicated in a comment, small bins hold one size per index, therefore checking whether a valid chunk is available is super fast, so after the fast bins, the small bins are checked.

The first check is to find out if the requested size could be inside a small bin. In that case, get the corresponding **index** inside the small bin and see if there is **any available chunk**.

Then, a security check is performed checking:

* that `victim->bk->fd == victim`, to see that both chunks are correctly linked.

In that case, the chunk **gets the `inuse` bit**, the doubly linked list is fixed so this chunk disappears from it (as it's going to be used), and the non-main-arena bit is set if needed.

Finally, **fill the tcache index of the requested size** with other chunks inside the small bin (if any).

<details>

<summary>_int_malloc small bin</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6

  /*
     If a small request, check regular bin.  Since these "smallbins"
     hold one size each, no searching within bins is necessary.
     (For a large request, we need to wait until unsorted chunks are
     processed to find best fit. But for small ones, fits are exact
     anyway, so we can check now, which is faster.)
   */

  if (in_smallbin_range (nb))
    {
      idx = smallbin_index (nb);
      bin = bin_at (av, idx);

      if ((victim = last (bin)) != bin)
        {
          bck = victim->bk;
          if (__glibc_unlikely (bck->fd != victim))
            malloc_printerr ("malloc(): smallbin double linked list corrupted");
          set_inuse_bit_at_offset (victim, nb);
          bin->bk = bck;
          bck->fd = bin;

          if (av != &main_arena)
            set_non_main_arena (victim);
          check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
          /* While we're here, if we see other chunks of the same size,
             stash them in the tcache.  */
          size_t tc_idx = csize2tidx (nb);
          if (tcache != NULL && tc_idx < mp_.tcache_bins)
            {
              mchunkptr tc_victim;

              /* While bin not empty and tcache not full, copy chunks over.  */
              while (tcache->counts[tc_idx] < mp_.tcache_count
                     && (tc_victim = last (bin)) != bin)
                {
                  if (tc_victim != 0)
                    {
                      bck = tc_victim->bk;
                      set_inuse_bit_at_offset (tc_victim, nb);
                      if (av != &main_arena)
                        set_non_main_arena (tc_victim);
                      bin->bk = bck;
                      bck->fd = bin;

                      tcache_put (tc_victim, tc_idx);
                    }
                }
            }
#endif
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }
    }
```

</details>

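The removal from the small bin is a plain doubly linked list unlink guarded by that single integrity check. A minimal sketch with a toy `chunk` struct (not the real glibc structures) of what `bck = victim->bk; bin->bk = bck; bck->fd = bin` does:

```c
#include <stdio.h>
#include <stdlib.h>

/* Toy model: only the list pointers of malloc_chunk, nothing else. */
struct chunk { struct chunk *fd, *bk; };

/* Take the last chunk (bin->bk) out of a circular small bin list. */
static struct chunk *smallbin_take_last(struct chunk *bin) {
    struct chunk *victim = bin->bk;      /* last (bin) */
    if (victim == bin)
        return NULL;                     /* bin is empty */
    struct chunk *bck = victim->bk;
    if (bck->fd != victim) {             /* "smallbin double linked list corrupted" */
        fprintf(stderr, "corrupted small bin\n");
        exit(1);
    }
    bin->bk = bck;                       /* unlink victim */
    bck->fd = bin;
    return victim;
}

int main(void) {
    struct chunk bin, a, b;
    /* Circular list bin <-> a <-> b, where b is the oldest (last (bin)). */
    bin.fd = &a; a.bk = &bin; a.fd = &b; b.bk = &a; b.fd = &bin; bin.bk = &b;
    printf("removed %p (expected &b = %p)\n",
           (void *)smallbin_take_last(&bin), (void *)&b);
    return 0;
}
```
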
### malloc\_consolidate

If it wasn't a small chunk, it's a large chunk, and in this case **`malloc_consolidate`** is called to avoid memory fragmentation.

<details>

<summary>malloc_consolidate call</summary>

```c
  /*
     If this is a large request, consolidate fastbins before continuing.
     While it might look excessive to kill all fastbins before
     even seeing if there is space available, this avoids
     fragmentation problems normally associated with fastbins.
     Also, in practice, programs tend to have runs of either small or
     large requests, but less often mixtures, so consolidation is not
     invoked all that often in most programs. And the programs that
     it is called frequently in otherwise tend to fragment.
   */

  else
    {
      idx = largebin_index (nb);
      if (atomic_load_relaxed (&av->have_fastchunks))
        malloc_consolidate (av);
    }
```

</details>

The malloc_consolidate function basically removes chunks from the fast bins and places them into the unsorted bin. After the next malloc these chunks will be organized in their respective small/large bins.

Note that while removing these chunks, if they are found adjacent to previous or next chunks that aren't in use, they will be **unlinked and merged** before placing the final chunk in the **unsorted** bin.

For each fast bin chunk a couple of security checks are performed:

* If the chunk is unaligned, trigger: `malloc_consolidate(): unaligned fastbin chunk detected`
* If the chunk has a size different from the one it should have because of the index it's in: `malloc_consolidate(): invalid chunk size`
* If the previous chunk is not in use and the previous chunk has a size different from the one indicated by its `prev_size`: `corrupted size vs. prev_size in fastbins`

<details>

<summary>malloc_consolidate function</summary>

```c
// https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4810C1-L4905C2

static void malloc_consolidate(mstate av)
{
  mfastbinptr*    fb;                 /* current fastbin being consolidated */
  mfastbinptr*    maxfb;              /* last fastbin (for loop control) */
  mchunkptr       p;                  /* current chunk being consolidated */
  mchunkptr       nextp;              /* next chunk to consolidate */
  mchunkptr       unsorted_bin;       /* bin header */
  mchunkptr       first_unsorted;     /* chunk to link to */

  /* These have same use as in free() */
  mchunkptr       nextchunk;
  INTERNAL_SIZE_T size;
  INTERNAL_SIZE_T nextsize;
  INTERNAL_SIZE_T prevsize;
  int             nextinuse;

  atomic_store_relaxed (&av->have_fastchunks, false);

  unsorted_bin = unsorted_chunks(av);

  /*
    Remove each chunk from fast bin and consolidate it, placing it
    then in unsorted bin. Among other reasons for doing this,
    placing in unsorted bin avoids needing to calculate actual bins
    until malloc is sure that chunks aren't immediately going to be
    reused anyway.
  */

  maxfb = &fastbin (av, NFASTBINS - 1);
  fb = &fastbin (av, 0);
  do {
    p = atomic_exchange_acquire (fb, NULL);
    if (p != 0) {
      do {
        {
          if (__glibc_unlikely (misaligned_chunk (p)))
            malloc_printerr ("malloc_consolidate(): "
                             "unaligned fastbin chunk detected");

          unsigned int idx = fastbin_index (chunksize (p));
          if ((&fastbin (av, idx)) != fb)
            malloc_printerr ("malloc_consolidate(): invalid chunk size");
        }

        check_inuse_chunk(av, p);
        nextp = REVEAL_PTR (p->fd);

        /* Slightly streamlined version of consolidation code in free() */
        size = chunksize (p);
        nextchunk = chunk_at_offset(p, size);
        nextsize = chunksize(nextchunk);

        if (!prev_inuse(p)) {
          prevsize = prev_size (p);
          size += prevsize;
          p = chunk_at_offset(p, -((long) prevsize));
          if (__glibc_unlikely (chunksize(p) != prevsize))
            malloc_printerr ("corrupted size vs. prev_size in fastbins");
          unlink_chunk (av, p);
        }

        if (nextchunk != av->top) {
          nextinuse = inuse_bit_at_offset(nextchunk, nextsize);

          if (!nextinuse) {
            size += nextsize;
            unlink_chunk (av, nextchunk);
          } else
            clear_inuse_bit_at_offset(nextchunk, 0);

          first_unsorted = unsorted_bin->fd;
          unsorted_bin->fd = p;
          first_unsorted->bk = p;

          if (!in_smallbin_range (size)) {
            p->fd_nextsize = NULL;
            p->bk_nextsize = NULL;
          }

          set_head(p, size | PREV_INUSE);
          p->bk = unsorted_bin;
          p->fd = first_unsorted;
          set_foot(p, size);
        }

        else {
          size += nextsize;
          set_head(p, size | PREV_INUSE);
          av->top = p;
        }

      } while ( (p = nextp) != 0);

    }
  } while (fb++ != maxfb);
}
```

</details>

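A minimal sketch of when this path is hit (assuming default x86-64 glibc): once the tcache for a size is full, further frees of that size land in the fast bins, and the next request that is too big for the small bins (chunk size >= 0x400 on x86-64) calls `malloc_consolidate`, merging adjacent freed fast chunks into a single unsorted chunk.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *p[10];
    for (int i = 0; i < 10; i++)
        p[i] = malloc(0x20);
    void *barrier = malloc(0x10);   /* keep the last chunk away from the top chunk */
    /* 7 frees fill the tcache; the remaining 3 adjacent chunks go to the fast bin. */
    for (int i = 0; i < 10; i++)
        free(p[i]);
    /* A large request triggers malloc_consolidate(): the 3 fast chunks are merged
       into one chunk and moved to the unsorted bin before the large bins are used. */
    void *big = malloc(0x400);
    printf("big=%p barrier=%p\n", big, barrier);
    return 0;
}
```
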
### Unsorted bin

It's time to check the unsorted bin for a potential valid chunk to use.

#### Start

This starts with a big `for` loop that will be traversing the unsorted bin in the `bk` direction until it arrives at the end (the arena struct) with `while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))`

Moreover, some security checks are performed every time a new chunk is considered:

* If the chunk size is weird (too small or too big): `malloc(): invalid size (unsorted)`
* If the next chunk size is weird (too small or too big): `malloc(): invalid next size (unsorted)`
* If the previous size indicated by the next chunk differs from the size of the chunk: `malloc(): mismatching next->prev_size (unsorted)`
* If not `victim->bck->fd == victim` or not `victim->fd == av` (arena): `malloc(): unsorted double linked list corrupted`
  * As we are always checking the last one, its `fd` should always be pointing to the arena struct.
* If the next chunk isn't indicating that the previous is in use: `malloc(): invalid next->prev_inuse (unsorted)`

<details>

<summary><code>_int_malloc</code> unsorted bin start</summary>

```c
  /*
     Process recently freed or remaindered chunks, taking one only if
     it is exact fit, or, if this a small request, the chunk is remainder from
     the most recent non-exact fit.  Place other traversed chunks in
     bins.  Note that this step is the only place in any routine where
     chunks are placed in bins.

     The outer loop here is needed because we might not realize until
     near the end of malloc that we should have consolidated, so must
     do so and retry. This happens at most once, and only when we would
     otherwise need to expand memory to service a "small" request.
   */

#if USE_TCACHE
  INTERNAL_SIZE_T tcache_nb = 0;
  size_t tc_idx = csize2tidx (nb);
  if (tcache != NULL && tc_idx < mp_.tcache_bins)
    tcache_nb = nb;
  int return_cached = 0;

  tcache_unsorted_count = 0;
#endif

  for (;; )
    {
      int iters = 0;
      while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
        {
          bck = victim->bk;
          size = chunksize (victim);
          mchunkptr next = chunk_at_offset (victim, size);

          if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
              || __glibc_unlikely (size > av->system_mem))
            malloc_printerr ("malloc(): invalid size (unsorted)");
          if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
              || __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
            malloc_printerr ("malloc(): invalid next size (unsorted)");
          if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
            malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
          if (__glibc_unlikely (bck->fd != victim)
              || __glibc_unlikely (victim->fd != unsorted_chunks (av)))
            malloc_printerr ("malloc(): unsorted double linked list corrupted");
          if (__glibc_unlikely (prev_inuse (next)))
            malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
```

</details>

#### if `in_smallbin_range`

If the chunk is bigger than the requested size, use it, set the rest of the chunk space into the unsorted list and update the `last_remainder` with it.

<details>

<summary><code>_int_malloc</code> unsorted bin <code>in_smallbin_range</code></summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4090C11-L4124C14

          /*
             If a small request, try to use last remainder if it is the
             only chunk in unsorted bin.  This helps promote locality for
             runs of consecutive small requests. This is the only
             exception to best-fit, and applies only when there is
             no exact fit for a small chunk.
           */

          if (in_smallbin_range (nb) &&
              bck == unsorted_chunks (av) &&
              victim == av->last_remainder &&
              (unsigned long) (size) > (unsigned long) (nb + MINSIZE))
            {
              /* split and reattach remainder */
              remainder_size = size - nb;
              remainder = chunk_at_offset (victim, nb);
              unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
              av->last_remainder = remainder;
              remainder->bk = remainder->fd = unsorted_chunks (av);
              if (!in_smallbin_range (remainder_size))
                {
                  remainder->fd_nextsize = NULL;
                  remainder->bk_nextsize = NULL;
                }

              set_head (victim, nb | PREV_INUSE |
                        (av != &main_arena ? NON_MAIN_ARENA : 0));
              set_head (remainder, remainder_size | PREV_INUSE);
              set_foot (remainder, remainder_size);

              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
```

</details>

If this was successful, return the chunk and it's done; if not, continue executing the function...

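A minimal sketch of the remainder behaviour (assuming default x86-64 glibc and a size above 0x408 so the tcache and fast bins are bypassed on free): free one big chunk, then issue two small requests; both are carved out of that freed region, the second one from the `last_remainder` left by the first split, so they end up adjacent in memory.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    /* Bigger than any tcache bin (tcache tops out at 0x410-byte chunks on x86-64). */
    void *big = malloc(0x420);
    void *barrier = malloc(0x10);   /* prevent 'big' from merging with the top chunk */
    free(big);                      /* 'big' ends up in the unsorted bin */

    /* Both small requests are split off the freed region: the first split sets
       av->last_remainder, the second request is carved from that remainder. */
    void *a = malloc(0x30);         /* expected: same address as 'big' */
    void *b = malloc(0x30);         /* expected: right after 'a' (chunk size 0x40) */
    printf("big=%p a=%p b=%p barrier=%p\n", big, a, b, barrier);
    printf("a and b adjacent: %s\n", (char *)b == (char *)a + 0x40 ? "yes" : "no");
    return 0;
}
```
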
#### if equal size

Continue removing the chunk from the bin, in case the requested size is exactly the one of the chunk:

* If the tcache is not filled, add it to the tcache and continue, indicating that there is a tcache chunk that could be used
* If the tcache is full, just return it

<details>

<summary><code>_int_malloc</code> unsorted bin equal size</summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4126C11-L4157C14

          /* remove from unsorted list */
          unsorted_chunks (av)->bk = bck;
          bck->fd = unsorted_chunks (av);

          /* Take now instead of binning if exact fit */

          if (size == nb)
            {
              set_inuse_bit_at_offset (victim, size);
              if (av != &main_arena)
                set_non_main_arena (victim);
#if USE_TCACHE
              /* Fill cache first, return to user only if cache fills.
                 We may return one of these chunks later.  */
              if (tcache_nb > 0
                  && tcache->counts[tc_idx] < mp_.tcache_count)
                {
                  tcache_put (victim, tc_idx);
                  return_cached = 1;
                  continue;
                }
              else
                {
#endif
                  check_malloced_chunk (av, victim, nb);
                  void *p = chunk2mem (victim);
                  alloc_perturb (p, bytes);
                  return p;
#if USE_TCACHE
                }
#endif
            }
```

</details>

If the chunk is not returned or added to the tcache, continue with the code...

#### place chunk in a bin

Store the checked chunk in the small bin or in the large bin according to the size of the chunk (keeping the large bin properly organized).

There are security checks being performed to make sure neither large bin doubly linked list is corrupted:

* If `fwd->bk_nextsize->fd_nextsize != fwd`: `malloc(): largebin double linked list corrupted (nextsize)`
* If `fwd->bk->fd != fwd`: `malloc(): largebin double linked list corrupted (bk)`

<details>

<summary><code>_int_malloc</code> place chunk in a bin</summary>

```c
          /* place chunk in bin */

          if (in_smallbin_range (size))
            {
              victim_index = smallbin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;
            }
          else
            {
              victim_index = largebin_index (size);
              bck = bin_at (av, victim_index);
              fwd = bck->fd;

              /* maintain large bins in sorted order */
              if (fwd != bck)
                {
                  /* Or with inuse bit to speed comparisons */
                  size |= PREV_INUSE;
                  /* if smaller than smallest, bypass loop below */
                  assert (chunk_main_arena (bck->bk));
                  if ((unsigned long) (size)
                      < (unsigned long) chunksize_nomask (bck->bk))
                    {
                      fwd = bck;
                      bck = bck->bk;

                      victim->fd_nextsize = fwd->fd;
                      victim->bk_nextsize = fwd->fd->bk_nextsize;
                      fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
                    }
                  else
                    {
                      assert (chunk_main_arena (fwd));
                      while ((unsigned long) size < chunksize_nomask (fwd))
                        {
                          fwd = fwd->fd_nextsize;
                          assert (chunk_main_arena (fwd));
                        }

                      if ((unsigned long) size
                          == (unsigned long) chunksize_nomask (fwd))
                        /* Always insert in the second position.  */
                        fwd = fwd->fd;
                      else
                        {
                          victim->fd_nextsize = fwd;
                          victim->bk_nextsize = fwd->bk_nextsize;
                          if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
                            malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
                          fwd->bk_nextsize = victim;
                          victim->bk_nextsize->fd_nextsize = victim;
                        }
                      bck = fwd->bk;
                      if (bck->fd != fwd)
                        malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
                    }
                }
              else
                victim->fd_nextsize = victim->bk_nextsize = victim;
            }

          mark_bin (av, victim_index);
          victim->bk = bck;
          victim->fd = fwd;
          fwd->bk = victim;
          bck->fd = victim;
```

</details>

#### `_int_malloc` limits

At this point, if some chunk was stored in the tcache that can be used and the limit is reached, just **return a tcache chunk**.

Moreover, if **MAX\_ITERS** is reached, break from the `for` loop and get a chunk in a different way (top chunk).

If `return_cached` was set, just return a chunk from the tcache to avoid larger searches.

<details>

<summary><code>_int_malloc</code> limits</summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4227C1-L4250C7

#if USE_TCACHE
      /* If we've processed as many chunks as we're allowed while
         filling the cache, return one of the cached ones.  */
      ++tcache_unsorted_count;
      if (return_cached
          && mp_.tcache_unsorted_limit > 0
          && tcache_unsorted_count > mp_.tcache_unsorted_limit)
        {
          return tcache_get (tc_idx);
        }
#endif

#define MAX_ITERS       10000
          if (++iters >= MAX_ITERS)
            break;
        }

#if USE_TCACHE
      /* If all the small chunks we found ended up cached, return one now.  */
      if (return_cached)
        {
          return tcache_get (tc_idx);
        }
#endif
```

</details>

If the limits aren't reached, continue with the code...

### Large Bin (by index)

If the request is large (not in the small bin range) and we haven't yet returned any chunk, get the **index** of the requested size in the **large bin**, check that it's **not empty** and that the **biggest chunk in this bin is bigger** than the requested size, and in that case find the **smallest chunk that can be used** for the requested size.

If the remainder space from the finally used chunk can be a new chunk, add it to the unsorted bin and `last_remainder` is updated.

A security check is made when adding the remainder to the unsorted bin:

* `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks`

<details>

<summary><code>_int_malloc</code> Large bin (by index)</summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4252C7-L4317C10

      /*
         If a large request, scan through the chunks of current bin in
         sorted order to find smallest that fits.  Use the skip list for this.
       */

      if (!in_smallbin_range (nb))
        {
          bin = bin_at (av, idx);

          /* skip scan if empty or largest chunk is too small */
          if ((victim = first (bin)) != bin
              && (unsigned long) chunksize_nomask (victim)
              >= (unsigned long) (nb))
            {
              victim = victim->bk_nextsize;
              while (((unsigned long) (size = chunksize (victim)) <
                      (unsigned long) (nb)))
                victim = victim->bk_nextsize;

              /* Avoid removing the first entry for a size so that the skip
                 list does not have to be rerouted.  */
              if (victim != last (bin)
                  && chunksize_nomask (victim)
                  == chunksize_nomask (victim->fd))
                victim = victim->fd;

              remainder_size = size - nb;
              unlink_chunk (av, victim);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    set_non_main_arena (victim);
                }
              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);
                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
                  if (__glibc_unlikely (fwd->bk != bck))
                    malloc_printerr ("malloc(): corrupted unsorted chunks");
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
```

</details>

If no suitable chunk is found here, continue.

### Large Bin (next bigger)

If in the exact large bin there wasn't any chunk that could be used, start looping through all the next large bins (starting with the immediately larger one) until one is found (if any).

The remainder of the split chunk is added to the unsorted bin, `last_remainder` is updated and the same security check is performed:

* `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks 2`

<details>

<summary><code>_int_malloc</code> Large bin (next bigger)</summary>

```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4319C7-L4425C10

      /*
         Search for a chunk by scanning bins, starting with next largest
         bin. This search is strictly by best-fit; i.e., the smallest
         (with ties going to approximately the least recently used) chunk
         that fits is selected.

         The bitmap avoids needing to check that most blocks are nonempty.
         The particular case of skipping all bins during warm-up phases
         when no chunks have been returned yet is faster than it might look.
       */

      ++idx;
      bin = bin_at (av, idx);
      block = idx2block (idx);
      map = av->binmap[block];
      bit = idx2bit (idx);

      for (;; )
        {
          /* Skip rest of block if there are no more set bits in this block.  */
          if (bit > map || bit == 0)
            {
              do
                {
                  if (++block >= BINMAPSIZE) /* out of bins */
                    goto use_top;
                }
              while ((map = av->binmap[block]) == 0);

              bin = bin_at (av, (block << BINMAPSHIFT));
              bit = 1;
            }

          /* Advance to bin with set bit. There must be one. */
          while ((bit & map) == 0)
            {
              bin = next_bin (bin);
              bit <<= 1;
              assert (bit != 0);
            }

          /* Inspect the bin. It is likely to be non-empty */
          victim = last (bin);

          /* If a false alarm (empty bin), clear the bit. */
          if (victim == bin)
            {
              av->binmap[block] = map &= ~bit; /* Write through */
              bin = next_bin (bin);
              bit <<= 1;
            }

          else
            {
              size = chunksize (victim);

              /* We know the first chunk in this bin is big enough to use. */
              assert ((unsigned long) (size) >= (unsigned long) (nb));

              remainder_size = size - nb;

              /* unlink */
              unlink_chunk (av, victim);

              /* Exhaust */
              if (remainder_size < MINSIZE)
                {
                  set_inuse_bit_at_offset (victim, size);
                  if (av != &main_arena)
                    set_non_main_arena (victim);
                }

              /* Split */
              else
                {
                  remainder = chunk_at_offset (victim, nb);

                  /* We cannot assume the unsorted list is empty and therefore
                     have to perform a complete insert here.  */
                  bck = unsorted_chunks (av);
                  fwd = bck->fd;
                  if (__glibc_unlikely (fwd->bk != bck))
                    malloc_printerr ("malloc(): corrupted unsorted chunks 2");
                  remainder->bk = bck;
                  remainder->fd = fwd;
                  bck->fd = remainder;
                  fwd->bk = remainder;

                  /* advertise as last remainder */
                  if (in_smallbin_range (nb))
                    av->last_remainder = remainder;
                  if (!in_smallbin_range (remainder_size))
                    {
                      remainder->fd_nextsize = NULL;
                      remainder->bk_nextsize = NULL;
                    }
                  set_head (victim, nb | PREV_INUSE |
                            (av != &main_arena ? NON_MAIN_ARENA : 0));
                  set_head (remainder, remainder_size | PREV_INUSE);
                  set_foot (remainder, remainder_size);
                }
              check_malloced_chunk (av, victim, nb);
              void *p = chunk2mem (victim);
              alloc_perturb (p, bytes);
              return p;
            }
        }
```

</details>

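The `binmap` walk above relies on a few bit-twiddling macros. A minimal sketch of the math (mirroring the `BINMAPSHIFT`/`idx2block`/`idx2bit` definitions in malloc.c; these are stand-alone re-implementations for illustration):

```c
#include <stdio.h>

#define BINMAPSHIFT  5
#define BITSPERMAP   (1U << BINMAPSHIFT)     /* 32 bins tracked per binmap word */
#define idx2block(i) ((i) >> BINMAPSHIFT)    /* which binmap word holds bin i */
#define idx2bit(i)   (1U << ((i) & ((1U << BINMAPSHIFT) - 1)))  /* which bit inside it */

int main(void) {
    unsigned idx = 70;                       /* some large bin index */
    printf("bin %u -> binmap[%u], bit 0x%08x\n", idx, idx2block(idx), idx2bit(idx));
    /* mark_bin / unmark_bin are then just: map |= bit;  and  map &= ~bit; */
    return 0;
}
```
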
### Top Chunk

At this point, it's time to get a new chunk from the Top chunk (if it's big enough).

It starts with a security check making sure that the size of the top chunk is not too big (corrupted):

* `chunksize(av->top) > av->system_mem`: `malloc(): corrupted top size`

Then, it'll use the top chunk space if it's large enough to create a chunk of the requested size.\
If not, if there are fast chunks, consolidate them and try again.\
Finally, if there's not enough space, use `sysmalloc` to allocate enough size.

<details>

<summary><code>_int_malloc</code> Top chunk</summary>

```c
    use_top:
      /*
         If large enough, split off the chunk bordering the end of memory
         (held in av->top). Note that this is in accord with the best-fit
         search rule.  In effect, av->top is treated as larger (and thus
         less well fitting) than any other available chunk since it can
         be extended to be as large as necessary (up to system
         limitations).

         We require that av->top always exists (i.e., has size >=
         MINSIZE) after initialization, so if it would otherwise be
         exhausted by current request, it is replenished. (The main
         reason for ensuring it exists is that we may need MINSIZE space
         to put in fenceposts in sysmalloc.)
       */

      victim = av->top;
      size = chunksize (victim);

      if (__glibc_unlikely (size > av->system_mem))
        malloc_printerr ("malloc(): corrupted top size");

      if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
        {
          remainder_size = size - nb;
          remainder = chunk_at_offset (victim, nb);
          av->top = remainder;
          set_head (victim, nb | PREV_INUSE |
                    (av != &main_arena ? NON_MAIN_ARENA : 0));
          set_head (remainder, remainder_size | PREV_INUSE);

          check_malloced_chunk (av, victim, nb);
          void *p = chunk2mem (victim);
          alloc_perturb (p, bytes);
          return p;
        }

      /* When we are using atomic ops to free fast chunks we can get
         here for all block sizes.  */
      else if (atomic_load_relaxed (&av->have_fastchunks))
        {
          malloc_consolidate (av);
          /* restore original bin index */
          if (in_smallbin_range (nb))
            idx = smallbin_index (nb);
          else
            idx = largebin_index (nb);
        }

      /*
         Otherwise, relay to handle system-dependent cases
       */
      else
        {
          void *p = sysmalloc (nb, av);
          if (p != NULL)
            alloc_perturb (p, bytes);
          return p;
        }
    }
}
```

</details>

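A minimal sketch of the top-chunk path (assuming a fresh x86-64 glibc heap with all bins empty): consecutive allocations are carved off the front of `av->top`, so they come back contiguous, each one separated by its chunk size.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    /* On a fresh heap every bin is empty, so both requests are split from av->top. */
    char *a = malloc(0x100);
    char *b = malloc(0x100);
    /* Expected distance: 0x110 (0x100 of user data + 0x10 chunk header). */
    printf("a=%p b=%p delta=%#tx\n", (void *)a, (void *)b, b - a);
    return 0;
}
```
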
## sysmalloc
### sysmalloc start

If the arena is null or the requested size is too big (and there are still permitted mmaps left), use `sysmalloc_mmap` to allocate space and return it.

<details>

<summary>sysmalloc start</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2531

/*
   sysmalloc handles malloc cases requiring more memory from the system.
   On entry, it is assumed that av->top does not have enough
   space to service request for nb bytes, thus requiring that av->top
   be extended or replaced.
 */

static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
  mchunkptr old_top;              /* incoming value of av->top */
  INTERNAL_SIZE_T old_size;       /* its size */
  char *old_end;                  /* its end address */

  long size;                      /* arg to first MORECORE or mmap call */
  char *brk;                      /* return value from MORECORE */

  long correction;                /* arg to 2nd MORECORE call */
  char *snd_brk;                  /* 2nd return val */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
  INTERNAL_SIZE_T end_misalign;   /* partial page left at end of new space */
  char *aligned_brk;              /* aligned offset into brk */

  mchunkptr p;                    /* the allocated/returned chunk */
  mchunkptr remainder;            /* remainder from allocation */
  unsigned long remainder_size;   /* its size */


  size_t pagesize = GLRO (dl_pagesize);
  bool tried_mmap = false;


  /*
     If have mmap, and the request size meets the mmap threshold, and
     the system supports mmap, and there are few enough currently
     allocated mmapped regions, try to directly map this request
     rather than expanding top.
   */

  if (av == NULL
      || ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
          && (mp_.n_mmaps < mp_.n_mmaps_max)))
    {
      char *mm;
      if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
        {
          /* There is no need to issue the THP madvise call if Huge Pages are
             used directly.  */
          mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
          if (mm != MAP_FAILED)
            return mm;
        }
      mm = sysmalloc_mmap (nb, pagesize, 0, av);
      if (mm != MAP_FAILED)
        return mm;
      tried_mmap = true;
    }

  /* There are no usable arenas and mmap also failed.  */
  if (av == NULL)
    return 0;
```

</details>

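This mmap fast path can be observed (and tuned) from userland. A minimal sketch (assuming the default glibc `M_MMAP_THRESHOLD` of 128 KiB): a request above the threshold is served by `sysmalloc_mmap`, so it lands far from the brk heap and its size field has the `IS_MMAPPED` bit (0x2) set.

```c
#include <stdio.h>
#include <stdlib.h>
#include <malloc.h>

int main(void) {
    void *heap_chunk = malloc(0x100);     /* small: comes from the brk heap / arena */
    void *mmap_chunk = malloc(0x40000);   /* 256 KiB > M_MMAP_THRESHOLD: mmap'ed */

    /* The chunk size field sits right before the user pointer. */
    size_t size_field = *((size_t *)mmap_chunk - 1);
    printf("heap chunk: %p\n", heap_chunk);
    printf("mmap chunk: %p (size field %#zx, IS_MMAPPED=%zu)\n",
           mmap_chunk, size_field, (size_field >> 1) & 1);

    /* mallopt can lower the threshold so that even small requests go via mmap. */
    mallopt(M_MMAP_THRESHOLD, 0x1000);
    return 0;
}
```
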
### sysmalloc checks

It starts by getting old top chunk information and checking that some of the following conditions are true:

* The old heap size is 0 (new heap)
* The size of the previous heap is greater than MINSIZE and the old Top is in use
* The heap is aligned to the page size (0x1000, so the lower 12 bits need to be 0)

Then it also checks that:

* The old size doesn't have enough space to create a chunk of the requested size

<details>

<summary>sysmalloc checks</summary>

```c
  /* Record incoming configuration of top */

  old_top = av->top;
  old_size = chunksize (old_top);
  old_end = (char *) (chunk_at_offset (old_top, old_size));

  brk = snd_brk = (char *) (MORECORE_FAILURE);

  /*
     If not the first time through, we require old_size to be
     at least MINSIZE and to have prev_inuse set.
   */

  assert ((old_top == initial_top (av) && old_size == 0) ||
          ((unsigned long) (old_size) >= MINSIZE &&
           prev_inuse (old_top) &&
           ((unsigned long) old_end & (pagesize - 1)) == 0));

  /* Precondition: not enough current space to satisfy nb request */
  assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
```

</details>

### sysmalloc not main arena

It'll first try to **extend** the current heap. If that's not possible, it'll try to **allocate a new heap** and update the pointers to be able to use it.\
Finally, if that didn't work, try calling **`sysmalloc_mmap`**.

<details>

<summary>sysmalloc not main arena</summary>

```c
  if (av != &main_arena)
    {
      heap_info *old_heap, *heap;
      size_t old_heap_size;

      /* First try to extend the current heap. */
      old_heap = heap_for_ptr (old_top);
      old_heap_size = old_heap->size;
      if ((long) (MINSIZE + nb - old_size) > 0
          && grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
        {
          av->system_mem += old_heap->size - old_heap_size;
          set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
                    | PREV_INUSE);
        }
      else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
        {
          /* Use a newly allocated heap.  */
          heap->ar_ptr = av;
          heap->prev = old_heap;
          av->system_mem += heap->size;
          /* Set up the new top.  */
          top (av) = chunk_at_offset (heap, sizeof (*heap));
          set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);

          /* Setup fencepost and free the old top chunk with a multiple of
             MALLOC_ALIGNMENT in size. */
          /* The fencepost takes at least MINSIZE bytes, because it might
             become the top chunk again later.  Note that a footer is set
             up, too, although the chunk is marked in use. */
          old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
          set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
                    0 | PREV_INUSE);
          if (old_size >= MINSIZE)
            {
              set_head (chunk_at_offset (old_top, old_size),
                        CHUNK_HDR_SZ | PREV_INUSE);
              set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
              set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
              _int_free (av, old_top, 1);
            }
          else
            {
              set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
              set_foot (old_top, (old_size + CHUNK_HDR_SZ));
            }
        }
      else if (!tried_mmap)
        {
          /* We can at least try to use to mmap memory.  If new_heap fails
             it is unlikely that trying to allocate huge pages will
             succeed.  */
          char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
          if (mm != MAP_FAILED)
            return mm;
        }
    }
```

</details>

### sysmalloc main arena

It starts by calculating the amount of memory needed. Since it will first request contiguous memory, it's possible to reuse the old unused memory. Some alignment operations are also performed.

<details>

<summary>sysmalloc main arena</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2665C1-L2713C10

  else /* av == main_arena */


    { /* Request enough space for nb + pad + overhead */
      size = nb + mp_.top_pad + MINSIZE;

      /*
         If contiguous, we can subtract out existing space that we hope to
         combine with new space. We add it back later only if
         we don't actually get contiguous space.
       */

      if (contiguous (av))
        size -= old_size;

      /*
         Round to a multiple of page size or huge page size.
         If MORECORE is not contiguous, this ensures that we only call it
         with whole-page arguments.  And if MORECORE is contiguous and
         this is not first time through, this preserves page-alignment of
         previous calls. Otherwise, we correct to page-align below.
       */

#ifdef MADV_HUGEPAGE
      /* Defined in brk.c.  */
      extern void *__curbrk;
      if (__glibc_unlikely (mp_.thp_pagesize != 0))
        {
          uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
                                    mp_.thp_pagesize);
          size = top - (uintptr_t) __curbrk;
        }
      else
#endif
        size = ALIGN_UP (size, GLRO(dl_pagesize));

      /*
         Don't try to call MORECORE if argument is so big as to appear
         negative. Note that since mmap takes size_t arg, it may succeed
         below even if we cannot call MORECORE.
       */

      if (size > 0)
        {
          brk = (char *) (MORECORE (size));
          if (brk != (char *) (MORECORE_FAILURE))
            madvise_thp (brk, size);
          LIBC_PROBE (memory_sbrk_more, 2, brk, size);
        }
```

</details>

### sysmalloc main arena previous error 1

If the previous call returned `MORECORE_FAILURE`, try again to allocate memory using `sysmalloc_mmap_fallback`.

<details>

<summary><code>sysmalloc</code> main arena previous error 1</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2715C7-L2740C10

      if (brk == (char *) (MORECORE_FAILURE))
        {
          /*
             If have mmap, try using it as a backup when MORECORE fails or
             cannot be used. This is worth doing on systems that have "holes" in
             address space, so sbrk cannot extend to give contiguous space, but
             space is available elsewhere.  Note that we ignore mmap max count
             and threshold limits, since the space will not be used as a
             segregated mmap region.
           */

          char *mbrk = MAP_FAILED;
          if (mp_.hp_pagesize > 0)
            mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
                                            mp_.hp_pagesize, mp_.hp_pagesize,
                                            mp_.hp_flags, av);
          if (mbrk == MAP_FAILED)
            mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
                                            pagesize, 0, av);
          if (mbrk != MAP_FAILED)
            {
              /* We do not need, and cannot use, another sbrk call to find end */
              brk = mbrk;
              snd_brk = brk + size;
            }
        }
```

</details>

### sysmalloc main arena continue

If the previous call didn't return `MORECORE_FAILURE`, and it worked, perform some alignments:

<details>

<summary>sysmalloc main arena continue</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2742

      if (brk != (char *) (MORECORE_FAILURE))
        {
          if (mp_.sbrk_base == 0)
            mp_.sbrk_base = brk;
          av->system_mem += size;

          /*
             If MORECORE extends previous space, we can likewise extend top size.
           */

          if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
            set_head (old_top, (size + old_size) | PREV_INUSE);

          else if (contiguous (av) && old_size && brk < old_end)
            /* Oops!  Someone else killed our space..  Can't touch anything.  */
            malloc_printerr ("break adjusted to free malloc space");

          /*
             Otherwise, make adjustments:

           * If the first time through or noncontiguous, we need to call sbrk
              just to find out where the end of memory lies.

           * We need to ensure that all returned chunks from malloc will meet
              MALLOC_ALIGNMENT

           * If there was an intervening foreign sbrk, we need to adjust sbrk
              request size to account for fact that we will not be able to
              combine new space with existing space in old_top.

           * Almost all systems internally allocate whole pages at a time, in
              which case we might as well use the whole last page of request.
              So we allocate enough more memory to hit a page boundary now,
              which in turn causes future contiguous calls to page-align.
           */

          else
            {
              front_misalign = 0;
              end_misalign = 0;
              correction = 0;
              aligned_brk = brk;

              /* handle contiguous cases */
              if (contiguous (av))
                {
                  /* Count foreign sbrk as system_mem.  */
                  if (old_size)
                    av->system_mem += brk - old_end;

                  /* Guarantee alignment of first new chunk made from this space */

                  front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                  if (front_misalign > 0)
                    {
                      /*
                         Skip over some bytes to arrive at an aligned position.
                         We don't need to specially mark these wasted front bytes.
                         They will never be accessed anyway because
                         prev_inuse of av->top (and any chunk created from its start)
                         is always true after initialization.
                       */

                      correction = MALLOC_ALIGNMENT - front_misalign;
                      aligned_brk += correction;
                    }

                  /*
                     If this isn't adjacent to existing space, then we will not
                     be able to merge with old_top space, so must add to 2nd request.
                   */

                  correction += old_size;

                  /* Extend the end address to hit a page boundary */
                  end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
                  correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;

                  assert (correction >= 0);
                  snd_brk = (char *) (MORECORE (correction));

                  /*
                     If can't allocate correction, try to at least find out current
                     brk.  It might be enough to proceed without failing.

                     Note that if second sbrk did NOT fail, we assume that space
                     is contiguous with first sbrk. This is a safe assumption unless
                     program is multithreaded but doesn't use locks and a foreign sbrk
                     occurred between our first and second calls.
                   */

                  if (snd_brk == (char *) (MORECORE_FAILURE))
                    {
                      correction = 0;
                      snd_brk = (char *) (MORECORE (0));
                    }
                  else
                    madvise_thp (snd_brk, correction);
                }

              /* handle non-contiguous cases */
              else
                {
                  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
                    /* MORECORE/mmap must correctly align */
                    assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
                  else
                    {
                      front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
                      if (front_misalign > 0)
                        {
                          /*
                             Skip over some bytes to arrive at an aligned position.
                             We don't need to specially mark these wasted front bytes.
                             They will never be accessed anyway because
                             prev_inuse of av->top (and any chunk created from its start)
                             is always true after initialization.
                           */

                          aligned_brk += MALLOC_ALIGNMENT - front_misalign;
                        }
                    }

                  /* Find out current end of memory */
                  if (snd_brk == (char *) (MORECORE_FAILURE))
                    {
                      snd_brk = (char *) (MORECORE (0));
                    }
                }

              /* Adjust top based on results of second sbrk */
              if (snd_brk != (char *) (MORECORE_FAILURE))
                {
                  av->top = (mchunkptr) aligned_brk;
                  set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
                  av->system_mem += correction;

                  /*
                     If not the first time through, we either have a
                     gap due to foreign sbrk or a non-contiguous region.  Insert a
                     double fencepost at old_top to prevent consolidation with space
                     we don't own. These fenceposts are artificial chunks that are
                     marked as inuse and are in any case too small to use. We need
                     two to make sizes and alignments work out.
                   */

                  if (old_size != 0)
                    {
                      /*
                         Shrink old_top to insert fenceposts, keeping size a
                         multiple of MALLOC_ALIGNMENT. We know there is at least
                         enough space in old_top to do this.
                       */
                      old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
                      set_head (old_top, old_size | PREV_INUSE);

                      /*
                         Note that the following assignments completely overwrite
                         old_top when old_size was previously MINSIZE.  This is
                         intentional. We need the fencepost, even if old_top otherwise gets
                         lost.
                       */
                      set_head (chunk_at_offset (old_top, old_size),
                                CHUNK_HDR_SZ | PREV_INUSE);
                      set_head (chunk_at_offset (old_top,
                                                 old_size + CHUNK_HDR_SZ),
                                CHUNK_HDR_SZ | PREV_INUSE);

                      /* If possible, release the rest.  */
                      if (old_size >= MINSIZE)
                        {
                          _int_free (av, old_top, 1);
                        }
                    }
                }
            }
        }
    } /* if (av != &main_arena) */
```

</details>

### sysmalloc finale

Finish the allocation by updating the arena information.

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2921C3-L2943C12

  if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
    av->max_system_mem = av->system_mem;
  check_malloc_state (av);

  /* finally, do the allocation */
  p = av->top;
  size = chunksize (p);

  /* check that one of the above allocation paths succeeded */
  if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
    {
      remainder_size = size - nb;
      remainder = chunk_at_offset (p, nb);
      av->top = remainder;
      set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
      set_head (remainder, remainder_size | PREV_INUSE);
      check_malloced_chunk (av, p, nb);
      return chunk2mem (p);
    }

  /* catch all failure paths */
  __set_errno (ENOMEM);
  return 0;
```

## sysmalloc\_mmap

<details>

<summary>sysmalloc_mmap code</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2392C1-L2481C2

static void *
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
{
  long int size;

  /*
    Round up size to nearest page.  For mmapped chunks, the overhead is one
    SIZE_SZ unit larger than for normal chunks, because there is no
    following chunk whose prev_size field could be used.

    See the front_misalign handling below, for glibc there is no need for
    further alignments unless we have have high alignment.
   */
  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
    size = ALIGN_UP (nb + SIZE_SZ, pagesize);
  else
    size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);

  /* Don't try if size wraps around 0.  */
  if ((unsigned long) (size) <= (unsigned long) (nb))
    return MAP_FAILED;

  char *mm = (char *) MMAP (0, size,
                            mtag_mmap_flags | PROT_READ | PROT_WRITE,
                            extra_flags);
  if (mm == MAP_FAILED)
    return mm;

#ifdef MAP_HUGETLB
  if (!(extra_flags & MAP_HUGETLB))
    madvise_thp (mm, size);
#endif

  __set_vma_name (mm, size, " glibc: malloc");

  /*
    The offset to the start of the mmapped region is stored in the prev_size
    field of the chunk. This allows us to adjust returned start address to
    meet alignment requirements here and in memalign(), and still be able to
    compute proper address argument for later munmap in free() and realloc().
   */

  INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */

  if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
    {
      /* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
         MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1.  Each mmap'ed area is page
         aligned and therefore definitely MALLOC_ALIGN_MASK-aligned.  */
      assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
      front_misalign = 0;
    }
  else
    front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;

  mchunkptr p;                    /* the allocated/returned chunk */

  if (front_misalign > 0)
    {
      ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
      p = (mchunkptr) (mm + correction);
      set_prev_size (p, correction);
      set_head (p, (size - correction) | IS_MMAPPED);
    }
  else
    {
      p = (mchunkptr) mm;
      set_prev_size (p, 0);
      set_head (p, size | IS_MMAPPED);
    }

  /* update statistics */
  int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
  atomic_max (&mp_.max_n_mmaps, new);

  unsigned long sum;
  sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
  atomic_max (&mp_.max_mmapped_mem, sum);

  check_chunk (av, p);

  return chunk2mem (p);
}
```

</details>

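A quick sanity check of the rounding above (assuming x86-64 defaults: `SIZE_SZ` = 8, page size 0x1000 and `MALLOC_ALIGNMENT == CHUNK_HDR_SZ`):

```c
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

int main(void) {
    unsigned long nb = 0x40010;   /* normalized chunk size for a ~256 KiB request */
    unsigned long SIZE_SZ = 8, pagesize = 0x1000;
    /* MALLOC_ALIGNMENT == CHUNK_HDR_SZ branch of sysmalloc_mmap */
    printf("mmap size = %#lx\n", ALIGN_UP(nb + SIZE_SZ, pagesize));   /* 0x41000 */
    return 0;
}
```
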
{% hint style="success" %}
Learn & practice AWS Hacking:<img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow** us on **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) and [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) github repos.

</details>
{% endhint %}