# malloc & sysmalloc

{% hint style="success" %}
Learn & practice AWS Hacking:<img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow us on** **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) **and** [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) **github repos.**

</details>
{% endhint %}
## Allocation Order Summary <a href="#libc_malloc" id="libc_malloc"></a>

(No checks are explained in this summary and some cases have been omitted for brevity)

1. `__libc_malloc` tries to get a chunk from the tcache; if it can't, it calls `_int_malloc`
2. `_int_malloc`:
   1. Tries to generate the arena if there isn't any
   2. If there is any fast bin chunk of the correct size, use it
      1. Fill the tcache with other fast bin chunks
   3. If there is any small bin chunk of the correct size, use it
      1. Fill the tcache with other chunks of that size
   4. If the requested size isn't for small bins, consolidate the fast bins into the unsorted bin
   5. Check the unsorted bin, use the first chunk with enough space
      1. If the found chunk is bigger, split it to return a part and add the remainder back to the unsorted bin
      2. If a chunk is of the same size as the requested one, use it to fill the tcache instead of returning it (until the tcache is full, then return the next one)
      3. For each chunk of smaller size checked, put it in its respective small or large bin
   6. Check the large bin at the index of the requested size
      1. Start looking from the first chunk that is bigger than the requested size; if any is found, return it and add the remainder to the unsorted bin
   7. Check the large bins of the next indexes until the end
      1. From the next bigger index check for any chunk, split the first chunk found to use it for the requested size and add the remainder to the unsorted bin
   8. If nothing is found in the previous bins, get a chunk from the top chunk
   9. If the top chunk isn't big enough, enlarge it with `sysmalloc`

A user-space sketch of this reuse order can be found right after this list.
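To observe part of this priority order from user space, the following standalone program (an illustrative sketch, not glibc code) frees a few same-sized chunks and reallocates them; on a recent glibc the new requests are typically served from the tcache first, in LIFO order.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    // Allocate three chunks of the same tcache-eligible size.
    void *a = malloc(0x48), *b = malloc(0x48), *c = malloc(0x48);
    printf("a=%p b=%p c=%p\n", a, b, c);

    // Free them: on current glibc they normally land in the 0x50 tcache bin.
    free(a); free(b); free(c);

    // New requests of the same size are usually served from the tcache
    // in LIFO order (c, then b, then a) before fast/small bins are touched.
    printf("1st=%p 2nd=%p 3rd=%p\n", malloc(0x48), malloc(0x48), malloc(0x48));
    return 0;
}
```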
## \_\_libc\_malloc <a href="#libc_malloc" id="libc_malloc"></a>

The `malloc` function actually calls `__libc_malloc`. This function checks the tcache to see if there is any available chunk of the desired size. If there is, it uses it; if not, it checks whether this is a single-threaded process and, in that case, it calls `_int_malloc` in the main arena, otherwise it calls `_int_malloc` in the arena of the thread.
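As a hedged illustration of that main-arena vs. thread-arena split (an observation program, not glibc code), the sketch below allocates from the main thread and from a second thread; on a typical glibc the second allocation is usually served by a secondary, mmapped arena, so the addresses lie far apart.

```c
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static void *worker(void *arg) {
    (void)arg;
    // In a multi-threaded process __libc_malloc goes through arena_get(),
    // so this allocation is typically served by a secondary (mmap'd) arena.
    void *p = malloc(0x100);
    printf("thread chunk:      %p\n", p);
    free(p);
    return NULL;
}

int main(void) {
    // Single-threaded path: _int_malloc(&main_arena, ...) -> brk heap.
    void *m = malloc(0x100);
    printf("main-thread chunk: %p\n", m);

    pthread_t t;
    pthread_create(&t, NULL, worker, NULL);
    pthread_join(t, NULL);

    free(m);
    return 0;
}
```

Compile with `gcc demo.c -o demo -lpthread`; the large gap between the two printed addresses is what usually betrays the separate arena.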
<details>

<summary>__libc_malloc code</summary>
```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c

#if IS_IN (libc)
void *
__libc_malloc (size_t bytes)
{
mstate ar_ptr;
void *victim;

_Static_assert (PTRDIFF_MAX <= SIZE_MAX / 2,
"PTRDIFF_MAX is not more than half of SIZE_MAX");

if (!__malloc_initialized)
ptmalloc_init ();
#if USE_TCACHE
/* int_free also calls request2size, be careful to not pad twice. */
size_t tbytes = checked_request2size (bytes);
if (tbytes == 0)
{
__set_errno (ENOMEM);
return NULL;
}
size_t tc_idx = csize2tidx (tbytes);

MAYBE_INIT_TCACHE ();

DIAG_PUSH_NEEDS_COMMENT;
if (tc_idx < mp_.tcache_bins
&& tcache != NULL
&& tcache->counts[tc_idx] > 0)
{
victim = tcache_get (tc_idx);
return tag_new_usable (victim);
}
DIAG_POP_NEEDS_COMMENT;
#endif

if (SINGLE_THREAD_P)
{
victim = tag_new_usable (_int_malloc (&main_arena, bytes));
assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
&main_arena == arena_for_chunk (mem2chunk (victim)));
return victim;
}

arena_get (ar_ptr, bytes);

victim = _int_malloc (ar_ptr, bytes);
/* Retry with another arena only if we were able to find a usable arena
before. */
if (!victim && ar_ptr != NULL)
{
LIBC_PROBE (memory_malloc_retry, 1, bytes);
ar_ptr = arena_get_retry (ar_ptr, bytes);
victim = _int_malloc (ar_ptr, bytes);
}

if (ar_ptr != NULL)
__libc_lock_unlock (ar_ptr->mutex);

victim = tag_new_usable (victim);

assert (!victim || chunk_is_mmapped (mem2chunk (victim)) ||
ar_ptr == arena_for_chunk (mem2chunk (victim)));
return victim;
}
```
</details>

Note how it will always tag the returned pointer with `tag_new_usable`, from the code:
```c
void *tag_new_usable (void *ptr)

Allocate a new random color and use it to color the user region of
a chunk; this may include data from the subsequent chunk's header
if tagging is sufficiently fine grained. Returns PTR suitably
recolored for accessing the memory there.
```
## \_int\_malloc <a href="#int_malloc" id="int_malloc"></a>

This is the function that allocates memory using the other bins and the top chunk.

* Start

It starts by defining some variables and getting the real size the requested memory space needs to have:
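As a rough sketch of that normalization (simplified from glibc's `request2size` logic; the 64-bit constants below are assumptions about a typical build), the requested bytes are padded with the size field and rounded up to the 16-byte alignment, with a floor of `MINSIZE`:

```c
#include <stdio.h>
#include <stddef.h>

#define SIZE_SZ        sizeof(size_t)   /* 8 on 64-bit */
#define MALLOC_ALIGN   (2 * SIZE_SZ)    /* 16 on 64-bit */
#define MINSIZE        (4 * SIZE_SZ)    /* smallest allocatable chunk: 0x20 */

/* Simplified version of glibc's request2size(): add room for the size
   field and round up to the alignment, never going below MINSIZE. */
static size_t request2size(size_t req) {
    size_t nb = (req + SIZE_SZ + MALLOC_ALIGN - 1) & ~(MALLOC_ALIGN - 1);
    return nb < MINSIZE ? MINSIZE : nb;
}

int main(void) {
    size_t reqs[] = { 0, 8, 24, 25, 100, 0x400 };
    for (size_t i = 0; i < sizeof(reqs)/sizeof(reqs[0]); i++)
        printf("malloc(%#zx) -> chunk size %#zx\n", reqs[i], request2size(reqs[i]));
    return 0;
}
```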
<details>

<summary>\_int\_malloc start</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3847
static void *
_int_malloc (mstate av, size_t bytes)
{
INTERNAL_SIZE_T nb; /* normalized request size */
unsigned int idx; /* associated bin index */
mbinptr bin; /* associated bin */

mchunkptr victim; /* inspected/selected chunk */
INTERNAL_SIZE_T size; /* its size */
int victim_index; /* its bin index */

mchunkptr remainder; /* remainder from a split */
unsigned long remainder_size; /* its size */

unsigned int block; /* bit map traverser */
unsigned int bit; /* bit map traverser */
unsigned int map; /* current word of binmap */

mchunkptr fwd; /* misc temp for linking */
mchunkptr bck; /* misc temp for linking */

#if USE_TCACHE
size_t tcache_unsorted_count; /* count of unsorted chunks processed */
#endif

/*
Convert request size to internal form by adding SIZE_SZ bytes
overhead plus possibly more to obtain necessary alignment and/or
to obtain a size of at least MINSIZE, the smallest allocatable
size. Also, checked_request2size returns false for request sizes
that are so large that they wrap around zero when padded and
aligned.
*/

nb = checked_request2size (bytes);
if (nb == 0)
{
__set_errno (ENOMEM);
return NULL;
}
```
</details>

### Arena

In the unlikely case that there are no usable arenas, it uses `sysmalloc` to get a chunk from `mmap`:
<details>

<summary>_int_malloc not arena</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3885C3-L3893C6
/* There are no usable arenas. Fall back to sysmalloc to get a chunk from
mmap. */
if (__glibc_unlikely (av == NULL))
{
void *p = sysmalloc (nb, av);
if (p != NULL)
alloc_perturb (p, bytes);
return p;
}
```
</details>

### Fast Bin

If the needed size is inside the Fast Bin sizes, try to use a chunk from the fast bin. Basically, based on the size, it will find the fast bin index where valid chunks should be located and, if there are any, it will return one of them.\
Moreover, if tcache is enabled, it will **fill the tcache bin of that size with fast bins**.

While performing these actions, some security checks are executed here:

* If the chunk is misaligned: `malloc(): unaligned fastbin chunk detected 2`
* If the forward chunk is misaligned: `malloc(): unaligned fastbin chunk detected`
* If the returned chunk has a size that isn't correct because of its index in the fast bin: `malloc(): memory corruption (fast)`
* If any chunk used to fill the tcache is misaligned: `malloc(): unaligned fastbin chunk detected 3`

A sketch of how the fast-bin index is derived from the chunk size is shown right below.
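A minimal sketch of that index derivation, mirroring glibc's `fastbin_index` macro (the 64-bit constants and the default 0x80 maximum fast size are assumptions about a typical build):

```c
#include <stdio.h>

#define SIZE_SZ 8  /* assumes a 64-bit build */

/* glibc: #define fastbin_index(sz) ((((unsigned int)(sz)) >> (SIZE_SZ == 8 ? 4 : 3)) - 2) */
static unsigned fastbin_index(size_t sz) {
    return ((unsigned)sz >> (SIZE_SZ == 8 ? 4 : 3)) - 2;
}

int main(void) {
    /* With the default global_max_fast of 0x80 on 64-bit, chunk sizes
       0x20..0x80 map to fastbin indexes 0..6. */
    for (size_t sz = 0x20; sz <= 0x80; sz += 0x10)
        printf("chunk size %#zx -> fastbin index %u\n", sz, fastbin_index(sz));
    return 0;
}
```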
<details>

<summary>_int_malloc fast bin</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6
/*
If the size qualifies as a fastbin, first check corresponding bin.
This code is safe to execute even if av is not yet initialized, so we
can try it without checking, which saves some time on this fast path.
*/

#define REMOVE_FB(fb, victim, pp) \
do \
{ \
victim = pp; \
if (victim == NULL) \
break; \
pp = REVEAL_PTR (victim->fd); \
if (__glibc_unlikely (pp != NULL && misaligned_chunk (pp))) \
malloc_printerr ("malloc(): unaligned fastbin chunk detected"); \
} \
while ((pp = catomic_compare_and_exchange_val_acq (fb, pp, victim)) \
!= victim); \

if ((unsigned long) (nb) <= (unsigned long) (get_max_fast ()))
{
idx = fastbin_index (nb);
mfastbinptr *fb = &fastbin (av, idx);
mchunkptr pp;
victim = *fb;

if (victim != NULL)
{
if (__glibc_unlikely (misaligned_chunk (victim)))
malloc_printerr ("malloc(): unaligned fastbin chunk detected 2");

if (SINGLE_THREAD_P)
*fb = REVEAL_PTR (victim->fd);
else
REMOVE_FB (fb, pp, victim);
if (__glibc_likely (victim != NULL))
{
size_t victim_idx = fastbin_index (chunksize (victim));
if (__builtin_expect (victim_idx != idx, 0))
malloc_printerr ("malloc(): memory corruption (fast)");
check_remalloced_chunk (av, victim, nb);
#if USE_TCACHE
/* While we're here, if we see other chunks of the same size,
stash them in the tcache. */
size_t tc_idx = csize2tidx (nb);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
{
mchunkptr tc_victim;

/* While bin not empty and tcache not full, copy chunks. */
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = *fb) != NULL)
{
if (__glibc_unlikely (misaligned_chunk (tc_victim)))
malloc_printerr ("malloc(): unaligned fastbin chunk detected 3");
if (SINGLE_THREAD_P)
*fb = REVEAL_PTR (tc_victim->fd);
else
{
REMOVE_FB (fb, pp, tc_victim);
if (__glibc_unlikely (tc_victim == NULL))
break;
}
tcache_put (tc_victim, tc_idx);
}
}
#endif
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}
}
}
```
</details>

### Small Bin

As indicated in a comment, small bins hold one size per index, so checking whether a valid chunk is available is super fast; therefore, after the fast bins, the small bins are checked.

The first check is to find out whether the requested size could be inside a small bin. In that case, get the corresponding **index** inside the small bin and see if there is **any available chunk**.

Then a security check is performed, verifying:

* whether `victim->bk->fd == victim`, to check that both chunks are correctly linked.

In that case, the chunk **gets the `inuse` bit**, the doubly linked list is fixed so this chunk disappears from it (as it's going to be used), and the non-main-arena bit is set if needed.

Finally, **fill the tcache index of the requested size** with other chunks inside the small bin (if any). A short sketch of the small-bin index calculation follows the code below.

<details>

<summary>_int_malloc small bin</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L3895C3-L3967C6

/*
If a small request, check regular bin. Since these "smallbins"
hold one size each, no searching within bins is necessary.
(For a large request, we need to wait until unsorted chunks are
processed to find best fit. But for small ones, fits are exact
anyway, so we can check now, which is faster.)
*/

if (in_smallbin_range (nb))
{
idx = smallbin_index (nb);
bin = bin_at (av, idx);

if ((victim = last (bin)) != bin)
{
bck = victim->bk;
if (__glibc_unlikely (bck->fd != victim))
malloc_printerr ("malloc(): smallbin double linked list corrupted");
set_inuse_bit_at_offset (victim, nb);
bin->bk = bck;
bck->fd = bin;

if (av != &main_arena)
set_non_main_arena (victim);
check_malloced_chunk (av, victim, nb);
#if USE_TCACHE
/* While we're here, if we see other chunks of the same size,
stash them in the tcache. */
size_t tc_idx = csize2tidx (nb);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
{
mchunkptr tc_victim;

/* While bin not empty and tcache not full, copy chunks over. */
while (tcache->counts[tc_idx] < mp_.tcache_count
&& (tc_victim = last (bin)) != bin)
{
if (tc_victim != 0)
{
bck = tc_victim->bk;
set_inuse_bit_at_offset (tc_victim, nb);
if (av != &main_arena)
set_non_main_arena (tc_victim);
bin->bk = bck;
bck->fd = bin;

tcache_put (tc_victim, tc_idx);
}
}
}
#endif
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}
}
```
</details>
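For reference, a small sketch of the small-bin index computation (based on glibc's `smallbin_index` macro; the 64-bit constants are assumptions, where small bins hold chunk sizes from 0x20 up to just below `MIN_LARGE_SIZE` = 0x400):

```c
#include <stdio.h>

/* Simplified 64-bit smallbin_index: each small bin holds exactly one size,
   spaced 16 bytes apart (SMALLBIN_WIDTH == MALLOC_ALIGNMENT == 16). */
static unsigned smallbin_index(size_t sz) {
    return (unsigned)(sz >> 4);   /* e.g. 0x20 -> 2, 0x30 -> 3, ..., 0x3f0 -> 63 */
}

int main(void) {
    for (size_t sz = 0x20; sz < 0x400; sz += 0xf0)
        printf("chunk size %#zx -> smallbin index %u\n", sz, smallbin_index(sz));
    return 0;
}
```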
### malloc\_consolidate

If it wasn't a small chunk, it's a large chunk, and in this case **`malloc_consolidate`** is called to avoid memory fragmentation.

<details>

<summary>malloc_consolidate call</summary>
```c
/*
If this is a large request, consolidate fastbins before continuing.
While it might look excessive to kill all fastbins before
even seeing if there is space available, this avoids
fragmentation problems normally associated with fastbins.
Also, in practice, programs tend to have runs of either small or
large requests, but less often mixtures, so consolidation is not
invoked all that often in most programs. And the programs that
it is called frequently in otherwise tend to fragment.
*/

else
{
idx = largebin_index (nb);
if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate (av);
}
```
</details>

The malloc_consolidate function basically removes chunks from the fast bins and places them in the unsorted bin. After the next malloc these chunks will be organized into their respective small/large bins.

Note that when removing these chunks, if they are found adjacent to previous or next chunks that are not in use, they will be **unlinked and merged** before placing the final chunk in the **unsorted** bin.

For each fast bin chunk a couple of security checks are performed:

* If the chunk is unaligned, trigger: `malloc_consolidate(): unaligned fastbin chunk detected`
* If the chunk has a size different from the one it should have because of the index it's in: `malloc_consolidate(): invalid chunk size`
* If the previous chunk is not in use and the previous chunk has a size different from the one indicated by `prev_size`: `corrupted size vs. prev_size in fastbins`

A hedged, user-level experiment that tries to observe this consolidation is sketched right below.
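The following standalone experiment is an illustrative sketch, not an authoritative test: it assumes a recent glibc and disables the tcache through the documented `glibc.malloc.tcache_count` tunable so small frees actually reach the fast bins. Two adjacent fastbin-sized chunks are freed, a large request triggers `malloc_consolidate`, and a follow-up request of roughly their combined size may be served from the coalesced chunk.

```c
// Run as: GLIBC_TUNABLES=glibc.malloc.tcache_count=0 ./consolidate_demo
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *a = malloc(0x68);        // two adjacent fastbin-sized chunks (0x70 each)
    void *b = malloc(0x68);
    void *guard = malloc(0x18);    // keep them away from the top chunk
    (void)guard;

    free(a);                       // with tcache disabled these go to fastbins
    free(b);

    // A large request (not in smallbin range) makes _int_malloc call
    // malloc_consolidate(), which may merge a and b into one free chunk.
    void *big = malloc(0x500);

    // If a and b were coalesced, a request of roughly their combined size
    // can be served from the merged chunk, i.e. land back at a's address.
    void *c = malloc(0xd8);
    printf("a=%p b=%p big=%p c=%p\n", a, b, big, c);
    printf("c == a ? %s (may vary between glibc versions)\n", c == a ? "yes" : "no");
    return 0;
}
```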
<details>

<summary>malloc_consolidate function</summary>
```c
|
||
|
// https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4810C1-L4905C2
|
||
|
|
||
|
static void malloc_consolidate(mstate av)
|
||
|
{
|
||
|
mfastbinptr* fb; /* current fastbin being consolidated */
|
||
|
mfastbinptr* maxfb; /* last fastbin (for loop control) */
|
||
|
mchunkptr p; /* current chunk being consolidated */
|
||
|
mchunkptr nextp; /* next chunk to consolidate */
|
||
|
mchunkptr unsorted_bin; /* bin header */
|
||
|
mchunkptr first_unsorted; /* chunk to link to */
|
||
|
|
||
|
/* These have same use as in free() */
|
||
|
mchunkptr nextchunk;
|
||
|
INTERNAL_SIZE_T size;
|
||
|
INTERNAL_SIZE_T nextsize;
|
||
|
INTERNAL_SIZE_T prevsize;
|
||
|
int nextinuse;
|
||
|
|
||
|
atomic_store_relaxed (&av->have_fastchunks, false);
|
||
|
|
||
|
unsorted_bin = unsorted_chunks(av);
|
||
|
|
||
|
/*
|
||
|
Remove each chunk from fast bin and consolidate it, placing it
|
||
|
then in unsorted bin. Among other reasons for doing this,
|
||
|
placing in unsorted bin avoids needing to calculate actual bins
|
||
|
until malloc is sure that chunks aren't immediately going to be
|
||
|
reused anyway.
|
||
|
*/
|
||
|
|
||
|
maxfb = &fastbin (av, NFASTBINS - 1);
|
||
|
fb = &fastbin (av, 0);
|
||
|
do {
|
||
|
p = atomic_exchange_acquire (fb, NULL);
|
||
|
if (p != 0) {
|
||
|
do {
|
||
|
{
|
||
|
if (__glibc_unlikely (misaligned_chunk (p)))
|
||
|
malloc_printerr ("malloc_consolidate(): "
|
||
|
"unaligned fastbin chunk detected");
|
||
|
|
||
|
unsigned int idx = fastbin_index (chunksize (p));
|
||
|
if ((&fastbin (av, idx)) != fb)
|
||
|
malloc_printerr ("malloc_consolidate(): invalid chunk size");
|
||
|
}
|
||
|
|
||
|
check_inuse_chunk(av, p);
|
||
|
nextp = REVEAL_PTR (p->fd);
|
||
|
|
||
|
/* Slightly streamlined version of consolidation code in free() */
|
||
|
size = chunksize (p);
|
||
|
nextchunk = chunk_at_offset(p, size);
|
||
|
nextsize = chunksize(nextchunk);
|
||
|
|
||
|
if (!prev_inuse(p)) {
|
||
|
prevsize = prev_size (p);
|
||
|
size += prevsize;
|
||
|
p = chunk_at_offset(p, -((long) prevsize));
|
||
|
if (__glibc_unlikely (chunksize(p) != prevsize))
|
||
|
malloc_printerr ("corrupted size vs. prev_size in fastbins");
|
||
|
unlink_chunk (av, p);
|
||
|
}
|
||
|
|
||
|
if (nextchunk != av->top) {
|
||
|
nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
|
||
|
|
||
|
if (!nextinuse) {
|
||
|
size += nextsize;
|
||
|
unlink_chunk (av, nextchunk);
|
||
|
} else
|
||
|
clear_inuse_bit_at_offset(nextchunk, 0);
|
||
|
|
||
|
first_unsorted = unsorted_bin->fd;
|
||
|
unsorted_bin->fd = p;
|
||
|
first_unsorted->bk = p;
|
||
|
|
||
|
if (!in_smallbin_range (size)) {
|
||
|
p->fd_nextsize = NULL;
|
||
|
p->bk_nextsize = NULL;
|
||
|
}
|
||
|
|
||
|
set_head(p, size | PREV_INUSE);
|
||
|
p->bk = unsorted_bin;
|
||
|
p->fd = first_unsorted;
|
||
|
set_foot(p, size);
|
||
|
}
|
||
|
|
||
|
else {
|
||
|
size += nextsize;
|
||
|
set_head(p, size | PREV_INUSE);
|
||
|
av->top = p;
|
||
|
}
|
||
|
|
||
|
} while ( (p = nextp) != 0);
|
||
|
|
||
|
}
|
||
|
} while (fb++ != maxfb);
|
||
|
}
|
||
|
```
</details>

### Unsorted bin

It's time to check the unsorted bin for a potential valid chunk to use.

#### Start

This starts with a big loop that will be traversing the unsorted bin in the `bk` direction until it arrives at the end (the arena struct) with `while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))`

Moreover, some security checks are performed every time a new chunk is considered:

* If the chunk size is weird (too small or too big): `malloc(): invalid size (unsorted)`
* If the next chunk size is weird (too small or too big): `malloc(): invalid next size (unsorted)`
* If the previous size indicated by the next chunk differs from the size of the chunk: `malloc(): mismatching next->prev_size (unsorted)`
* If not `victim->bk->fd == victim` or not `victim->fd == av` (arena): `malloc(): unsorted double linked list corrupted`
  * As we are always checking the last one, its `fd` should always be pointing to the arena struct.
* If the next chunk isn't indicating that the previous is in use: `malloc(): invalid next->prev_inuse (unsorted)`
<details>

<summary><code>_int_malloc</code> unsorted bin start</summary>
```c
/*
Process recently freed or remaindered chunks, taking one only if
it is exact fit, or, if this a small request, the chunk is remainder from
the most recent non-exact fit. Place other traversed chunks in
bins. Note that this step is the only place in any routine where
chunks are placed in bins.

The outer loop here is needed because we might not realize until
near the end of malloc that we should have consolidated, so must
do so and retry. This happens at most once, and only when we would
otherwise need to expand memory to service a "small" request.
*/

#if USE_TCACHE
INTERNAL_SIZE_T tcache_nb = 0;
size_t tc_idx = csize2tidx (nb);
if (tcache != NULL && tc_idx < mp_.tcache_bins)
tcache_nb = nb;
int return_cached = 0;

tcache_unsorted_count = 0;
#endif

for (;; )
{
int iters = 0;
while ((victim = unsorted_chunks (av)->bk) != unsorted_chunks (av))
{
bck = victim->bk;
size = chunksize (victim);
mchunkptr next = chunk_at_offset (victim, size);

if (__glibc_unlikely (size <= CHUNK_HDR_SZ)
|| __glibc_unlikely (size > av->system_mem))
malloc_printerr ("malloc(): invalid size (unsorted)");
if (__glibc_unlikely (chunksize_nomask (next) < CHUNK_HDR_SZ)
|| __glibc_unlikely (chunksize_nomask (next) > av->system_mem))
malloc_printerr ("malloc(): invalid next size (unsorted)");
if (__glibc_unlikely ((prev_size (next) & ~(SIZE_BITS)) != size))
malloc_printerr ("malloc(): mismatching next->prev_size (unsorted)");
if (__glibc_unlikely (bck->fd != victim)
|| __glibc_unlikely (victim->fd != unsorted_chunks (av)))
malloc_printerr ("malloc(): unsorted double linked list corrupted");
if (__glibc_unlikely (prev_inuse (next)))
malloc_printerr ("malloc(): invalid next->prev_inuse (unsorted)");
```
</details>

#### if `in_smallbin_range`

If this is a small request, the chunk is the only one in the unsorted bin, it is the `last_remainder` and it is bigger than the requested size: use it, split it, keep the rest of the chunk space in the unsorted list and update `last_remainder` with it. A user-level demo of this remainder splitting is sketched after this section's code.
<details>

<summary><code>_int_malloc</code> unsorted bin <code>in_smallbin_range</code></summary>
```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4090C11-L4124C14

/*
If a small request, try to use last remainder if it is the
only chunk in unsorted bin. This helps promote locality for
runs of consecutive small requests. This is the only
exception to best-fit, and applies only when there is
no exact fit for a small chunk.
*/

if (in_smallbin_range (nb) &&
bck == unsorted_chunks (av) &&
victim == av->last_remainder &&
(unsigned long) (size) > (unsigned long) (nb + MINSIZE))
{
/* split and reattach remainder */
remainder_size = size - nb;
remainder = chunk_at_offset (victim, nb);
unsorted_chunks (av)->bk = unsorted_chunks (av)->fd = remainder;
av->last_remainder = remainder;
remainder->bk = remainder->fd = unsorted_chunks (av);
if (!in_smallbin_range (remainder_size))
{
remainder->fd_nextsize = NULL;
remainder->bk_nextsize = NULL;
}

set_head (victim, nb | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);
set_foot (remainder, remainder_size);

check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}
```
</details>

If this was successful, return the chunk and it's over; if not, continue executing the function...
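A user-level sketch of the remainder behaviour described above (illustrative only; it assumes a default glibc where requests above the tcache range end up in the unsorted bin): a freed large chunk gets split by a smaller request and the rest becomes `last_remainder`, so consecutive small requests come out adjacent.

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    // 0x4f8 -> chunk of 0x500: too big for the tcache (max 0x410 by default),
    // so free() sends it to the unsorted bin.
    void *big = malloc(0x4f8);
    void *guard = malloc(0x18);    // avoid merging the freed chunk into the top chunk
    (void)guard;
    free(big);

    // The first small request splits the freed chunk; the rest becomes the
    // arena's last_remainder. The next request is served from that remainder,
    // so the two allocations should be adjacent (0x60 apart here).
    char *p1 = malloc(0x58);
    char *p2 = malloc(0x58);
    printf("big=%p p1=%p p2=%p (p2-p1=%#tx)\n", big, (void *)p1, (void *)p2, p2 - p1);
    return 0;
}
```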
#### if the size is equal

Continue removing the chunk from the bin, in case the requested size is exactly the same as the chunk's:

* If the tcache is not filled, add it to the tcache and continue indicating that there is a tcache chunk that could be used
* If the tcache is full, just use it, returning it
<details>

<summary><code>_int_malloc</code> unsorted bin equal size</summary>
```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4126C11-L4157C14

/* remove from unsorted list */
unsorted_chunks (av)->bk = bck;
bck->fd = unsorted_chunks (av);

/* Take now instead of binning if exact fit */

if (size == nb)
{
set_inuse_bit_at_offset (victim, size);
if (av != &main_arena)
set_non_main_arena (victim);
#if USE_TCACHE
/* Fill cache first, return to user only if cache fills.
We may return one of these chunks later. */
if (tcache_nb > 0
&& tcache->counts[tc_idx] < mp_.tcache_count)
{
tcache_put (victim, tc_idx);
return_cached = 1;
continue;
}
else
{
#endif
check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
#if USE_TCACHE
}
#endif
}
```
</details>

If the chunk isn't returned or added to the tcache, continue with the code...

#### place chunk in a bin

Store the checked chunk in the small bin or in the large bin according to the size of the chunk (keeping the large bin properly organized).

There are security checks being performed to make sure that both large bin doubly linked lists aren't corrupted:

* If `fwd->bk_nextsize->fd_nextsize != fwd`: `malloc(): largebin double linked list corrupted (nextsize)`
* If `fwd->bk->fd != fwd`: `malloc(): largebin double linked list corrupted (bk)`
<details>

<summary><code>_int_malloc</code> place chunk in a bin</summary>
```c
|
||
|
/* place chunk in bin */
|
||
|
|
||
|
if (in_smallbin_range (size))
|
||
|
{
|
||
|
victim_index = smallbin_index (size);
|
||
|
bck = bin_at (av, victim_index);
|
||
|
fwd = bck->fd;
|
||
|
}
|
||
|
else
|
||
|
{
|
||
|
victim_index = largebin_index (size);
|
||
|
bck = bin_at (av, victim_index);
|
||
|
fwd = bck->fd;
|
||
|
|
||
|
/* maintain large bins in sorted order */
|
||
|
if (fwd != bck)
|
||
|
{
|
||
|
/* Or with inuse bit to speed comparisons */
|
||
|
size |= PREV_INUSE;
|
||
|
/* if smaller than smallest, bypass loop below */
|
||
|
assert (chunk_main_arena (bck->bk));
|
||
|
if ((unsigned long) (size)
|
||
|
< (unsigned long) chunksize_nomask (bck->bk))
|
||
|
{
|
||
|
fwd = bck;
|
||
|
bck = bck->bk;
|
||
|
|
||
|
victim->fd_nextsize = fwd->fd;
|
||
|
victim->bk_nextsize = fwd->fd->bk_nextsize;
|
||
|
fwd->fd->bk_nextsize = victim->bk_nextsize->fd_nextsize = victim;
|
||
|
}
|
||
|
else
|
||
|
{
|
||
|
assert (chunk_main_arena (fwd));
|
||
|
while ((unsigned long) size < chunksize_nomask (fwd))
|
||
|
{
|
||
|
fwd = fwd->fd_nextsize;
|
||
|
assert (chunk_main_arena (fwd));
|
||
|
}
|
||
|
|
||
|
if ((unsigned long) size
|
||
|
== (unsigned long) chunksize_nomask (fwd))
|
||
|
/* Always insert in the second position. */
|
||
|
fwd = fwd->fd;
|
||
|
else
|
||
|
{
|
||
|
victim->fd_nextsize = fwd;
|
||
|
victim->bk_nextsize = fwd->bk_nextsize;
|
||
|
if (__glibc_unlikely (fwd->bk_nextsize->fd_nextsize != fwd))
|
||
|
malloc_printerr ("malloc(): largebin double linked list corrupted (nextsize)");
|
||
|
fwd->bk_nextsize = victim;
|
||
|
victim->bk_nextsize->fd_nextsize = victim;
|
||
|
}
|
||
|
bck = fwd->bk;
|
||
|
if (bck->fd != fwd)
|
||
|
malloc_printerr ("malloc(): largebin double linked list corrupted (bk)");
|
||
|
}
|
||
|
}
|
||
|
else
|
||
|
victim->fd_nextsize = victim->bk_nextsize = victim;
|
||
|
}
|
||
|
|
||
|
mark_bin (av, victim_index);
|
||
|
victim->bk = bck;
|
||
|
victim->fd = fwd;
|
||
|
fwd->bk = victim;
|
||
|
bck->fd = victim;
|
||
|
```
</details>

#### `_int_malloc` limits

At this point, if some chunk was stored in the tcache that can be used and the limit is reached, just **return a tcache chunk**.

Moreover, if **MAX\_ITERS** is reached, break from the loop and get a chunk in a different way (top chunk).

If `return_cached` was set, just return a chunk from the tcache to avoid larger searches.
<details>

<summary><code>_int_malloc</code> limits</summary>
```c
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4227C1-L4250C7

#if USE_TCACHE
/* If we've processed as many chunks as we're allowed while
filling the cache, return one of the cached ones. */
++tcache_unsorted_count;
if (return_cached
&& mp_.tcache_unsorted_limit > 0
&& tcache_unsorted_count > mp_.tcache_unsorted_limit)
{
return tcache_get (tc_idx);
}
#endif

#define MAX_ITERS 10000
if (++iters >= MAX_ITERS)
break;
}

#if USE_TCACHE
/* If all the small chunks we found ended up cached, return one now. */
if (return_cached)
{
return tcache_get (tc_idx);
}
#endif
```
</details>

If the limits aren't reached, continue with the code...

### Large Bin (by index)

If the request is large (not in the small bin range) and we haven't returned any chunk yet, get the **index** of the requested size in the **large bin**, check that it's **not empty** and that the **biggest chunk in this bin is bigger** than the requested size, and in that case find the **smallest chunk that can be used** for the requested size.

If the remaining space from the chunk finally used can be a new chunk, it is added back to the unsorted bin.

A security check is done when adding the remainder to the unsorted bin:

* `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks`

The large-bin index formula used for this lookup is sketched right below.
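For reference, a sketch of the 64-bit large-bin index formula (copied in spirit from glibc's `largebin_index_64` macro; exact constants may differ across versions, so treat this as an approximation):

```c
#include <stdio.h>

/* Approximation of glibc's largebin_index_64: bins cover progressively
   wider size ranges (spacing 64, 512, 4096, 32768, 262144 bytes). */
static unsigned largebin_index_64(unsigned long sz) {
    return ((sz >> 6) <= 48)  ?  48 + (sz >> 6)  :
           ((sz >> 9) <= 20)  ?  91 + (sz >> 9)  :
           ((sz >> 12) <= 10) ? 110 + (sz >> 12) :
           ((sz >> 15) <= 4)  ? 119 + (sz >> 15) :
           ((sz >> 18) <= 2)  ? 124 + (sz >> 18) : 126;
}

int main(void) {
    unsigned long sizes[] = { 0x400, 0x440, 0xc00, 0x3000, 0x10000, 0x100000 };
    for (unsigned i = 0; i < sizeof(sizes)/sizeof(sizes[0]); i++)
        printf("chunk size %#lx -> large bin index %u\n", sizes[i], largebin_index_64(sizes[i]));
    return 0;
}
```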
<details>

<summary><code>_int_malloc</code> Large bin (by index)</summary>
```c
|
||
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4252C7-L4317C10
|
||
|
|
||
|
/*
|
||
|
If a large request, scan through the chunks of current bin in
|
||
|
sorted order to find smallest that fits. Use the skip list for this.
|
||
|
*/
|
||
|
|
||
|
if (!in_smallbin_range (nb))
|
||
|
{
|
||
|
bin = bin_at (av, idx);
|
||
|
|
||
|
/* skip scan if empty or largest chunk is too small */
|
||
|
if ((victim = first (bin)) != bin
|
||
|
&& (unsigned long) chunksize_nomask (victim)
|
||
|
>= (unsigned long) (nb))
|
||
|
{
|
||
|
victim = victim->bk_nextsize;
|
||
|
while (((unsigned long) (size = chunksize (victim)) <
|
||
|
(unsigned long) (nb)))
|
||
|
victim = victim->bk_nextsize;
|
||
|
|
||
|
/* Avoid removing the first entry for a size so that the skip
|
||
|
list does not have to be rerouted. */
|
||
|
if (victim != last (bin)
|
||
|
&& chunksize_nomask (victim)
|
||
|
== chunksize_nomask (victim->fd))
|
||
|
victim = victim->fd;
|
||
|
|
||
|
remainder_size = size - nb;
|
||
|
unlink_chunk (av, victim);
|
||
|
|
||
|
/* Exhaust */
|
||
|
if (remainder_size < MINSIZE)
|
||
|
{
|
||
|
set_inuse_bit_at_offset (victim, size);
|
||
|
if (av != &main_arena)
|
||
|
set_non_main_arena (victim);
|
||
|
}
|
||
|
/* Split */
|
||
|
else
|
||
|
{
|
||
|
remainder = chunk_at_offset (victim, nb);
|
||
|
/* We cannot assume the unsorted list is empty and therefore
|
||
|
have to perform a complete insert here. */
|
||
|
bck = unsorted_chunks (av);
|
||
|
fwd = bck->fd;
|
||
|
if (__glibc_unlikely (fwd->bk != bck))
|
||
|
malloc_printerr ("malloc(): corrupted unsorted chunks");
|
||
|
remainder->bk = bck;
|
||
|
remainder->fd = fwd;
|
||
|
bck->fd = remainder;
|
||
|
fwd->bk = remainder;
|
||
|
if (!in_smallbin_range (remainder_size))
|
||
|
{
|
||
|
remainder->fd_nextsize = NULL;
|
||
|
remainder->bk_nextsize = NULL;
|
||
|
}
|
||
|
set_head (victim, nb | PREV_INUSE |
|
||
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
||
|
set_head (remainder, remainder_size | PREV_INUSE);
|
||
|
set_foot (remainder, remainder_size);
|
||
|
}
|
||
|
check_malloced_chunk (av, victim, nb);
|
||
|
void *p = chunk2mem (victim);
|
||
|
alloc_perturb (p, bytes);
|
||
|
return p;
|
||
|
}
|
||
|
}
|
||
|
```
</details>

If no suitable chunk is found for this, continue

### Large Bin (next bigger)

If in the exact large bin there wasn't any chunk that could be used, start looping through all the next large bins (starting from the immediately bigger one) until one is found (if any).

The remainder of the split chunk is added to the unsorted bin, `last_remainder` is updated (only when the request was in the small bin range) and the same security check is performed:

* `bck->fd->bk != bck`: `malloc(): corrupted unsorted chunks 2`
<details>

<summary><code>_int_malloc</code> Large bin (next bigger)</summary>
```c
|
||
|
// From https://github.com/bminor/glibc/blob/master/malloc/malloc.c#L4319C7-L4425C10
|
||
|
|
||
|
/*
|
||
|
Search for a chunk by scanning bins, starting with next largest
|
||
|
bin. This search is strictly by best-fit; i.e., the smallest
|
||
|
(with ties going to approximately the least recently used) chunk
|
||
|
that fits is selected.
|
||
|
|
||
|
The bitmap avoids needing to check that most blocks are nonempty.
|
||
|
The particular case of skipping all bins during warm-up phases
|
||
|
when no chunks have been returned yet is faster than it might look.
|
||
|
*/
|
||
|
|
||
|
++idx;
|
||
|
bin = bin_at (av, idx);
|
||
|
block = idx2block (idx);
|
||
|
map = av->binmap[block];
|
||
|
bit = idx2bit (idx);
|
||
|
|
||
|
for (;; )
|
||
|
{
|
||
|
/* Skip rest of block if there are no more set bits in this block. */
|
||
|
if (bit > map || bit == 0)
|
||
|
{
|
||
|
do
|
||
|
{
|
||
|
if (++block >= BINMAPSIZE) /* out of bins */
|
||
|
goto use_top;
|
||
|
}
|
||
|
while ((map = av->binmap[block]) == 0);
|
||
|
|
||
|
bin = bin_at (av, (block << BINMAPSHIFT));
|
||
|
bit = 1;
|
||
|
}
|
||
|
|
||
|
/* Advance to bin with set bit. There must be one. */
|
||
|
while ((bit & map) == 0)
|
||
|
{
|
||
|
bin = next_bin (bin);
|
||
|
bit <<= 1;
|
||
|
assert (bit != 0);
|
||
|
}
|
||
|
|
||
|
/* Inspect the bin. It is likely to be non-empty */
|
||
|
victim = last (bin);
|
||
|
|
||
|
/* If a false alarm (empty bin), clear the bit. */
|
||
|
if (victim == bin)
|
||
|
{
|
||
|
av->binmap[block] = map &= ~bit; /* Write through */
|
||
|
bin = next_bin (bin);
|
||
|
bit <<= 1;
|
||
|
}
|
||
|
|
||
|
else
|
||
|
{
|
||
|
size = chunksize (victim);
|
||
|
|
||
|
/* We know the first chunk in this bin is big enough to use. */
|
||
|
assert ((unsigned long) (size) >= (unsigned long) (nb));
|
||
|
|
||
|
remainder_size = size - nb;
|
||
|
|
||
|
/* unlink */
|
||
|
unlink_chunk (av, victim);
|
||
|
|
||
|
/* Exhaust */
|
||
|
if (remainder_size < MINSIZE)
|
||
|
{
|
||
|
set_inuse_bit_at_offset (victim, size);
|
||
|
if (av != &main_arena)
|
||
|
set_non_main_arena (victim);
|
||
|
}
|
||
|
|
||
|
/* Split */
|
||
|
else
|
||
|
{
|
||
|
remainder = chunk_at_offset (victim, nb);
|
||
|
|
||
|
/* We cannot assume the unsorted list is empty and therefore
|
||
|
have to perform a complete insert here. */
|
||
|
bck = unsorted_chunks (av);
|
||
|
fwd = bck->fd;
|
||
|
if (__glibc_unlikely (fwd->bk != bck))
|
||
|
malloc_printerr ("malloc(): corrupted unsorted chunks 2");
|
||
|
remainder->bk = bck;
|
||
|
remainder->fd = fwd;
|
||
|
bck->fd = remainder;
|
||
|
fwd->bk = remainder;
|
||
|
|
||
|
/* advertise as last remainder */
|
||
|
if (in_smallbin_range (nb))
|
||
|
av->last_remainder = remainder;
|
||
|
if (!in_smallbin_range (remainder_size))
|
||
|
{
|
||
|
remainder->fd_nextsize = NULL;
|
||
|
remainder->bk_nextsize = NULL;
|
||
|
}
|
||
|
set_head (victim, nb | PREV_INUSE |
|
||
|
(av != &main_arena ? NON_MAIN_ARENA : 0));
|
||
|
set_head (remainder, remainder_size | PREV_INUSE);
|
||
|
set_foot (remainder, remainder_size);
|
||
|
}
|
||
|
check_malloced_chunk (av, victim, nb);
|
||
|
void *p = chunk2mem (victim);
|
||
|
alloc_perturb (p, bytes);
|
||
|
return p;
|
||
|
}
|
||
|
}
|
||
|
```
</details>

### Top Chunk

At this point, it's time to get a new chunk from the Top chunk (if big enough).

It starts with a security check making sure that the size of the chunk is not too big (corrupted):

* `chunksize(av->top) > av->system_mem`: `malloc(): corrupted top size`

Then, it will use the top chunk space if it's big enough to create a chunk of the requested size.\
If not, if there are fast chunks, consolidate them and try again.\
Finally, if there is not enough space, use `sysmalloc` to allocate enough size.

A small demo of how consecutive requests are carved off the top chunk is sketched below (after the code).
<details>

<summary><code>_int_malloc</code> Top chunk</summary>
```c
use_top:
/*
If large enough, split off the chunk bordering the end of memory
(held in av->top). Note that this is in accord with the best-fit
search rule. In effect, av->top is treated as larger (and thus
less well fitting) than any other available chunk since it can
be extended to be as large as necessary (up to system
limitations).

We require that av->top always exists (i.e., has size >=
MINSIZE) after initialization, so if it would otherwise be
exhausted by current request, it is replenished. (The main
reason for ensuring it exists is that we may need MINSIZE space
to put in fenceposts in sysmalloc.)
*/

victim = av->top;
size = chunksize (victim);

if (__glibc_unlikely (size > av->system_mem))
malloc_printerr ("malloc(): corrupted top size");

if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
{
remainder_size = size - nb;
remainder = chunk_at_offset (victim, nb);
av->top = remainder;
set_head (victim, nb | PREV_INUSE |
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);

check_malloced_chunk (av, victim, nb);
void *p = chunk2mem (victim);
alloc_perturb (p, bytes);
return p;
}

/* When we are using atomic ops to free fast chunks we can get
here for all block sizes. */
else if (atomic_load_relaxed (&av->have_fastchunks))
{
malloc_consolidate (av);
/* restore original bin index */
if (in_smallbin_range (nb))
idx = smallbin_index (nb);
else
idx = largebin_index (nb);
}

/*
Otherwise, relay to handle system-dependent cases
*/
else
{
void *p = sysmalloc (nb, av);
if (p != NULL)
alloc_perturb (p, bytes);
return p;
}
}
}
```
</details>
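A tiny observation program (illustrative sketch) showing that, when no bin can serve a request, consecutive allocations are usually carved off the top chunk back to back, so their addresses differ exactly by the chunk size:

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    // A size no bin is likely to hold yet: each request should be split off
    // av->top, so the chunks end up contiguous in memory.
    char *a = malloc(0x118);   // chunk size 0x120 on a typical 64-bit glibc
    char *b = malloc(0x118);
    char *c = malloc(0x118);
    printf("a=%p b=%p c=%p\n", (void *)a, (void *)b, (void *)c);
    printf("b-a=%#tx c-b=%#tx (expected 0x120 on a typical 64-bit glibc)\n",
           b - a, c - b);
    return 0;
}
```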
## sysmalloc

### sysmalloc start

If the arena is null or the requested size is too big (and there are mmaps left permitted), use `sysmalloc_mmap` to allocate space and return it. A quick user-level check of the mmap threshold behaviour is sketched below.
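A hedged sketch to see this path from user space (the default `mp_.mmap_threshold` is 128 KiB on common builds and the threshold is dynamic, so results may vary):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    void *small = malloc(0x100);      // served from the heap / arena
    void *huge  = malloc(0x200000);   // 2 MiB >= mmap threshold -> sysmalloc_mmap

    // mmapped chunks live outside the brk heap and their chunk header has
    // the IS_MMAPPED (0x2) bit set in the size field.
    size_t huge_chunk_size = ((size_t *)huge)[-1];
    printf("small=%p huge=%p\n", small, huge);
    printf("huge chunk size field = %#zx (IS_MMAPPED bit: %zu)\n",
           huge_chunk_size, huge_chunk_size & 2);

    free(small);
    free(huge);                       // munmap()s the region
    return 0;
}
```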
<details>

<summary>sysmalloc start</summary>
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2531

/*
sysmalloc handles malloc cases requiring more memory from the system.
On entry, it is assumed that av->top does not have enough
space to service request for nb bytes, thus requiring that av->top
be extended or replaced.
*/

static void *
sysmalloc (INTERNAL_SIZE_T nb, mstate av)
{
mchunkptr old_top; /* incoming value of av->top */
INTERNAL_SIZE_T old_size; /* its size */
char *old_end; /* its end address */

long size; /* arg to first MORECORE or mmap call */
char *brk; /* return value from MORECORE */

long correction; /* arg to 2nd MORECORE call */
char *snd_brk; /* 2nd return val */

INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
INTERNAL_SIZE_T end_misalign; /* partial page left at end of new space */
char *aligned_brk; /* aligned offset into brk */

mchunkptr p; /* the allocated/returned chunk */
mchunkptr remainder; /* remainder from allocation */
unsigned long remainder_size; /* its size */

size_t pagesize = GLRO (dl_pagesize);
bool tried_mmap = false;

/*
If have mmap, and the request size meets the mmap threshold, and
the system supports mmap, and there are few enough currently
allocated mmapped regions, try to directly map this request
rather than expanding top.
*/

if (av == NULL
|| ((unsigned long) (nb) >= (unsigned long) (mp_.mmap_threshold)
&& (mp_.n_mmaps < mp_.n_mmaps_max)))
{
char *mm;
if (mp_.hp_pagesize > 0 && nb >= mp_.hp_pagesize)
{
/* There is no need to issue the THP madvise call if Huge Pages are
used directly. */
mm = sysmalloc_mmap (nb, mp_.hp_pagesize, mp_.hp_flags, av);
if (mm != MAP_FAILED)
return mm;
}
mm = sysmalloc_mmap (nb, pagesize, 0, av);
if (mm != MAP_FAILED)
return mm;
tried_mmap = true;
}

/* There are no usable arenas and mmap also failed. */
if (av == NULL)
return 0;
```
</details>

### sysmalloc checks

It starts by getting old top chunk information and checking that some of the following conditions are true:

* The old heap size is 0 (new heap)
* The size of the previous heap is greater than MINSIZE and the old Top is in use
* The heap is aligned to the page size (0x1000, so the lower 12 bits need to be 0)

Then it also checks that:

* The old size doesn't have enough space to create a chunk of the requested size
<details>

<summary>sysmalloc checks</summary>
```c
/* Record incoming configuration of top */

old_top = av->top;
old_size = chunksize (old_top);
old_end = (char *) (chunk_at_offset (old_top, old_size));

brk = snd_brk = (char *) (MORECORE_FAILURE);

/*
If not the first time through, we require old_size to be
at least MINSIZE and to have prev_inuse set.
*/

assert ((old_top == initial_top (av) && old_size == 0) ||
((unsigned long) (old_size) >= MINSIZE &&
prev_inuse (old_top) &&
((unsigned long) old_end & (pagesize - 1)) == 0));

/* Precondition: not enough current space to satisfy nb request */
assert ((unsigned long) (old_size) < (unsigned long) (nb + MINSIZE));
```
</details>

### sysmalloc not main arena

It will first try to **extend** the previous heap for this heap. If that's not possible, it will try to **allocate a new heap** and update the pointers to be able to use it.\
Finally, if that doesn't work, try calling **`sysmalloc_mmap`**.

<details>

<summary>sysmalloc not main arena</summary>
```c
|
||
|
if (av != &main_arena)
|
||
|
{
|
||
|
heap_info *old_heap, *heap;
|
||
|
size_t old_heap_size;
|
||
|
|
||
|
/* First try to extend the current heap. */
|
||
|
old_heap = heap_for_ptr (old_top);
|
||
|
old_heap_size = old_heap->size;
|
||
|
if ((long) (MINSIZE + nb - old_size) > 0
|
||
|
&& grow_heap (old_heap, MINSIZE + nb - old_size) == 0)
|
||
|
{
|
||
|
av->system_mem += old_heap->size - old_heap_size;
|
||
|
set_head (old_top, (((char *) old_heap + old_heap->size) - (char *) old_top)
|
||
|
| PREV_INUSE);
|
||
|
}
|
||
|
else if ((heap = new_heap (nb + (MINSIZE + sizeof (*heap)), mp_.top_pad)))
|
||
|
{
|
||
|
/* Use a newly allocated heap. */
|
||
|
heap->ar_ptr = av;
|
||
|
heap->prev = old_heap;
|
||
|
av->system_mem += heap->size;
|
||
|
/* Set up the new top. */
|
||
|
top (av) = chunk_at_offset (heap, sizeof (*heap));
|
||
|
set_head (top (av), (heap->size - sizeof (*heap)) | PREV_INUSE);
|
||
|
|
||
|
/* Setup fencepost and free the old top chunk with a multiple of
|
||
|
MALLOC_ALIGNMENT in size. */
|
||
|
/* The fencepost takes at least MINSIZE bytes, because it might
|
||
|
become the top chunk again later. Note that a footer is set
|
||
|
up, too, although the chunk is marked in use. */
|
||
|
old_size = (old_size - MINSIZE) & ~MALLOC_ALIGN_MASK;
|
||
|
set_head (chunk_at_offset (old_top, old_size + CHUNK_HDR_SZ),
|
||
|
0 | PREV_INUSE);
|
||
|
if (old_size >= MINSIZE)
|
||
|
{
|
||
|
set_head (chunk_at_offset (old_top, old_size),
|
||
|
CHUNK_HDR_SZ | PREV_INUSE);
|
||
|
set_foot (chunk_at_offset (old_top, old_size), CHUNK_HDR_SZ);
|
||
|
set_head (old_top, old_size | PREV_INUSE | NON_MAIN_ARENA);
|
||
|
_int_free (av, old_top, 1);
|
||
|
}
|
||
|
else
|
||
|
{
|
||
|
set_head (old_top, (old_size + CHUNK_HDR_SZ) | PREV_INUSE);
|
||
|
set_foot (old_top, (old_size + CHUNK_HDR_SZ));
|
||
|
}
|
||
|
}
|
||
|
else if (!tried_mmap)
|
||
|
{
|
||
|
/* We can at least try to use to mmap memory. If new_heap fails
|
||
|
it is unlikely that trying to allocate huge pages will
|
||
|
succeed. */
|
||
|
char *mm = sysmalloc_mmap (nb, pagesize, 0, av);
|
||
|
if (mm != MAP_FAILED)
|
||
|
return mm;
|
||
|
}
|
||
|
}
|
||
|
```
</details>

### sysmalloc main arena

It starts by calculating the amount of memory needed. It will start by requesting contiguous memory so in this case it will be possible to use the old unused memory. Also some alignment operations are performed. A sketch of how the heap break grows via `sbrk` is shown right after this section's code.
```c
|
||
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2665C1-L2713C10
|
||
|
|
||
|
else /* av == main_arena */
|
||
|
|
||
|
|
||
|
{ /* Request enough space for nb + pad + overhead */
|
||
|
size = nb + mp_.top_pad + MINSIZE;
|
||
|
|
||
|
/*
|
||
|
If contiguous, we can subtract out existing space that we hope to
|
||
|
combine with new space. We add it back later only if
|
||
|
we don't actually get contiguous space.
|
||
|
*/
|
||
|
|
||
|
if (contiguous (av))
|
||
|
size -= old_size;
|
||
|
|
||
|
/*
|
||
|
Round to a multiple of page size or huge page size.
|
||
|
If MORECORE is not contiguous, this ensures that we only call it
|
||
|
with whole-page arguments. And if MORECORE is contiguous and
|
||
|
this is not first time through, this preserves page-alignment of
|
||
|
previous calls. Otherwise, we correct to page-align below.
|
||
|
*/
|
||
|
|
||
|
#ifdef MADV_HUGEPAGE
|
||
|
/* Defined in brk.c. */
|
||
|
extern void *__curbrk;
|
||
|
if (__glibc_unlikely (mp_.thp_pagesize != 0))
|
||
|
{
|
||
|
uintptr_t top = ALIGN_UP ((uintptr_t) __curbrk + size,
|
||
|
mp_.thp_pagesize);
|
||
|
size = top - (uintptr_t) __curbrk;
|
||
|
}
|
||
|
else
|
||
|
#endif
|
||
|
size = ALIGN_UP (size, GLRO(dl_pagesize));
|
||
|
|
||
|
/*
|
||
|
Don't try to call MORECORE if argument is so big as to appear
|
||
|
negative. Note that since mmap takes size_t arg, it may succeed
|
||
|
below even if we cannot call MORECORE.
|
||
|
*/
|
||
|
|
||
|
if (size > 0)
|
||
|
{
|
||
|
brk = (char *) (MORECORE (size));
|
||
|
if (brk != (char *) (MORECORE_FAILURE))
|
||
|
madvise_thp (brk, size);
|
||
|
LIBC_PROBE (memory_sbrk_more, 2, brk, size);
|
||
|
}
|
||
|
```
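To watch `MORECORE`/`sbrk` extending the main heap as described above, a hedged user-space sketch (it assumes the classic brk-based main arena and a request below the mmap threshold):

```c
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void) {
    void *brk_before = sbrk(0);

    // A request below the mmap threshold that the current top chunk cannot
    // satisfy forces sysmalloc() to grow the program break with MORECORE.
    void *p = malloc(0x10000);   // 64 KiB

    void *brk_after = sbrk(0);
    printf("brk before = %p\n", brk_before);
    printf("brk after  = %p (grew by %#tx bytes)\n",
           brk_after, (char *)brk_after - (char *)brk_before);
    printf("chunk      = %p\n", p);
    return 0;
}
```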
### sysmalloc main arena previous error 1

If the previous returned `MORECORE_FAILURE`, try again to allocate memory using `sysmalloc_mmap_fallback`
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2715C7-L2740C10

if (brk == (char *) (MORECORE_FAILURE))
{
/*
If have mmap, try using it as a backup when MORECORE fails or
cannot be used. This is worth doing on systems that have "holes" in
address space, so sbrk cannot extend to give contiguous space, but
space is available elsewhere. Note that we ignore mmap max count
and threshold limits, since the space will not be used as a
segregated mmap region.
*/

char *mbrk = MAP_FAILED;
if (mp_.hp_pagesize > 0)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size,
mp_.hp_pagesize, mp_.hp_pagesize,
mp_.hp_flags, av);
if (mbrk == MAP_FAILED)
mbrk = sysmalloc_mmap_fallback (&size, nb, old_size, MMAP_AS_MORECORE_SIZE,
pagesize, 0, av);
if (mbrk != MAP_FAILED)
{
/* We do not need, and cannot use, another sbrk call to find end */
brk = mbrk;
snd_brk = brk + size;
}
}
```
### sysmalloc main arena continue

If the previous didn't return `MORECORE_FAILURE`, if it worked, create some alignments:

<details>

<summary>sysmalloc main arena continue</summary>
```c
|
||
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2742
|
||
|
|
||
|
if (brk != (char *) (MORECORE_FAILURE))
|
||
|
{
|
||
|
if (mp_.sbrk_base == 0)
|
||
|
mp_.sbrk_base = brk;
|
||
|
av->system_mem += size;
|
||
|
|
||
|
/*
|
||
|
If MORECORE extends previous space, we can likewise extend top size.
|
||
|
*/
|
||
|
|
||
|
if (brk == old_end && snd_brk == (char *) (MORECORE_FAILURE))
|
||
|
set_head (old_top, (size + old_size) | PREV_INUSE);
|
||
|
|
||
|
else if (contiguous (av) && old_size && brk < old_end)
|
||
|
/* Oops! Someone else killed our space.. Can't touch anything. */
|
||
|
malloc_printerr ("break adjusted to free malloc space");
|
||
|
|
||
|
/*
|
||
|
Otherwise, make adjustments:
|
||
|
|
||
|
* If the first time through or noncontiguous, we need to call sbrk
|
||
|
just to find out where the end of memory lies.
|
||
|
|
||
|
* We need to ensure that all returned chunks from malloc will meet
|
||
|
MALLOC_ALIGNMENT
|
||
|
|
||
|
* If there was an intervening foreign sbrk, we need to adjust sbrk
|
||
|
request size to account for fact that we will not be able to
|
||
|
combine new space with existing space in old_top.
|
||
|
|
||
|
* Almost all systems internally allocate whole pages at a time, in
|
||
|
which case we might as well use the whole last page of request.
|
||
|
So we allocate enough more memory to hit a page boundary now,
|
||
|
which in turn causes future contiguous calls to page-align.
|
||
|
*/
|
||
|
|
||
|
else
|
||
|
{
|
||
|
front_misalign = 0;
|
||
|
end_misalign = 0;
|
||
|
correction = 0;
|
||
|
aligned_brk = brk;
|
||
|
|
||
|
/* handle contiguous cases */
|
||
|
if (contiguous (av))
|
||
|
{
|
||
|
/* Count foreign sbrk as system_mem. */
|
||
|
if (old_size)
|
||
|
av->system_mem += brk - old_end;
|
||
|
|
||
|
/* Guarantee alignment of first new chunk made from this space */
|
||
|
|
||
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
|
||
|
if (front_misalign > 0)
|
||
|
{
|
||
|
/*
|
||
|
Skip over some bytes to arrive at an aligned position.
|
||
|
We don't need to specially mark these wasted front bytes.
|
||
|
They will never be accessed anyway because
|
||
|
prev_inuse of av->top (and any chunk created from its start)
|
||
|
is always true after initialization.
|
||
|
*/
|
||
|
|
||
|
correction = MALLOC_ALIGNMENT - front_misalign;
|
||
|
aligned_brk += correction;
|
||
|
}
|
||
|
|
||
|
/*
|
||
|
If this isn't adjacent to existing space, then we will not
|
||
|
be able to merge with old_top space, so must add to 2nd request.
|
||
|
*/
|
||
|
|
||
|
correction += old_size;
|
||
|
|
||
|
/* Extend the end address to hit a page boundary */
|
||
|
end_misalign = (INTERNAL_SIZE_T) (brk + size + correction);
|
||
|
correction += (ALIGN_UP (end_misalign, pagesize)) - end_misalign;
|
||
|
|
||
|
assert (correction >= 0);
|
||
|
snd_brk = (char *) (MORECORE (correction));
|
||
|
|
||
|
/*
|
||
|
If can't allocate correction, try to at least find out current
|
||
|
brk. It might be enough to proceed without failing.
|
||
|
|
||
|
Note that if second sbrk did NOT fail, we assume that space
|
||
|
is contiguous with first sbrk. This is a safe assumption unless
|
||
|
program is multithreaded but doesn't use locks and a foreign sbrk
|
||
|
occurred between our first and second calls.
|
||
|
*/
|
||
|
|
||
|
if (snd_brk == (char *) (MORECORE_FAILURE))
|
||
|
{
|
||
|
correction = 0;
|
||
|
snd_brk = (char *) (MORECORE (0));
|
||
|
}
|
||
|
else
|
||
|
madvise_thp (snd_brk, correction);
|
||
|
}
|
||
|
|
||
|
/* handle non-contiguous cases */
|
||
|
else
|
||
|
{
|
||
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
||
|
/* MORECORE/mmap must correctly align */
|
||
|
assert (((unsigned long) chunk2mem (brk) & MALLOC_ALIGN_MASK) == 0);
|
||
|
else
|
||
|
{
|
||
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (brk) & MALLOC_ALIGN_MASK;
|
||
|
if (front_misalign > 0)
|
||
|
{
|
||
|
/*
|
||
|
Skip over some bytes to arrive at an aligned position.
|
||
|
We don't need to specially mark these wasted front bytes.
|
||
|
They will never be accessed anyway because
|
||
|
prev_inuse of av->top (and any chunk created from its start)
|
||
|
is always true after initialization.
|
||
|
*/
|
||
|
|
||
|
aligned_brk += MALLOC_ALIGNMENT - front_misalign;
|
||
|
}
|
||
|
}
|
||
|
|
||
|
/* Find out current end of memory */
|
||
|
if (snd_brk == (char *) (MORECORE_FAILURE))
|
||
|
{
|
||
|
snd_brk = (char *) (MORECORE (0));
|
||
|
}
|
||
|
}
|
||
|
|
||
|
/* Adjust top based on results of second sbrk */
|
||
|
if (snd_brk != (char *) (MORECORE_FAILURE))
|
||
|
{
|
||
|
av->top = (mchunkptr) aligned_brk;
|
||
|
set_head (av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
|
||
|
av->system_mem += correction;
|
||
|
|
||
|
/*
|
||
|
If not the first time through, we either have a
|
||
|
gap due to foreign sbrk or a non-contiguous region. Insert a
|
||
|
double fencepost at old_top to prevent consolidation with space
|
||
|
we don't own. These fenceposts are artificial chunks that are
|
||
|
marked as inuse and are in any case too small to use. We need
|
||
|
two to make sizes and alignments work out.
|
||
|
*/
|
||
|
|
||
|
if (old_size != 0)
|
||
|
{
|
||
|
/*
|
||
|
Shrink old_top to insert fenceposts, keeping size a
|
||
|
multiple of MALLOC_ALIGNMENT. We know there is at least
|
||
|
enough space in old_top to do this.
|
||
|
*/
|
||
|
old_size = (old_size - 2 * CHUNK_HDR_SZ) & ~MALLOC_ALIGN_MASK;
|
||
|
set_head (old_top, old_size | PREV_INUSE);
|
||
|
|
||
|
/*
|
||
|
Note that the following assignments completely overwrite
|
||
|
old_top when old_size was previously MINSIZE. This is
|
||
|
intentional. We need the fencepost, even if old_top otherwise gets
|
||
|
lost.
|
||
|
*/
|
||
|
set_head (chunk_at_offset (old_top, old_size),
|
||
|
CHUNK_HDR_SZ | PREV_INUSE);
|
||
|
set_head (chunk_at_offset (old_top,
|
||
|
old_size + CHUNK_HDR_SZ),
|
||
|
CHUNK_HDR_SZ | PREV_INUSE);
|
||
|
|
||
|
/* If possible, release the rest. */
|
||
|
if (old_size >= MINSIZE)
|
||
|
{
|
||
|
_int_free (av, old_top, 1);
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
}
|
||
|
} /* if (av != &main_arena) */
|
||
|
```
</details>

### sysmalloc finale

Finish the allocation updating the arena information
```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2921C3-L2943C12

if ((unsigned long) av->system_mem > (unsigned long) (av->max_system_mem))
av->max_system_mem = av->system_mem;
check_malloc_state (av);

/* finally, do the allocation */
p = av->top;
size = chunksize (p);

/* check that one of the above allocation paths succeeded */
if ((unsigned long) (size) >= (unsigned long) (nb + MINSIZE))
{
remainder_size = size - nb;
remainder = chunk_at_offset (p, nb);
av->top = remainder;
set_head (p, nb | PREV_INUSE | (av != &main_arena ? NON_MAIN_ARENA : 0));
set_head (remainder, remainder_size | PREV_INUSE);
check_malloced_chunk (av, p, nb);
return chunk2mem (p);
}

/* catch all failure paths */
__set_errno (ENOMEM);
return 0;
```
## sysmalloc\_mmap

<details>

<summary>sysmalloc_mmap code</summary>
```c
|
||
|
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L2392C1-L2481C2
|
||
|
|
||
|
static void *
|
||
|
sysmalloc_mmap (INTERNAL_SIZE_T nb, size_t pagesize, int extra_flags, mstate av)
|
||
|
{
|
||
|
long int size;
|
||
|
|
||
|
/*
|
||
|
Round up size to nearest page. For mmapped chunks, the overhead is one
|
||
|
SIZE_SZ unit larger than for normal chunks, because there is no
|
||
|
following chunk whose prev_size field could be used.
|
||
|
|
||
|
See the front_misalign handling below, for glibc there is no need for
|
||
|
further alignments unless we have have high alignment.
|
||
|
*/
|
||
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
||
|
size = ALIGN_UP (nb + SIZE_SZ, pagesize);
|
||
|
else
|
||
|
size = ALIGN_UP (nb + SIZE_SZ + MALLOC_ALIGN_MASK, pagesize);
|
||
|
|
||
|
/* Don't try if size wraps around 0. */
|
||
|
if ((unsigned long) (size) <= (unsigned long) (nb))
|
||
|
return MAP_FAILED;
|
||
|
|
||
|
char *mm = (char *) MMAP (0, size,
|
||
|
mtag_mmap_flags | PROT_READ | PROT_WRITE,
|
||
|
extra_flags);
|
||
|
if (mm == MAP_FAILED)
|
||
|
return mm;
|
||
|
|
||
|
#ifdef MAP_HUGETLB
|
||
|
if (!(extra_flags & MAP_HUGETLB))
|
||
|
madvise_thp (mm, size);
|
||
|
#endif
|
||
|
|
||
|
__set_vma_name (mm, size, " glibc: malloc");
|
||
|
|
||
|
/*
|
||
|
The offset to the start of the mmapped region is stored in the prev_size
|
||
|
field of the chunk. This allows us to adjust returned start address to
|
||
|
meet alignment requirements here and in memalign(), and still be able to
|
||
|
compute proper address argument for later munmap in free() and realloc().
|
||
|
*/
|
||
|
|
||
|
INTERNAL_SIZE_T front_misalign; /* unusable bytes at front of new space */
|
||
|
|
||
|
if (MALLOC_ALIGNMENT == CHUNK_HDR_SZ)
|
||
|
{
|
||
|
/* For glibc, chunk2mem increases the address by CHUNK_HDR_SZ and
|
||
|
MALLOC_ALIGN_MASK is CHUNK_HDR_SZ-1. Each mmap'ed area is page
|
||
|
aligned and therefore definitely MALLOC_ALIGN_MASK-aligned. */
|
||
|
assert (((INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK) == 0);
|
||
|
front_misalign = 0;
|
||
|
}
|
||
|
else
|
||
|
front_misalign = (INTERNAL_SIZE_T) chunk2mem (mm) & MALLOC_ALIGN_MASK;
|
||
|
|
||
|
mchunkptr p; /* the allocated/returned chunk */
|
||
|
|
||
|
if (front_misalign > 0)
|
||
|
{
|
||
|
ptrdiff_t correction = MALLOC_ALIGNMENT - front_misalign;
|
||
|
p = (mchunkptr) (mm + correction);
|
||
|
set_prev_size (p, correction);
|
||
|
set_head (p, (size - correction) | IS_MMAPPED);
|
||
|
}
|
||
|
else
|
||
|
{
|
||
|
p = (mchunkptr) mm;
|
||
|
set_prev_size (p, 0);
|
||
|
set_head (p, size | IS_MMAPPED);
|
||
|
}
|
||
|
|
||
|
/* update statistics */
|
||
|
int new = atomic_fetch_add_relaxed (&mp_.n_mmaps, 1) + 1;
|
||
|
atomic_max (&mp_.max_n_mmaps, new);
|
||
|
|
||
|
unsigned long sum;
|
||
|
sum = atomic_fetch_add_relaxed (&mp_.mmapped_mem, size) + size;
|
||
|
atomic_max (&mp_.max_mmapped_mem, sum);
|
||
|
|
||
|
check_chunk (av, p);
|
||
|
|
||
|
return chunk2mem (p);
|
||
|
}
|
||
|
```
</details>

{% hint style="success" %}
Learn & practice AWS Hacking: <img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow us on** **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) **and** [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) **github repos.**

</details>
{% endhint %}