# free

{% hint style="success" %}
Learn & practice AWS Hacking: <img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow us** on **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) and [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) github repos.

</details>
{% endhint %}

## Free Order Summary <a href="#libc_free" id="libc_free"></a>

(No checks are explained in this summary and some cases have been omitted for brevity; a minimal demo of the three paths follows the list.)

1. If the address is null, do nothing
2. If the chunk was mmapped, munmap it and finish
3. Call `_int_free`:
   1. If possible, add the chunk to the tcache
   2. If possible, add the chunk to the fast bin
   3. Call `_int_free_merge_chunk` to consolidate the chunk if needed and add it to the unsorted list
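
A minimal user-level demo of those three paths (a sketch assuming a glibc-based Linux system with the default 128 KiB mmap threshold; the sizes are only illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    /* 1. free(NULL) is defined to do nothing. */
    free(NULL);

    /* 2. Requests above the mmap threshold are served by mmap(), so freeing
          them ends in munmap_chunk() -> munmap() (observable with
          `strace -e trace=mmap,munmap ./a.out`). */
    void *big = malloc(1024 * 1024);
    free(big);

    /* 3. Small chunks go through _int_free(): tcache, fast bin or
          consolidation into the unsorted bin. */
    void *small = malloc(0x40);
    free(small);

    puts("done");
    return 0;
}
```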

## \_\_libc\_free <a href="#libc_free" id="libc_free"></a>

`free` calls `__libc_free`.

* If the passed address is Null (0), don't do anything.
* Check the pointer tag.
* If the chunk is `mmaped`, `munmap` it and that's it.
* If not, re-apply the memory-tagging colour (`tag_region`) and call `_int_free` on it.

<details>

<summary>__libc_free code</summary>

```c
void
__libc_free (void *mem)
{
  mstate ar_ptr;
  mchunkptr p;                          /* chunk corresponding to mem */

  if (mem == 0)                              /* free(0) has no effect */
    return;

  /* Quickly check that the freed pointer matches the tag for the memory.
     This gives a useful double-free detection.  */
  if (__glibc_unlikely (mtag_enabled))
    *(volatile char *)mem;

  int err = errno;

  p = mem2chunk (mem);

  if (chunk_is_mmapped (p))                       /* release mmapped memory. */
    {
      /* See if the dynamic brk/mmap threshold needs adjusting.
         Dumped fake mmapped chunks do not affect the threshold.  */
      if (!mp_.no_dyn_threshold
          && chunksize_nomask (p) > mp_.mmap_threshold
          && chunksize_nomask (p) <= DEFAULT_MMAP_THRESHOLD_MAX)
        {
          mp_.mmap_threshold = chunksize (p);
          mp_.trim_threshold = 2 * mp_.mmap_threshold;
          LIBC_PROBE (memory_mallopt_free_dyn_thresholds, 2,
                      mp_.mmap_threshold, mp_.trim_threshold);
        }
      munmap_chunk (p);
    }
  else
    {
      MAYBE_INIT_TCACHE ();

      /* Mark the chunk as belonging to the library again.  */
      (void)tag_region (chunk2mem (p), memsize (p));

      ar_ptr = arena_for_chunk (p);
      _int_free (ar_ptr, p, 0);
    }

  __set_errno (err);
}
libc_hidden_def (__libc_free)
```

</details>
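
One detail visible in the code above: `__libc_free` saves `errno` on entry and restores it before returning, so even the `munmap()` path does not clobber it. A quick check (the marker value 123 is arbitrary):

```c
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *p = malloc(1024 * 1024);  /* large enough to be mmapped */
    errno = 123;                    /* arbitrary marker value */
    free(p);                        /* internally calls munmap_chunk() */
    printf("errno after free: %d\n", errno);  /* glibc restores it: prints 123 */
    return 0;
}
```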

## \_int\_free <a href="#int_free" id="int_free"></a>

### \_int\_free start <a href="#int_free" id="int_free"></a>

It starts with some checks to make sure that:

* the **pointer** is **aligned**, otherwise it triggers the error `free(): invalid pointer`
* the **size** isn't smaller than the minimum and the **size** is also **aligned**, otherwise it triggers the error `free(): invalid size`
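
The first of those checks can be triggered from user code simply by freeing a misaligned pointer (a sketch; the exact abort message can vary between glibc versions):

```c
#include <stdlib.h>

int main(void)
{
    char *p = malloc(0x20);
    /* p + 1 is not MALLOC_ALIGNMENT-aligned, so misaligned_chunk() fires and
       glibc typically aborts with: free(): invalid pointer */
    free(p + 1);
    return 0;
}
```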

<details>

<summary>_int_free start</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4493C1-L4513C28

#define aligned_OK(m)  (((unsigned long) (m) & MALLOC_ALIGN_MASK) == 0)

static void
_int_free (mstate av, mchunkptr p, int have_lock)
{
  INTERNAL_SIZE_T size;        /* its size */
  mfastbinptr *fb;             /* associated fastbin */

  size = chunksize (p);

  /* Little security check which won't hurt performance: the
     allocator never wraps around at the end of the address space.
     Therefore we can exclude some size values which might appear
     here by accident or by "design" from some intruder.  */
  if (__builtin_expect ((uintptr_t) p > (uintptr_t) -size, 0)
      || __builtin_expect (misaligned_chunk (p), 0))
    malloc_printerr ("free(): invalid pointer");
  /* We know that each chunk is at least MINSIZE bytes in size or a
     multiple of MALLOC_ALIGNMENT.  */
  if (__glibc_unlikely (size < MINSIZE || !aligned_OK (size)))
    malloc_printerr ("free(): invalid size");

  check_inuse_chunk(av, p);
```

</details>

### \_int\_free tcache <a href="#int_free" id="int_free"></a>

It will first try to place this chunk in the related tcache. However, some checks are performed first. It will loop through all the chunks of the tcache in the same index as the freed chunk and:

- If there are more entries than `mp_.tcache_count`: `free(): too many chunks detected in tcache`
- If an entry is not aligned: `free(): unaligned chunk detected in tcache 2`
- If the freed chunk was already freed and is present as a chunk in the tcache: `free(): double free detected in tcache 2`

If all goes well, the chunk is added to the tcache and the function returns.
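
The effect is easy to observe from user space: a freed small chunk is handed straight back by the next `malloc` of the same size class (a sketch assuming glibc >= 2.26 with tcache enabled):

```c
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    void *a = malloc(0x40);
    free(a);                 /* tcache_put(): stored in the tcache bin for this size */

    void *b = malloc(0x40);  /* served back from the same tcache bin */
    printf("a=%p b=%p\n", a, b);
    assert(a == b);          /* typically the same pointer */

    /* Freeing `a` twice instead of reallocating would hit the
       e->key == tcache_key path above and abort with:
       free(): double free detected in tcache 2 */
    free(b);
    return 0;
}
```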

<details>

<summary>_int_free tcache</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4515C1-L4554C7
#if USE_TCACHE
  {
    size_t tc_idx = csize2tidx (size);
    if (tcache != NULL && tc_idx < mp_.tcache_bins)
      {
        /* Check to see if it's already in the tcache.  */
        tcache_entry *e = (tcache_entry *) chunk2mem (p);

        /* This test succeeds on double free.  However, we don't 100%
           trust it (it also matches random payload data at a 1 in
           2^<size_t> chance), so verify it's not an unlikely
           coincidence before aborting.  */
        if (__glibc_unlikely (e->key == tcache_key))
          {
            tcache_entry *tmp;
            size_t cnt = 0;
            LIBC_PROBE (memory_tcache_double_free, 2, e, tc_idx);
            for (tmp = tcache->entries[tc_idx];
                 tmp;
                 tmp = REVEAL_PTR (tmp->next), ++cnt)
              {
                if (cnt >= mp_.tcache_count)
                  malloc_printerr ("free(): too many chunks detected in tcache");
                if (__glibc_unlikely (!aligned_OK (tmp)))
                  malloc_printerr ("free(): unaligned chunk detected in tcache 2");
                if (tmp == e)
                  malloc_printerr ("free(): double free detected in tcache 2");
                /* If we get here, it was a coincidence.  We've wasted a
                   few cycles, but don't abort.  */
              }
          }

        if (tcache->counts[tc_idx] < mp_.tcache_count)
          {
            tcache_put (p, tc_idx);
            return;
          }
      }
  }
#endif
```

</details>

### \_int\_free fast bin <a href="#int_free" id="int_free"></a>

Start by checking that the size is suitable for a fast bin and, if `TRIM_FASTBINS` is set, that the chunk doesn't border the top chunk.

Then, add the freed chunk to the top of the fast bin while performing some checks:

- If the size of the next chunk is invalid (too small or too big), trigger: `free(): invalid next size (fast)`
- If the chunk being added was already at the top of the fast bin: `double free or corruption (fasttop)`
- If the chunk at the top of the bin has a different size than the chunk we are adding: `invalid fastbin entry (free)`
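
The `fasttop` check can be hit by double-freeing a chunk once its tcache bin is full (a sketch assuming the default `mp_.tcache_count` of 7 and default fast bin sizes):

```c
#include <stdlib.h>

int main(void)
{
    void *fill[7];
    void *p = malloc(0x40);

    /* Fill the tcache bin for this size (7 entries by default) so further
       frees of this size fall through to the fast bin code. */
    for (int i = 0; i < 7; i++) fill[i] = malloc(0x40);
    for (int i = 0; i < 7; i++) free(fill[i]);

    free(p);   /* goes into the fast bin (the tcache bin is full) */
    free(p);   /* p is already at the top of that fast bin: typically aborts
                  with: double free or corruption (fasttop) */
    return 0;
}
```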

<details>

<summary>_int_free Fast Bin</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4556C2-L4631C4

  /*
    If eligible, place chunk on a fastbin so it can be found
    and used quickly in malloc.
  */

  if ((unsigned long)(size) <= (unsigned long)(get_max_fast ())

#if TRIM_FASTBINS
      /*
        If TRIM_FASTBINS set, don't place chunks
        bordering top into fastbins
      */
      && (chunk_at_offset(p, size) != av->top)
#endif
      ) {

    if (__builtin_expect (chunksize_nomask (chunk_at_offset (p, size))
                          <= CHUNK_HDR_SZ, 0)
        || __builtin_expect (chunksize (chunk_at_offset (p, size))
                             >= av->system_mem, 0))
      {
        bool fail = true;
        /* We might not have a lock at this point and concurrent modifications
           of system_mem might result in a false positive.  Redo the test after
           getting the lock.  */
        if (!have_lock)
          {
            __libc_lock_lock (av->mutex);
            fail = (chunksize_nomask (chunk_at_offset (p, size)) <= CHUNK_HDR_SZ
                    || chunksize (chunk_at_offset (p, size)) >= av->system_mem);
            __libc_lock_unlock (av->mutex);
          }

        if (fail)
          malloc_printerr ("free(): invalid next size (fast)");
      }

    free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

    atomic_store_relaxed (&av->have_fastchunks, true);
    unsigned int idx = fastbin_index(size);
    fb = &fastbin (av, idx);

    /* Atomically link P to its fastbin: P->FD = *FB; *FB = P;  */
    mchunkptr old = *fb, old2;

    if (SINGLE_THREAD_P)
      {
        /* Check that the top of the bin is not the record we are going to
           add (i.e., double free).  */
        if (__builtin_expect (old == p, 0))
          malloc_printerr ("double free or corruption (fasttop)");
        p->fd = PROTECT_PTR (&p->fd, old);
        *fb = p;
      }
    else
      do
        {
          /* Check that the top of the bin is not the record we are going to
             add (i.e., double free).  */
          if (__builtin_expect (old == p, 0))
            malloc_printerr ("double free or corruption (fasttop)");
          old2 = old;
          p->fd = PROTECT_PTR (&p->fd, old);
        }
      while ((old = catomic_compare_and_exchange_val_rel (fb, p, old2))
             != old2);

    /* Check that size of fastbin chunk at the top is the same as
       size of the chunk that we are adding.  We can dereference OLD
       only if we have the lock, otherwise it might have already been
       allocated again.  */
    if (have_lock && old != NULL
        && __builtin_expect (fastbin_index (chunksize (old)) != idx, 0))
      malloc_printerr ("invalid fastbin entry (free)");
  }
```

</details>

### \_int\_free final <a href="#int_free" id="int_free"></a>

If the chunk still hasn't been placed in any bin, call `_int_free_merge_chunk`

<details>

<summary>_int_free final</summary>

```c
  /*
    Consolidate other non-mmapped chunks as they arrive.
  */

  else if (!chunk_is_mmapped(p)) {

    /* If we're single-threaded, don't lock the arena.  */
    if (SINGLE_THREAD_P)
      have_lock = true;

    if (!have_lock)
      __libc_lock_lock (av->mutex);

    _int_free_merge_chunk (av, p, size);

    if (!have_lock)
      __libc_lock_unlock (av->mutex);
  }
  /*
    If the chunk was allocated via mmap, release via munmap().
  */

  else {
    munmap_chunk (p);
  }
}
```
</details>

## \_int\_free\_merge\_chunk

This function will try to merge chunk P of SIZE bytes with its neighbours. The resulting chunk is placed on the unsorted bin list.

Some checks are performed:

* If the chunk is the top chunk: `double free or corruption (top)`
* If the next chunk is outside the boundaries of the arena: `double free or corruption (out)`
* If the chunk is not marked as used (in the `prev_inuse` of the following chunk): `double free or corruption (!prev)`
* If the next chunk has a size that is too small or too big: `free(): invalid next size (normal)`
* If the previous chunk is not in use, it will try to consolidate backwards. But if the `prev_size` differs from the size indicated in the previous chunk: `corrupted size vs. prev_size while consolidating`
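
For instance, the `!prev` check catches a plain double free of a chunk that is too big for both the tcache and the fast bins (a sketch; sizes assume the default tcache limit of 0x408 usable bytes):

```c
#include <stdlib.h>

int main(void)
{
    /* Big enough to skip both the tcache and the fast bins. */
    void *p = malloc(0x500);
    /* Guard allocation so p does not border the top chunk and therefore is
       not merged into it on the first free. */
    void *guard = malloc(0x20);

    free(p);   /* _int_free_merge_chunk() puts p in the unsorted bin and
                  clears prev_inuse of the next chunk */
    free(p);   /* prev_inuse of the next chunk is already 0: typically aborts
                  with: double free or corruption (!prev) */

    free(guard);  /* not reached if the abort above triggers */
    return 0;
}
```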

<details>

<summary>_int_free_merge_chunk code</summary>

```c
// From https://github.com/bminor/glibc/blob/f942a732d37a96217ef828116ebe64a644db18d7/malloc/malloc.c#L4660C1-L4702C2

/* Try to merge chunk P of SIZE bytes with its neighbors.  Put the
   resulting chunk on the appropriate bin list.  P must not be on a
   bin list yet, and it can be in use.  */
static void
_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
{
  mchunkptr nextchunk = chunk_at_offset(p, size);

  /* Lightweight tests: check whether the block is already the
     top block.  */
  if (__glibc_unlikely (p == av->top))
    malloc_printerr ("double free or corruption (top)");
  /* Or whether the next chunk is beyond the boundaries of the arena.  */
  if (__builtin_expect (contiguous (av)
                        && (char *) nextchunk
                        >= ((char *) av->top + chunksize(av->top)), 0))
    malloc_printerr ("double free or corruption (out)");
  /* Or whether the block is actually not marked used.  */
  if (__glibc_unlikely (!prev_inuse(nextchunk)))
    malloc_printerr ("double free or corruption (!prev)");

  INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
  if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
      || __builtin_expect (nextsize >= av->system_mem, 0))
    malloc_printerr ("free(): invalid next size (normal)");

  free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);

  /* Consolidate backward.  */
  if (!prev_inuse(p))
    {
      INTERNAL_SIZE_T prevsize = prev_size (p);
      size += prevsize;
      p = chunk_at_offset(p, -((long) prevsize));
      if (__glibc_unlikely (chunksize(p) != prevsize))
        malloc_printerr ("corrupted size vs. prev_size while consolidating");
      unlink_chunk (av, p);
    }

  /* Write the chunk header, maybe after merging with the following chunk.  */
  size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
  _int_free_maybe_consolidate (av, size);
}
```

</details>

{% hint style="success" %}
Learn & practice AWS Hacking: <img src="/.gitbook/assets/arte.png" alt="" data-size="line">[**HackTricks Training AWS Red Team Expert (ARTE)**](https://training.hacktricks.xyz/courses/arte)<img src="/.gitbook/assets/arte.png" alt="" data-size="line">\
Learn & practice GCP Hacking: <img src="/.gitbook/assets/grte.png" alt="" data-size="line">[**HackTricks Training GCP Red Team Expert (GRTE)**<img src="/.gitbook/assets/grte.png" alt="" data-size="line">](https://training.hacktricks.xyz/courses/grte)

<details>

<summary>Support HackTricks</summary>

* Check the [**subscription plans**](https://github.com/sponsors/carlospolop)!
* **Join the** 💬 [**Discord group**](https://discord.gg/hRep4RUj7f) or the [**telegram group**](https://t.me/peass) or **follow us** on **Twitter** 🐦 [**@hacktricks\_live**](https://twitter.com/hacktricks\_live)**.**
* **Share hacking tricks by submitting PRs to the** [**HackTricks**](https://github.com/carlospolop/hacktricks) and [**HackTricks Cloud**](https://github.com/carlospolop/hacktricks-cloud) github repos.

</details>
{% endhint %}