TLSF memory allocator. Less free flash, moar free ram. (#3572)

* add tlsf as submodule
* libs: tlsf
* Furi: tlsf as allocator
* Furi: heap walker
* shmal fixshesh
* f18: tlsf
* PVS: ignore tlsf
* I like to moving
* merge upcoming changes
* memmgr: alloc aligned, realloc
* Furi: distinct name for auxiliary memory pool
* Furi: put idle and timer thread to mem2
* Furi: fix smal things in allocator
* Furi: remove aligned_free. Use free instead.
* aligned_malloc -> aligned_alloc
* aligned_alloc, parameters order
* aligned_alloc: check that alignment is correct
* unit test: malloc
* unit tests: realloc and test with memory fragmentation
* unit tests: aligned_alloc
* update api
* updater: properly read large update file
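
An illustrative, minimal sketch of the caller-side API changes listed above (aligned allocations and the renamed auxiliary-pool helpers); the function name, sizes and alignment below are example values, not taken from this commit:

#include <furi.h>

// Hypothetical usage example, not part of this commit.
static void example_usage(void) {
    // aligned_malloc(size, alignment) is replaced by aligned_alloc(alignment, size),
    // and the result is now released with plain free() instead of the removed aligned_free().
    void* buffer = aligned_alloc(16, 256);
    free(buffer);

    // memmgr_alloc_from_pool() is renamed to memmgr_aux_pool_alloc();
    // memory from the auxiliary pool still cannot be freed.
    void* block = memmgr_aux_pool_alloc(64);
    (void)block;

    // Pool introspection follows the new naming as well.
    size_t aux_free = memmgr_aux_pool_get_free();
    (void)aux_free;
}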

Co-authored-by: Aleksandr Kutuzov <alleteam@gmail.com>
Sergei Gavrilov 2024-05-16 01:47:21 +10:00 committed by GitHub
parent 3d3db9f5b0
commit 1d17206e23
21 changed files with 628 additions and 645 deletions

.gitmodules vendored

@@ -41,3 +41,6 @@
[submodule "documentation/doxygen/doxygen-awesome-css"]
path = documentation/doxygen/doxygen-awesome-css
url = https://github.com/jothepro/doxygen-awesome-css.git
[submodule "lib/tlsf"]
path = lib/tlsf
url = https://github.com/espressif/tlsf


@@ -1 +1 @@
--ignore-ccache -C gccarm --rules-config .pvsconfig -e lib/cmsis_core -e lib/fatfs -e lib/fnv1a-hash -e lib/FreeRTOS-Kernel -e lib/heatshrink -e lib/libusb_stm32 -e lib/littlefs -e lib/mbedtls -e lib/microtar -e lib/mlib -e lib/stm32wb_cmsis -e lib/stm32wb_copro -e lib/stm32wb_hal -e lib/u8g2 -e lib/nanopb -e lib/mjs -e */arm-none-eabi/*
--ignore-ccache -C gccarm --rules-config .pvsconfig -e lib/cmsis_core -e lib/tlsf -e lib/fatfs -e lib/fnv1a-hash -e lib/FreeRTOS-Kernel -e lib/heatshrink -e lib/libusb_stm32 -e lib/littlefs -e lib/mbedtls -e lib/microtar -e lib/mlib -e lib/stm32wb_cmsis -e lib/stm32wb_copro -e lib/stm32wb_hal -e lib/u8g2 -e lib/nanopb -e lib/mjs -e */arm-none-eabi/*


@@ -1,8 +1,5 @@
#include "../minunit.h"
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <furi.h>
void test_furi_memmgr(void) {
void* ptr;
@@ -37,3 +34,260 @@ void test_furi_memmgr(void) {
}
free(ptr);
}
static void test_memmgr_malloc(const size_t allocation_size) {
uint8_t* ptr = NULL;
const char* error_message = NULL;
FURI_CRITICAL_ENTER();
ptr = malloc(allocation_size);
// test that we can allocate memory
if(ptr == NULL) {
error_message = "malloc failed";
}
// test that memory is zero-initialized after allocation
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] != 0) {
error_message = "memory is not zero-initialized after malloc";
break;
}
}
memset(ptr, 0x55, allocation_size);
free(ptr);
// test that memory is zero-initialized after free
// we know that the allocator can use this memory for internal purposes
// so we only check that the memory is at least partially zero-initialized
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuse-after-free"
size_t zero_count = 0;
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] == 0) {
zero_count++;
}
}
#pragma GCC diagnostic pop
// check that at least 75% of memory is zero-initialized
if(zero_count < (allocation_size * 0.75)) {
error_message = "seems that memory is not zero-initialized after free (malloc)";
}
FURI_CRITICAL_EXIT();
if(error_message != NULL) {
mu_fail(error_message);
}
}
static void test_memmgr_realloc(const size_t allocation_size) {
uint8_t* ptr = NULL;
const char* error_message = NULL;
FURI_CRITICAL_ENTER();
ptr = realloc(ptr, allocation_size);
// test that we can allocate memory
if(ptr == NULL) {
error_message = "realloc(NULL) failed";
}
// test that memory is zero-initialized after allocation
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] != 0) {
error_message = "memory is not zero-initialized after realloc(NULL)";
break;
}
}
memset(ptr, 0x55, allocation_size);
ptr = realloc(ptr, allocation_size * 2);
// test that we can reallocate memory
if(ptr == NULL) {
error_message = "realloc failed";
}
// test that memory content is preserved
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] != 0x55) {
error_message = "memory is not reallocated after realloc";
break;
}
}
// test that remaining memory is zero-initialized
size_t non_zero_count = 0;
for(size_t i = allocation_size; i < allocation_size * 2; i++) {
if(ptr[i] != 0) {
non_zero_count += 1;
}
}
// check that the newly added memory is mostly zero-initialized
// we know that the allocator cannot always recover the exact requested size from a pointer
// so we check against a small threshold of stray bytes
if(non_zero_count > 4) {
error_message = "seems that memory is not zero-initialized after realloc";
}
uint8_t* null_ptr = realloc(ptr, 0);
// test that we can free memory
if(null_ptr != NULL) {
error_message = "realloc(0) failed";
}
// test that memory is zero-initialized after realloc(0)
// we know that the allocator can use this memory for internal purposes
// so we only check that the memory is at least partially zero-initialized
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuse-after-free"
size_t zero_count = 0;
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] == 0) {
zero_count++;
}
}
#pragma GCC diagnostic pop
// check that at least 75% of memory is zero-initialized
if(zero_count < (allocation_size * 0.75)) {
error_message = "seems that memory is not zero-initialized after realloc(0)";
}
FURI_CRITICAL_EXIT();
if(error_message != NULL) {
mu_fail(error_message);
}
}
static void test_memmgr_alloc_aligned(const size_t allocation_size, const size_t alignment) {
uint8_t* ptr = NULL;
const char* error_message = NULL;
FURI_CRITICAL_ENTER();
ptr = aligned_alloc(alignment, allocation_size);
// test that we can allocate memory
if(ptr == NULL) {
error_message = "aligned_alloc failed";
}
// test that memory is aligned
if(((uintptr_t)ptr % alignment) != 0) {
error_message = "memory is not aligned after aligned_alloc";
}
// test that memory is zero-initialized after allocation
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] != 0) {
error_message = "memory is not zero-initialized after aligned_alloc";
break;
}
}
memset(ptr, 0x55, allocation_size);
free(ptr);
// test that memory is zero-initialized after free
// we know that the allocator can use this memory for internal purposes
// so we only check that the memory is at least partially zero-initialized
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wuse-after-free"
size_t zero_count = 0;
for(size_t i = 0; i < allocation_size; i++) {
if(ptr[i] == 0) {
zero_count++;
}
}
#pragma GCC diagnostic pop
// check that at least 75% of memory is zero-initialized
if(zero_count < (allocation_size * 0.75)) {
error_message = "seems that memory is not zero-initialized after free (aligned_alloc)";
}
FURI_CRITICAL_EXIT();
if(error_message != NULL) {
mu_fail(error_message);
}
}
void test_furi_memmgr_advanced(void) {
const size_t sizes[] = {50, 100, 500, 1000, 5000, 10000};
const size_t sizes_count = sizeof(sizes) / sizeof(sizes[0]);
const size_t alignments[] = {4, 8, 16, 32, 64, 128, 256, 512, 1024};
const size_t alignments_count = sizeof(alignments) / sizeof(alignments[0]);
// do test without memory fragmentation
{
for(size_t i = 0; i < sizes_count; i++) {
test_memmgr_malloc(sizes[i]);
}
for(size_t i = 0; i < sizes_count; i++) {
test_memmgr_realloc(sizes[i]);
}
for(size_t i = 0; i < sizes_count; i++) {
for(size_t j = 0; j < alignments_count; j++) {
test_memmgr_alloc_aligned(sizes[i], alignments[j]);
}
}
}
// do test with memory fragmentation
{
void* blocks[sizes_count];
void* guards[sizes_count - 1];
// setup guards
for(size_t i = 0; i < sizes_count; i++) {
blocks[i] = malloc(sizes[i]);
if(i < sizes_count - 1) {
guards[i] = malloc(sizes[i]);
}
}
for(size_t i = 0; i < sizes_count; i++) {
free(blocks[i]);
}
// do test
for(size_t i = 0; i < sizes_count; i++) {
test_memmgr_malloc(sizes[i]);
}
for(size_t i = 0; i < sizes_count; i++) {
test_memmgr_realloc(sizes[i]);
}
for(size_t i = 0; i < sizes_count; i++) {
for(size_t j = 0; j < alignments_count; j++) {
test_memmgr_alloc_aligned(sizes[i], alignments[j]);
}
}
// cleanup guards
for(size_t i = 0; i < sizes_count - 1; i++) {
free(guards[i]);
}
}
}


@@ -9,6 +9,7 @@ void test_furi_concurrent_access(void);
void test_furi_pubsub(void);
void test_furi_memmgr(void);
void test_furi_memmgr_advanced(void);
static int foo = 0;
@@ -37,6 +38,7 @@ MU_TEST(mu_test_furi_memmgr) {
// this test is not precise, but gives a basic indication
// that memory management is working correctly
test_furi_memmgr();
test_furi_memmgr_advanced();
}
MU_TEST_SUITE(test_suite) {


@@ -425,8 +425,34 @@ void cli_command_free(Cli* cli, FuriString* args, void* context) {
printf("Minimum heap size: %zu\r\n", memmgr_get_minimum_free_heap());
printf("Maximum heap block: %zu\r\n", memmgr_heap_get_max_free_block());
printf("Pool free: %zu\r\n", memmgr_pool_get_free());
printf("Maximum pool block: %zu\r\n", memmgr_pool_get_max_block());
printf("Aux pool total free: %zu\r\n", memmgr_aux_pool_get_free());
printf("Aux pool max free block: %zu\r\n", memmgr_pool_get_max_block());
}
typedef struct {
void* addr;
size_t size;
} FreeBlockInfo;
#define FREE_BLOCK_INFO_MAX 128
typedef struct {
FreeBlockInfo free_blocks[FREE_BLOCK_INFO_MAX];
size_t free_blocks_count;
} FreeBlockContext;
static bool free_block_walker(void* pointer, size_t size, bool used, void* context) {
FreeBlockContext* free_blocks = (FreeBlockContext*)context;
if(!used) {
if(free_blocks->free_blocks_count < FREE_BLOCK_INFO_MAX) {
free_blocks->free_blocks[free_blocks->free_blocks_count].addr = pointer;
free_blocks->free_blocks[free_blocks->free_blocks_count].size = size;
free_blocks->free_blocks_count++;
} else {
return false;
}
}
return true;
}
void cli_command_free_blocks(Cli* cli, FuriString* args, void* context) {
@@ -434,7 +460,23 @@ void cli_command_free_blocks(Cli* cli, FuriString* args, void* context) {
UNUSED(args);
UNUSED(context);
memmgr_heap_printf_free_blocks();
FreeBlockContext* free_blocks = malloc(sizeof(FreeBlockContext));
free_blocks->free_blocks_count = 0;
memmgr_heap_walk_blocks(free_block_walker, free_blocks);
for(size_t i = 0; i < free_blocks->free_blocks_count; i++) {
printf(
"A %p S %zu\r\n",
(void*)free_blocks->free_blocks[i].addr,
free_blocks->free_blocks[i].size);
}
if(free_blocks->free_blocks_count == FREE_BLOCK_INFO_MAX) {
printf("... and more\r\n");
}
free(free_blocks);
}
void cli_command_i2c(Cli* cli, FuriString* args, void* context) {


@@ -4,6 +4,8 @@
#include <furi_hal_memory.h>
extern void* pvPortMalloc(size_t xSize);
extern void* pvPortAllocAligned(size_t xSize, size_t xAlignment);
extern void* pvPortRealloc(void* pv, size_t xSize);
extern void vPortFree(void* pv);
extern size_t xPortGetFreeHeapSize(void);
extern size_t xPortGetTotalHeapSize(void);
@@ -18,18 +20,7 @@ void free(void* ptr) {
}
void* realloc(void* ptr, size_t size) {
if(size == 0) {
vPortFree(ptr);
return NULL;
}
void* p = pvPortMalloc(size);
if(ptr != NULL) {
memcpy(p, ptr, size);
vPortFree(ptr);
}
return p;
return pvPortRealloc(ptr, size);
}
void* calloc(size_t count, size_t size) {
@@ -47,6 +38,10 @@ char* strdup(const char* s) {
return y;
}
void* aligned_alloc(size_t alignment, size_t size) {
return pvPortAllocAligned(size, alignment);
}
size_t memmgr_get_free_heap(void) {
return xPortGetFreeHeapSize();
}
@@ -79,33 +74,17 @@ void* __wrap__realloc_r(struct _reent* r, void* ptr, size_t size) {
return realloc(ptr, size);
}
void* memmgr_alloc_from_pool(size_t size) {
void* memmgr_aux_pool_alloc(size_t size) {
void* p = furi_hal_memory_alloc(size);
if(p == NULL) p = malloc(size);
return p;
}
size_t memmgr_pool_get_free(void) {
size_t memmgr_aux_pool_get_free(void) {
return furi_hal_memory_get_free();
}
size_t memmgr_pool_get_max_block(void) {
return furi_hal_memory_max_pool_block();
}
void* aligned_malloc(size_t size, size_t alignment) {
void* p1; // original block
void** p2; // aligned block
int offset = alignment - 1 + sizeof(void*);
if((p1 = (void*)malloc(size + offset)) == NULL) {
return NULL;
}
p2 = (void**)(((size_t)(p1) + offset) & ~(alignment - 1));
p2[-1] = p1;
return p2;
}
void aligned_free(void* p) {
free(((void**)p)[-1]);
}


@@ -36,37 +36,22 @@ size_t memmgr_get_total_heap(void);
size_t memmgr_get_minimum_free_heap(void);
/**
* An aligned version of malloc, used when you need to get the aligned space on the heap
* Freeing the received address is performed ONLY through the aligned_free function
* @param size
* @param alignment
* @return void*
*/
void* aligned_malloc(size_t size, size_t alignment);
/**
* Freed space obtained through the aligned_malloc function
* @param p pointer to result of aligned_malloc
*/
void aligned_free(void* p);
/**
* @brief Allocate memory from separate memory pool. That memory can't be freed.
* @brief Allocate memory from the auxiliary memory pool. That memory can't be freed.
*
* @param size
* @return void*
*/
void* memmgr_alloc_from_pool(size_t size);
void* memmgr_aux_pool_alloc(size_t size);
/**
* @brief Get free memory pool size
* @brief Get the auxiliary pool free memory size
*
* @return size_t
*/
size_t memmgr_pool_get_free(void);
size_t memmgr_aux_pool_get_free(void);
/**
* @brief Get max free block size from memory pool
* @brief Get max free block size from the auxiliary memory pool
*
* @return size_t
*/


@@ -1,124 +1,18 @@
/*
* FreeRTOS Kernel V10.2.1
* Copyright (C) 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* http://www.FreeRTOS.org
* http://aws.amazon.com/freertos
*
* 1 tab == 4 spaces!
*/
/*
* A sample implementation of pvPortMalloc() and vPortFree() that combines
* (coalescences) adjacent memory blocks as they are freed, and in so doing
* limits memory fragmentation.
*
* See heap_1.c, heap_2.c and heap_3.c for alternative implementations, and the
* memory management pages of http://www.FreeRTOS.org for more information.
*/
#include "memmgr_heap.h"
#include "check.h"
#include <stdlib.h>
#include <stdio.h>
#include <stm32wbxx.h>
#include <core/log.h>
#include <core/common_defines.h>
/* Defining MPU_WRAPPERS_INCLUDED_FROM_API_FILE prevents task.h from redefining
all the API functions to use the MPU wrappers. That should only be done when
task.h is included from an application file. */
#define MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#include <furi.h>
#include <tlsf.h>
#include <tlsf_block_functions.h>
#include <FreeRTOS.h>
#include <task.h>
#undef MPU_WRAPPERS_INCLUDED_FROM_API_FILE
#ifdef HEAP_PRINT_DEBUG
#error This feature is broken, logging transport must be replaced with RTT
#endif
#if(configSUPPORT_DYNAMIC_ALLOCATION == 0)
#error This file must not be used if configSUPPORT_DYNAMIC_ALLOCATION is 0
#endif
/* Block sizes must not get too small. */
#define heapMINIMUM_BLOCK_SIZE ((size_t)(xHeapStructSize << 1))
/* Assumes 8bit bytes! */
#define heapBITS_PER_BYTE ((size_t)8)
/* Heap start end symbols provided by linker */
extern const void __heap_start__;
extern const void __heap_end__;
uint8_t* ucHeap = (uint8_t*)&__heap_start__;
/* Define the linked list structure. This is used to link free blocks in order
of their memory address. */
typedef struct A_BLOCK_LINK {
struct A_BLOCK_LINK* pxNextFreeBlock; /*<< The next free block in the list. */
size_t xBlockSize; /*<< The size of the free block. */
} BlockLink_t;
/*-----------------------------------------------------------*/
/*
* Inserts a block of memory that is being freed into the correct position in
* the list of free memory blocks. The block being freed will be merged with
* the block in front it and/or the block behind it if the memory blocks are
* adjacent to each other.
*/
static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert);
/*
* Called automatically to setup the required heap structures the first time
* pvPortMalloc() is called.
*/
static void prvHeapInit(void);
/*-----------------------------------------------------------*/
/* The size of the structure placed at the beginning of each allocated memory
block must by correctly byte aligned. */
static const size_t xHeapStructSize = (sizeof(BlockLink_t) + ((size_t)(portBYTE_ALIGNMENT - 1))) &
~((size_t)portBYTE_ALIGNMENT_MASK);
/* Create a couple of list links to mark the start and end of the list. */
static BlockLink_t xStart, *pxEnd = NULL;
/* Keeps track of the number of free bytes remaining, but says nothing about
fragmentation. */
static size_t xFreeBytesRemaining = 0U;
static size_t xMinimumEverFreeBytesRemaining = 0U;
/* Gets set to the top bit of an size_t type. When this bit in the xBlockSize
member of an BlockLink_t structure is set then the block belongs to the
application. When the bit is free the block is still part of the free heap
space. */
static size_t xBlockAllocatedBit = 0;
/* Furi heap extension */
#include <m-dict.h>
/* Allocation tracking types */
extern const void __heap_start__;
extern const void __heap_end__;
static tlsf_t tlsf = NULL;
static size_t heap_used = 0;
static size_t heap_max_used = 0;
// Allocation tracking types
DICT_DEF2(MemmgrHeapAllocDict, uint32_t, uint32_t) //-V1048
DICT_DEF2( //-V1048
@@ -128,17 +22,35 @@ DICT_DEF2( //-V1048
MemmgrHeapAllocDict_t,
DICT_OPLIST(MemmgrHeapAllocDict))
/* Thread allocation tracing storage */
// Thread allocation tracing storage
static MemmgrHeapThreadDict_t memmgr_heap_thread_dict = {0};
static volatile uint32_t memmgr_heap_thread_trace_depth = 0;
/* Initialize tracing storage on start */
void memmgr_heap_init(void) {
static inline void memmgr_lock(void) {
vTaskSuspendAll();
}
static inline void memmgr_unlock(void) {
xTaskResumeAll();
}
static inline size_t memmgr_get_heap_size(void) {
return (size_t)&__heap_end__ - (size_t)&__heap_start__;
}
// Initialize tracing storage
static void memmgr_heap_init(void) {
MemmgrHeapThreadDict_init(memmgr_heap_thread_dict);
}
__attribute__((constructor)) static void memmgr_init(void) {
size_t pool_size = (size_t)&__heap_end__ - (size_t)&__heap_start__;
tlsf = tlsf_create_with_pool((void*)&__heap_start__, pool_size, pool_size);
memmgr_heap_init();
}
void memmgr_heap_enable_thread_trace(FuriThreadId thread_id) {
vTaskSuspendAll();
memmgr_lock();
{
memmgr_heap_thread_trace_depth++;
furi_check(MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id) == NULL);
@@ -148,53 +60,20 @@ void memmgr_heap_enable_thread_trace(FuriThreadId thread_id) {
MemmgrHeapAllocDict_clear(alloc_dict);
memmgr_heap_thread_trace_depth--;
}
(void)xTaskResumeAll();
memmgr_unlock();
}
void memmgr_heap_disable_thread_trace(FuriThreadId thread_id) {
vTaskSuspendAll();
memmgr_lock();
{
memmgr_heap_thread_trace_depth++;
furi_check(MemmgrHeapThreadDict_erase(memmgr_heap_thread_dict, (uint32_t)thread_id));
memmgr_heap_thread_trace_depth--;
}
(void)xTaskResumeAll();
memmgr_unlock();
}
size_t memmgr_heap_get_thread_memory(FuriThreadId thread_id) {
size_t leftovers = MEMMGR_HEAP_UNKNOWN;
vTaskSuspendAll();
{
memmgr_heap_thread_trace_depth++;
MemmgrHeapAllocDict_t* alloc_dict =
MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id);
if(alloc_dict) {
leftovers = 0;
MemmgrHeapAllocDict_it_t alloc_dict_it;
for(MemmgrHeapAllocDict_it(alloc_dict_it, *alloc_dict);
!MemmgrHeapAllocDict_end_p(alloc_dict_it);
MemmgrHeapAllocDict_next(alloc_dict_it)) {
MemmgrHeapAllocDict_itref_t* data = MemmgrHeapAllocDict_ref(alloc_dict_it);
if(data->key != 0) {
uint8_t* puc = (uint8_t*)data->key;
puc -= xHeapStructSize;
BlockLink_t* pxLink = (void*)puc;
if((pxLink->xBlockSize & xBlockAllocatedBit) != 0 &&
pxLink->pxNextFreeBlock == NULL) {
leftovers += data->value;
}
}
}
}
memmgr_heap_thread_trace_depth--;
}
(void)xTaskResumeAll();
return leftovers;
}
#undef traceMALLOC
static inline void traceMALLOC(void* pointer, size_t size) {
static inline void memmgr_heap_trace_malloc(void* pointer, size_t size) {
FuriThreadId thread_id = furi_thread_get_current_id();
if(thread_id && memmgr_heap_thread_trace_depth == 0) {
memmgr_heap_thread_trace_depth++;
@@ -207,9 +86,7 @@ static inline void traceMALLOC(void* pointer, size_t size) {
}
}
#undef traceFREE
static inline void traceFREE(void* pointer, size_t size) {
UNUSED(size);
static inline void memmgr_heap_trace_free(void* pointer) {
FuriThreadId thread_id = furi_thread_get_current_id();
if(thread_id && memmgr_heap_thread_trace_depth == 0) {
memmgr_heap_thread_trace_depth++;
@@ -224,441 +101,248 @@ static inline void traceFREE(void* pointer, size_t size) {
}
}
size_t memmgr_heap_get_max_free_block(void) {
size_t max_free_size = 0;
BlockLink_t* pxBlock;
vTaskSuspendAll();
pxBlock = xStart.pxNextFreeBlock;
while(pxBlock->pxNextFreeBlock != NULL) {
if(pxBlock->xBlockSize > max_free_size) {
max_free_size = pxBlock->xBlockSize;
size_t memmgr_heap_get_thread_memory(FuriThreadId thread_id) {
size_t leftovers = MEMMGR_HEAP_UNKNOWN;
memmgr_lock();
{
memmgr_heap_thread_trace_depth++;
MemmgrHeapAllocDict_t* alloc_dict =
MemmgrHeapThreadDict_get(memmgr_heap_thread_dict, (uint32_t)thread_id);
if(alloc_dict) {
leftovers = 0;
MemmgrHeapAllocDict_it_t alloc_dict_it;
for(MemmgrHeapAllocDict_it(alloc_dict_it, *alloc_dict);
!MemmgrHeapAllocDict_end_p(alloc_dict_it);
MemmgrHeapAllocDict_next(alloc_dict_it)) {
MemmgrHeapAllocDict_itref_t* data = MemmgrHeapAllocDict_ref(alloc_dict_it);
if(data->key != 0) {
block_header_t* block = block_from_ptr((uint8_t*)data->key);
if(!block_is_free(block)) {
leftovers += data->value;
}
}
}
}
pxBlock = pxBlock->pxNextFreeBlock;
memmgr_heap_thread_trace_depth--;
}
memmgr_unlock();
return leftovers;
}
static bool tlsf_walker_max_free(void* ptr, size_t size, int used, void* user) {
UNUSED(ptr);
bool free = !used;
size_t* max_free_block_size = (size_t*)user;
if(free && size > *max_free_block_size) {
*max_free_block_size = size;
}
xTaskResumeAll();
return max_free_size;
return true;
}
void memmgr_heap_printf_free_blocks(void) {
BlockLink_t* pxBlock;
//TODO enable when we can do printf with a locked scheduler
//vTaskSuspendAll();
size_t memmgr_heap_get_max_free_block(void) {
size_t max_free_block_size = 0;
pxBlock = xStart.pxNextFreeBlock;
while(pxBlock->pxNextFreeBlock != NULL) {
printf("A %p S %lu\r\n", (void*)pxBlock, (uint32_t)pxBlock->xBlockSize);
pxBlock = pxBlock->pxNextFreeBlock;
}
memmgr_lock();
//xTaskResumeAll();
pool_t pool = tlsf_get_pool(tlsf);
tlsf_walk_pool(pool, tlsf_walker_max_free, &max_free_block_size);
memmgr_unlock();
return max_free_block_size;
}
#ifdef HEAP_PRINT_DEBUG
char* ultoa(unsigned long num, char* str, int radix) {
char temp[33]; // at radix 2 the string is at most 32 + 1 null long.
int temp_loc = 0;
int digit;
int str_loc = 0;
typedef struct {
BlockWalker walker;
void* context;
} BlockWalkerWrapper;
//construct a backward string of the number.
do {
digit = (unsigned long)num % ((unsigned long)radix);
if(digit < 10)
temp[temp_loc++] = digit + '0';
else
temp[temp_loc++] = digit - 10 + 'A';
num = ((unsigned long)num) / ((unsigned long)radix);
} while((unsigned long)num > 0);
temp_loc--;
//now reverse the string.
while(temp_loc >= 0) { // while there are still chars
str[str_loc++] = temp[temp_loc--];
}
str[str_loc] = 0; // add null termination.
return str;
static bool tlsf_walker_wrapper(void* ptr, size_t size, int used, void* user) {
BlockWalkerWrapper* wrapper = (BlockWalkerWrapper*)user;
return wrapper->walker(ptr, size, used, wrapper->context);
}
static void print_heap_init(void) {
char tmp_str[33];
size_t heap_start = (size_t)&__heap_start__;
size_t heap_end = (size_t)&__heap_end__;
void memmgr_heap_walk_blocks(BlockWalker walker, void* context) {
memmgr_lock();
// {PHStart|heap_start|heap_end}
FURI_CRITICAL_ENTER();
furi_log_puts("{PHStart|");
ultoa(heap_start, tmp_str, 16);
furi_log_puts(tmp_str);
furi_log_puts("|");
ultoa(heap_end, tmp_str, 16);
furi_log_puts(tmp_str);
furi_log_puts("}\r\n");
FURI_CRITICAL_EXIT();
BlockWalkerWrapper wrapper = {walker, context};
pool_t pool = tlsf_get_pool(tlsf);
tlsf_walk_pool(pool, tlsf_walker_wrapper, &wrapper);
memmgr_unlock();
}
static void print_heap_malloc(void* ptr, size_t size) {
char tmp_str[33];
const char* name = furi_thread_get_name(furi_thread_get_current_id());
if(!name) {
name = "";
}
// {thread name|m|address|size}
FURI_CRITICAL_ENTER();
furi_log_puts("{");
furi_log_puts(name);
furi_log_puts("|m|0x");
ultoa((unsigned long)ptr, tmp_str, 16);
furi_log_puts(tmp_str);
furi_log_puts("|");
utoa(size, tmp_str, 10);
furi_log_puts(tmp_str);
furi_log_puts("}\r\n");
FURI_CRITICAL_EXIT();
}
static void print_heap_free(void* ptr) {
char tmp_str[33];
const char* name = furi_thread_get_name(furi_thread_get_current_id());
if(!name) {
name = "";
}
// {thread name|f|address}
FURI_CRITICAL_ENTER();
furi_log_puts("{");
furi_log_puts(name);
furi_log_puts("|f|0x");
ultoa((unsigned long)ptr, tmp_str, 16);
furi_log_puts(tmp_str);
furi_log_puts("}\r\n");
FURI_CRITICAL_EXIT();
}
#endif
/*-----------------------------------------------------------*/
void* pvPortMalloc(size_t xWantedSize) {
BlockLink_t *pxBlock, *pxPreviousBlock, *pxNewBlockLink;
void* pvReturn = NULL;
size_t to_wipe = xWantedSize;
void* pvPortMalloc(size_t xSize) {
// memory management in ISR is not allowed
if(FURI_IS_IRQ_MODE()) {
furi_crash("memmgt in ISR");
}
#ifdef HEAP_PRINT_DEBUG
BlockLink_t* print_heap_block = NULL;
#endif
memmgr_lock();
/* If this is the first call to malloc then the heap will require
initialisation to setup the list of free blocks. */
if(pxEnd == NULL) {
#ifdef HEAP_PRINT_DEBUG
print_heap_init();
#endif
vTaskSuspendAll();
{
prvHeapInit();
memmgr_heap_init();
}
(void)xTaskResumeAll();
} else {
mtCOVERAGE_TEST_MARKER();
}
vTaskSuspendAll();
{
/* Check the requested block size is not so large that the top bit is
set. The top bit of the block size member of the BlockLink_t structure
is used to determine who owns the block - the application or the
kernel, so it must be free. */
if((xWantedSize & xBlockAllocatedBit) == 0) {
/* The wanted size is increased so it can contain a BlockLink_t
structure in addition to the requested amount of bytes. */
if(xWantedSize > 0) {
xWantedSize += xHeapStructSize;
/* Ensure that blocks are always aligned to the required number
of bytes. */
if((xWantedSize & portBYTE_ALIGNMENT_MASK) != 0x00) {
/* Byte alignment required. */
xWantedSize += (portBYTE_ALIGNMENT - (xWantedSize & portBYTE_ALIGNMENT_MASK));
configASSERT((xWantedSize & portBYTE_ALIGNMENT_MASK) == 0);
} else {
mtCOVERAGE_TEST_MARKER();
}
} else {
mtCOVERAGE_TEST_MARKER();
}
if((xWantedSize > 0) && (xWantedSize <= xFreeBytesRemaining)) {
/* Traverse the list from the start (lowest address) block until
one of adequate size is found. */
pxPreviousBlock = &xStart;
pxBlock = xStart.pxNextFreeBlock;
while((pxBlock->xBlockSize < xWantedSize) && (pxBlock->pxNextFreeBlock != NULL)) {
pxPreviousBlock = pxBlock;
pxBlock = pxBlock->pxNextFreeBlock;
}
/* If the end marker was reached then a block of adequate size
was not found. */
if(pxBlock != pxEnd) {
/* Return the memory space pointed to - jumping over the
BlockLink_t structure at its start. */
pvReturn =
(void*)(((uint8_t*)pxPreviousBlock->pxNextFreeBlock) + xHeapStructSize);
/* This block is being returned for use so must be taken out
of the list of free blocks. */
pxPreviousBlock->pxNextFreeBlock = pxBlock->pxNextFreeBlock;
/* If the block is larger than required it can be split into
two. */
if((pxBlock->xBlockSize - xWantedSize) > heapMINIMUM_BLOCK_SIZE) {
/* This block is to be split into two. Create a new
block following the number of bytes requested. The void
cast is used to prevent byte alignment warnings from the
compiler. */
pxNewBlockLink = (void*)(((uint8_t*)pxBlock) + xWantedSize);
configASSERT((((size_t)pxNewBlockLink) & portBYTE_ALIGNMENT_MASK) == 0);
/* Calculate the sizes of two blocks split from the
single block. */
pxNewBlockLink->xBlockSize = pxBlock->xBlockSize - xWantedSize;
pxBlock->xBlockSize = xWantedSize;
/* Insert the new block into the list of free blocks. */
prvInsertBlockIntoFreeList(pxNewBlockLink);
} else {
mtCOVERAGE_TEST_MARKER();
}
xFreeBytesRemaining -= pxBlock->xBlockSize;
if(xFreeBytesRemaining < xMinimumEverFreeBytesRemaining) {
xMinimumEverFreeBytesRemaining = xFreeBytesRemaining;
} else {
mtCOVERAGE_TEST_MARKER();
}
/* The block is being returned - it is allocated and owned
by the application and has no "next" block. */
pxBlock->xBlockSize |= xBlockAllocatedBit;
pxBlock->pxNextFreeBlock = NULL;
#ifdef HEAP_PRINT_DEBUG
print_heap_block = pxBlock;
#endif
} else {
mtCOVERAGE_TEST_MARKER();
}
} else {
mtCOVERAGE_TEST_MARKER();
}
// allocate block
void* data = tlsf_malloc(tlsf, xSize);
if(data == NULL) {
if(xSize == 0) {
furi_crash("malloc(0)");
} else {
mtCOVERAGE_TEST_MARKER();
}
traceMALLOC(pvReturn, xWantedSize);
}
(void)xTaskResumeAll();
#ifdef HEAP_PRINT_DEBUG
print_heap_malloc(print_heap_block, print_heap_block->xBlockSize & ~xBlockAllocatedBit);
#endif
#if(configUSE_MALLOC_FAILED_HOOK == 1)
{
if(pvReturn == NULL) {
extern void vApplicationMallocFailedHook(void);
vApplicationMallocFailedHook();
} else {
mtCOVERAGE_TEST_MARKER();
furi_crash("out of memory");
}
}
#endif
configASSERT((((size_t)pvReturn) & (size_t)portBYTE_ALIGNMENT_MASK) == 0);
// update heap usage
heap_used += tlsf_block_size(data);
heap_used += tlsf_alloc_overhead();
if(heap_used > heap_max_used) {
heap_max_used = heap_used;
}
furi_check(pvReturn, xWantedSize ? "out of memory" : "malloc(0)");
pvReturn = memset(pvReturn, 0, to_wipe);
return pvReturn;
// trace allocation
memmgr_heap_trace_malloc(data, xSize);
memmgr_unlock();
// clear block content
memset(data, 0, xSize);
return data;
}
/*-----------------------------------------------------------*/
void vPortFree(void* pv) {
uint8_t* puc = (uint8_t*)pv;
BlockLink_t* pxLink;
// memory management in ISR is not allowed
if(FURI_IS_IRQ_MODE()) {
furi_crash("memmgt in ISR");
}
// ignore NULL pointer
if(pv != NULL) {
/* The memory being freed will have an BlockLink_t structure immediately
before it. */
puc -= xHeapStructSize;
memmgr_lock();
/* This casting is to keep the compiler from issuing warnings. */
pxLink = (void*)puc;
// get block size
size_t block_size = tlsf_block_size(pv);
/* Check the block is actually allocated. */
configASSERT((pxLink->xBlockSize & xBlockAllocatedBit) != 0);
configASSERT(pxLink->pxNextFreeBlock == NULL);
// clear block content
memset(pv, 0, block_size);
if((pxLink->xBlockSize & xBlockAllocatedBit) != 0) {
if(pxLink->pxNextFreeBlock == NULL) {
/* The block is being returned to the heap - it is no longer
allocated. */
pxLink->xBlockSize &= ~xBlockAllocatedBit;
// update heap usage
heap_used -= block_size;
heap_used -= tlsf_alloc_overhead();
#ifdef HEAP_PRINT_DEBUG
print_heap_free(pxLink);
#endif
// free
tlsf_free(tlsf, pv);
vTaskSuspendAll();
{
furi_assert((size_t)pv >= SRAM_BASE);
furi_assert((size_t)pv < SRAM_BASE + 1024 * 256);
furi_assert(pxLink->xBlockSize >= xHeapStructSize);
furi_assert((pxLink->xBlockSize - xHeapStructSize) < 1024 * 256);
// trace free
memmgr_heap_trace_free(pv);
/* Add this block to the list of free blocks. */
xFreeBytesRemaining += pxLink->xBlockSize;
traceFREE(pv, pxLink->xBlockSize);
memset(pv, 0, pxLink->xBlockSize - xHeapStructSize);
prvInsertBlockIntoFreeList(((BlockLink_t*)pxLink));
}
(void)xTaskResumeAll();
} else {
mtCOVERAGE_TEST_MARKER();
}
} else {
mtCOVERAGE_TEST_MARKER();
}
} else {
#ifdef HEAP_PRINT_DEBUG
print_heap_free(pv);
#endif
memmgr_unlock();
}
}
/*-----------------------------------------------------------*/
size_t xPortGetTotalHeapSize(void) {
return (size_t)&__heap_end__ - (size_t)&__heap_start__;
extern void* pvPortAllocAligned(size_t xSize, size_t xAlignment) {
// memory management in ISR is not allowed
if(FURI_IS_IRQ_MODE()) {
furi_crash("memmgt in ISR");
}
// alignment must be a power of 2
if((xAlignment & (xAlignment - 1)) != 0) {
furi_crash("invalid alignment");
}
memmgr_lock();
// allocate block
void* data = tlsf_memalign(tlsf, xAlignment, xSize);
if(data == NULL) {
if(xSize == 0) {
furi_crash("malloc_aligned(0)");
} else {
furi_crash("out of memory");
}
}
// update heap usage
heap_used += tlsf_block_size(data);
heap_used += tlsf_alloc_overhead();
if(heap_used > heap_max_used) {
heap_max_used = heap_used;
}
// trace allocation
memmgr_heap_trace_malloc(data, xSize);
memmgr_unlock();
// clear block content
memset(data, 0, xSize);
return data;
}
extern void* pvPortRealloc(void* pv, size_t xSize) {
// realloc(ptr, 0) is equivalent to free(ptr)
if(xSize == 0) {
vPortFree(pv);
return NULL;
}
// realloc(NULL, size) is equivalent to malloc(size)
if(pv == NULL) {
return pvPortMalloc(xSize);
}
/* realloc things */
// memory management in ISR is not allowed
if(FURI_IS_IRQ_MODE()) {
furi_crash("memmgt in ISR");
}
memmgr_lock();
// trace old block as free
size_t old_size = tlsf_block_size(pv);
// trace free
memmgr_heap_trace_free(pv);
// reallocate block
void* data = tlsf_realloc(tlsf, pv, xSize);
if(data == NULL) {
furi_crash("out of memory");
}
// update heap usage
heap_used -= old_size;
heap_used += tlsf_block_size(data);
if(heap_used > heap_max_used) {
heap_max_used = heap_used;
}
// trace allocation
memmgr_heap_trace_malloc(data, xSize);
memmgr_unlock();
// clear the newly grown part of the block if the new size is bigger
// we can't guarantee that all of it will be zeroed, because tlsf_block_size is not always equal to xSize
if(xSize > old_size) {
memset((uint8_t*)data + old_size, 0, xSize - old_size);
}
return data;
}
/*-----------------------------------------------------------*/
size_t xPortGetFreeHeapSize(void) {
return xFreeBytesRemaining;
return memmgr_get_heap_size() - heap_used - tlsf_size(tlsf);
}
size_t xPortGetTotalHeapSize(void) {
return memmgr_get_heap_size();
}
/*-----------------------------------------------------------*/
size_t xPortGetMinimumEverFreeHeapSize(void) {
return xMinimumEverFreeBytesRemaining;
}
/*-----------------------------------------------------------*/
void vPortInitialiseBlocks(void) {
/* This just exists to keep the linker quiet. */
}
/*-----------------------------------------------------------*/
static void prvHeapInit(void) {
BlockLink_t* pxFirstFreeBlock;
uint8_t* pucAlignedHeap;
size_t uxAddress;
size_t xTotalHeapSize = (size_t)&__heap_end__ - (size_t)&__heap_start__;
/* Ensure the heap starts on a correctly aligned boundary. */
uxAddress = (size_t)ucHeap;
if((uxAddress & portBYTE_ALIGNMENT_MASK) != 0) {
uxAddress += (portBYTE_ALIGNMENT - 1);
uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
xTotalHeapSize -= uxAddress - (size_t)ucHeap;
}
pucAlignedHeap = (uint8_t*)uxAddress;
/* xStart is used to hold a pointer to the first item in the list of free
blocks. The void cast is used to prevent compiler warnings. */
xStart.pxNextFreeBlock = (void*)pucAlignedHeap;
xStart.xBlockSize = (size_t)0;
/* pxEnd is used to mark the end of the list of free blocks and is inserted
at the end of the heap space. */
uxAddress = ((size_t)pucAlignedHeap) + xTotalHeapSize;
uxAddress -= xHeapStructSize;
uxAddress &= ~((size_t)portBYTE_ALIGNMENT_MASK);
pxEnd = (void*)uxAddress;
pxEnd->xBlockSize = 0;
pxEnd->pxNextFreeBlock = NULL;
/* To start with there is a single free block that is sized to take up the
entire heap space, minus the space taken by pxEnd. */
pxFirstFreeBlock = (void*)pucAlignedHeap;
pxFirstFreeBlock->xBlockSize = uxAddress - (size_t)pxFirstFreeBlock;
pxFirstFreeBlock->pxNextFreeBlock = pxEnd;
/* Only one block exists - and it covers the entire usable heap space. */
xMinimumEverFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
xFreeBytesRemaining = pxFirstFreeBlock->xBlockSize;
/* Work out the position of the top bit in a size_t variable. */
xBlockAllocatedBit = ((size_t)1) << ((sizeof(size_t) * heapBITS_PER_BYTE) - 1);
}
/*-----------------------------------------------------------*/
static void prvInsertBlockIntoFreeList(BlockLink_t* pxBlockToInsert) {
BlockLink_t* pxIterator;
uint8_t* puc;
/* Iterate through the list until a block is found that has a higher address
than the block being inserted. */
for(pxIterator = &xStart; pxIterator->pxNextFreeBlock < pxBlockToInsert;
pxIterator = pxIterator->pxNextFreeBlock) {
/* Nothing to do here, just iterate to the right position. */
}
/* Do the block being inserted, and the block it is being inserted after
make a contiguous block of memory? */
puc = (uint8_t*)pxIterator;
if((puc + pxIterator->xBlockSize) == (uint8_t*)pxBlockToInsert) {
pxIterator->xBlockSize += pxBlockToInsert->xBlockSize;
pxBlockToInsert = pxIterator;
} else {
mtCOVERAGE_TEST_MARKER();
}
/* Do the block being inserted, and the block it is being inserted before
make a contiguous block of memory? */
puc = (uint8_t*)pxBlockToInsert;
if((puc + pxBlockToInsert->xBlockSize) == (uint8_t*)pxIterator->pxNextFreeBlock) {
if(pxIterator->pxNextFreeBlock != pxEnd) {
/* Form one big block from the two blocks. */
pxBlockToInsert->xBlockSize += pxIterator->pxNextFreeBlock->xBlockSize;
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock->pxNextFreeBlock;
} else {
pxBlockToInsert->pxNextFreeBlock = pxEnd;
}
} else {
pxBlockToInsert->pxNextFreeBlock = pxIterator->pxNextFreeBlock;
}
/* If the block being inserted plugged a gab, so was merged with the block
before and the block after, then it's pxNextFreeBlock pointer will have
already been set, and should not be set here as that would make it point
to itself. */
if(pxIterator != pxBlockToInsert) {
pxIterator->pxNextFreeBlock = pxBlockToInsert;
} else {
mtCOVERAGE_TEST_MARKER();
}
}
return memmgr_get_heap_size() - heap_max_used - tlsf_size(tlsf);
}


@@ -40,9 +40,17 @@ size_t memmgr_heap_get_thread_memory(FuriThreadId thread_id);
*/
size_t memmgr_heap_get_max_free_block(void);
/** Print the address and size of all free blocks to stdout
typedef bool (*BlockWalker)(void* pointer, size_t size, bool used, void* context);
/**
* @brief Walk through all heap blocks
 * @warning This function locks the memory manager and may cause a deadlock if any malloc/free is called inside the callback.
 * Also, printf and furi_log contain malloc calls, so do not use them.
*
* @param walker
* @param context
*/
void memmgr_heap_printf_free_blocks(void);
void memmgr_heap_walk_blocks(BlockWalker walker, void* context);
#ifdef __cplusplus
}

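As the warning above explains, the walker callback must not allocate, free, or print; this is why the cli_command_free_blocks handler earlier in this diff records block information into a preallocated buffer and prints it only after the walk completes. A minimal sketch under the same constraint (the names largest_free_block and find_largest_free are illustrative, not part of this commit):

#include <furi.h>

// Hypothetical usage example, not part of this commit.
static bool largest_free_block(void* pointer, size_t size, bool used, void* context) {
    UNUSED(pointer);
    size_t* largest = context;
    // no malloc/free/printf here: the heap is locked for the duration of the walk
    if(!used && size > *largest) {
        *largest = size;
    }
    return true; // returning false would stop the walk early
}

static size_t find_largest_free(void) {
    size_t largest = 0;
    memmgr_heap_walk_blocks(largest_free_block, &largest);
    // the memory manager is unlocked again here, so printing the result is safe
    return largest;
}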

@@ -276,8 +276,8 @@ void furi_thread_start(FuriThread* thread) {
stack,
thread,
priority,
memmgr_alloc_from_pool(sizeof(StackType_t) * stack),
memmgr_alloc_from_pool(sizeof(StaticTask_t)));
memmgr_aux_pool_alloc(sizeof(StackType_t) * stack),
memmgr_aux_pool_alloc(sizeof(StaticTask_t)));
} else {
BaseType_t ret = xTaskCreate(
furi_thread_body, thread->name, stack, thread, priority, &thread->task_handle);


@@ -51,12 +51,17 @@ void flipper_init(void) {
FURI_LOG_I(TAG, "Startup complete");
}
PLACE_IN_SECTION("MB_MEM2") static StaticTask_t idle_task_tcb;
PLACE_IN_SECTION("MB_MEM2") static StackType_t idle_task_stack[configIDLE_TASK_STACK_DEPTH];
PLACE_IN_SECTION("MB_MEM2") static StaticTask_t timer_task_tcb;
PLACE_IN_SECTION("MB_MEM2") static StackType_t timer_task_stack[configTIMER_TASK_STACK_DEPTH];
void vApplicationGetIdleTaskMemory(
StaticTask_t** tcb_ptr,
StackType_t** stack_ptr,
uint32_t* stack_size) {
*tcb_ptr = memmgr_alloc_from_pool(sizeof(StaticTask_t));
*stack_ptr = memmgr_alloc_from_pool(sizeof(StackType_t) * configIDLE_TASK_STACK_DEPTH);
*tcb_ptr = &idle_task_tcb;
*stack_ptr = idle_task_stack;
*stack_size = configIDLE_TASK_STACK_DEPTH;
}
@@ -64,7 +69,7 @@ void vApplicationGetTimerTaskMemory(
StaticTask_t** tcb_ptr,
StackType_t** stack_ptr,
uint32_t* stack_size) {
*tcb_ptr = memmgr_alloc_from_pool(sizeof(StaticTask_t));
*stack_ptr = memmgr_alloc_from_pool(sizeof(StackType_t) * configTIMER_TASK_STACK_DEPTH);
*tcb_ptr = &timer_task_tcb;
*stack_ptr = timer_task_stack;
*stack_size = configTIMER_TASK_STACK_DEPTH;
}


@@ -13,6 +13,7 @@ env.Append(
libs = env.BuildModules(
[
"tlsf",
"mlib",
"stm32wb",
"freertos",


@@ -466,7 +466,7 @@ static bool elf_load_section_data(ELFFile* elf, ELFSection* section, Elf32_Shdr*
return true;
}
section->data = aligned_malloc(section_header->sh_size, section_header->sh_addralign);
section->data = aligned_alloc(section_header->sh_addralign, section_header->sh_size);
section->size = section_header->sh_size;
if(section_header->sh_type == SHT_NOBITS) {
@@ -718,7 +718,7 @@ static bool elf_relocate_fast(ELFFile* elf, ELFSection* s) {
}
}
aligned_free(s->fast_rel->data);
free(s->fast_rel->data);
free(s->fast_rel);
s->fast_rel = NULL;
@@ -785,10 +785,10 @@ void elf_file_free(ELFFile* elf) {
ELFSectionDict_next(it)) {
const ELFSectionDict_itref_t* itref = ELFSectionDict_cref(it);
if(itref->value.data) {
aligned_free(itref->value.data);
free(itref->value.data);
}
if(itref->value.fast_rel) {
aligned_free(itref->value.fast_rel->data);
free(itref->value.fast_rel->data);
free(itref->value.fast_rel);
}
free((void*)itref->key);

lib/tlsf Submodule

@@ -0,0 +1 @@
Subproject commit 8fc595fe223cd0b3b5d7b29eb86825e4bd38e6e8

lib/tlsf.scons Normal file

@@ -0,0 +1,21 @@
Import("env")
env.Append(
CPPPATH=[
"#/lib/tlsf",
],
)
libenv = env.Clone(FW_LIB_NAME="tlsf")
libenv.ApplyLibFlags()
libenv.Append(
CPPDEFINES=[],
)
sources = [File("tlsf/tlsf.c")]
lib = libenv.StaticLibrary("${FW_LIB_NAME}", sources)
libenv.Install("${LIB_DIST_DIR}", lib)
Return("lib")


@@ -1,5 +1,5 @@
entry,status,name,type,params
Version,+,61.3,,
Version,+,62.0,,
Header,+,applications/services/bt/bt_service/bt.h,,
Header,+,applications/services/cli/cli.h,,
Header,+,applications/services/cli/cli_vcp.h,,
@@ -515,9 +515,7 @@ Function,-,acosh,double,double
Function,-,acoshf,float,float
Function,-,acoshl,long double,long double
Function,-,acosl,long double,long double
Function,-,aligned_alloc,void*,"size_t, size_t"
Function,+,aligned_free,void,void*
Function,+,aligned_malloc,void*,"size_t, size_t"
Function,+,aligned_alloc,void*,"size_t, size_t"
Function,-,arc4random,__uint32_t,
Function,-,arc4random_buf,void,"void*, size_t"
Function,-,arc4random_uniform,__uint32_t,__uint32_t
@@ -1984,7 +1982,8 @@ Function,+,memchr,void*,"const void*, int, size_t"
Function,+,memcmp,int,"const void*, const void*, size_t"
Function,+,memcpy,void*,"void*, const void*, size_t"
Function,-,memmem,void*,"const void*, size_t, const void*, size_t"
Function,-,memmgr_alloc_from_pool,void*,size_t
Function,+,memmgr_aux_pool_alloc,void*,size_t
Function,+,memmgr_aux_pool_get_free,size_t,
Function,+,memmgr_get_free_heap,size_t,
Function,+,memmgr_get_minimum_free_heap,size_t,
Function,+,memmgr_get_total_heap,size_t,
@@ -1992,8 +1991,7 @@ Function,+,memmgr_heap_disable_thread_trace,void,FuriThreadId
Function,+,memmgr_heap_enable_thread_trace,void,FuriThreadId
Function,+,memmgr_heap_get_max_free_block,size_t,
Function,+,memmgr_heap_get_thread_memory,size_t,FuriThreadId
Function,+,memmgr_heap_printf_free_blocks,void,
Function,-,memmgr_pool_get_free,size_t,
Function,+,memmgr_heap_walk_blocks,void,"BlockWalker, void*"
Function,-,memmgr_pool_get_max_block,size_t,
Function,+,memmove,void*,"void*, const void*, size_t"
Function,-,mempcpy,void*,"void*, const void*, size_t"



@@ -13,6 +13,7 @@
"print",
"flipper18",
"furi",
"tlsf",
"freertos",
"stm32wb",
"hwdrivers",
@@ -68,4 +69,4 @@
"ibutton",
"infrared"
]
}
}


@@ -1,5 +1,5 @@
entry,status,name,type,params
Version,+,61.3,,
Version,+,62.0,,
Header,+,applications/drivers/subghz/cc1101_ext/cc1101_ext_interconnect.h,,
Header,+,applications/services/bt/bt_service/bt.h,,
Header,+,applications/services/cli/cli.h,,
@@ -586,9 +586,7 @@ Function,-,acosh,double,double
Function,-,acoshf,float,float
Function,-,acoshl,long double,long double
Function,-,acosl,long double,long double
Function,-,aligned_alloc,void*,"size_t, size_t"
Function,+,aligned_free,void,void*
Function,+,aligned_malloc,void*,"size_t, size_t"
Function,+,aligned_alloc,void*,"size_t, size_t"
Function,-,arc4random,__uint32_t,
Function,-,arc4random_buf,void,"void*, size_t"
Function,-,arc4random_uniform,__uint32_t,__uint32_t
@@ -2394,7 +2392,8 @@ Function,+,memchr,void*,"const void*, int, size_t"
Function,+,memcmp,int,"const void*, const void*, size_t"
Function,+,memcpy,void*,"void*, const void*, size_t"
Function,-,memmem,void*,"const void*, size_t, const void*, size_t"
Function,-,memmgr_alloc_from_pool,void*,size_t
Function,+,memmgr_aux_pool_alloc,void*,size_t
Function,+,memmgr_aux_pool_get_free,size_t,
Function,+,memmgr_get_free_heap,size_t,
Function,+,memmgr_get_minimum_free_heap,size_t,
Function,+,memmgr_get_total_heap,size_t,
@@ -2402,8 +2401,7 @@ Function,+,memmgr_heap_disable_thread_trace,void,FuriThreadId
Function,+,memmgr_heap_enable_thread_trace,void,FuriThreadId
Function,+,memmgr_heap_get_max_free_block,size_t,
Function,+,memmgr_heap_get_thread_memory,size_t,FuriThreadId
Function,+,memmgr_heap_printf_free_blocks,void,
Function,-,memmgr_pool_get_free,size_t,
Function,+,memmgr_heap_walk_blocks,void,"BlockWalker, void*"
Function,-,memmgr_pool_get_max_block,size_t,
Function,+,memmove,void*,"void*, const void*, size_t"
Function,-,mempcpy,void*,"void*, const void*, size_t"



@@ -19,7 +19,7 @@ static SectorCache* cache = NULL;
void sector_cache_init(void) {
if(cache == NULL) {
cache = memmgr_alloc_from_pool(sizeof(SectorCache));
cache = memmgr_aux_pool_alloc(sizeof(SectorCache));
}
if(cache != NULL) {


@@ -78,21 +78,21 @@ static bool flipper_update_load_stage(const FuriString* work_dir, UpdateManifest
furi_string_free(loader_img_path);
void* img = malloc(stat.fsize);
uint32_t bytes_read = 0;
uint32_t read_total = 0;
uint16_t read_current = 0;
const uint16_t MAX_READ = 0xFFFF;
uint32_t crc = 0;
do {
uint16_t size_read = 0;
if(f_read(&file, img + bytes_read, MAX_READ, &size_read) != FR_OK) { //-V769
if(f_read(&file, img + read_total, MAX_READ, &read_current) != FR_OK) { //-V769
break;
}
crc = crc32_calc_buffer(crc, img + bytes_read, size_read);
bytes_read += size_read;
} while(bytes_read == MAX_READ);
crc = crc32_calc_buffer(crc, img + read_total, read_current);
read_total += read_current;
} while(read_current == MAX_READ);
do {
if((bytes_read != stat.fsize) || (crc != manifest->staged_loader_crc)) {
if((read_total != stat.fsize) || (crc != manifest->staged_loader_crc)) {
break;
}


@@ -22,6 +22,7 @@
"print",
"flipper7",
"furi",
"tlsf",
"freertos",
"stm32wb",
"hwdrivers",
@@ -55,4 +56,4 @@
"bit_lib",
"datetime"
]
}
}