#include "timer.h"
#include "check.h"
#include "memmgr.h"
#include "kernel.h"

#include <FreeRTOS.h>
#include <timers.h>

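/*
 * The FreeRTOS timer ID field is used to carry a pointer to a TimerCallback_t
 * holding the application callback and its context. Bit 0 of that pointer is
 * used as a flag marking that the structure was allocated from the dynamic
 * memory pool: it is cleared before the pointer is dereferenced and checked
 * before the memory is freed.
 */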
typedef struct {
    FuriTimerCallback func;
    void* context;
} TimerCallback_t;

static void TimerCallback(TimerHandle_t hTimer) {
    TimerCallback_t* callb;

    /* Retrieve pointer to callback function and context */
    callb = (TimerCallback_t*)pvTimerGetTimerID(hTimer);

    /* Remove dynamic allocation flag */
    callb = (TimerCallback_t*)((uint32_t)callb & ~1U);

    if(callb != NULL) {
        callb->func(callb->context);
    }
}

FuriTimer* furi_timer_alloc(FuriTimerCallback func, FuriTimerType type, void* context) {
    furi_assert((furi_kernel_is_irq_or_masked() == 0U) && (func != NULL));

    TimerHandle_t hTimer;
    TimerCallback_t* callb;
    UBaseType_t reload;

    hTimer = NULL;

    /* Allocate memory for the callback function and its context
     * from the dynamic memory pool */
    callb = (TimerCallback_t*)malloc(sizeof(TimerCallback_t));

    callb->func = func;
    callb->context = context;

    if(type == FuriTimerTypeOnce) {
        reload = pdFALSE;
    } else {
        reload = pdTRUE;
    }

    /* Store callback memory dynamic allocation flag */
    callb = (TimerCallback_t*)((uint32_t)callb | 1U);

    // TimerCallback is always provided as the FreeRTOS callback and is used to call the
    // application-specified function with its context, both stored in structure callb.
    hTimer = xTimerCreate(NULL, portMAX_DELAY, reload, callb, TimerCallback);
    furi_check(hTimer);

    /* Return timer ID */
    return ((FuriTimer*)hTimer);
}

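/*
 * Illustrative usage sketch (not part of this module; assumes the
 * FuriTimerTypePeriodic variant declared in timer.h). The callback runs in the
 * FreeRTOS timer service task, so it must not block:
 *
 *     static void example_callback(void* context) {
 *         uint32_t* counter = context;
 *         (*counter)++;
 *     }
 *
 *     uint32_t counter = 0;
 *     FuriTimer* timer = furi_timer_alloc(example_callback, FuriTimerTypePeriodic, &counter);
 *     furi_timer_start(timer, 100);
 *     ...
 *     furi_timer_stop(timer);
 *     furi_timer_free(timer);
 */
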
void furi_timer_free(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);

    TimerHandle_t hTimer = (TimerHandle_t)instance;
    TimerCallback_t* callb;

    callb = (TimerCallback_t*)pvTimerGetTimerID(hTimer);

    if((uint32_t)callb & 1U) {
        /* If callback memory was allocated, it is only safe to free it with
         * the timer inactive. Send a stop command and wait for the timer to
         * be in an inactive state.
         */
        furi_check(xTimerStop(hTimer, portMAX_DELAY) == pdPASS);
        while(furi_timer_is_running(instance)) furi_delay_tick(2);

        /* Callback memory was allocated from dynamic pool, clear flag */
        callb = (TimerCallback_t*)((uint32_t)callb & ~1U);

        /* Return allocated memory to dynamic pool */
        free(callb);
    }

    furi_check(xTimerDelete(hTimer, portMAX_DELAY) == pdPASS);
}

FuriStatus furi_timer_start(FuriTimer* instance, uint32_t ticks) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    furi_assert(ticks < portMAX_DELAY);

    TimerHandle_t hTimer = (TimerHandle_t)instance;
    FuriStatus stat;

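    /* xTimerChangePeriod() sets the timer period and also starts the timer if
     * it was dormant, so no separate xTimerStart() call is needed here. */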
    if(xTimerChangePeriod(hTimer, ticks, portMAX_DELAY) == pdPASS) {
        stat = FuriStatusOk;
    } else {
        stat = FuriStatusErrorResource;
    }

    /* Return execution status */
    return (stat);
}

FuriStatus furi_timer_restart(FuriTimer* instance, uint32_t ticks) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);
    furi_assert(ticks < portMAX_DELAY);

    TimerHandle_t hTimer = (TimerHandle_t)instance;
    FuriStatus stat;

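    /* xTimerChangePeriod() updates the period (starting the timer if it was
     * dormant) and xTimerReset() restarts the expiry countdown from the moment
     * of the call, so an already running timer is restarted as well. */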
    if(xTimerChangePeriod(hTimer, ticks, portMAX_DELAY) == pdPASS &&
       xTimerReset(hTimer, portMAX_DELAY) == pdPASS) {
        stat = FuriStatusOk;
    } else {
        stat = FuriStatusErrorResource;
    }

    /* Return execution status */
    return (stat);
}

FuriStatus furi_timer_stop(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);

    TimerHandle_t hTimer = (TimerHandle_t)instance;

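    /* xTimerStop() only queues a stop command for the timer service task; the
     * timer may remain briefly active until that command is processed (see the
     * wait loop in furi_timer_free()). */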
    furi_check(xTimerStop(hTimer, portMAX_DELAY) == pdPASS);

    return FuriStatusOk;
}

uint32_t furi_timer_is_running(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);

    TimerHandle_t hTimer = (TimerHandle_t)instance;

    /* Return 0: not running, 1: running */
    return (uint32_t)xTimerIsTimerActive(hTimer);
}

uint32_t furi_timer_get_expire_time(FuriTimer* instance) {
    furi_assert(!furi_kernel_is_irq_or_masked());
    furi_assert(instance);

    TimerHandle_t hTimer = (TimerHandle_t)instance;

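    /* xTimerGetExpiryTime() returns the absolute tick count at which the timer
     * is scheduled to expire */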
    return (uint32_t)xTimerGetExpiryTime(hTimer);
}

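/* Defers execution of a callback to the FreeRTOS timer service (daemon) task.
 * Can be called from both interrupt and thread context. */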
void furi_timer_pending_callback(FuriTimerPendigCallback callback, void* context, uint32_t arg) {
    BaseType_t ret = pdFAIL;
    if(furi_kernel_is_irq_or_masked()) {
        ret = xTimerPendFunctionCallFromISR(callback, context, arg, NULL);
    } else {
        ret = xTimerPendFunctionCall(callback, context, arg, FuriWaitForever);
    }
    furi_check(ret == pdPASS);
}

void furi_timer_set_thread_priority(FuriTimerThreadPriority priority) {
    furi_assert(!furi_kernel_is_irq_or_masked());

    TaskHandle_t task_handle = xTimerGetTimerDaemonTaskHandle();
    furi_check(task_handle); // Don't call this method before timer task start

    if(priority == FuriTimerThreadPriorityNormal) {
        vTaskPrioritySet(task_handle, configTIMER_TASK_PRIORITY);
    } else if(priority == FuriTimerThreadPriorityElevated) {
        vTaskPrioritySet(task_handle, configMAX_PRIORITIES - 1);
    } else {
        furi_crash();
    }
}