u-boot/common/cyclic.c
Rasmus Villemoes 6b84b1db2d cyclic: drop redundant cyclic_ready flag
We're already relying on gd->cyclic being NULL before cyclic_init() is
called - i.e., we're relying on all of gd being zeroed before entering
any C code. And when we do populate gd->cyclic, its ->cyclic_ready
member is automatically set to true. So we can actually just rely on
testing gd->cyclic itself.

The only wrinkle is that cyclic_uninit() actually did set
->cyclic_ready to false. However, since it doesn't free gd->cyclic,
the cyclic infrastructure is actually still ready (i.e., the list_head
is properly initialized as an empty list).

Signed-off-by: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
Reviewed-by: Stefan Roese <sr@denx.de>
Tested-by: Stefan Roese <sr@denx.de>
Tested-by: Tim Harvey <tharvey@gateworks.com> # imx8mm-venice-*
2022-11-02 08:41:42 +01:00

141 lines
3.2 KiB
C

// SPDX-License-Identifier: GPL-2.0+
/*
* A general-purpose cyclic execution infrastructure, to allow "small"
* (run-time wise) functions to be executed at a specified frequency.
* Things like LED blinking or watchdog triggering are examples for such
* tasks.
*
* Copyright (C) 2022 Stefan Roese <sr@denx.de>
*/
#include <cyclic.h>
#include <log.h>
#include <malloc.h>
#include <time.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <asm/global_data.h>
DECLARE_GLOBAL_DATA_PTR;
void hw_watchdog_reset(void);
struct list_head *cyclic_get_list(void)
{
return &gd->cyclic->cyclic_list;
}
/*
 * cyclic_register() - Register a new cyclic function.
 *
 * @func: Function to call periodically
 * @delay_us: Delay in microseconds between calls to @func
 * @name: Cyclic function name/id (copied; caller may release its buffer)
 * @ctx: Context to pass to @func on each call
 *
 * Return: pointer to the newly registered cyclic_info on success,
 * NULL if the cyclic infrastructure is not ready yet or on allocation
 * failure. The returned struct is owned by the cyclic core and is
 * released via cyclic_unregister().
 */
struct cyclic_info *cyclic_register(cyclic_func_t func, uint64_t delay_us,
				    const char *name, void *ctx)
{
	struct cyclic_info *cyclic;

	if (!gd->cyclic) {
		pr_debug("Cyclic IF not ready yet\n");
		return NULL;
	}

	cyclic = calloc(1, sizeof(struct cyclic_info));
	if (!cyclic) {
		pr_debug("Memory allocation error\n");
		return NULL;
	}

	/* Store values in struct */
	cyclic->func = func;
	cyclic->ctx = ctx;
	cyclic->name = strdup(name);
	if (!cyclic->name) {
		/*
		 * Don't register with a NULL name: cyclic_run() would later
		 * pass it to pr_err("%s", ...), which is undefined behavior.
		 */
		pr_debug("Memory allocation error\n");
		free(cyclic);
		return NULL;
	}
	cyclic->delay_us = delay_us;
	cyclic->start_time_us = timer_get_us();
	list_add_tail(&cyclic->list, &gd->cyclic->cyclic_list);

	return cyclic;
}
/*
 * cyclic_unregister() - Unregister and free a cyclic function.
 *
 * @cyclic: Handle returned by cyclic_register()
 *
 * Removes @cyclic from the active list and releases its memory,
 * including the name string that cyclic_register() strdup'ed (the
 * original code leaked it).
 *
 * Return: 0 (always succeeds)
 */
int cyclic_unregister(struct cyclic_info *cyclic)
{
	list_del(&cyclic->list);
	free(cyclic->name);
	free(cyclic);

	return 0;
}
/*
 * cyclic_run() - Execute all registered cyclic functions that are due.
 *
 * Walks gd->cyclic->cyclic_list and calls every registered function whose
 * next_call deadline has passed, accounting the CPU time each call took.
 * A function that exceeds CONFIG_CYCLIC_MAX_CPU_TIME_US is warned about
 * once (via already_warned) but stays registered.
 *
 * NOTE(review): this dereferences gd->cyclic without a NULL check — the
 * caller (schedule()) guards for that; presumably all other callers must
 * do the same.
 */
void cyclic_run(void)
{
struct cyclic_info *cyclic, *tmp;
uint64_t now, cpu_time;
/*
 * Prevent recursion: a cyclic function (or anything it calls) may end up
 * back in schedule() -> cyclic_run(); the flag makes that a no-op.
 */
if (gd->flags & GD_FLG_CYCLIC_RUNNING)
return;
gd->flags |= GD_FLG_CYCLIC_RUNNING;
/* Safe iteration: the callback may unregister (delete) its own entry */
list_for_each_entry_safe(cyclic, tmp, &gd->cyclic->cyclic_list, list) {
/*
 * Check if this cyclic function needs to get called, e.g.
 * do not call the cyclic func too often
 */
now = timer_get_us();
if (time_after_eq64(now, cyclic->next_call)) {
/* Call cyclic function and account it's cpu-time */
cyclic->next_call = now + cyclic->delay_us;
cyclic->func(cyclic->ctx);
cyclic->run_cnt++;
cpu_time = timer_get_us() - now;
cyclic->cpu_time_us += cpu_time;
/* Check if cpu-time exceeds max allowed time */
if ((cpu_time > CONFIG_CYCLIC_MAX_CPU_TIME_US) &&
(!cyclic->already_warned)) {
pr_err("cyclic function %s took too long: %lldus vs %dus max\n",
cyclic->name, cpu_time,
CONFIG_CYCLIC_MAX_CPU_TIME_US);
/*
 * Don't disable this function, just warn once
 * about this exceeding CPU time usage
 */
cyclic->already_warned = true;
}
}
}
gd->flags &= ~GD_FLG_CYCLIC_RUNNING;
}
/*
 * schedule() - Central periodic hook: kick the HW watchdog and run
 * any due cyclic functions.
 */
void schedule(void)
{
	/* The HW watchdog is not integrated into the cyclic IF (yet) */
	if (IS_ENABLED(CONFIG_HW_WATCHDOG))
		hw_watchdog_reset();

	/*
	 * schedule() might get called very early, before the cyclic
	 * infrastructure is initialized. Bail out unless both gd and
	 * gd->cyclic are available.
	 */
	if (!gd || !gd->cyclic)
		return;

	cyclic_run();
}
/*
 * cyclic_uninit() - Unregister every remaining cyclic function.
 *
 * gd->cyclic itself is kept allocated, so the (now empty) list stays
 * usable afterwards.
 *
 * Return: 0 (always succeeds)
 */
int cyclic_uninit(void)
{
	struct cyclic_info *entry, *next;

	list_for_each_entry_safe(entry, next, &gd->cyclic->cyclic_list, list)
		cyclic_unregister(entry);

	return 0;
}
/*
 * cyclic_init() - Set up the cyclic function infrastructure.
 *
 * Allocates and zeroes gd->cyclic and initializes its (empty) list of
 * cyclic functions. Uses calloc() rather than malloc() + memset(): one
 * idiomatic call that zero-initializes the allocation.
 *
 * Return: 0 on success, -ENOMEM on allocation failure
 */
int cyclic_init(void)
{
	gd->cyclic = calloc(1, sizeof(struct cyclic_drv));
	if (!gd->cyclic)
		return -ENOMEM;

	INIT_LIST_HEAD(&gd->cyclic->cyclic_list);

	return 0;
}