u-boot/common/cyclic.c
Rasmus Villemoes 50128aeb0f cyclic: get rid of cyclic_init()
Currently, we must call cyclic_init() at some point before
cyclic_register() becomes possible. That turns out to be somewhat
awkward, especially with SPL, and has resulted in a watchdog callback
not being registered, thus causing the board to prematurely reset.

We already rely on gd->cyclic being set to NULL by the asm
code that clears all of gd. Now that the cyclic list is an hlist, and
thus an empty list is represented by a NULL head pointer, and struct
cyclic_drv has no other members, we can just as well drop a level of
indirection and put the hlist_head directly in struct
global_data. This doesn't increase the size of struct global_data,
gets rid of an early malloc(), and generates slightly smaller code.
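
To make the "empty list is a NULL pointer" point concrete: an hlist head
is just a single pointer, so the new member (name taken from
cyclic_get_list() below; its exact placement and config guards in
global_data.h are not shown here) is empty precisely when that pointer
is NULL, i.e. right after the asm code has zeroed gd:

	/* from <linux/list.h> */
	struct hlist_head {
		struct hlist_node *first;
	};

	/* in struct global_data: the list is empty when .first == NULL */
	struct hlist_head cyclic_list;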

But primarily, this avoids having to call cyclic_init() early; the cyclic
infrastructure is simply ready to register callbacks as soon as we
enter C code.
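
For illustration only, registering a callback then needs nothing beyond
gd itself; a minimal sketch (the callback, period and caller are made
up, and the callback signature follows the cyclic->func(cyclic->ctx)
call in cyclic_run() below):

	static void wdt_kick_cb(void *ctx)
	{
		/* small, fast work: kick a watchdog, blink an LED, ... */
	}

	void some_early_board_code(void)
	{
		struct cyclic_info *c;

		/* ask for wdt_kick_cb() to run roughly every 100 ms */
		c = cyclic_register(wdt_kick_cb, 100 * 1000, "wdt_kick", NULL);
		if (!c)
			log_debug("cyclic registration failed\n");
	}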

We can still end up with schedule() being called from asm very early,
so we still need to check that gd itself has been properly initialized
[*], but once it has, gd->cyclic_list is perfectly fine to access, and
will just be an empty list.
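
(Roughly speaking, iterating an empty hlist never enters the loop body,
so an early cyclic_run() is a no-op; the sketch below only restates what
the hlist_for_each_entry_safe() loop in cyclic_run() does for a zeroed
head:

	struct hlist_head head = { .first = NULL };	/* what a zeroed gd gives us */
	struct cyclic_info *c;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(c, tmp, &head, list)
		/* never reached while the list is empty */;

No allocation or other setup has to happen before that is valid.)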

As for cyclic_uninit(), it was never really the opposite of
cyclic_init(), since it neither freed the struct cyclic_drv nor set
gd->cyclic to NULL. Rename it to cyclic_unregister_all() and use that
in test/, and also insert a call at the end of the board_init_f
sequence so that gd->cyclic_list is a fresh empty list before we enter
board_init_r().

A small piece of ugliness is that I had to add a cast in
cyclic_get_list() to silence a "discards 'volatile' qualifier"
warning, but that is completely equivalent to the existing handling of
the uclass_root_s list_head member.

[*] I'm not really sure where we guarantee that the register used for
gd contains 0 until it gets explicitly initialized, but that must be
the case, otherwise testing gd for being NULL would not make much sense.

Signed-off-by: Rasmus Villemoes <rasmus.villemoes@prevas.dk>
Reviewed-by: Stefan Roese <sr@denx.de>
Tested-by: Stefan Roese <sr@denx.de>
Tested-by: Tim Harvey <tharvey@gateworks.com> # imx8mm-venice-*
2022-11-02 08:42:03 +01:00

// SPDX-License-Identifier: GPL-2.0+
/*
 * A general-purpose cyclic execution infrastructure, to allow "small"
 * (run-time wise) functions to be executed at a specified frequency.
 * Things like LED blinking or watchdog triggering are examples for such
 * tasks.
 *
 * Copyright (C) 2022 Stefan Roese <sr@denx.de>
 */

#include <cyclic.h>
#include <log.h>
#include <malloc.h>
#include <time.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <asm/global_data.h>

DECLARE_GLOBAL_DATA_PTR;

void hw_watchdog_reset(void);

struct hlist_head *cyclic_get_list(void)
{
	/* Silence "discards 'volatile' qualifier" warning. */
	return (struct hlist_head *)&gd->cyclic_list;
}

struct cyclic_info *cyclic_register(cyclic_func_t func, uint64_t delay_us,
				    const char *name, void *ctx)
{
	struct cyclic_info *cyclic;

	cyclic = calloc(1, sizeof(struct cyclic_info));
	if (!cyclic) {
		pr_debug("Memory allocation error\n");
		return NULL;
	}

	/* Store values in struct */
	cyclic->func = func;
	cyclic->ctx = ctx;
	cyclic->name = strdup(name);
	cyclic->delay_us = delay_us;
	cyclic->start_time_us = timer_get_us();
	hlist_add_head(&cyclic->list, cyclic_get_list());

	return cyclic;
}

int cyclic_unregister(struct cyclic_info *cyclic)
{
	hlist_del(&cyclic->list);
	free(cyclic);

	return 0;
}

void cyclic_run(void)
{
	struct cyclic_info *cyclic;
	struct hlist_node *tmp;
	uint64_t now, cpu_time;

	/* Prevent recursion */
	if (gd->flags & GD_FLG_CYCLIC_RUNNING)
		return;

	gd->flags |= GD_FLG_CYCLIC_RUNNING;
	hlist_for_each_entry_safe(cyclic, tmp, cyclic_get_list(), list) {
		/*
		 * Check if this cyclic function needs to get called, i.e.
		 * do not call the cyclic func too often
		 */
		now = timer_get_us();
		if (time_after_eq64(now, cyclic->next_call)) {
			/* Call cyclic function and account its cpu-time */
			cyclic->next_call = now + cyclic->delay_us;
			cyclic->func(cyclic->ctx);
			cyclic->run_cnt++;
			cpu_time = timer_get_us() - now;
			cyclic->cpu_time_us += cpu_time;

			/* Check if cpu-time exceeds max allowed time */
			if ((cpu_time > CONFIG_CYCLIC_MAX_CPU_TIME_US) &&
			    (!cyclic->already_warned)) {
				pr_err("cyclic function %s took too long: %lldus vs %dus max\n",
				       cyclic->name, cpu_time,
				       CONFIG_CYCLIC_MAX_CPU_TIME_US);

				/*
				 * Don't disable this function, just warn once
				 * about this exceeding CPU time usage
				 */
				cyclic->already_warned = true;
			}
		}
	}
	gd->flags &= ~GD_FLG_CYCLIC_RUNNING;
}

void schedule(void)
{
	/* The HW watchdog is not integrated into the cyclic IF (yet) */
	if (IS_ENABLED(CONFIG_HW_WATCHDOG))
		hw_watchdog_reset();

	/*
	 * schedule() might get called very early before the cyclic IF is
	 * ready. Make sure to only call cyclic_run() when it's initialized.
	 */
	if (gd)
		cyclic_run();
}

int cyclic_unregister_all(void)
{
	struct cyclic_info *cyclic;
	struct hlist_node *tmp;

	hlist_for_each_entry_safe(cyclic, tmp, cyclic_get_list(), list)
		cyclic_unregister(cyclic);

	return 0;
}