mirror of
https://github.com/AsahiLinux/u-boot
synced 2024-12-13 23:02:59 +00:00
46dc542870
Exposing set/way cache maintenance to a virtual machine is unsafe, not
least because the instructions are not permission-checked, but also
because they are not broadcast between CPUs. Consequently, KVM traps and
emulates such maintenance in the host kernel using by-VA operations and
looping over the stage-2 page-tables. However, when running under
protected KVM, these instructions cannot be emulated and will instead
result in an exception being delivered to the guest.

Introduce CONFIG_CMO_BY_VA_ONLY so that virtual platforms can select
this option and perform by-VA cache maintenance instead of using the
set/way instructions.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <willdeacon@google.com>
Signed-off-by: Pierre-Clément Tosi <ptosi@google.com>
[ Paul: pick from the Android tree. Fixup Pierre's commit. And fix some
  checkpatch warnings. Rebased to upstream. ]
Signed-off-by: Ying-Chun Liu (PaulLiu) <paul.liu@linaro.org>
Cc: Tom Rini <trini@konsulko.com>
Link: db5507f47f
Link: 2baf54e743
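For readers unfamiliar with the distinction: "by-VA" maintenance walks an
address range one cache line at a time, while set/way maintenance iterates
over the cache geometry itself. A minimal sketch of a by-VA clean+invalidate
loop on AArch64 is shown below; it is illustrative only, is not part of this
file or of U-Boot's API, and the helper names are invented for the example.

#include <stdint.h>

/* Smallest D-cache line size in bytes; CTR_EL0.DminLine (bits [19:16])
 * is log2 of the line size in 4-byte words.
 */
static inline uint64_t dcache_line_size(void)
{
	uint64_t ctr;

	__asm__ volatile("mrs %0, ctr_el0" : "=r"(ctr));
	return 4UL << ((ctr >> 16) & 0xf);
}

/* Clean+invalidate every line covering [start, end) by virtual address. */
static void clean_inval_dcache_range(uintptr_t start, uintptr_t end)
{
	uint64_t line = dcache_line_size();
	uintptr_t va;

	for (va = start & ~(line - 1); va < end; va += line)
		__asm__ volatile("dc civac, %0" : : "r"(va) : "memory");
	__asm__ volatile("dsb sy" : : : "memory");
}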
97 lines
2 KiB
C
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2008 Texas Instruments
 *
 * (C) Copyright 2002
 * Sysgo Real-Time Solutions, GmbH <www.elinos.com>
 * Marius Groeger <mgroeger@sysgo.de>
 *
 * (C) Copyright 2002
 * Gary Jennejohn, DENX Software Engineering, <garyj@denx.de>
 */

#include <common.h>
#include <command.h>
#include <cpu_func.h>
#include <irq_func.h>
#include <asm/cache.h>
#include <asm/system.h>
#include <asm/secure.h>
#include <linux/compiler.h>

/*
 * sdelay() - simple spin loop.
 *
 * Will delay execution by roughly (@loops * 2) cycles.
 * This is intended for use before timers are accessible.
 *
 * A value of "0" will result in 2^64 loops.
 */
void sdelay(unsigned long loops)
{
	__asm__ volatile ("1:\n" "subs %0, %0, #1\n"
			  "b.ne 1b" : "=r" (loops) : "0"(loops) : "cc");
}
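
/*
 * Usage sketch (the two-cycles-per-iteration figure above is approximate
 * and core-dependent): at a nominal 1 GHz, sdelay(500) spins for roughly
 * 1000 cycles, i.e. on the order of a microsecond, without any timer.
 */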
/* Boards may override this stub to perform board-specific teardown. */
void __weak board_cleanup_before_linux(void) {}

int cleanup_before_linux(void)
{
	/*
	 * This function is called just before we call Linux.
	 * It prepares the processor for Linux:
	 *
	 * disable interrupts and turn off caches, etc.
	 */

	board_cleanup_before_linux();

	disable_interrupts();

	if (IS_ENABLED(CONFIG_CMO_BY_VA_ONLY)) {
		/*
		 * Disable the D-cache; with CMO_BY_VA_ONLY, the flush
		 * performed by dcache_disable() uses by-VA operations
		 * only, so no set/way maintenance is issued.
		 */
		dcache_disable();
	} else {
		/*
		 * Turn off the I-cache and invalidate it.
		 */
		icache_disable();
		invalidate_icache_all();

		/*
		 * Turn off the D-cache.
		 * dcache_disable() in turn flushes the D-cache and
		 * disables the MMU.
		 */
		dcache_disable();
		invalidate_dcache_all();
	}

	return 0;
}

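/*
 * PSCI support: with CONFIG_ARMV8_PSCI, U-Boot itself provides the PSCI
 * implementation. The helpers below copy the secure section to its
 * runtime address and install the EL3 exception vectors.
 */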
#ifdef CONFIG_ARMV8_PSCI
static void relocate_secure_section(void)
{
#ifdef CONFIG_ARMV8_SECURE_BASE
	size_t sz = __secure_end - __secure_start;

	/*
	 * Copy the secure section to its runtime address and make the
	 * new copy visible to instruction fetches.
	 */
	memcpy((void *)CONFIG_ARMV8_SECURE_BASE, __secure_start, sz);
	flush_dcache_range(CONFIG_ARMV8_SECURE_BASE,
			   CONFIG_ARMV8_SECURE_BASE + sz);
	invalidate_icache_all();
#endif
}

void armv8_setup_psci(void)
{
	/* PSCI vectors can only be installed when running at EL3 */
	if (current_el() != 3)
		return;

	relocate_secure_section();
	secure_ram_addr(psci_setup_vectors)();
	secure_ram_addr(psci_arch_init)();
}
#endif
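
/*
 * Illustrative call order (an assumption, simplified): a typical AArch64
 * boot path would use the helpers in this file roughly as follows before
 * handing control to the kernel:
 *
 *	armv8_setup_psci();      - install EL3 vectors (CONFIG_ARMV8_PSCI)
 *	...
 *	cleanup_before_linux();  - quiesce interrupts and caches
 *	...                      - jump to the kernel entry point
 */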