/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * (C) Copyright 2013
 * David Feng <fenghua@phytium.com.cn>
 */

#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>

/*************************************************************************
 *
 * Startup Code (reset vector)
 *
 *************************************************************************/

.globl	_start
_start:
#if defined(CONFIG_LINUX_KERNEL_IMAGE_HEADER)
#include <asm/boot0-linux-kernel-header.h>
#elif defined(CONFIG_ENABLE_ARM_SOC_BOOT0_HOOK)
/*
 * Various SoCs need something special and SoC-specific up front in
 * order to boot, allow them to set that in their boot0.h file and then
 * use it here.
 */
#include <asm/arch/boot0.h>
#else
	b	reset
#endif

	.align 3

.globl	_TEXT_BASE
_TEXT_BASE:
	.quad	CONFIG_TEXT_BASE

/*
 * These are defined in the linker script.
 */
.globl	_end_ofs
_end_ofs:
	.quad	_end - _start

.globl	_bss_start_ofs
_bss_start_ofs:
	.quad	__bss_start - _start

.globl	_bss_end_ofs
_bss_end_ofs:
	.quad	__bss_end - _start

reset:
	/* Allow the board to save important registers */
	b	save_boot_params
.globl	save_boot_params_ret
save_boot_params_ret:

#if CONFIG_POSITION_INDEPENDENT && !defined(CONFIG_SPL_BUILD)
	/* Verify that we're 4K aligned. */
	adr	x0, _start
	ands	x0, x0, #0xfff
	b.eq	1f
0:
	/*
	 * FATAL, can't continue.
	 * U-Boot needs to be loaded at a 4K aligned address.
	 *
	 * We use ADRP and ADD to load some symbol addresses during startup.
	 * The ADD uses an absolute (non pc-relative) lo12 relocation
	 * thus requiring 4K alignment.
	 */
	wfi
	b	0b
1:

	/*
	 * Fix .rela.dyn relocations. This allows U-Boot to be loaded to and
	 * executed at a different address than it was linked at.
	 */
pie_fixup:
	adr	x0, _start		/* x0 <- Runtime value of _start */
	ldr	x1, _TEXT_BASE		/* x1 <- Linked value of _start */
	subs	x9, x0, x1		/* x9 <- Run-vs-link offset */
	beq	pie_fixup_done
	adrp	x2, __rel_dyn_start	/* x2 <- Runtime &__rel_dyn_start */
	add	x2, x2, #:lo12:__rel_dyn_start
	adrp	x3, __rel_dyn_end	/* x3 <- Runtime &__rel_dyn_end */
	add	x3, x3, #:lo12:__rel_dyn_end
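	/*
	 * Each .rela.dyn entry is an Elf64_Rela record: an 8-byte offset,
	 * an 8-byte info word and an 8-byte addend. Only R_AARCH64_RELATIVE
	 * (type 1027) entries are patched by the loop below.
	 */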
pie_fix_loop:
	ldp	x0, x1, [x2], #16	/* (x0, x1) <- (Link location, fixup) */
	ldr	x4, [x2], #8		/* x4 <- addend */
	cmp	w1, #1027		/* relative fixup? */
	bne	pie_skip_reloc
	/* relative fix: store addend plus offset at dest location */
	add	x0, x0, x9
	add	x4, x4, x9
	str	x4, [x0]
pie_skip_reloc:
	cmp	x2, x3
	b.lo	pie_fix_loop
pie_fixup_done:
#endif

#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
.macro	set_vbar, regname, reg
	msr	\regname, \reg
.endm
	adr	x0, vectors
#else
.macro	set_vbar, regname, reg
.endm
#endif
	/*
	 * Could be EL3/EL2/EL1, Initial State:
	 * Little Endian, MMU Disabled, i/dCache Disabled
	 */
	switch_el x1, 3f, 2f, 1f
|
armv8: make SPL exception vectors optional
Even though the exception vector table is a fundamental part of the ARM
architecture, U-Boot mostly does not make real use of it, except when
crash dumping. But having it in takes up quite some space, partly due to
the architectural alignment requirement of 2KB. Since we don't take special
care of that, the compiler adds a more or less random amount of padding
space, which increases the image size quite a bit, especially for the SPL.
On a typical Allwinner build this is around 1.5KB of padding, plus 1KB
for the vector table (mostly padding space again), then some extra code
to do the actual handling. This amounts to almost 10% of the maximum image
size, which is quite a lot for a pure debugging feature.
Add a Kconfig symbol to allow the exception vector table to be left out
of the build for the SPL.
For now this is "default y" for everyone, but specific defconfigs,
platforms or .config files can opt out here at will, to mitigate the code
size pressure we see for some SPLs.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
2018-07-24 23:57:01 +00:00
|
|
|
3: set_vbar vbar_el3, x0
|
2014-04-19 01:45:21 +00:00
|
|
|
mrs x0, scr_el3
|
2014-03-14 06:26:27 +00:00
|
|
|
orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
|
|
|
|
msr scr_el3, x0
|
2013-12-14 03:47:35 +00:00
|
|
|
msr cptr_el3, xzr /* Enable FP/SIMD */
|
|
|
|
b 0f
|
2021-02-10 19:14:55 +00:00
|
|
|
2: mrs x1, hcr_el2
|
armv8: always use current exception level for TCR_ELx access
Currently get_tcr() takes an "el" parameter, to select the proper
version of the TCR_ELx system register.
This is problematic in case of the Apple M1, since it runs with
HCR_EL2.E2H fixed to 1, so TCR_EL2 is actually using the TCR_EL1 layout,
and we get the wrong version.
For U-Boot's purposes the only sensible choice here is the current
exception level, and indeed most callers treat it like that, so let's
remove that parameter and read the current EL inside the function.
This allows us to check for the E2H bit, and pretend it's EL1 in this
case.
There are two callers which don't care about the EL, and they pass 0,
which looks wrong, but is irrelevant in these two cases, since we don't
use the return value there. So the change cannot affect those two.
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Reviewed-by: Mark Kettenis <kettenis@openbsd.org>
Tested-by: Mark Kettenis <kettenis@openbsd.org>
2022-06-13 23:11:10 +00:00
|
|
|
tbnz x1, #HCR_EL2_E2H_BIT, 1f /* HCR_EL2.E2H */
|
armv8: Always unmask SErrors
The ARMv8 architecture describes the "SError interrupt" as the fourth
kind of exception, next to synchronous exceptions, IRQs, and FIQs.
Those SErrors signal exceptional conditions from which the system might
not easily recover, and are normally generated by the interconnect as a
response to some bus error. A typical situation is access to a
non-existing memory address or device, but it might be deliberately
triggered by a device as well.
The SError interrupt replaces the Armv7 asynchronous abort.
Trusted Firmware enters U-Boot (BL33) typically with SErrors masked,
and we never enable them. However any SError condition still triggers
the SError interrupt, and this condition stays pending, it just won't be
handled. If now later on the Linux kernel unmasks the "A" bit in PState,
it will immediately take the exception, leading to a kernel crash.
This leaves many people scratching their head about the reason for
this, and leads to long debug sessions, possibly looking at the wrong
places (the kernel, but not U-Boot).
To avoid the situation, just unmask SErrors early in the ARMv8 boot
process, so that the U-Boot exception handlers reports them in a timely
manner. As SErrors are typically asynchronous, the register dump does
not need to point at the actual culprit, but it should happen very
shortly after the condition.
For those exceptions to be taken, we also need to route them to EL2,
if U-Boot is running in this exception level.
This removes the respective code snippet from the Freescale lowlevel
routine, as this is now handled in generic ARMv8 code.
Reported-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
2022-02-11 11:29:35 +00:00
|
|
|
orr x1, x1, #HCR_EL2_AMO_EL2 /* Route SErrors to EL2 */
|
|
|
|
msr hcr_el2, x1
|
2021-02-10 19:14:55 +00:00
|
|
|
set_vbar vbar_el2, x0
|
2013-12-14 03:47:35 +00:00
|
|
|
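	/*
	 * Note: 0x33ff leaves CPTR_EL2.TFP (bit 10) clear, so FP/SIMD
	 * accesses are not trapped to EL2; the low bits written here are
	 * largely RES1 when HCR_EL2.E2H == 0.
	 */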
	mov	x0, #0x33ff
	msr	cptr_el2, x0		/* Enable FP/SIMD */
	b	0f
1:	set_vbar vbar_el1, x0
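	/* CPACR_EL1.FPEN (bits [21:20]) = 0b11: no FP/SIMD traps at EL1/EL0 */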
	mov	x0, #3 << 20
	msr	cpacr_el1, x0		/* Enable FP/SIMD */
0:
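	/* #0x4 clears the PSTATE.A bit, i.e. the SError (asynchronous abort) mask */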
	msr	daifclr, #0x4		/* Unmask SError interrupts */

#if CONFIG_COUNTER_FREQUENCY
	branch_if_not_highest_el x0, 4f
	ldr	x0, =CONFIG_COUNTER_FREQUENCY
	msr	cntfrq_el0, x0		/* Initialize CNTFRQ */
#endif

4:	isb

	/*
	 * Enable SMPEN bit for coherency.
	 * This register is not architectural but at the moment
	 * this bit should be set for A53/A57/A72.
	 */
#ifdef CONFIG_ARMV8_SET_SMPEN
	switch_el x1, 3f, 1f, 1f
3:
	mrs	x0, S3_1_c15_c2_1	/* cpuectlr_el1 */
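	/* 0x40 is the SMPEN bit (bit 6) of CPUECTLR_EL1 on these cores */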
	orr	x0, x0, #0x40
	msr	S3_1_c15_c2_1, x0
	isb
1:
#endif

	/* Apply ARM core specific errata */
	bl	apply_core_errata

	/*
	 * Cache/BPB/TLB Invalidate
	 * i-cache is invalidated before enabled in icache_enable()
	 * tlb is invalidated before mmu is enabled in dcache_enable()
	 * d-cache is invalidated before enabled in dcache_enable()
	 */

	/* Processor specific initialization */
	bl	lowlevel_init

#if defined(CONFIG_ARMV8_SPIN_TABLE) && !defined(CONFIG_SPL_BUILD)
	branch_if_master x0, master_cpu
	b	spin_table_secondary_jump
	/* never return */
#elif defined(CONFIG_ARMV8_MULTIENTRY)
	branch_if_master x0, master_cpu

	/*
	 * Slave CPUs
	 */
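	/* They spin (wfe) until CPU_RELEASE_ADDR holds a non-zero entry address */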
slave_cpu:
	wfe
	ldr	x1, =CPU_RELEASE_ADDR
	ldr	x0, [x1]
	cbz	x0, slave_cpu
	br	x0			/* branch to the given address */
#endif /* CONFIG_ARMV8_MULTIENTRY */

master_cpu:
	msr	SPSel, #1		/* make sure we use SP_ELx */
	bl	_main

/*-----------------------------------------------------------------------*/

WEAK(apply_core_errata)

	mov	x29, lr			/* Save LR */
	/* For now, we support Cortex-A53, Cortex-A57 specific errata */

	/* Check if we are running on a Cortex-A53 core */
	branch_if_a53_core x0, apply_a53_core_errata

	/* Check if we are running on a Cortex-A57 core */
	branch_if_a57_core x0, apply_a57_core_errata
0:
	mov	lr, x29			/* Restore LR */
	ret

apply_a53_core_errata:

#ifdef CONFIG_ARM_ERRATA_855873
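	/*
	 * Only apply the workaround on r0p3 or later: MIDR_EL1 variant
	 * (bits [23:20]) must be 0 and revision (bits [3:0]) at least 3,
	 * otherwise skip it.
	 */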
	mrs	x0, midr_el1
	tst	x0, #(0xf << 20)
	b.ne	0b

	mrs	x0, midr_el1
	and	x0, x0, #0xf
	cmp	x0, #3
	b.lt	0b

	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Enable data cache clean as data cache clean/invalidate */
	orr	x0, x0, #1 << 44
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b	0b

apply_a57_core_errata:

#ifdef CONFIG_ARM_ERRATA_828024
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable non-allocate hint of w-b-n-a memory type */
	orr	x0, x0, #1 << 49
	/* Disable write streaming no L1-allocate threshold */
	orr	x0, x0, #3 << 25
	/* Disable write streaming no-allocate threshold */
	orr	x0, x0, #3 << 27
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_826974
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Disable speculative load execution ahead of a DMB */
	orr	x0, x0, #1 << 59
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833471
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * FPSCR write flush.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 38
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_829520
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/*
	 * Setting the Disable Indirect Predictor bit prevents this
	 * erratum from occurring.
	 * Note that in some cases where a flush is unnecessary this
	 * could impact performance.
	 */
	orr	x0, x0, #1 << 4
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif

#ifdef CONFIG_ARM_ERRATA_833069
	mrs	x0, S3_1_c15_c2_0	/* cpuactlr_el1 */
	/* Clear the 'Enable invalidates of BTB' bit */
	and	x0, x0, #0xE
	msr	S3_1_c15_c2_0, x0	/* cpuactlr_el1 */
	isb
#endif
	b	0b
ENDPROC(apply_core_errata)

/*-----------------------------------------------------------------------*/

WEAK(lowlevel_init)
	mov	x29, lr			/* Save LR */

#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	branch_if_slave x0, 1f
	ldr	x0, =GICD_BASE
	bl	gic_init_secure
1:
#if defined(CONFIG_GICV3)
	ldr	x0, =GICR_BASE
	bl	gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
	ldr	x0, =GICD_BASE
	ldr	x1, =GICC_BASE
	bl	gic_init_secure_percpu
#endif
#endif

#ifdef CONFIG_ARMV8_MULTIENTRY
	branch_if_master x0, 2f

	/*
	 * Slaves should wait for the master to finish clearing the spin
	 * table. This sync prevents slaves from observing an incorrect
	 * spin-table value and jumping to the wrong place.
	 */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
	ldr	x0, =GICC_BASE
#endif
	bl	gic_wait_for_interrupt
#endif

	/*
	 * All slaves will enter EL2 and optionally EL1.
	 */
	adr	x4, lowlevel_in_el2
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el2

lowlevel_in_el2:
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
	adr	x4, lowlevel_in_el1
	ldr	x5, =ES_TO_AARCH64
	bl	armv8_switch_to_el1

lowlevel_in_el1:
#endif

#endif /* CONFIG_ARMV8_MULTIENTRY */

2:
	mov	lr, x29			/* Restore LR */
	ret
ENDPROC(lowlevel_init)

WEAK(smp_kick_all_cpus)
	/* Kick secondary cpus up by SGI 0 interrupt */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
	ldr	x0, =GICD_BASE
	b	gic_kick_secondary_cpus
#endif
	ret
ENDPROC(smp_kick_all_cpus)

/*-----------------------------------------------------------------------*/

ENTRY(c_runtime_cpu_setup)
#if defined(CONFIG_ARMV8_SPL_EXCEPTION_VECTORS) || !defined(CONFIG_SPL_BUILD)
	/* Relocate vBAR */
	adr	x0, vectors
	switch_el x1, 3f, 2f, 1f
3:	msr	vbar_el3, x0
	b	0f
2:	msr	vbar_el2, x0
	b	0f
1:	msr	vbar_el1, x0
0:
#endif

	ret
ENDPROC(c_runtime_cpu_setup)

WEAK(save_boot_params)
	b	save_boot_params_ret	/* back to my caller */
ENDPROC(save_boot_params)