Mirror of https://github.com/AsahiLinux/u-boot, synced 2024-11-28 15:41:40 +00:00
armv8: Always unmask SErrors
The ARMv8 architecture describes the "SError interrupt" as the fourth kind of exception, next to synchronous exceptions, IRQs, and FIQs. Those SErrors signal exceptional conditions from which the system might not easily recover, and are normally generated by the interconnect as a response to some bus error. A typical situation is access to a non-existing memory address or device, but it might be deliberately triggered by a device as well. The SError interrupt replaces the Armv7 asynchronous abort.

Trusted Firmware typically enters U-Boot (BL33) with SErrors masked, and we never enable them. However, any SError condition still triggers the SError interrupt, and this condition stays pending; it just won't be handled. If the Linux kernel later unmasks the "A" bit in PState, it will immediately take the exception, leading to a kernel crash. This leaves many people scratching their heads about the reason, and leads to long debug sessions, possibly looking at the wrong places (the kernel, but not U-Boot).

To avoid this situation, just unmask SErrors early in the ARMv8 boot process, so that the U-Boot exception handler reports them in a timely manner. As SErrors are typically asynchronous, the register dump might not point at the actual culprit, but the report will appear very shortly after the condition.

For those exceptions to be taken, we also need to route them to EL2, if U-Boot is running in this exception level.

This removes the respective code snippet from the Freescale lowlevel routine, as this is now handled in generic ARMv8 code.

Reported-by: Nishanth Menon <nm@ti.com>
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
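In essence the change boils down to two steps. The following is a condensed sketch in AArch64 assembly, not taken verbatim from the hunks below; the exception-level dispatch around it is omitted here:

	/* When running at EL2: route SErrors (asynchronous external
	 * aborts) to EL2 by setting HCR_EL2.AMO, so that the exception
	 * is taken at the level U-Boot executes in.
	 */
	mrs	x1, hcr_el2
	orr	x1, x1, #(1 << 5)	/* HCR_EL2.AMO */
	msr	hcr_el2, x1
	isb

	/* At any exception level: unmask SErrors by clearing the A bit
	 * in PSTATE (DAIF), so that a pending SError is reported by
	 * U-Boot's own exception handler instead of crashing the OS
	 * later on.
	 */
	msr	daifclr, #4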
parent e7588d81cd
commit 6c7691edd5
3 changed files with 4 additions and 9 deletions
@@ -74,15 +74,6 @@ ENDPROC(smp_kick_all_cpus)
 
 ENTRY(lowlevel_init)
 	mov	x29, lr			/* Save LR */
 
-	/* unmask SError and abort */
-	msr	daifclr, #4
-
-	/* Set HCR_EL2[AMO] so SError @EL2 is taken */
-	mrs	x0, hcr_el2
-	orr	x0, x0, #0x20		/* AMO */
-	msr	hcr_el2, x0
-	isb
-
 	switch_el x1, 1f, 100f, 100f	/* skip if not in EL3 */
 1:
@@ -126,6 +126,8 @@ pie_fixup_done:
 	b	0f
 2:	mrs	x1, hcr_el2
 	tbnz	x1, #34, 1f			/* HCR_EL2.E2H */
+	orr	x1, x1, #HCR_EL2_AMO_EL2	/* Route SErrors to EL2 */
+	msr	hcr_el2, x1
 	set_vbar vbar_el2, x0
 	mov	x0, #0x33ff
 	msr	cptr_el2, x0			/* Enable FP/SIMD */
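The two immediates in the hunk above are architectural bit positions in HCR_EL2; the annotation below is an editorial reading and not part of the patch:

	/* HCR_EL2.E2H is bit 34: when set, EL2 runs as a VHE "host" and
	 * the code branches to the EL1-style setup at label 1f instead.
	 * HCR_EL2.AMO is bit 5 (HCR_EL2_AMO_EL2, added in the header
	 * hunk further below): it routes SError interrupts to EL2.
	 */
	tbnz	x1, #34, 1f			/* HCR_EL2.E2H set? take EL1 path */
	orr	x1, x1, #(1 << 5)		/* HCR_EL2.AMO */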
@@ -134,6 +136,7 @@ pie_fixup_done:
 	mov	x0, #3 << 20
 	msr	cpacr_el1, x0			/* Enable FP/SIMD */
 0:
+	msr	daifclr, #0x4			/* Unmask SError interrupts */
 
 #ifdef COUNTER_FREQUENCY
 	branch_if_not_highest_el x0, 4f
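The immediate in the added "msr daifclr, #0x4" instruction selects which PSTATE.DAIF mask bits to clear; the following short reference annotation is not part of the patch:

	/* Bit assignment of the daifset/daifclr immediate:
	 *   D (debug exceptions) = 8, A (SError) = 4,
	 *   I (IRQ)              = 2, F (FIQ)    = 1
	 * #0x4 therefore clears only the SError mask bit and leaves
	 * IRQs and FIQs masked, as they were before.
	 */
	msr	daifclr, #0x4			/* unmask SErrors only */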
@@ -82,6 +82,7 @@
 #define HCR_EL2_RW_AARCH64	(1 << 31) /* EL1 is AArch64 */
 #define HCR_EL2_RW_AARCH32	(0 << 31) /* Lower levels are AArch32 */
 #define HCR_EL2_HCD_DIS		(1 << 29) /* Hypervisor Call disabled */
+#define HCR_EL2_AMO_EL2		(1 << 5)  /* Route SErrors to EL2 */
 
 /*
  * ID_AA64ISAR1_EL1 bits definitions