u-boot/arch/arm/cpu/armv8/start.S
Rob Herring 7682a99826 remove unnecessary version.h includes
Various files are needlessly rebuilt every time due to the version and
build time changing. As version.h is not actually needed, remove the
include.

Signed-off-by: Rob Herring <robh@kernel.org>
Cc: Albert Aribaud <albert.u.boot@aribaud.net>
Cc: Stefano Babic <sbabic@denx.de>
Cc: Minkyu Kang <mk7.kang@samsung.com>
Cc: Marek Vasut <marex@denx.de>
Cc: Tom Warren <twarren@nvidia.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Macpaul Lin <macpaul@andestech.com>
Cc: Wolfgang Denk <wd@denx.de>
Cc: York Sun <yorksun@freescale.com>
Cc: Stefan Roese <sr@denx.de>
Cc: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
Cc: Simon Glass <sjg@chromium.org>
Cc: Philippe Reynes <tremyfr@yahoo.fr>
Cc: Eric Jarrige <eric.jarrige@armadeus.org>
Cc: "David Müller" <d.mueller@elsoft.ch>
Cc: Phil Edworthy <phil.edworthy@renesas.com>
Cc: Robert Baldyga <r.baldyga@samsung.com>
Cc: Torsten Koschorrek <koschorrek@synertronixx.de>
Cc: Anatolij Gustschin <agust@denx.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Łukasz Majewski <l.majewski@samsung.com>
2015-03-24 10:50:50 -04:00

/*
* (C) Copyright 2013
* David Feng <fenghua@phytium.com.cn>
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <asm-offsets.h>
#include <config.h>
#include <linux/linkage.h>
#include <asm/macro.h>
#include <asm/armv8/mmu.h>
/*************************************************************************
*
* Startup Code (reset vector)
*
*************************************************************************/
.globl _start
_start:
b reset
.align 3
.globl _TEXT_BASE
_TEXT_BASE:
.quad CONFIG_SYS_TEXT_BASE
/*
* These are defined in the linker script.
*/
.globl _end_ofs
_end_ofs:
.quad _end - _start
.globl _bss_start_ofs
_bss_start_ofs:
.quad __bss_start - _start
.globl _bss_end_ofs
_bss_end_ofs:
.quad __bss_end - _start
reset:
/*
* Could be EL3/EL2/EL1, Initial State:
* Little Endian, MMU Disabled, i/dCache Disabled
*/
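/*
* switch_el (asm/macro.h) reads CurrentEL and branches to the label
* for the exception level we were entered at, so exactly one of the
* blocks below runs. Each installs the exception vectors and disables
* FP/SIMD trapping for that EL; the EL3 path additionally routes
* IRQ/FIQ/EA to EL3, marks lower ELs non-secure via SCR_EL3 and
* programs CNTFRQ_EL0, which is writable only from the highest
* implemented EL.
*/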
adr x0, vectors
switch_el x1, 3f, 2f, 1f
3: msr vbar_el3, x0
mrs x0, scr_el3
orr x0, x0, #0xf /* SCR_EL3.NS|IRQ|FIQ|EA */
msr scr_el3, x0
msr cptr_el3, xzr /* Enable FP/SIMD */
ldr x0, =COUNTER_FREQUENCY
msr cntfrq_el0, x0 /* Initialize CNTFRQ */
b 0f
2: msr vbar_el2, x0
mov x0, #0x33ff
msr cptr_el2, x0 /* Enable FP/SIMD */
b 0f
1: msr vbar_el1, x0
mov x0, #3 << 20
msr cpacr_el1, x0 /* Enable FP/SIMD */
0:
/* Apply ARM core specific errata */
bl apply_core_errata
/*
* Cache/BPB/TLB invalidation
* i-cache is invalidated before being enabled in icache_enable()
* tlb is invalidated before the mmu is enabled in dcache_enable()
* d-cache is invalidated before being enabled in dcache_enable()
*/
/* Processor specific initialization */
bl lowlevel_init
#ifdef CONFIG_ARMV8_MULTIENTRY
branch_if_master x0, x1, master_cpu
/*
* Slave CPUs
*/
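/*
* Spin-table protocol: each secondary core sleeps in wfe and, on every
* wake-up event, re-reads the release address (CPU_RELEASE_ADDR, a
* board/SoC configuration value). Once the master writes a non-zero
* entry point there, the slave branches to it.
*/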
slave_cpu:
wfe
ldr x1, =CPU_RELEASE_ADDR
ldr x0, [x1]
cbz x0, slave_cpu
br x0 /* branch to the given address */
master_cpu:
/* On the master CPU */
#endif /* CONFIG_ARMV8_MULTIENTRY */
bl _main
/*-----------------------------------------------------------------------*/
WEAK(apply_core_errata)
mov x29, lr /* Save LR */
/* For now, we support Cortex-A57 specific errata only */
/* Check if we are running on a Cortex-A57 core */
branch_if_a57_core x0, apply_a57_core_errata
0:
mov lr, x29 /* Restore LR */
ret
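/*
* The A57 errata below are applied as read-modify-write updates of
* CPUACTLR_EL1, an IMPLEMENTATION DEFINED register with no
* architectural name in the assembler, hence the encoded form
* S3_1_c15_c2_0.
*/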
apply_a57_core_errata:
#ifdef CONFIG_ARM_ERRATA_828024
mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
/* Disable non-allocate hint of w-b-n-a memory type */
orr x0, x0, #0x1 << 49
/* Disable write streaming no L1-allocate threshold */
orr x0, x0, #0x3 << 25
/* Disable write streaming no-allocate threshold */
orr x0, x0, #0x3 << 27
msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
#endif
#ifdef CONFIG_ARM_ERRATA_826974
mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
/* Disable speculative load execution ahead of a DMB */
orr x0, x0, #0x1 << 59
msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
#endif
#ifdef CONFIG_ARM_ERRATA_833069
mrs x0, S3_1_c15_c2_0 /* cpuactlr_el1 */
/* Clear the 'Enable invalidates of BTB' bit */
and x0, x0, #0xE
msr S3_1_c15_c2_0, x0 /* cpuactlr_el1 */
#endif
b 0b
ENDPROC(apply_core_errata)
/*-----------------------------------------------------------------------*/
WEAK(lowlevel_init)
mov x29, lr /* Save LR */
#ifndef CONFIG_ARMV8_MULTIENTRY
/*
* For single-entry systems the lowlevel init is very simple.
*/
ldr x0, =GICD_BASE
bl gic_init_secure
#else /* CONFIG_ARMV8_MULTIENTRY is set */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
branch_if_slave x0, 1f
ldr x0, =GICD_BASE
bl gic_init_secure
1:
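/*
* Every CPU (master and slaves) then initializes its own per-CPU GIC
* state: the redistributor on GICv3, or the banked distributor
* registers plus the CPU interface on GICv2.
*/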
#if defined(CONFIG_GICV3)
ldr x0, =GICR_BASE
bl gic_init_secure_percpu
#elif defined(CONFIG_GICV2)
ldr x0, =GICD_BASE
ldr x1, =GICC_BASE
bl gic_init_secure_percpu
#endif
#endif
branch_if_master x0, x1, 2f
/*
* Slaves should wait until the master has cleared the spin table.
* This synchronization prevents slaves from observing a stale spin
* table value and jumping to the wrong place.
*/
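/*
* gic_wait_for_interrupt (arch/arm/lib/gic_64.S) parks the slave,
* typically in wfi, until the master wakes it with SGI 0, e.g. from
* smp_kick_all_cpus() further down in this file.
*/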
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
#ifdef CONFIG_GICV2
ldr x0, =GICC_BASE
#endif
bl gic_wait_for_interrupt
#endif
/*
* All slaves will enter EL2 and optionally EL1.
*/
bl armv8_switch_to_el2
#ifdef CONFIG_ARMV8_SWITCH_TO_EL1
bl armv8_switch_to_el1
#endif
#endif /* CONFIG_ARMV8_MULTIENTRY */
2:
mov lr, x29 /* Restore LR */
ret
ENDPROC(lowlevel_init)
WEAK(smp_kick_all_cpus)
/* Kick secondary CPUs up with an SGI 0 interrupt */
mov x29, lr /* Save LR */
#if defined(CONFIG_GICV2) || defined(CONFIG_GICV3)
ldr x0, =GICD_BASE
bl gic_kick_secondary_cpus
#endif
mov lr, x29 /* Restore LR */
ret
ENDPROC(smp_kick_all_cpus)
/*-----------------------------------------------------------------------*/
ENTRY(c_runtime_cpu_setup)
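/*
* Called from the generic _main flow (crt0_64.S) after U-Boot has
* relocated itself. adr is PC-relative, so this points VBAR at the
* relocated copy of the exception vectors.
*/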
/* Relocate vBAR */
adr x0, vectors
switch_el x1, 3f, 2f, 1f
3: msr vbar_el3, x0
b 0f
2: msr vbar_el2, x0
b 0f
1: msr vbar_el1, x0
0:
ret
ENDPROC(c_runtime_cpu_setup)