/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * relocate - common relocation function for ARM U-Boot
 *
 * Copyright (c) 2013 Albert ARIBAUD <albert.u.boot@aribaud.net>
 */

#include <asm-offsets.h>
#include <asm/assembler.h>
#include <config.h>
#include <elf.h>
#include <linux/linkage.h>
#ifdef CONFIG_CPU_V7M
#include <asm/armv7m.h>
#endif
/*
 * Default/weak exception vectors relocation routine
 *
 * This routine covers the standard ARM cases: normal (0x00000000),
 * high (0xffff0000) and VBAR. SoCs which do not comply with any of
 * the standard cases must provide their own, strong, version.
 *
 * Inputs:
 *   r9 = gd (global data pointer); gd->relocaddr is the relocated
 *        monitor base where the vector table now resides.
 * Clobbers: r0-r2 always; r3-r8 and r10 on the copy fallback path.
 */

	.section .text.relocate_vectors,"ax",%progbits
	.weak	relocate_vectors

ENTRY(relocate_vectors)

#ifdef CONFIG_CPU_V7M
	/*
	 * On ARMv7-M we only have to write the new vector address
	 * to VTOR register.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	ldr	r1, =V7M_SCB_BASE
	str	r0, [r1, V7M_SCB_VTOR]
#else
#ifdef CONFIG_HAS_VBAR
	/*
	 * If the ARM processor has the security extensions,
	 * use VBAR to relocate the exception vectors.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mcr	p15, 0, r0, c12, c0, 0	/* Set VBAR */
#else
	/*
	 * Copy the relocated exception vectors to the
	 * correct address
	 * CP15 c1 V bit gives us the location of the vectors:
	 * 0x00000000 or 0xFFFF0000.
	 */
	ldr	r0, [r9, #GD_RELOCADDR]	/* r0 = gd->relocaddr */
	mrc	p15, 0, r2, c1, c0, 0	/* V bit (bit[13]) in CP15 c1 */
	ands	r2, r2, #(1 << 13)
	ldreq	r1, =0x00000000		/* If V=0 */
	ldrne	r1, =0xFFFF0000		/* If V=1 */
	/*
	 * Copy 16 words from the relocated image to the vector base:
	 * presumably the 8 vector entries plus the 8 address words they
	 * load through (see vectors.S) — TODO confirm against vectors.S.
	 * r10 stands in for r9 in the transfer lists since r9 holds gd.
	 */
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
	ldmia	r0!, {r2-r8,r10}
	stmia	r1!, {r2-r8,r10}
#endif
#endif
	bx	lr

ENDPROC(relocate_vectors)
|
/*
 * void relocate_code(addr_moni)
 *
 * This function relocates the monitor code: it copies the image from
 * its current (run) location to addr_moni, then patches every
 * R_ARM_RELATIVE entry in .rel.dyn by the link-to-run offset.
 *
 * NOTE:
 * To prevent the code below from containing references with an R_ARM_ABS32
 * relocation record type, we never refer to linker-defined symbols directly.
 * Instead, we declare literals which contain their relative location with
 * respect to relocate_code, and at run time, add relocate_code back to them.
 *
 * Inputs:
 *   r0 = destination address for the relocated monitor (addr_moni)
 * Clobbers: r0-r4, r10-r11, flags.
 */

ENTRY(relocate_code)
	adr	r3, relocate_code	/* r3 <- Run address of relocate_code */
	ldr	r1, _image_copy_start_ofs
	add	r1, r3			/* r1 <- Run &__image_copy_start */
	subs	r4, r0, r1		/* r4 <- Run to copy offset */
	beq	relocate_done		/* skip relocation */
	/*
	 * subs wrote only r4 and the flags, so r1 still holds
	 * Run &__image_copy_start: no need to recompute it here
	 * (the former reload of r1 was redundant and has been dropped).
	 */
	ldr	r2, _image_copy_end_ofs
	add	r2, r3			/* r2 <- Run &__image_copy_end */

copy_loop:
	ldmia	r1!, {r10-r11}		/* copy from source address [r1] */
	stmia	r0!, {r10-r11}		/* copy to target address [r0] */
	cmp	r1, r2			/* until source end address [r2] */
	blo	copy_loop

	/*
	 * fix .rel.dyn relocations
	 */
	ldr	r1, _rel_dyn_start_ofs
	add	r2, r1, r3		/* r2 <- Run &__rel_dyn_start */
	ldr	r1, _rel_dyn_end_ofs
	add	r3, r1, r3		/* r3 <- Run &__rel_dyn_end */
fixloop:
	ldmia	r2!, {r0-r1}		/* (r0,r1) <- (SRC location,fixup) */
	and	r1, r1, #0xff		/* r1 <- relocation type (low byte of r_info) */
	cmp	r1, #R_ARM_RELATIVE
	bne	fixnext			/* only R_ARM_RELATIVE is handled */

	/* relative fix: increase location by offset */
	add	r0, r0, r4		/* r0 <- Run address of word to patch */
	ldr	r1, [r0]
	add	r1, r1, r4		/* adjust stored value by copy offset */
	str	r1, [r0]
fixnext:
	cmp	r2, r3
	blo	fixloop

relocate_done:

#ifdef __XSCALE__
	/*
	 * On xscale, icache must be invalidated and write buffers drained,
	 * even with cache disabled - 4.2.7 of xscale core developer's manual
	 */
	mcr	p15, 0, r0, c7, c7, 0	/* invalidate icache */
	mcr	p15, 0, r0, c7, c10, 4	/* drain write buffer */
#endif

	/* ARMv4- don't know bx lr but the assembler fails to see that */
#ifdef __ARM_ARCH_4__
	mov	pc, lr
#else
	bx	lr
#endif

ENDPROC(relocate_code)
|
/*
 * Offsets of the linker-defined section boundaries relative to
 * relocate_code. Storing offsets instead of the symbols themselves
 * keeps R_ARM_ABS32 relocation records out of this code (see the
 * NOTE above relocate_code); the run-time address of relocate_code
 * is added back before use.
 */
_image_copy_start_ofs:
	.word	__image_copy_start - relocate_code
_image_copy_end_ofs:
	.word	__image_copy_end - relocate_code
_rel_dyn_start_ofs:
	.word	__rel_dyn_start - relocate_code
_rel_dyn_end_ofs:
	.word	__rel_dyn_end - relocate_code
|