2018-05-06 21:58:06 +00:00
|
|
|
/* SPDX-License-Identifier: GPL-2.0+ */
|
2003-10-15 23:53:47 +00:00
|
|
|
/*
|
2012-02-14 22:49:29 +00:00
|
|
|
* Copyright 2004, 2007-2012 Freescale Semiconductor, Inc.
|
2003-10-15 23:53:47 +00:00
|
|
|
* Copyright (C) 2003 Motorola,Inc.
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* U-Boot Startup Code for Motorola 85xx PowerPC based Embedded Boards
|
|
|
|
*
|
|
|
|
* The processor starts at 0xfffffffc and the code is first executed in the
|
|
|
|
* last 4K page(0xfffff000-0xffffffff) in flash/rom.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2010-10-26 12:34:52 +00:00
|
|
|
#include <asm-offsets.h>
|
2003-10-15 23:53:47 +00:00
|
|
|
#include <config.h>
|
|
|
|
#include <mpc85xx.h>
|
|
|
|
|
|
|
|
#include <ppc_asm.tmpl>
|
|
|
|
#include <ppc_defs.h>
|
|
|
|
|
|
|
|
#include <asm/cache.h>
|
|
|
|
#include <asm/mmu.h>
|
|
|
|
|
|
|
|
#undef MSR_KERNEL
|
2007-08-14 06:34:21 +00:00
|
|
|
#define MSR_KERNEL ( MSR_ME ) /* Machine Check */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
2014-04-08 13:42:05 +00:00
|
|
|
#define LAW_EN 0x80000000
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
#if defined(CONFIG_NAND_SPL) || \
|
|
|
|
(defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_INIT_MINIMAL))
|
|
|
|
#define MINIMAL_SPL
|
|
|
|
#endif
|
|
|
|
|
2013-06-28 09:58:37 +00:00
|
|
|
#if !defined(CONFIG_SPL) && !defined(CONFIG_SYS_RAMBOOT) && \
|
2019-11-07 16:11:39 +00:00
|
|
|
!defined(CONFIG_NXP_ESBC) && !defined(CONFIG_SRIO_PCIE_BOOT_SLAVE)
|
2012-09-20 21:35:21 +00:00
|
|
|
#define NOR_BOOT
|
|
|
|
#endif
|
|
|
|
|
2003-10-15 23:53:47 +00:00
|
|
|
/*
|
|
|
|
* Set up GOT: Global Offset Table
|
|
|
|
*
|
2010-01-19 13:41:56 +00:00
|
|
|
* Use r12 to access the GOT
|
2003-10-15 23:53:47 +00:00
|
|
|
*/
|
|
|
|
START_GOT
|
|
|
|
GOT_ENTRY(_GOT2_TABLE_)
|
|
|
|
GOT_ENTRY(_FIXUP_TABLE_)
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
#ifndef MINIMAL_SPL
|
2003-10-15 23:53:47 +00:00
|
|
|
GOT_ENTRY(_start_of_vectors)
|
|
|
|
GOT_ENTRY(_end_of_vectors)
|
|
|
|
GOT_ENTRY(transfer_to_handler)
|
ppc/85xx: add boot from NAND/eSDHC/eSPI support
The MPC8536E is capable of booting form NAND/eSDHC/eSPI, this patch
implements these three bootup methods in a unified way - all of these
use the general cpu/mpc85xx/start.S, and load the main image to L2SRAM
which lets us use the SPD to initialize the SDRAM.
For all three bootup methods, the bootup process can be divided into two
stages: the first stage will initialize the corresponding controller,
configure the L2SRAM, then copy the second stage image to L2SRAM and
jump to it. The second stage image is just like the general U-Boot image
to configure all the hardware and boot up to U-Boot command line.
When boot from NAND, the eLBC controller will first load the first stage
image to internal 4K RAM buffer because it's also stored on the NAND
flash. The first stage image, also call 4K NAND loader, will initialize
the L2SRAM, load the second stage image to L2SRAM and jump to it. The 4K
NAND loader's code comes from the corresponding nand_spl directory, along
with the code twisted by CONFIG_NAND_SPL.
When boot from eSDHC/eSPI, there's no such a first stage image because
the CPU ROM code does the same work. It will initialize the L2SRAM
according to the config addr/word pairs on the fixed address and
initialize the eSDHC/eSPI controller, then load the second stage image
to L2SRAM and jump to it.
The macro CONFIG_SYS_RAMBOOT is used to control the code to produce the
second stage image for all different bootup methods. It's set in the
board config file when one of the bootup methods above is selected.
Signed-off-by: Mingkai Hu <Mingkai.hu@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2009-09-11 06:19:10 +00:00
|
|
|
#endif
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
GOT_ENTRY(__init_end)
|
2013-03-14 06:54:53 +00:00
|
|
|
GOT_ENTRY(__bss_end)
|
2003-10-15 23:53:47 +00:00
|
|
|
GOT_ENTRY(__bss_start)
|
|
|
|
END_GOT
|
|
|
|
|
|
|
|
/*
|
|
|
|
* e500 Startup -- after reset only the last 4KB of the effective
|
|
|
|
* address space is mapped in the MMU L2 TLB1 Entry0. The .bootpg
|
|
|
|
* section is located at THIS LAST page and basically does three
|
|
|
|
* things: clear some registers, set up exception tables and
|
|
|
|
* add more TLB entries for 'larger spaces'(e.g. the boot rom) to
|
|
|
|
* continue the boot procedure.
|
|
|
|
|
|
|
|
* Once the boot rom is mapped by TLB entries we can proceed
|
|
|
|
* with normal startup.
|
|
|
|
*
|
|
|
|
*/
|
|
|
|
|
2007-08-14 06:34:21 +00:00
|
|
|
.section .bootpg,"ax"
|
|
|
|
.globl _start_e500
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
_start_e500:
|
2012-04-29 23:56:30 +00:00
|
|
|
/* Enable debug exception */
|
|
|
|
li r1,MSR_DE
|
2021-09-27 15:42:39 +00:00
|
|
|
mtmsr r1
|
2004-06-09 00:34:46 +00:00
|
|
|
|
2014-04-11 15:09:45 +00:00
|
|
|
/*
|
|
|
|
* If we got an ePAPR device tree pointer passed in as r3, we need that
|
|
|
|
* later in cpu_init_early_f(). Save it to a safe register before we
|
|
|
|
* clobber it so that we can fetch it from there later.
|
|
|
|
*/
|
|
|
|
mr r24, r3
|
|
|
|
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
|
|
|
|
mfspr r3,SPRN_SVR
|
|
|
|
rlwinm r3,r3,0,0xff
|
|
|
|
li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV
|
|
|
|
cmpw r3,r4
|
|
|
|
beq 1f
|
|
|
|
|
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
|
|
|
|
li r4,CONFIG_SYS_FSL_ERRATUM_A004510_SVR_REV2
|
|
|
|
cmpw r3,r4
|
|
|
|
beq 1f
|
|
|
|
#endif
|
|
|
|
|
|
|
|
/* Not a supported revision affected by erratum */
|
|
|
|
li r27,0
|
|
|
|
b 2f
|
|
|
|
|
|
|
|
1: li r27,1 /* Remember for later that we have the erratum */
|
|
|
|
/* Erratum says set bits 55:60 to 001001 */
|
|
|
|
msync
|
|
|
|
isync
|
2013-03-25 07:33:10 +00:00
|
|
|
mfspr r3,SPRN_HDBCR0
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
li r4,0x48
|
|
|
|
rlwimi r3,r4,0,0x1f8
|
2013-03-25 07:33:10 +00:00
|
|
|
mtspr SPRN_HDBCR0,r3
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
isync
|
|
|
|
2:
|
|
|
|
#endif
|
2013-08-20 22:09:43 +00:00
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_A005125
|
|
|
|
msync
|
|
|
|
isync
|
|
|
|
mfspr r3, SPRN_HDBCR0
|
|
|
|
oris r3, r3, 0x0080
|
|
|
|
mtspr SPRN_HDBCR0, r3
|
|
|
|
#endif
|
|
|
|
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
|
2019-11-07 16:11:39 +00:00
|
|
|
#if defined(CONFIG_NXP_ESBC) && defined(CONFIG_E500MC) && \
|
2014-03-18 18:10:26 +00:00
|
|
|
!defined(CONFIG_E6500)
|
2010-12-15 17:02:08 +00:00
|
|
|
/* ISBC uses L2 as stack.
|
|
|
|
* Disable L2 cache here so that u-boot can enable it later
|
|
|
|
* as part of its normal flow
|
|
|
|
*/
|
|
|
|
|
|
|
|
/* Check if L2 is enabled */
|
|
|
|
mfspr r3, SPRN_L2CSR0
|
|
|
|
lis r2, L2CSR0_L2E@h
|
|
|
|
ori r2, r2, L2CSR0_L2E@l
|
|
|
|
and. r4, r3, r2
|
|
|
|
beq l2_disabled
|
|
|
|
|
|
|
|
mfspr r3, SPRN_L2CSR0
|
|
|
|
/* Flush L2 cache */
|
|
|
|
lis r2,(L2CSR0_L2FL)@h
|
|
|
|
ori r2, r2, (L2CSR0_L2FL)@l
|
|
|
|
or r3, r2, r3
|
|
|
|
sync
|
|
|
|
isync
|
|
|
|
mtspr SPRN_L2CSR0,r3
|
|
|
|
isync
|
|
|
|
1:
|
|
|
|
mfspr r3, SPRN_L2CSR0
|
|
|
|
and. r1, r3, r2
|
|
|
|
bne 1b
|
|
|
|
|
|
|
|
mfspr r3, SPRN_L2CSR0
|
|
|
|
lis r2, L2CSR0_L2E@h
|
|
|
|
ori r2, r2, L2CSR0_L2E@l
|
|
|
|
andc r4, r3, r2
|
|
|
|
sync
|
|
|
|
isync
|
|
|
|
mtspr SPRN_L2CSR0,r4
|
|
|
|
isync
|
|
|
|
|
|
|
|
l2_disabled:
|
|
|
|
#endif
|
|
|
|
|
2007-08-14 06:34:21 +00:00
|
|
|
/* clear registers/arrays not reset by hardware */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
2007-08-14 06:34:21 +00:00
|
|
|
/* L1 */
|
|
|
|
li r0,2
|
|
|
|
mtspr L1CSR0,r0 /* invalidate d-cache */
|
2008-05-20 14:00:29 +00:00
|
|
|
mtspr L1CSR1,r0 /* invalidate i-cache */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
mfspr r1,DBSR
|
|
|
|
mtspr DBSR,r1 /* Clear all valid bits */
|
|
|
|
|
|
|
|
|
2012-10-08 07:44:07 +00:00
|
|
|
/*
 * create_tlb1_entry - program one MMU TLB1 (variable-size) entry via the
 * MAS registers and commit it with tlbwe.
 *
 * Arguments:
 *   \esel     - TLB1 entry index (MAS0[ESEL])
 *   \ts       - translation space bit (MAS1[TS])
 *   \tsize    - page size code (MAS1[TSIZE])
 *   \epn      - effective page number for MAS2
 *   \wimg     - WIMG attribute bits for MAS2
 *   \rpn      - real page number for MAS3
 *   \perm     - access permission bits for MAS3
 *   \phy_high - upper physical address bits for MAS7 (>32-bit phys)
 *   \scratch  - GPR clobbered while building each MAS value
 *
 * NOTE(review): the first two "1" arguments to FSL_BOOKE_MAS1 below look
 * like Valid and IPROT - confirm against the FSL_BOOKE_MAS1 definition
 * in asm/mmu.h.
 */
.macro create_tlb1_entry esel ts tsize epn wimg rpn perm phy_high scratch
|
|
|
|
/* MAS0: TLBSEL=1 (TLB1), ESEL=\esel */
lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
|
|
|
|
mtspr MAS0, \scratch
|
|
|
|
/* MAS1: entry state plus \ts and \tsize */
lis \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 1, 0, \ts, \tsize)@l
|
|
|
|
mtspr MAS1, \scratch
|
|
|
|
/* MAS2: effective page number and WIMG attributes */
lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
|
|
|
|
mtspr MAS2, \scratch
|
|
|
|
/* MAS3: real page number and permissions */
lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
|
|
|
|
mtspr MAS3, \scratch
|
|
|
|
/* MAS7: physical address bits above 32 (0 for sub-4GB mappings) */
lis \scratch, \phy_high@h
|
|
|
|
ori \scratch, \scratch, \phy_high@l
|
|
|
|
mtspr MAS7, \scratch
|
|
|
|
/* Synchronize, write the TLB entry, then synchronize again */
isync
|
|
|
|
msync
|
|
|
|
tlbwe
|
|
|
|
isync
|
|
|
|
.endm
|
|
|
|
|
|
|
|
/*
 * create_tlb0_entry - program one MMU TLB0 (fixed 4K page array) entry via
 * the MAS registers and commit it with tlbwe.
 *
 * Same argument meanings as create_tlb1_entry, but MAS0 selects TLB0 and
 * the second FSL_BOOKE_MAS1 argument is 0 here (TLB1 uses 1) - presumably
 * IPROT, which TLB0 entries do not get; confirm against asm/mmu.h.
 *
 *   \esel     - TLB0 entry selector (MAS0[ESEL])
 *   \ts       - translation space bit (MAS1[TS])
 *   \tsize    - page size code (MAS1[TSIZE])
 *   \epn      - effective page number for MAS2
 *   \wimg     - WIMG attribute bits for MAS2
 *   \rpn      - real page number for MAS3
 *   \perm     - access permission bits for MAS3
 *   \phy_high - upper physical address bits for MAS7
 *   \scratch  - GPR clobbered while building each MAS value
 */
.macro create_tlb0_entry esel ts tsize epn wimg rpn perm phy_high scratch
|
|
|
|
/* MAS0: TLBSEL=0 (TLB0), ESEL=\esel */
lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
|
|
|
|
mtspr MAS0, \scratch
|
|
|
|
/* MAS1: entry state plus \ts and \tsize */
lis \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS1(1, 0, 0, \ts, \tsize)@l
|
|
|
|
mtspr MAS1, \scratch
|
|
|
|
/* MAS2: effective page number and WIMG attributes */
lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
|
|
|
|
mtspr MAS2, \scratch
|
|
|
|
/* MAS3: real page number and permissions */
lis \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS3(\rpn, 0, \perm)@l
|
|
|
|
mtspr MAS3, \scratch
|
|
|
|
/* MAS7: physical address bits above 32 */
lis \scratch, \phy_high@h
|
|
|
|
ori \scratch, \scratch, \phy_high@l
|
|
|
|
mtspr MAS7, \scratch
|
|
|
|
/* Synchronize, write the TLB entry, then synchronize again */
isync
|
|
|
|
msync
|
|
|
|
tlbwe
|
|
|
|
isync
|
|
|
|
.endm
|
|
|
|
|
|
|
|
/*
 * delete_tlb1_entry - invalidate one TLB1 entry by selecting it in MAS0
 * and writing MAS1 = 0 (clears the entry, including its Valid bit)
 * before committing with tlbwe.
 *
 *   \esel    - TLB1 entry index to invalidate (MAS0[ESEL])
 *   \scratch - GPR clobbered while building the MAS values
 */
.macro delete_tlb1_entry esel scratch
|
|
|
|
/* MAS0: TLBSEL=1 (TLB1), ESEL=\esel */
lis \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS0(1, \esel, 0)@l
|
|
|
|
mtspr MAS0, \scratch
|
|
|
|
/* MAS1 = 0: entry becomes invalid when written */
li \scratch, 0
|
|
|
|
mtspr MAS1, \scratch
|
|
|
|
/* Synchronize, write the (now invalid) entry, synchronize again */
isync
|
|
|
|
msync
|
|
|
|
tlbwe
|
|
|
|
isync
|
|
|
|
.endm
|
|
|
|
|
|
|
|
/*
 * delete_tlb0_entry - invalidate one TLB0 entry. As for delete_tlb1_entry,
 * MAS1 = 0 marks the entry invalid; unlike TLB1, MAS2 (EPN + WIMG) must
 * also be set - presumably because the EPN participates in selecting the
 * TLB0 set being written; confirm against the e500 reference manual.
 *
 *   \esel    - TLB0 entry selector (MAS0[ESEL])
 *   \epn     - effective page number for MAS2
 *   \wimg    - WIMG attribute bits for MAS2
 *   \scratch - GPR clobbered while building the MAS values
 */
.macro delete_tlb0_entry esel epn wimg scratch
|
|
|
|
/* MAS0: TLBSEL=0 (TLB0), ESEL=\esel */
lis \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS0(0, \esel, 0)@l
|
|
|
|
mtspr MAS0, \scratch
|
|
|
|
/* MAS1 = 0: entry becomes invalid when written */
li \scratch, 0
|
|
|
|
mtspr MAS1, \scratch
|
|
|
|
/* MAS2: effective page number and WIMG of the entry to clear */
lis \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@h
|
|
|
|
ori \scratch, \scratch, FSL_BOOKE_MAS2(\epn, \wimg)@l
|
|
|
|
mtspr MAS2, \scratch
|
|
|
|
/* Synchronize, write the (now invalid) entry, synchronize again */
isync
|
|
|
|
msync
|
|
|
|
tlbwe
|
|
|
|
isync
|
|
|
|
.endm
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
/* Interrupt vectors do not fit in minimal SPL. */
|
|
|
|
#if !defined(MINIMAL_SPL)
|
2003-10-15 23:53:47 +00:00
|
|
|
/* Setup interrupt vectors */
|
2022-03-24 21:18:05 +00:00
|
|
|
lis r1,CONFIG_VAL(SYS_MONITOR_BASE)@h
|
2007-08-14 06:34:21 +00:00
|
|
|
mtspr IVPR,r1
|
2003-10-15 23:53:47 +00:00
|
|
|
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,CriticalInput@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR0,r4 /* 0: Critical input */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,MachineCheck@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR1,r4 /* 1: Machine check */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,DataStorage@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR2,r4 /* 2: Data storage */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,InstStorage@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR3,r4 /* 3: Instruction storage */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,ExtInterrupt@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR4,r4 /* 4: External interrupt */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,Alignment@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR5,r4 /* 5: Alignment */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,ProgramCheck@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR6,r4 /* 6: Program check */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,FPUnavailable@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR7,r4 /* 7: floating point unavailable */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,SystemCall@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR8,r4 /* 8: System call */
|
2003-10-15 23:53:47 +00:00
|
|
|
/* 9: Auxiliary processor unavailable(unsupported) */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,Decrementer@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR10,r4 /* 10: Decrementer */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,IntervalTimer@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR11,r4 /* 11: Interval timer */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,WatchdogTimer@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR12,r4 /* 12: Watchdog timer */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,DataTLBError@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR13,r4 /* 13: Data TLB error */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,InstructionTLBError@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR14,r4 /* 14: Instruction TLB error */
|
2015-04-08 01:20:00 +00:00
|
|
|
li r4,DebugBreakpoint@l
|
2012-02-14 22:49:29 +00:00
|
|
|
mtspr IVOR15,r4 /* 15: Debug */
|
2012-02-14 22:50:02 +00:00
|
|
|
#endif
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
/* Clear and set up some registers. */
|
2008-01-17 04:38:34 +00:00
|
|
|
li r0,0x0000
|
2003-10-15 23:53:47 +00:00
|
|
|
lis r1,0xffff
|
|
|
|
mtspr DEC,r0 /* prevent dec exceptions */
|
|
|
|
mttbl r0 /* prevent fit & wdt exceptions */
|
|
|
|
mttbu r0
|
|
|
|
mtspr TSR,r1 /* clear all timer exception status */
|
|
|
|
mtspr TCR,r0 /* disable all */
|
|
|
|
mtspr ESR,r0 /* clear exception syndrome register */
|
|
|
|
mtspr MCSR,r0 /* machine check syndrome register */
|
|
|
|
mtxer r0 /* clear integer exception register */
|
|
|
|
|
2009-08-20 22:45:05 +00:00
|
|
|
#ifdef CONFIG_SYS_BOOK3E_HV
|
|
|
|
mtspr MAS8,r0 /* make sure MAS8 is clear */
|
|
|
|
#endif
|
|
|
|
|
2003-10-15 23:53:47 +00:00
|
|
|
/* Enable Time Base and Select Time Base Clock */
|
2004-07-09 23:27:13 +00:00
|
|
|
lis r0,HID0_EMCP@h /* Enable machine check */
|
2005-07-25 19:05:07 +00:00
|
|
|
#if defined(CONFIG_ENABLE_36BIT_PHYS)
|
2008-01-17 04:38:34 +00:00
|
|
|
ori r0,r0,HID0_ENMAS7@l /* Enable MAS7 */
|
2005-07-25 19:05:07 +00:00
|
|
|
#endif
|
2009-03-19 14:16:10 +00:00
|
|
|
#ifndef CONFIG_E500MC
|
2008-01-17 04:38:34 +00:00
|
|
|
ori r0,r0,HID0_TBEN@l /* Enable Timebase */
|
2009-03-19 14:16:10 +00:00
|
|
|
#endif
|
2003-10-15 23:53:47 +00:00
|
|
|
mtspr HID0,r0
|
|
|
|
|
2016-11-18 20:29:51 +00:00
|
|
|
#if !defined(CONFIG_E500MC) && !defined(CONFIG_ARCH_QEMU_E500)
|
2007-08-14 06:34:21 +00:00
|
|
|
li r0,(HID1_ASTME|HID1_ABE)@l /* Addr streaming & broadcast */
|
2010-03-12 05:15:02 +00:00
|
|
|
mfspr r3,PVR
|
|
|
|
andi. r3,r3, 0xff
|
|
|
|
cmpwi r3,0x50@l /* if we are rev 5.0 or greater set MBDD */
|
|
|
|
blt 1f
|
|
|
|
/* Set MBDD bit also */
|
|
|
|
ori r0, r0, HID1_MBDD@l
|
|
|
|
1:
|
2003-10-15 23:53:47 +00:00
|
|
|
mtspr HID1,r0
|
2008-10-23 06:47:38 +00:00
|
|
|
#endif
|
2003-10-15 23:53:47 +00:00
|
|
|
|
2011-11-22 12:51:15 +00:00
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_CPU_A003999
|
2013-03-25 07:33:10 +00:00
|
|
|
mfspr r3,SPRN_HDBCR1
|
2011-11-22 12:51:15 +00:00
|
|
|
oris r3,r3,0x0100
|
2013-03-25 07:33:10 +00:00
|
|
|
mtspr SPRN_HDBCR1,r3
|
2011-11-22 12:51:15 +00:00
|
|
|
#endif
|
|
|
|
|
2003-10-15 23:53:47 +00:00
|
|
|
/* Enable Branch Prediction */
|
|
|
|
#if defined(CONFIG_BTB)
|
2010-03-29 18:50:31 +00:00
|
|
|
lis r0,BUCSR_ENABLE@h
|
|
|
|
ori r0,r0,BUCSR_ENABLE@l
|
|
|
|
mtspr SPRN_BUCSR,r0
|
2003-10-15 23:53:47 +00:00
|
|
|
#endif
|
|
|
|
|
2008-10-16 13:01:15 +00:00
|
|
|
#if defined(CONFIG_SYS_INIT_DBCR)
|
2003-10-15 23:53:47 +00:00
|
|
|
lis r1,0xffff
|
|
|
|
ori r1,r1,0xffff
|
2004-07-09 23:27:13 +00:00
|
|
|
mtspr DBSR,r1 /* Clear all status bits */
|
2008-10-16 13:01:15 +00:00
|
|
|
lis r0,CONFIG_SYS_INIT_DBCR@h /* DBCR0[IDM] must be set */
|
|
|
|
ori r0,r0,CONFIG_SYS_INIT_DBCR@l
|
2004-07-09 23:27:13 +00:00
|
|
|
mtspr DBCR0,r0
|
2003-10-15 23:53:47 +00:00
|
|
|
#endif
|
|
|
|
|
2011-10-31 18:30:45 +00:00
|
|
|
/*
|
|
|
|
* Search for the TLB that covers the code we're executing, and shrink it
|
|
|
|
* so that it covers only this 4K page. That will ensure that any other
|
|
|
|
* TLB we create won't interfere with it. We assume that the TLB exists,
|
2012-08-20 13:10:08 +00:00
|
|
|
* which is why we don't check the Valid bit of MAS1. We also assume
|
|
|
|
* it is in TLB1.
|
2011-10-31 18:30:45 +00:00
|
|
|
*
|
|
|
|
* This is necessary, for example, when booting from the on-chip ROM,
|
|
|
|
* which (oddly) creates a single 4GB TLB that covers CCSR and DDR.
|
|
|
|
*/
|
|
|
|
bl nexti /* Find our address */
|
|
|
|
nexti: mflr r1 /* R1 = our PC */
|
|
|
|
li r2, 0
|
|
|
|
mtspr MAS6, r2 /* Assume the current PID and AS are 0 */
|
|
|
|
isync
|
|
|
|
msync
|
|
|
|
tlbsx 0, r1 /* This must succeed */
|
|
|
|
|
2012-08-20 13:10:08 +00:00
|
|
|
mfspr r14, MAS0 /* Save ESEL for later */
|
|
|
|
rlwinm r14, r14, 16, 0xfff
|
|
|
|
|
2011-10-31 18:30:45 +00:00
|
|
|
/* Set the size of the TLB to 4KB */
|
|
|
|
mfspr r3, MAS1
|
2013-01-18 15:45:58 +00:00
|
|
|
li r2, 0xF80
|
2011-10-31 18:30:45 +00:00
|
|
|
andc r3, r3, r2 /* Clear the TSIZE bits */
|
|
|
|
ori r3, r3, MAS1_TSIZE(BOOKE_PAGESZ_4K)@l
|
2012-08-20 13:10:08 +00:00
|
|
|
oris r3, r3, MAS1_IPROT@h
|
2011-10-31 18:30:45 +00:00
|
|
|
mtspr MAS1, r3
|
|
|
|
|
|
|
|
/*
|
|
|
|
* Set the base address of the TLB to our PC. We assume that
|
|
|
|
* virtual == physical. We also assume that MAS2_EPN == MAS3_RPN.
|
|
|
|
*/
|
|
|
|
lis r3, MAS2_EPN@h
|
|
|
|
ori r3, r3, MAS2_EPN@l /* R3 = MAS2_EPN */
|
|
|
|
|
|
|
|
and r1, r1, r3 /* Our PC, rounded down to the nearest page */
|
|
|
|
|
|
|
|
mfspr r2, MAS2
|
|
|
|
andc r2, r2, r3
|
|
|
|
or r2, r2, r1
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
|
|
|
|
cmpwi r27,0
|
|
|
|
beq 1f
|
|
|
|
andi. r15, r2, MAS2_I|MAS2_G /* save the old I/G for later */
|
|
|
|
rlwinm r2, r2, 0, ~MAS2_I
|
|
|
|
ori r2, r2, MAS2_G
|
|
|
|
1:
|
|
|
|
#endif
|
2011-10-31 18:30:45 +00:00
|
|
|
mtspr MAS2, r2 /* Set the EPN to our PC base address */
|
|
|
|
|
|
|
|
mfspr r2, MAS3
|
|
|
|
andc r2, r2, r3
|
|
|
|
or r2, r2, r1
|
|
|
|
mtspr MAS3, r2 /* Set the RPN to our PC base address */
|
|
|
|
|
|
|
|
isync
|
|
|
|
msync
|
|
|
|
tlbwe
|
|
|
|
|
2012-08-20 13:10:08 +00:00
|
|
|
/*
|
|
|
|
* Clear out any other TLB entries that may exist, to avoid conflicts.
|
|
|
|
* Our TLB entry is in r14.
|
|
|
|
*/
|
|
|
|
li r0, TLBIVAX_ALL | TLBIVAX_TLB0
|
|
|
|
tlbivax 0, r0
|
|
|
|
tlbsync
|
|
|
|
|
|
|
|
mfspr r4, SPRN_TLB1CFG
|
|
|
|
rlwinm r4, r4, 0, TLBnCFG_NENTRY_MASK
|
|
|
|
|
|
|
|
li r3, 0
|
|
|
|
mtspr MAS1, r3
|
|
|
|
1: cmpw r3, r14
|
|
|
|
rlwinm r5, r3, 16, MAS0_ESEL_MSK
|
|
|
|
addi r3, r3, 1
|
|
|
|
beq 2f /* skip the entry we're executing from */
|
|
|
|
|
|
|
|
oris r5, r5, MAS0_TLBSEL(1)@h
|
|
|
|
mtspr MAS0, r5
|
|
|
|
|
|
|
|
isync
|
|
|
|
tlbwe
|
|
|
|
isync
|
|
|
|
msync
|
|
|
|
|
|
|
|
2: cmpw r3, r4
|
|
|
|
blt 1b
|
|
|
|
|
2014-03-11 18:37:27 +00:00
|
|
|
#if defined(CONFIG_SYS_PPC_E500_DEBUG_TLB) && !defined(MINIMAL_SPL) && \
|
2019-11-07 16:11:39 +00:00
|
|
|
!defined(CONFIG_NXP_ESBC)
|
2012-10-26 00:27:41 +00:00
|
|
|
/*
|
|
|
|
* TLB entry for debugging in AS1
|
|
|
|
* Create temporary TLB entry in AS0 to handle debug exception
|
|
|
|
* As on debug exception MSR is cleared i.e. Address space is changed
|
|
|
|
* to 0. A TLB entry (in AS0) is required to handle debug exception generated
|
|
|
|
* in AS1.
|
|
|
|
*/
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
#ifdef NOR_BOOT
|
2012-10-26 00:27:41 +00:00
|
|
|
/*
|
|
|
|
* TLB entry is created for IVPR + IVOR15 to map on valid OP code address
|
|
|
|
* because flash's virtual address maps to 0xff800000 - 0xffffffff.
|
|
|
|
* and this window is outside of 4K boot window.
|
|
|
|
*/
|
|
|
|
create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
|
|
|
|
0, BOOKE_PAGESZ_4M, \
|
2022-03-24 21:18:05 +00:00
|
|
|
CONFIG_VAL(SYS_MONITOR_BASE) & 0xffc00000, MAS2_I|MAS2_G, \
|
2012-10-26 00:27:41 +00:00
|
|
|
0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
|
|
|
|
0, r6
|
|
|
|
|
|
|
|
#else
|
|
|
|
/*
|
|
|
|
* TLB entry is created for IVPR + IVOR15 to map on valid OP code address
|
|
|
|
* because "nexti" will resize TLB to 4K
|
|
|
|
*/
|
|
|
|
create_tlb1_entry CONFIG_SYS_PPC_E500_DEBUG_TLB, \
|
|
|
|
0, BOOKE_PAGESZ_256K, \
|
2022-03-24 21:18:05 +00:00
|
|
|
CONFIG_VAL(SYS_MONITOR_BASE) & 0xfffc0000, MAS2_I, \
|
|
|
|
CONFIG_VAL(SYS_MONITOR_BASE) & 0xfffc0000, MAS3_SX|MAS3_SW|MAS3_SR, \
|
2012-10-26 00:27:41 +00:00
|
|
|
0, r6
|
|
|
|
#endif
|
|
|
|
#endif
|
|
|
|
|
powerpc/85xx: relocate CCSR before creating the initial RAM area
Before main memory (DDR) is initialized, the on-chip L1 cache is used as a
memory area for the stack and the global data (gd_t) structure. This is
called the initial RAM area, or initram. The L1 cache is locked and the TLBs
point to a non-existent address (so that there's no chance it will overlap
main memory or any device). The L1 cache is also configured not to write
out to memory or the L2 cache, so everything stays in the L1 cache.
One of the things we might do while running out of initram is relocate CCSR.
On reset, CCSR is typically located at some high 32-bit address, like
0xfe000000, and this may not be the best place for CCSR. For example, on
36-bit systems, CCSR is relocated to 0xffe000000, near the top of 36-bit
memory space.
On some future Freescale SOCs, the L1 cache will be forced to write to the
backing store, so we can no longer have the TLBs point to non-existent address.
Instead, we will point the TLBs to an unused area in CCSR. In order for this
technique to work, CCSR needs to be relocated before the initram memory is
enabled.
Unlike the original CCSR relocation code in cpu_init_early_f(), the TLBs
we create now for relocating CCSR are deleted after the relocation is finished.
cpu_init_early_f() will still need to create a TLB for CCSR (at the new
location) for normal U-Boot purposes. This is done to keep the impact to
existing U-Boot code minimal and to better isolate the CCSR relocation code.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2011-08-03 21:30:10 +00:00
|
|
|
/*
|
|
|
|
* Relocate CCSR, if necessary. We relocate CCSR if (obviously) the default
|
|
|
|
* location is not where we want it. This typically happens on a 36-bit
|
|
|
|
* system, where we want to move CCSR to near the top of 36-bit address space.
|
|
|
|
*
|
|
|
|
* To move CCSR, we create two temporary TLBs, one for the old location, and
|
|
|
|
* another for the new location. On CoreNet systems, we also need to create
|
|
|
|
* a special, temporary LAW.
|
|
|
|
*
|
|
|
|
* As a general rule, TLB0 is used for short-term TLBs, and TLB1 is used for
|
|
|
|
* long-term TLBs, so we use TLB0 here.
|
|
|
|
*/
|
|
|
|
#if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS)

/*
 * Relocating CCSR requires both halves of the 36-bit target physical
 * address; fail the build early with a clear message if either is missing.
 */
#if !defined(CONFIG_SYS_CCSRBAR_PHYS_HIGH) || !defined(CONFIG_SYS_CCSRBAR_PHYS_LOW)
#error "CONFIG_SYS_CCSRBAR_PHYS_HIGH and CONFIG_SYS_CCSRBAR_PHYS_LOW must be defined."
#endif
|
|
|
|
|
|
|
|
create_ccsr_new_tlb:
	/*
	 * Create a TLB for the new location of CCSR.  Register R8 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR).
	 *
	 * R9 is loaded here as well: it is reserved for the virtual address
	 * of the window onto the *old* CCSR location (CONFIG_SYS_CCSRBAR +
	 * 0x1000), which is mapped by create_ccsr_old_tlb below.
	 */
	lis	r8, CONFIG_SYS_CCSRBAR@h
	ori	r8, r8, CONFIG_SYS_CCSRBAR@l
	lis	r9, (CONFIG_SYS_CCSRBAR + 0x1000)@h
	ori	r9, r9, (CONFIG_SYS_CCSRBAR + 0x1000)@l
	/* TLB0 entry 0: 4K cache-inhibited, guarded, R/W mapping of the new
	 * physical CCSR location (36-bit PHYS_HIGH:PHYS_LOW). */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3
|
powerpc/85xx: relocate CCSR before creating the initial RAM area
Before main memory (DDR) is initialized, the on-chip L1 cache is used as a
memory area for the stack and the global data (gd_t) structure. This is
called the initial RAM area, or initram. The L1 cache is locked and the TLBs
point to a non-existent address (so that there's no chance it will overlap
main memory or any device). The L1 cache is also configured not to write
out to memory or the L2 cache, so everything stays in the L1 cache.
One of the things we might do while running out of initram is relocate CCSR.
On reset, CCSR is typically located at some high 32-bit address, like
0xfe000000, and this may not be the best place for CCSR. For example, on
36-bit systems, CCSR is relocated to 0xffe000000, near the top of 36-bit
memory space.
On some future Freescale SOCs, the L1 cache will be forced to write to the
backing store, so we can no longer have the TLBs point to non-existent address.
Instead, we will point the TLBs to an unused area in CCSR. In order for this
technique to work, CCSR needs to be relocated before the initram memory is
enabled.
Unlike the original CCSR relocation code in cpu_init_early_f(), the TLBs
we create now for relocating CCSR are deleted after the relocation is finished.
cpu_init_early_f() will still need to create a TLB for CCSR (at the new
location) for normal U-Boot purposes. This is done to keep the impact to
existing U-Boot code minimal and to better isolate the CCSR relocation code.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2011-08-03 21:30:10 +00:00
|
|
|
	/*
	 * Create a TLB for the current location of CCSR.  Register R9 is reserved
	 * for the virtual address of this TLB (CONFIG_SYS_CCSRBAR + 0x1000).
	 */
create_ccsr_old_tlb:
	/* TLB0 entry 1: 4K cache-inhibited, guarded, R/W mapping of the
	 * reset-default CCSR location. */
	create_tlb0_entry 1, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_DEFAULT, MAS3_SW|MAS3_SR, \
		0, r3 /* The default CCSR address is always a 32-bit number */
|
|
|
|
|
powerpc/85xx: relocate CCSR before creating the initial RAM area
Before main memory (DDR) is initialized, the on-chip L1 cache is used as a
memory area for the stack and the global data (gd_t) structure. This is
called the initial RAM area, or initram. The L1 cache is locked and the TLBs
point to a non-existent address (so that there's no chance it will overlap
main memory or any device). The L1 cache is also configured not to write
out to memory or the L2 cache, so everything stays in the L1 cache.
One of the things we might do while running out of initram is relocate CCSR.
On reset, CCSR is typically located at some high 32-bit address, like
0xfe000000, and this may not be the best place for CCSR. For example, on
36-bit systems, CCSR is relocated to 0xffe000000, near the top of 36-bit
memory space.
On some future Freescale SOCs, the L1 cache will be forced to write to the
backing store, so we can no longer have the TLBs point to non-existent address.
Instead, we will point the TLBs to an unused area in CCSR. In order for this
technique to work, CCSR needs to be relocated before the initram memory is
enabled.
Unlike the original CCSR relocation code in cpu_init_early_f(), the TLBs
we create now for relocating CCSR are deleted after the relocation is finished.
cpu_init_early_f() will still need to create a TLB for CCSR (at the new
location) for normal U-Boot purposes. This is done to keep the impact to
existing U-Boot code minimal and to better isolate the CCSR relocation code.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2011-08-03 21:30:10 +00:00
|
|
|
|
2011-10-31 18:30:44 +00:00
|
|
|
	/*
	 * We have a TLB for what we think is the current (old) CCSR.  Let's
	 * verify that, otherwise we won't be able to move it.
	 * CONFIG_SYS_CCSRBAR_DEFAULT is always a 32-bit number, so we only
	 * need to compare the lower 32 bits of CCSRBAR on CoreNet systems.
	 */
verify_old_ccsr:
	lis	r0, CONFIG_SYS_CCSRBAR_DEFAULT@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_DEFAULT@l
#ifdef CONFIG_FSL_CORENET
	lwz	r1, 4(r9)		/* CCSRBARL */
#else
	/* Non-CoreNet parts have a single CCSRBAR holding phys_addr >> 12 */
	lwz	r1, 0(r9)		/* CCSRBAR, shifted right by 12 */
	slwi	r1, r1, 12
#endif

	cmpl	0, r0, r1

	/*
	 * If the value we read from CCSRBARL is not what we expect, then
	 * enter an infinite loop.  This will at least allow a debugger to
	 * halt execution and examine TLBs, etc.  There's no point in going
	 * on.
	 */
infinite_debug_loop:
	bne	infinite_debug_loop
|
|
|
|
|
powerpc/85xx: relocate CCSR before creating the initial RAM area
Before main memory (DDR) is initialized, the on-chip L1 cache is used as a
memory area for the stack and the global data (gd_t) structure. This is
called the initial RAM area, or initram. The L1 cache is locked and the TLBs
point to a non-existent address (so that there's no chance it will overlap
main memory or any device). The L1 cache is also configured not to write
out to memory or the L2 cache, so everything stays in the L1 cache.
One of the things we might do while running out of initram is relocate CCSR.
On reset, CCSR is typically located at some high 32-bit address, like
0xfe000000, and this may not be the best place for CCSR. For example, on
36-bit systems, CCSR is relocated to 0xffe000000, near the top of 36-bit
memory space.
On some future Freescale SOCs, the L1 cache will be forced to write to the
backing store, so we can no longer have the TLBs point to non-existent address.
Instead, we will point the TLBs to an unused area in CCSR. In order for this
technique to work, CCSR needs to be relocated before the initram memory is
enabled.
Unlike the original CCSR relocation code in cpu_init_early_f(), the TLBs
we create now for relocating CCSR are deleted after the relocation is finished.
cpu_init_early_f() will still need to create a TLB for CCSR (at the new
location) for normal U-Boot purposes. This is done to keep the impact to
existing U-Boot code minimal and to better isolate the CCSR relocation code.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2011-08-03 21:30:10 +00:00
|
|
|
#ifdef CONFIG_FSL_CORENET

/* LAW0 registers, accessed through the old-CCSR window held in r9 */
#define CCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_4K	0xb
#define CCSRBAR_LAWAR	(LAW_EN | (0x1e << 20) | LAW_SIZE_4K)
#define CCSRAR_C	0x80000000	/* Commit */

create_temp_law:
	/*
	 * On CoreNet systems, we create the temporary LAW using a special LAW
	 * target ID of 0x1e.  LAWBARH is at offset 0xc00 in CCSR.
	 */
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRBAR_LAWAR@h
	ori	r2, r2, CCSRBAR_LAWAR@l

	/* Program the LAW base first, then enable it via LAWAR after a sync */
	stw	r0, 0xc00(r9)	/* LAWBARH0 */
	stw	r1, 0xc04(r9)	/* LAWBARL0 */
	sync
	stw	r2, 0xc08(r9)	/* LAWAR0 */

	/*
	 * Read back from LAWAR to ensure the update is complete.  e500mc
	 * cores also require an isync.
	 */
	lwz	r0, 0xc08(r9)	/* LAWAR0 */
	isync
|
|
|
|
|
|
|
|
	/*
	 * Read the current CCSRBARH and CCSRBARL using load word instructions.
	 * Follow this with an isync instruction. This forces any outstanding
	 * accesses to configuration space to completion.
	 *
	 * The loaded values are intentionally discarded (both go to r0); only
	 * the ordering side effect of the loads matters here.
	 */
read_old_ccsrbar:
	lwz	r0, 0(r9)	/* CCSRBARH */
	lwz	r0, 4(r9)	/* CCSRBARL */
	isync
|
|
|
|
|
|
|
|
	/*
	 * Write the new values for CCSRBARH and CCSRBARL to their old
	 * locations.  The CCSRBARH has a shadow register. When the CCSRBARH
	 * has a new value written it loads a CCSRBARH shadow register. When
	 * the CCSRBARL is written, the CCSRBARH shadow register contents
	 * along with the CCSRBARL value are loaded into the CCSRBARH and
	 * CCSRBARL registers, respectively.  Follow this with a sync
	 * instruction.
	 */
write_new_ccsrbar:
	lis	r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r0, r0, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	lis	r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@h
	ori	r1, r1, CONFIG_SYS_CCSRBAR_PHYS_LOW@l
	lis	r2, CCSRAR_C@h
	ori	r2, r2, CCSRAR_C@l

	stw	r0, 0(r9)	/* Write to CCSRBARH */
	sync			/* Make sure we write to CCSRBARH first */
	stw	r1, 4(r9)	/* Write to CCSRBARL */
	sync

	/*
	 * Write a 1 to the commit bit (C) of CCSRAR at the old location.
	 * Follow this with a sync instruction.  After this commit, CCSR
	 * responds at the new physical address.
	 */
	stw	r2, 8(r9)
	sync
|
|
|
|
|
|
|
|
	/* Delete the temporary LAW.  Note the accesses now go through r8
	 * (the new CCSR mapping), since the relocation has been committed. */
delete_temp_law:
	li	r1, 0
	/* Disable the LAW first (LAWAR), then clear the base registers */
	stw	r1, 0xc08(r8)
	sync
	stw	r1, 0xc00(r8)
	stw	r1, 0xc04(r8)
	sync
|
|
|
|
|
|
|
|
#else /* #ifdef CONFIG_FSL_CORENET */

write_new_ccsrbar:
	/*
	 * Read the current value of CCSRBAR using a load word instruction
	 * followed by an isync. This forces all accesses to configuration
	 * space to complete.
	 */
	sync
	lwz	r0, 0(r9)
	isync

/* CONFIG_SYS_CCSRBAR_PHYS right shifted by 12 */
#define CCSRBAR_PHYS_RS12 ((CONFIG_SYS_CCSRBAR_PHYS_HIGH << 20) | \
			   (CONFIG_SYS_CCSRBAR_PHYS_LOW >> 12))

	/* Write the new value to CCSRBAR. */
	lis	r0, CCSRBAR_PHYS_RS12@h
	ori	r0, r0, CCSRBAR_PHYS_RS12@l
	stw	r0, 0(r9)
	sync

	/*
	 * The manual says to perform a load of an address that does not
	 * access configuration space or the on-chip SRAM using an existing TLB,
	 * but that doesn't appear to be necessary.  We will do the isync,
	 * though.
	 */
	isync

	/*
	 * Read the contents of CCSRBAR from its new location (via r8),
	 * followed by another isync.
	 */
	lwz	r0, 0(r8)
	isync

#endif	/* #ifdef CONFIG_FSL_CORENET */
|
|
|
|
|
|
|
|
	/* Delete the temporary TLBs created for the CCSR relocation.
	 * cpu_init_early_f() will later create a permanent TLB for the
	 * relocated CCSR. */
delete_temp_tlbs:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR, MAS2_I|MAS2_G, r3
	delete_tlb0_entry 1, CONFIG_SYS_CCSRBAR + 0x1000, MAS2_I|MAS2_G, r3

#endif /* #if (CONFIG_SYS_CCSRBAR_DEFAULT != CONFIG_SYS_CCSRBAR_PHYS) */
|
|
|
|
|
2013-08-29 07:40:38 +00:00
|
|
|
#if defined(CONFIG_SYS_FSL_QORIQ_CHASSIS2) && defined(CONFIG_E6500)
create_ccsr_l2_tlb:
	/*
	 * Create a TLB for the MMR location of CCSR
	 * to access L2CSR0 register
	 */
	create_tlb0_entry 0, \
		0, BOOKE_PAGESZ_4K, \
		CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, \
		CONFIG_SYS_CCSRBAR_PHYS_LOW + 0xC20000, MAS3_SW|MAS3_SR, \
		CONFIG_SYS_CCSRBAR_PHYS_HIGH, r3

enable_l2_cluster_l2:
	/* enable L2 cache */
	lis	r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@h
	ori	r3, r3, (CONFIG_SYS_CCSRBAR + 0xC20000)@l
	li	r4, 33	/* stash id */
	stw	r4, 4(r3)
	/* First flash-invalidate and lock-flash-clear the L2 */
	lis	r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@h
	ori	r4, r4, (L2CSR0_L2FI|L2CSR0_L2LFC)@l
	sync
	stw	r4, 0(r3)	/* invalidate L2 */
	/* Poll till the bits are cleared.  The twi/isync pair forces the
	 * lwz to complete before its result is used (twi with TO=0 never
	 * traps; it only serializes the load). */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0
	isync
	and.	r1, r0, r4
	bne	1b

	/* L2PE must be set before L2 cache is enabled */
	lis	r4, (L2CSR0_L2PE)@h
	ori	r4, r4, (L2CSR0_L2PE)@l
	sync
	stw	r4, 0(r3)	/* enable L2 parity/ECC error checking */
	/* Poll till the bit is set */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0
	isync
	and.	r1, r0, r4
	beq	1b

	/* Finally set L2E (with L2PE kept) and the replacement mode */
	lis	r4, (L2CSR0_L2E|L2CSR0_L2PE)@h
	ori	r4, r4, (L2CSR0_L2REP_MODE)@l
	sync
	stw	r4, 0(r3)	/* enable L2 */
	/* Poll till the bit is set */
1:	sync
	lwz	r0, 0(r3)
	twi	0, r0, 0
	isync
	and.	r1, r0, r4
	beq	1b

delete_ccsr_l2_tlb:
	delete_tlb0_entry 0, CONFIG_SYS_CCSRBAR + 0xC20000, MAS2_I|MAS2_G, r3
#endif
|
|
|
|
|
2013-03-25 07:33:14 +00:00
|
|
|
	/*
	 * Enable the L1. On e6500, this has to be done
	 * after the L2 is up.
	 */

#ifdef CONFIG_SYS_CACHE_STASHING
	/* set stash id to (coreID) * 2 + 32 + L1 CT (0) */
	li	r2,(32 + 0)
	mtspr	L1CSR2,r2
#endif

	/* Enable/invalidate the I-Cache */
	lis	r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r2,r2,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	mtspr	SPRN_L1CSR1,r2
	/* Wait for the flash-invalidate/lock-flash-reset bits to self-clear */
1:
	mfspr	r3,SPRN_L1CSR1
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	mtspr	SPRN_L1CSR1,r3
	isync
	/* Wait until the I-cache reports enabled */
2:
	mfspr	r3,SPRN_L1CSR1
	andi.	r1,r3,L1CSR1_ICE@l
	beq	2b

	/* Enable/invalidate the D-Cache */
	lis	r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@h
	ori	r2,r2,(L1CSR0_DCFI|L1CSR0_DCLFR)@l
	mtspr	SPRN_L1CSR0,r2
	/* Wait for the flash-invalidate/lock-flash-reset bits to self-clear */
1:
	mfspr	r3,SPRN_L1CSR0
	and.	r1,r3,r2
	bne	1b

	lis	r3,(L1CSR0_CPE|L1CSR0_DCE)@h
	ori	r3,r3,(L1CSR0_CPE|L1CSR0_DCE)@l
	mtspr	SPRN_L1CSR0,r3
	isync
	/* Wait until the D-cache reports enabled */
2:
	mfspr	r3,SPRN_L1CSR0
	andi.	r1,r3,L1CSR0_DCE@l
	beq	2b
|
powerpc/fsl-corenet: work around erratum A004510
Erratum A004510 says that under certain load conditions, modified
cache lines can be discarded, causing data corruption.
To work around this, several CCSR and DCSR register updates need to be
made in a careful manner, so that there is no other transaction in
corenet when the update is made.
The update is made from a locked cacheline, with a delay before to flush
any previous activity, and a delay after to flush the CCSR/DCSR update.
We can't use a readback because that would be another corenet
transaction, which is not allowed.
We lock the subsequent cacheline to prevent it from being fetched while
we're executing the previous cacheline. It is filled with nops so that a
branch doesn't cause us to fetch another cacheline.
Ordinarily we are running in a cache-inhibited mapping at this point, so
we temporarily change that. We make it guarded so that we should never
see a speculative load, and we never do an explicit load. Thus, only the
I-cache should ever fill from this mapping, and we flush/unlock it
afterward. Thus we should avoid problems from any potential cache
aliasing between inhibited and non-inhibited mappings.
NOTE that if PAMU is used with this patch, it will need to use a
dedicated LAW as described in the erratum. This is the responsibility
of the OS that sets up PAMU.
Signed-off-by: Scott Wood <scottwood@freescale.com>
Signed-off-by: Andy Fleming <afleming@freescale.com>
2012-08-14 10:14:53 +00:00
|
|
|
#ifdef CONFIG_SYS_FSL_ERRATUM_A004510
#define DCSR_LAWBARH0	(CONFIG_SYS_CCSRBAR + 0x1000)
#define LAW_SIZE_1M	0x13
#define DCSRBAR_LAWAR	(LAW_EN | (0x1d << 20) | LAW_SIZE_1M)

	/* Only one core applies the workaround; r27 != 0 selects it
	 * (set earlier in this file — presumably the boot core; the
	 * setup code is outside this view). */
	cmpwi	r27,0
	beq	9f

	/*
	 * Create a TLB entry for CCSR
	 *
	 * We're executing out of TLB1 entry in r14, and that's the only
	 * TLB entry that exists.  To allocate some TLB entries for our
	 * own use, flip a bit high enough that we won't flip it again
	 * via incrementing.
	 */

	xori	r8, r14, 32
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	/* 16M valid/protected entry mapping CCSR cache-inhibited, guarded */
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_16M)@l
	lis	r7, CONFIG_SYS_CCSRBAR@h
	ori	r7, r7, CONFIG_SYS_CCSRBAR@l
	ori	r2, r7, MAS2_I|MAS2_G
	lis	r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@h
	ori	r3, r3, FSL_BOOKE_MAS3(CONFIG_SYS_CCSRBAR_PHYS_LOW, 0, (MAS3_SW|MAS3_SR))@l
	lis	r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@h
	ori	r4, r4, CONFIG_SYS_CCSRBAR_PHYS_HIGH@l
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync
|
|
|
|
|
|
|
|
	/* Map DCSR temporarily to physical address zero */
	li	r0, 0
	lis	r3, DCSRBAR_LAWAR@h
	ori	r3, r3, DCSRBAR_LAWAR@l

	/* LAW target 0x1d, base 0, size 1M — programmed via CCSR (r7) */
	stw	r0, 0xc00(r7) /* LAWBARH0 */
	stw	r0, 0xc04(r7) /* LAWBARL0 */
	sync
	stw	r3, 0xc08(r7) /* LAWAR0 */

	/* Read back from LAWAR to ensure the update is complete. */
	lwz	r3, 0xc08(r7) /* LAWAR0 */
	isync

	/* Create a TLB entry for DCSR at zero, in the slot after the CCSR
	 * entry (r9 = r8 + 1) */

	addi	r9, r8, 1
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	lis	r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@h
	ori	r1, r1, FSL_BOOKE_MAS1(1, 1, 0, 0, BOOKE_PAGESZ_1M)@l
	li	r6, 0	/* DCSR effective address */
	ori	r2, r6, MAS2_I|MAS2_G
	li	r3, MAS3_SW|MAS3_SR
	li	r4, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r1
	mtspr	MAS2, r2
	mtspr	MAS3, r3
	mtspr	MAS7, r4
	isync
	tlbwe
	isync
	msync
|
|
|
|
|
|
|
|
	/* enable the timebase (needed for the timed delays in
	 * erratum_set_value below) */
#define CTBENR	0xe2084
	li	r3, 1
	addis	r4, r7, CTBENR@ha
	stw	r3, CTBENR@l(r4)
	/* Read back; twi 0 (never traps) + isync serialize the load */
	lwz	r3, CTBENR@l(r4)
	twi	0,r3,0
	isync
|
|
|
|
|
|
|
|
	/* Store \value at CCSR offset \offset via erratum_set_value.
	 * r7 = CCSR base.  Clobbers r3, r4 (plus erratum_set_value's
	 * r5, r11, r12). */
	.macro erratum_set_ccsr offset value
	addis	r3, r7, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm

	/* Same, but relative to the DCSR base in r6. */
	.macro erratum_set_dcsr offset value
	addis	r3, r6, \offset@ha
	lis	r4, \value@h
	addi	r3, r3, \offset@l
	ori	r4, r4, \value@l
	bl	erratum_set_value
	.endm
|
|
|
|
|
|
|
|
	/* The A004510 workaround register writes.  Offsets/values are the
	 * magic sequence specified by the erratum document; each write is
	 * performed from a locked I-cache line by erratum_set_value so no
	 * other CoreNet transaction is in flight. */
	erratum_set_dcsr 0xb0e08 0xe0201800
	erratum_set_dcsr 0xb0e18 0xe0201800
	erratum_set_dcsr 0xb0e38 0xe0400000
	erratum_set_dcsr 0xb0008 0x00900000
	erratum_set_dcsr 0xb0e40 0xe00a0000
	erratum_set_ccsr 0x18600 CONFIG_SYS_FSL_CORENET_SNOOPVEC_COREONLY
#ifdef CONFIG_RAMBOOT_PBL
	erratum_set_ccsr 0x10f00 0x495e5000
#else
	erratum_set_ccsr 0x10f00 0x415e5000
#endif
	erratum_set_ccsr 0x11f00 0x415e5000
|
|
|
|
|
|
|
|
	/* Make temp mapping uncacheable again, if it was initially.
	 * bl/mflr fetches the current PC so tlbsx can find the TLB entry
	 * we are executing from; r15 holds the original MAS2 I/G bits. */
	bl	2f
2:	mflr	r3
	tlbsx	0, r3
	mfspr	r4, MAS2
	rlwimi	r4, r15, 0, MAS2_I
	rlwimi	r4, r15, 0, MAS2_G
	mtspr	MAS2, r4
	isync
	tlbwe
	isync
	msync

	/* Clear the cache: flash-invalidate and lock-flash-reset the
	 * I-cache to drop the lines locked by erratum_set_value */
	lis	r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@h
	ori	r3,r3,(L1CSR1_ICFI|L1CSR1_ICLFR)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
	/* Wait for the self-clearing bits */
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	bne	2b

	/* Re-enable the I-cache (with parity checking) and wait */
	lis	r3,(L1CSR1_CPE|L1CSR1_ICE)@h
	ori	r3,r3,(L1CSR1_CPE|L1CSR1_ICE)@l
	sync
	isync
	mtspr	SPRN_L1CSR1,r3
	isync
2:	sync
	mfspr	r4,SPRN_L1CSR1
	and.	r4,r4,r3
	beq	2b
|
|
|
|
|
|
|
|
	/* Remove temporary mappings: invalidate the DCSR TLB entry (r9) */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r9, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3	/* MAS1.V = 0 invalidates the entry */
	isync
	tlbwe
	isync
	msync

	/* Disable the temporary DCSR LAW and read back to flush */
	li	r3, 0
	stw	r3, 0xc08(r7)	/* LAWAR0 */
	lwz	r3, 0xc08(r7)
	isync

	/* Invalidate the temporary CCSR TLB entry (r8) */
	lis	r0, MAS0_TLBSEL(1)@h
	rlwimi	r0, r8, 16, MAS0_ESEL_MSK
	li	r3, 0
	mtspr	MAS0, r0
	mtspr	MAS1, r3
	isync
	tlbwe
	isync
	msync

	/* Skip over the erratum_set_value subroutine below */
	b	9f
|
|
|
|
|
|
|
|
	/*
	 * Perform one quiesced register write for erratum A004510.
	 * r3 = addr, r4 = value, clobbers r5, r11, r12.
	 *
	 * The store is executed from a locked I-cache line, preceded and
	 * followed by a timebase delay, so that no other CoreNet
	 * transaction is in flight when the write lands.  A second line is
	 * locked (and padded with nops) so no fetch escapes the locked
	 * region via fall-through.
	 */
erratum_set_value:
	/* Lock two cache lines into I-Cache: first clear ICUL (unable-to-lock) */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mflr	r12
	/* bl/mflr: get the address of label 2f (the locked code) into r5 */
	bl	5f
5:	mflr	r5
	addi	r5, r5, 2f - 5b
	icbtls	0, 0, r5	/* lock first 64-byte line */
	addi	r5, r5, 64

	/* If ICUL was set the lock failed; this check hangs here rather
	 * than continuing (ICUL is sampled once — a debugger can inspect) */
	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	icbtls	0, 0, r5	/* lock second 64-byte line */
	addi	r5, r5, 64

	sync
	mfspr	r11, SPRN_L1CSR1
3:	andi.	r11, r11, L1CSR1_ICUL
	bne	3b

	b	2f
	.align	6
	/* Inside a locked cacheline, wait a while, write, then wait a while */
2:	sync

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	stw	r4, 0(r3)	/* the actual erratum register write */

	mfspr	r5, SPRN_TBRL
	addis	r11, r5, 0x10000@h	/* wait 65536 timebase ticks */
4:	mfspr	r5, SPRN_TBRL
	subf.	r5, r5, r11
	bgt	4b

	sync

	/*
	 * Fill out the rest of this cache line and the next with nops,
	 * to ensure that nothing outside the locked area will be
	 * fetched due to a branch.
	 */
	.rept 19
	nop
	.endr

	/* Unlock: clear ICUL state and restore L1CSR1 */
	sync
	mfspr	r11, SPRN_L1CSR1
	rlwinm	r11, r11, 0, ~L1CSR1_ICUL
	sync
	isync
	mtspr	SPRN_L1CSR1, r11
	isync

	mtlr	r12
	blr

9:
#endif
|
|
|
|
|
powerpc/85xx: relocate CCSR before creating the initial RAM area
Before main memory (DDR) is initialized, the on-chip L1 cache is used as a
memory area for the stack and the global data (gd_t) structure. This is
called the initial RAM area, or initram. The L1 cache is locked and the TLBs
point to a non-existent address (so that there's no chance it will overlap
main memory or any device). The L1 cache is also configured not to write
out to memory or the L2 cache, so everything stays in the L1 cache.
One of the things we might do while running out of initram is relocate CCSR.
On reset, CCSR is typically located at some high 32-bit address, like
0xfe000000, and this may not be the best place for CCSR. For example, on
36-bit systems, CCSR is relocated to 0xffe000000, near the top of 36-bit
memory space.
On some future Freescale SOCs, the L1 cache will be forced to write to the
backing store, so we can no longer have the TLBs point to non-existent address.
Instead, we will point the TLBs to an unused area in CCSR. In order for this
technique to work, CCSR needs to be relocated before the initram memory is
enabled.
Unlike the original CCSR relocation code in cpu_init_early_f(), the TLBs
we create now for relocating CCSR are deleted after the relocation is finished.
cpu_init_early_f() will still need to create a TLB for CCSR (at the new
location) for normal U-Boot purposes. This is done to keep the impact to
existing U-Boot code minimal and to better isolate the CCSR relocation code.
Signed-off-by: Timur Tabi <timur@freescale.com>
Signed-off-by: Kumar Gala <galak@kernel.crashing.org>
2011-08-03 21:30:10 +00:00
|
|
|
/*
 * create_init_ram_area: set up the temporary AS=1 translations used before
 * DDR is available, lock L1 dcache lines as initial RAM (stack + gd), then
 * continue execution at _start_cont.  Clobbers r0, r2, r3, r6, r7.
 */
create_init_ram_area:
	lis	r6,FSL_BOOKE_MAS0(1, 15, 0)@h	/* TLB1, entry 15 */
	ori	r6,r6,FSL_BOOKE_MAS0(1, 15, 0)@l

#ifdef NOR_BOOT
	/* create a temp mapping in AS=1 to the 4M boot window */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_4M, \
		CONFIG_VAL(SYS_MONITOR_BASE) & 0xffc00000, MAS2_I|MAS2_G, \
		0xffc00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#elif !defined(CONFIG_SYS_RAMBOOT) && defined(CONFIG_NXP_ESBC)
	/* create a temp mapping in AS = 1 for Flash mapping
	 * created by PBL for ISBC code
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_PBI_FLASH_WINDOW & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

	/*
	 * For Targets without CONFIG_SPL like P3, P5
	 * and for targets with CONFIG_SPL like T1, T2, T4, only for
	 * u-boot-spl i.e. CONFIG_SPL_BUILD
	 */
#elif defined(CONFIG_RAMBOOT_PBL) && defined(CONFIG_NXP_ESBC) && \
	(!defined(CONFIG_SPL) || defined(CONFIG_SPL_BUILD))
	/* create a temp mapping in AS = 1 for mapping CONFIG_VAL(SYS_MONITOR_BASE)
	 * to L3 Address configured by PBL for ISBC code
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_SYS_INIT_L3_ADDR & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6

#else
	/*
	 * create a temp mapping in AS=1 to the 1M CONFIG_VAL(SYS_MONITOR_BASE) space, the main
	 * image has been relocated to CONFIG_VAL(SYS_MONITOR_BASE) on the second stage.
	 */
	create_tlb1_entry 15, \
		1, BOOKE_PAGESZ_1M, \
		CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS2_I|MAS2_G, \
		CONFIG_VAL(SYS_MONITOR_BASE) & 0xfff00000, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* create a temp mapping in AS=1 to the stack */
#if defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW) && \
	defined(CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH)
	/* 36-bit physical address: MAS7 carries the upper bits */
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_LOW, MAS3_SX|MAS3_SW|MAS3_SR, \
		CONFIG_SYS_INIT_RAM_ADDR_PHYS_HIGH, r6

#else
	create_tlb1_entry 14, \
		1, BOOKE_PAGESZ_16K, \
		CONFIG_SYS_INIT_RAM_ADDR, 0, \
		CONFIG_SYS_INIT_RAM_ADDR, MAS3_SX|MAS3_SW|MAS3_SR, \
		0, r6
#endif

	/* Switch to address space 1 (MSR[IS]=MSR[DS]=1) via rfi to switch_as */
	lis	r6,MSR_IS|MSR_DS|MSR_DE@h
	ori	r6,r6,MSR_IS|MSR_DS|MSR_DE@l
	lis	r7,switch_as@h
	ori	r7,r7,switch_as@l

	mtspr	SPRN_SRR0,r7
	mtspr	SPRN_SRR1,r6
	rfi

switch_as:
/* L1 DCache is used for initial RAM */

	/* Allocate Initial RAM in data cache.
	*/
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	mfspr	r2, L1CFG0
	andi.	r2, r2, 0x1ff			/* L1CFG0[CSIZE] = cache size in KB */
	/* cache size * 1024 / (2 * L1 line size) */
	slwi	r2, r2, (10 - 1 - L1_CACHE_SHIFT)
	mtctr	r2				/* loop over half the dcache in lines */
	li	r0,0
1:
	dcbz	r0,r3				/* establish the line without a bus read */
#ifdef CONFIG_E6500	/* Lock/unlock L2 cache long with L1 */
	dcbtls	2, r0, r3
	dcbtls	0, r0, r3
#else
	dcbtls	0, r0, r3			/* lock the line into L1 dcache */
#endif
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b

	/* Jump out the last 4K page and continue to 'normal' start */
#if defined(CONFIG_SYS_RAMBOOT) || defined(CONFIG_SPL)
	/* We assume that we're already running at the address we're linked at */
	b	_start_cont
#else
	/* Calculate absolute address in FLASH and jump there */
	/*--------------------------------------------------------------*/
	lis	r3,CONFIG_VAL(SYS_MONITOR_BASE)@h
	ori	r3,r3,CONFIG_VAL(SYS_MONITOR_BASE)@l
	/*
	 * NOTE(review): "_start_cont - _start_cont" is always 0, i.e. this
	 * assumes _start_cont sits exactly at SYS_MONITOR_BASE.  Confirm
	 * against the linker script; if _start_cont is not the first symbol
	 * in .text this offset is wrong.
	 */
	addi	r3,r3,_start_cont - _start_cont
	mtlr	r3
	blr
#endif
|
2007-08-14 06:34:21 +00:00
|
|
|
|
|
|
|
	.text
	.globl	_start_cont
/*
 * _start_cont: continue startup with a usable initial RAM area.
 * Builds the initial stack (and optional pre-relocation malloc arena) in
 * the init RAM, establishes the GOT pointers, then calls the early C init
 * path.  Does not return: board_init_f() takes over.
 */
_start_cont:
	/* Setup the stack in initial RAM,could be L2-as-SRAM or L1 dcache*/
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r3,r3,((CONFIG_SYS_INIT_SP_OFFSET-16)&~0xf)@l	/* Align to 16 */

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
#if CONFIG_VAL(SYS_MALLOC_F_LEN) + GENERATED_GBL_DATA_SIZE > CONFIG_SYS_INIT_RAM_SIZE
#error "SYS_MALLOC_F_LEN too large to fit into initial RAM."
#endif

	/* Leave 16+ byte for back chain termination and NULL return address */
	subi	r3,r3,((CONFIG_VAL(SYS_MALLOC_F_LEN)+16+15)&~0xf)
#endif

	/* End of RAM */
	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r4,r4,(CONFIG_SYS_INIT_RAM_SIZE)@l

	li	r0,0

	/* Zero init RAM from the top down to the new stack pointer */
1:	subi	r4,r4,4
	stw	r0,0(r4)
	cmplw	r4,r3
	bne	1b

#if CONFIG_VAL(SYS_MALLOC_F_LEN)
	/* Record the pre-relocation malloc base in gd->malloc_base */
	lis	r4,(CONFIG_SYS_INIT_RAM_ADDR)@h
	ori	r4,r4,(CONFIG_SYS_GBL_DATA_OFFSET)@l

	addi	r3,r3,16	/* Pre-relocation malloc area */
	stw	r3,GD_MALLOC_BASE(r4)
	subi	r3,r3,16
#endif
	li	r0,0
	stw	r0,0(r3)	/* Terminate Back Chain */
	stw	r0,+4(r3)	/* NULL return address. */
	mr	r1,r3		/* Transfer to SP(r1) */

	GET_GOT
	/* Needed for -msingle-pic-base */
	bl	_GLOBAL_OFFSET_TABLE_@local-4
	mflr	r30		/* r30 = GOT base used by the C code */

	/* Pass our potential ePAPR device tree pointer to cpu_init_early_f */
	mr	r3, r24

	bl	cpu_init_early_f

	/* switch back to AS = 0 */
	lis	r3,(MSR_CE|MSR_ME|MSR_DE)@h
	ori	r3,r3,(MSR_CE|MSR_ME|MSR_DE)@l
	mtmsr	r3
	isync

	bl	cpu_init_f	/* return boot_flag for calling board_init_f */
	bl	board_init_f
	isync

	/* NOTREACHED - board_init_f() does not return */
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
#ifndef MINIMAL_SPL
|
2003-10-15 23:53:47 +00:00
|
|
|
/*
 * Exception vector table.  Each CRIT_/MCK_/STD_EXCEPTION macro emits a
 * fixed-offset vector stub that saves state and transfers to the named C
 * handler.  Alignment and ProgramCheck have open-coded stubs because they
 * capture extra state (DAR/DSISR) or need a distinct transfer template.
 */
	.globl	_start_of_vectors
_start_of_vectors:

	/* Critical input. */
	/* (handler symbol spelling "CritcalInputException" matches the C side) */
	CRIT_EXCEPTION(0x0100, CriticalInput, CritcalInputException)

	/* Machine check */
	MCK_EXCEPTION(0x200, MachineCheck, MachineCheckException)

	/* Data Storage exception. */
	STD_EXCEPTION(0x0300, DataStorage, UnknownException)

	/* Instruction Storage exception. */
	STD_EXCEPTION(0x0400, InstStorage, UnknownException)

	/* External Interrupt exception. */
	STD_EXCEPTION(0x0500, ExtInterrupt, ExtIntException)

	/* Alignment exception. */
Alignment:
	EXCEPTION_PROLOG(SRR0, SRR1)
	mfspr	r4,DAR			/* faulting data address */
	stw	r4,_DAR(r21)
	mfspr	r5,DSISR		/* fault cause bits */
	stw	r5,_DSISR(r21)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(0x600, Alignment, AlignmentException,
		MSR_KERNEL, COPY_EE)

	/* Program check exception */
ProgramCheck:
	EXCEPTION_PROLOG(SRR0, SRR1)
	addi	r3,r1,STACK_FRAME_OVERHEAD
	EXC_XFER_TEMPLATE(0x700, ProgramCheck, ProgramCheckException,
		MSR_KERNEL, COPY_EE)

	/* No FPU on MPC85xx. This exception is not supposed to happen.
	*/
	STD_EXCEPTION(0x0800, FPUnavailable, UnknownException)
	STD_EXCEPTION(0x0900, SystemCall, UnknownException)
	STD_EXCEPTION(0x0a00, Decrementer, timer_interrupt)
	STD_EXCEPTION(0x0b00, IntervalTimer, UnknownException)
	STD_EXCEPTION(0x0c00, WatchdogTimer, UnknownException)

	STD_EXCEPTION(0x0d00, DataTLBError, UnknownException)
	STD_EXCEPTION(0x0e00, InstructionTLBError, UnknownException)

	CRIT_EXCEPTION(0x0f00, DebugBreakpoint, DebugException )

	.globl	_end_of_vectors
_end_of_vectors:

	. = . + (0x100 - ( . & 0xff ))	/* align for debug */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
	/*
	 * This code finishes saving the registers to the exception frame
	 * and jumps to the appropriate handler for the exception.
	 * Register r21 is pointer into trap frame, r1 has new stack pointer.
	 * r23 is the address of the handler.
	 */
	.globl	transfer_to_handler
transfer_to_handler:
	/* Save the GPRs the prolog did not save (r7-r11, r12-r19, r24-r31) */
	SAVE_GPR(7, r21)
	SAVE_4GPRS(8, r21)
	SAVE_8GPRS(12, r21)
	SAVE_8GPRS(24, r21)

	li	r22,0
	stw	r22,RESULT(r21)
	mtspr	SPRG2,r22		/* r1 is now kernel sp */

	mtctr	r23			/* virtual address of handler */
	mtmsr	r20			/* install handler MSR (set up by prolog) */
	bctrl				/* call handler; falls through to int_return */

int_return:
	mfmsr	r28			/* Disable interrupts */
	li	r4,0
	ori	r4,r4,MSR_EE
	andc	r28,r28,r4		/* clear MSR[EE] */
	SYNC				/* Some chip revs need this... */
	mtmsr	r28
	SYNC
	/* Restore CTR/LR/XER/CR from the trap frame */
	lwz	r2,_CTR(r1)
	lwz	r0,_LINK(r1)
	mtctr	r2
	mtlr	r0
	lwz	r2,_XER(r1)
	lwz	r0,_CCR(r1)
	mtspr	XER,r2
	mtcrf	0xFF,r0
	/* Restore r3-r30 */
	REST_10GPRS(3, r1)
	REST_10GPRS(13, r1)
	REST_8GPRS(23, r1)
	REST_GPR(31, r1)
	lwz	r2,_NIP(r1)		/* Restore environment */
	lwz	r0,_MSR(r1)
	mtspr	SRR0,r2			/* resume PC */
	mtspr	SRR1,r0			/* resume MSR */
	lwz	r0,GPR0(r1)
	lwz	r2,GPR2(r1)
	lwz	r1,GPR1(r1)		/* restore r1 last: frame is gone after this */
	SYNC
	rfi				/* return from exception */
|
|
|
|
|
|
|
|
	/* Cache functions.
	*/
	/*
	 * flush_icache / invalidate_icache: invalidate the entire L1 I-cache
	 * by setting L1CSR1[ICFI].  On e500 the I-cache never holds dirty
	 * data, so "flush" and "invalidate" share one implementation.
	 * Clobbers: r0.
	 */
	.globl	flush_icache
flush_icache:
	.globl	invalidate_icache
invalidate_icache:
	mfspr	r0,L1CSR1
	ori	r0,r0,L1CSR1_ICFI	/* I-cache flash invalidate */
	msync
	isync
	mtspr	L1CSR1,r0
	isync				/* required after L1CSR1 write */
	blr				/* entire I cache */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
2008-09-22 19:11:10 +00:00
|
|
|
	/*
	 * invalidate_dcache: invalidate the entire L1 D-cache by setting
	 * L1CSR0[DCFI].  Any modified lines are discarded, not written back.
	 * Clobbers: r0.
	 */
	.globl	invalidate_dcache
invalidate_dcache:
	mfspr	r0,L1CSR0
	ori	r0,r0,L1CSR0_DCFI	/* D-cache flash invalidate */
	msync
	isync
	mtspr	L1CSR0,r0
	isync				/* required after L1CSR0 write */
	blr
|
|
|
|
|
|
|
|
	/*
	 * icache_enable: invalidate then enable the L1 I-cache
	 * (sets L1CSR1[ICE] and [CPE] - parity enable).
	 * Clobbers: r4, r8 and whatever invalidate_icache clobbers (r0).
	 */
	.globl	icache_enable
icache_enable:
	mflr	r8			/* bl below clobbers LR */
	bl	invalidate_icache
	mtlr	r8
	isync
	mfspr	r4,L1CSR1
	ori	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@l
	oris	r4,r4,(L1CSR1_CPE | L1CSR1_ICE)@h
	mtspr	L1CSR1,r4
	isync
	blr
|
|
|
|
|
|
|
|
	/*
	 * icache_disable: clear L1CSR1[ICE] to disable the L1 I-cache.
	 * Contents are left intact (no invalidate).  Clobbers: r0, r3.
	 */
	.globl	icache_disable
icache_disable:
	mfspr	r0,L1CSR1
	lis	r3,0
	ori	r3,r3,L1CSR1_ICE
	andc	r0,r0,r3		/* clear ICE only */
	mtspr	L1CSR1,r0
	isync
	blr
|
|
|
|
|
|
|
|
	/*
	 * icache_status: return non-zero in r3 iff the L1 I-cache is enabled
	 * (L1CSR1[ICE] set).  Also sets CR0 via the record-form andi.
	 */
	.globl	icache_status
icache_status:
	mfspr	r3,L1CSR1
	andi.	r3,r3,L1CSR1_ICE
	blr
|
|
|
|
|
|
|
|
	/*
	 * dcache_enable: invalidate then enable the L1 D-cache
	 * (sets L1CSR0[DCE] and [CPE] - parity enable).
	 * Clobbers: r0, r8 and whatever invalidate_dcache clobbers.
	 */
	.globl	dcache_enable
dcache_enable:
	mflr	r8			/* bl below clobbers LR */
	bl	invalidate_dcache
	mtlr	r8
	isync
	mfspr	r0,L1CSR0
	ori	r0,r0,(L1CSR0_CPE | L1CSR0_DCE)@l
	oris	r0,r0,(L1CSR0_CPE | L1CSR0_DCE)@h
	msync
	isync
	mtspr	L1CSR0,r0
	isync
	blr
|
|
|
|
|
|
|
|
	/*
	 * dcache_disable: clear L1CSR0[DCE] to disable the L1 D-cache.
	 * NOTE(review): no flush is performed here; callers are expected to
	 * have flushed modified lines first.  Clobbers: r3, r4.
	 */
	.globl	dcache_disable
dcache_disable:
	mfspr	r3,L1CSR0
	lis	r4,0
	ori	r4,r4,L1CSR0_DCE
	andc	r3,r3,r4		/* clear DCE only */
	mtspr	L1CSR0,r3
	isync
	blr
|
|
|
|
|
|
|
|
	/*
	 * dcache_status: return non-zero in r3 iff the L1 D-cache is enabled
	 * (L1CSR0[DCE] set).  Also sets CR0 via the record-form andi.
	 */
	.globl	dcache_status
dcache_status:
	mfspr	r3,L1CSR0
	andi.	r3,r3,L1CSR0_DCE
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 in8 */
/* Description:	 Input 8 bits */
/* In: r3 = address; Out: r3 = zero-extended byte read from it */
/*------------------------------------------------------------------------------- */
	.globl	in8
in8:
	lbz	r3,0x0000(r3)
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 out8 */
/* Description:	 Output 8 bits */
/* In: r3 = address, r4 = value; sync ensures the store reaches the device */
/*------------------------------------------------------------------------------- */
	.globl	out8
out8:
	stb	r4,0x0000(r3)
	sync
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 out16 */
/* Description:	 Output 16 bits */
/* In: r3 = address, r4 = value; sync ensures the store reaches the device */
/*------------------------------------------------------------------------------- */
	.globl	out16
out16:
	sth	r4,0x0000(r3)
	sync
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 out16r */
/* Description:	 Byte reverse and output 16 bits */
/* In: r3 = address, r4 = value (stored byte-swapped, i.e. little-endian) */
/*------------------------------------------------------------------------------- */
	.globl	out16r
out16r:
	sthbrx	r4,r0,r3
	sync
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 out32 */
/* Description:	 Output 32 bits */
/* In: r3 = address, r4 = value; sync ensures the store reaches the device */
/*------------------------------------------------------------------------------- */
	.globl	out32
out32:
	stw	r4,0x0000(r3)
	sync
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 out32r */
/* Description:	 Byte reverse and output 32 bits */
/* In: r3 = address, r4 = value (stored byte-swapped, i.e. little-endian) */
/*------------------------------------------------------------------------------- */
	.globl	out32r
out32r:
	stwbrx	r4,r0,r3
	sync
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 in16 */
/* Description:	 Input 16 bits */
/* In: r3 = address; Out: r3 = zero-extended halfword read from it */
/*------------------------------------------------------------------------------- */
	.globl	in16
in16:
	lhz	r3,0x0000(r3)
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 in16r */
/* Description:	 Input 16 bits and byte reverse */
/* In: r3 = address; Out: r3 = byte-swapped (little-endian) halfword */
/*------------------------------------------------------------------------------- */
	.globl	in16r
in16r:
	lhbrx	r3,r0,r3
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 in32 */
/* Description:	 Input 32 bits */
/* In: r3 = address; Out: r3 = word read from it */
/*------------------------------------------------------------------------------- */
	.globl	in32
in32:
	/* was "lwz 3,0x0000(3)"; use rN names like every other accessor here */
	lwz	r3,0x0000(r3)
	blr
|
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------- */
/* Function:	 in32r */
/* Description:	 Input 32 bits and byte reverse */
/* In: r3 = address; Out: r3 = byte-swapped (little-endian) word */
/*------------------------------------------------------------------------------- */
	.globl	in32r
in32r:
	lwbrx	r3,r0,r3
	blr
|
2012-09-20 21:35:21 +00:00
|
|
|
#endif /* !MINIMAL_SPL */
|
2003-10-15 23:53:47 +00:00
|
|
|
|
|
|
|
/*------------------------------------------------------------------------------*/
|
|
|
|
|
2009-09-11 16:27:00 +00:00
|
|
|
/*
 * void write_tlb(mas0, mas1, mas2, mas3, mas7)
 *
 * Program one TLB entry from the MAS values passed in r3-r7 and commit it
 * with tlbwe.  MAS8 is zeroed first under a hypervisor (Book3E HV) build so
 * stale guest-state bits cannot leak into the new entry.  Clobbers: r3.
 */
	.globl	write_tlb
write_tlb:
	mtspr	MAS0,r3
	mtspr	MAS1,r4
	mtspr	MAS2,r5
	mtspr	MAS3,r6
#ifdef CONFIG_ENABLE_36BIT_PHYS
	mtspr	MAS7,r7			/* upper physical address bits */
#endif
	li	r3,0
#ifdef CONFIG_SYS_BOOK3E_HV
	mtspr	MAS8,r3
#endif
	isync
	tlbwe
	msync				/* order the TLB update vs. storage accesses */
	isync
	blr
|
|
|
|
|
2003-10-15 23:53:47 +00:00
|
|
|
/*
 * void relocate_code(addr_sp, gd, addr_moni)
 *
 * This "function" does not return, instead it continues in RAM
 * after relocating the monitor code.
 *
 * r3 = dest
 * r4 = src
 * r5 = length in bytes
 * r6 = cachelinesize
 * (The comments above describe the registers as used by the copy loop;
 * the C-level arguments arrive as r3 = addr_sp, r4 = gd, r5 = addr_moni.)
 */
	.globl	relocate_code
relocate_code:
	mr	r1,r3		/* Set new stack pointer */
	mr	r9,r4		/* Save copy of Init Data pointer */
	mr	r10,r5		/* Save copy of Destination Address */

	GET_GOT
#ifndef CONFIG_SPL_SKIP_RELOCATE
	mr	r3,r5				/* Destination Address */
	lis	r4,CONFIG_VAL(SYS_MONITOR_BASE)@h	/* Source Address */
	ori	r4,r4,CONFIG_VAL(SYS_MONITOR_BASE)@l
	lwz	r5,GOT(__init_end)
	sub	r5,r5,r4			/* r5 = image length to copy */
	li	r6,CONFIG_SYS_CACHELINE_SIZE	/* Cache Line Size */

	/*
	 * Fix GOT pointer:
	 *
	 * New GOT-PTR = (old GOT-PTR - CONFIG_VAL(SYS_MONITOR_BASE)) + Destination Address
	 *
	 * Offset:
	 */
	sub	r15,r10,r4			/* r15 = relocation offset */

	/* First our own GOT */
	add	r12,r12,r15
	/* then the one used by the C code */
	add	r30,r30,r15

	/*
	 * Now relocate code
	 */
	cmplw	cr1,r3,r4			/* dest vs. src decides copy direction */
	addi	r0,r5,3
	srwi.	r0,r0,2				/* word count, rounded up */
	beq	cr1,4f		/* In place copy is not necessary */
	beq	7f		/* Protect against 0 count */
	mtctr	r0
	bge	cr1,2f				/* dest > src: copy backwards */

	/* Forward copy (dest < src) */
	la	r8,-4(r4)
	la	r7,-4(r3)
1:	lwzu	r0,4(r8)
	stwu	r0,4(r7)
	bdnz	1b
	b	4f

	/* Backward copy (dest > src, possibly overlapping) */
2:	slwi	r0,r0,2
	add	r8,r4,r0
	add	r7,r3,r0
3:	lwzu	r0,-4(r8)
	stwu	r0,-4(r7)
	bdnz	3b

	/*
	 * Now flush the cache: note that we must start from a cache aligned
	 * address. Otherwise we might miss one cache line.
	 */
4:	cmpwi	r6,0
	add	r5,r3,r5			/* r5 = end of destination range */
	beq	7f		/* Always flush prefetch queue in any case */
	subi	r0,r6,1
	andc	r3,r3,r0			/* round dest down to a line boundary */
	mr	r4,r3
5:	dcbst	0,r4				/* write dcache lines back to memory */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	5b
	sync			/* Wait for all dcbst to complete on bus */
	mr	r4,r3
6:	icbi	0,r4				/* invalidate stale icache lines */
	add	r4,r4,r6
	cmplw	r4,r5
	blt	6b
7:	sync			/* Wait for all icbi to complete on bus */
	isync

	/*
	 * We are done. Do not return, instead branch to second part of board
	 * initialization, now running from RAM.
	 */
	/*
	 * NOTE(review): r0 = relocated address of in_ram, assuming the
	 * relocated image starts at _start_cont.  Confirm against the
	 * linker script that _start_cont is the image base.
	 */
	addi	r0,r10,in_ram - _start_cont

	/*
	 * As IVPR is going to point RAM address,
	 * Make sure IVOR15 has valid opcode to support debugger
	 */
	mtspr	IVOR15,r0

	/*
	 * Re-point the IVPR at RAM
	 */
	mtspr	IVPR,r10

	mtlr	r0
	blr				/* NEVER RETURNS! */
#endif
|
2007-08-14 06:34:21 +00:00
|
|
|
	.globl	in_ram
in_ram:

	/*
	 * Relocation Function, r12 points to got2+0x8000
	 *
	 * Adjust got2 pointers, no need to check for 0, this code
	 * already puts a few entries in the table.
	 */
	li	r0,__got2_entries@sectoff@l
	la	r3,GOT(_GOT2_TABLE_)	/* r3 = run-time address of table */
	lwz	r11,GOT(_GOT2_TABLE_)	/* r11 = link-time address of table */
	mtctr	r0
	sub	r11,r3,r11		/* r11 = relocation delta */
	addi	r3,r3,-4
1:	lwzu	r0,4(r3)
	cmpwi	r0,0			/* leave NULL entries untouched */
	beq-	2f
	add	r0,r0,r11
	stw	r0,0(r3)
2:	bdnz	1b

	/*
	 * Now adjust the fixups and the pointers to the fixups
	 * in case we need to move ourselves again.
	 */
	li	r0,__fixup_entries@sectoff@l
	lwz	r3,GOT(_FIXUP_TABLE_)
	cmpwi	r0,0			/* guard against an empty table */
	mtctr	r0
	addi	r3,r3,-4
	beq	4f
3:	lwzu	r4,4(r3)		/* r4 = link-time fixup location */
	lwzux	r0,r4,r11		/* r4 += delta; r0 = value at fixup */
	cmpwi	r0,0			/* NULL target: relocate pointer only */
	add	r0,r0,r11
	stw	r4,0(r3)		/* store relocated fixup pointer */
	beq-	5f
	stw	r0,0(r4)		/* store relocated target value */
5:	bdnz	3b
4:
clear_bss:
	/*
	 * Now clear BSS segment
	 */
	lwz	r3,GOT(__bss_start)
	lwz	r4,GOT(__bss_end)

	cmplw	0,r3,r4			/* empty BSS? then skip the loop */
	beq	6f

	li	r0,0
5:
	stw	r0,0(r3)		/* zero one word per iteration */
	addi	r3,r3,4
	cmplw	0,r3,r4
	blt	5b
6:

	/* Continue U-Boot initialization in C, now running from RAM */
	mr	r3,r9		/* Init Data pointer		*/
	mr	r4,r10		/* Destination Address		*/
	bl	board_init_r
|
|
|
|
|
2012-09-20 21:35:21 +00:00
|
|
|
#ifndef MINIMAL_SPL
	/*
	 * trap_init: point the CPU's interrupt vectors at the relocated
	 * exception handlers.
	 *
	 * In:	 r3 = new exception-vector base (written to IVPR)
	 * Uses: r4 (scratch), r11 (saved LR), r12 (GOT pointer)
	 *
	 * NOTE(review): an older comment here described copying vector
	 * code to low memory via r7/r8/r9; the current code only
	 * rewrites IVPR and IVOR0-15 from handler addresses in the GOT.
	 */
	.globl	trap_init
trap_init:
	mflr	r11			/* save return address */
	bl	_GLOBAL_OFFSET_TABLE_-4	/* word at GOT-4 is a blrl (SVR4
					 * ABI), so this returns here... */
	mflr	r12			/* ...with LR = &GOT; keep in r12 */

	/* Update IVORs as per relocation */
	mtspr	IVPR,r3

	lwz	r4,CriticalInput@got(r12)
	mtspr	IVOR0,r4	/* 0: Critical input */
	lwz	r4,MachineCheck@got(r12)
	mtspr	IVOR1,r4	/* 1: Machine check */
	lwz	r4,DataStorage@got(r12)
	mtspr	IVOR2,r4	/* 2: Data storage */
	lwz	r4,InstStorage@got(r12)
	mtspr	IVOR3,r4	/* 3: Instruction storage */
	lwz	r4,ExtInterrupt@got(r12)
	mtspr	IVOR4,r4	/* 4: External interrupt */
	lwz	r4,Alignment@got(r12)
	mtspr	IVOR5,r4	/* 5: Alignment */
	lwz	r4,ProgramCheck@got(r12)
	mtspr	IVOR6,r4	/* 6: Program check */
	lwz	r4,FPUnavailable@got(r12)
	mtspr	IVOR7,r4	/* 7: floating point unavailable */
	lwz	r4,SystemCall@got(r12)
	mtspr	IVOR8,r4	/* 8: System call */
	/* 9: Auxiliary processor unavailable(unsupported) */
	lwz	r4,Decrementer@got(r12)
	mtspr	IVOR10,r4	/* 10: Decrementer */
	lwz	r4,IntervalTimer@got(r12)
	mtspr	IVOR11,r4	/* 11: Interval timer */
	lwz	r4,WatchdogTimer@got(r12)
	mtspr	IVOR12,r4	/* 12: Watchdog timer */
	lwz	r4,DataTLBError@got(r12)
	mtspr	IVOR13,r4	/* 13: Data TLB error */
	lwz	r4,InstructionTLBError@got(r12)
	mtspr	IVOR14,r4	/* 14: Instruction TLB error */
	lwz	r4,DebugBreakpoint@got(r12)
	mtspr	IVOR15,r4	/* 15: Debug */

	mtlr	r11			/* restore return address */
	blr
|
|
|
|
|
|
|
|
	.globl	unlock_ram_in_cache
unlock_ram_in_cache:
	/* invalidate the INIT_RAM section */
	lis	r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@h
	ori	r3,r3,(CONFIG_SYS_INIT_RAM_ADDR & ~(CONFIG_SYS_CACHELINE_SIZE-1))@l
	mfspr	r4,L1CFG0
	andi.	r4,r4,0x1ff		/* r4 = L1 dcache size in KiB (CSIZE) */
	slwi	r4,r4,(10 - 1 - L1_CACHE_SHIFT)
					/* r4 = line count / 2; the "- 1"
					 * halves it -- appears sized to the
					 * 16 KiB INIT_RAM region (cf. the
					 * four 4 KiB TLB pages below);
					 * TODO confirm intent */
	mtctr	r4
1:	dcbi	r0,r3			/* RA field of 0 encodes EA = r3 */
#ifdef CONFIG_E6500 /* lock/unlock L2 cache along with L1 */
	dcblc	2, r0, r3		/* CT=2: clear lock in L2 */
	dcblc	0, r0, r3		/* CT=0: clear lock in L1 */
#else
	dcblc	r0,r3
#endif
	addi	r3,r3,CONFIG_SYS_CACHELINE_SIZE
	bdnz	1b
	sync

	/* Invalidate the TLB entries for the cache */
	lis	r3,CONFIG_SYS_INIT_RAM_ADDR@h
	ori	r3,r3,CONFIG_SYS_INIT_RAM_ADDR@l
	tlbivax	0,r3			/* four 4 KiB pages cover INIT_RAM */
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	addi	r3,r3,0x1000
	tlbivax	0,r3
	isync
	blr
|
2008-09-22 19:11:10 +00:00
|
|
|
|
|
|
|
	.globl	flush_dcache
flush_dcache:
	/*
	 * Flush the entire L1 data cache to memory: with HID0[DCFA]
	 * (flush assist) set, issue enough sequential loads to cycle
	 * through every line, then dcbf the same range.
	 * Clobbers r3-r9, CTR, CR.
	 * NOTE(review): loads start at address 0 -- assumes low memory
	 * is mapped and safely readable here.
	 */
	mfspr	r3,SPRN_L1CFG0

	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = total number of loads to issue */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	lis	r4,0		/* walk upward from address 0 */
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync			/* order the loads before flushing */
	lis	r4,0
	mtctr	r7

1:	dcbf	0,r4	/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	blr
#endif /* !MINIMAL_SPL */
|