u-boot/arch/arm/include/asm/system.h
Tom Rini a78cd86132 ARM: Rework and correct barrier definitions
While testing booting Linux kernels on Rockchip devices, Ziyuan Xu and
Sandy Patterson discovered that we had multiple, and in some cases
incomplete, isb definitions. This was causing the Linux kernel to fail
to boot.

In order to solve this problem, as well as cover any corner cases we
may also have had, a number of changes are made to consolidate things.
First, <asm/barriers.h> now becomes the single source of the
isb/dsb/dmb definitions. This however introduces another complexity.
Because we need to build SPL for 32-bit Tegra with -march=armv4, we
have to borrow the __LINUX_ARM_ARCH__ logic from the Linux kernel in a
more complete form. Move this from arch/arm/lib/Makefile to
arch/arm/Makefile and add a comment about it. Now that we always know
what the target CPU is capable of, we can always do the correct thing
for the barrier. The final part of this is to be consistent everywhere:
always call isb()/dsb()/dmb(), and never mix the ISB/DSB/DMB macros in
some places with the function names in others.
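
For illustration, the consolidated 32-bit side of <asm/barriers.h>
might look roughly like the sketch below (not the verbatim header; the
pre-ARMv6 fallbacks, which have no dedicated barrier instructions, are
glossed over):

    #if __LINUX_ARM_ARCH__ >= 7
    #define ISB	asm volatile ("isb sy" : : : "memory")
    #define DSB	asm volatile ("dsb sy" : : : "memory")
    #define DMB	asm volatile ("dmb sy" : : : "memory")
    #else
    /* ARMv6: the equivalent CP15 c7 encodings */
    #define ISB	asm volatile ("mcr p15, 0, %0, c7, c5, 4" \
    			      : : "r" (0) : "memory")
    #define DSB	asm volatile ("mcr p15, 0, %0, c7, c10, 4" \
    			      : : "r" (0) : "memory")
    #define DMB	asm volatile ("mcr p15, 0, %0, c7, c10, 5" \
    			      : : "r" (0) : "memory")
    #endif

    #define isb()	ISB
    #define dsb()	DSB
    #define dmb()	DMB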

Reviewed-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Stephen Warren <swarren@nvidia.com>
Acked-by: Ziyuan Xu <xzy.xu@rock-chips.com>
Acked-by: Sandy Patterson <apatterson@sightlogix.com>
Reported-by: Ziyuan Xu <xzy.xu@rock-chips.com>
Reported-by: Sandy Patterson <apatterson@sightlogix.com>
Signed-off-by: Tom Rini <trini@konsulko.com>
2016-08-05 07:23:57 -04:00

#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H
#include <common.h>
#include <linux/compiler.h>
#include <asm/barriers.h>
#ifdef CONFIG_ARM64
/*
 * SCTLR_EL1/SCTLR_EL2/SCTLR_EL3 bits definitions
 */
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_SA (1 << 3) /* Stack Alignment Check Enable */
#define CR_I (1 << 12) /* Icache enable */
#define CR_WXN (1 << 19) /* Write Permission Implies XN */
#define CR_EE (1 << 25) /* Exception (Big) Endian */
#ifndef __ASSEMBLY__
u64 get_page_table_size(void);
#define PGTABLE_SIZE get_page_table_size()
/* 2MB granularity */
#define MMU_SECTION_SHIFT 21
#define MMU_SECTION_SIZE (1 << MMU_SECTION_SHIFT)
/* These constants need to be synced to the MT_ types in asm/armv8/mmu.h */
enum dcache_option {
	DCACHE_OFF = 0 << 2,
	DCACHE_WRITETHROUGH = 3 << 2,
	DCACHE_WRITEBACK = 4 << 2,
	DCACHE_WRITEALLOC = 4 << 2,
};
#define wfi()				\
	({asm volatile(			\
	"wfi" : : : "memory");		\
	})
static inline unsigned int current_el(void)
{
	unsigned int el;

	asm volatile("mrs %0, CurrentEL" : "=r" (el) : : "cc");
	return el >> 2;
}
static inline unsigned int get_sctlr(void)
{
	unsigned int el, val;

	el = current_el();
	if (el == 1)
		asm volatile("mrs %0, sctlr_el1" : "=r" (val) : : "cc");
	else if (el == 2)
		asm volatile("mrs %0, sctlr_el2" : "=r" (val) : : "cc");
	else
		asm volatile("mrs %0, sctlr_el3" : "=r" (val) : : "cc");

	return val;
}
static inline void set_sctlr(unsigned int val)
{
	unsigned int el;

	el = current_el();
	if (el == 1)
		asm volatile("msr sctlr_el1, %0" : : "r" (val) : "cc");
	else if (el == 2)
		asm volatile("msr sctlr_el2, %0" : : "r" (val) : "cc");
	else
		asm volatile("msr sctlr_el3, %0" : : "r" (val) : "cc");

	asm volatile("isb");
}
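
/*
 * Illustrative usage (a sketch, not part of the original header): callers
 * combine get_sctlr()/set_sctlr() to flip SCTLR bits at whatever exception
 * level U-Boot happens to be running in, e.g.:
 *
 *	set_sctlr(get_sctlr() | CR_I);			- icache on
 *	set_sctlr(get_sctlr() & ~(CR_C | CR_M));	- dcache and MMU off
 *
 * set_sctlr() already ends with an isb, so the new setting is in effect
 * by the time it returns.
 */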
static inline unsigned long read_mpidr(void)
{
	unsigned long val;

	asm volatile("mrs %0, mpidr_el1" : "=r" (val));

	return val;
}
#define BSP_COREID 0
void __asm_flush_dcache_all(void);
void __asm_invalidate_dcache_all(void);
void __asm_flush_dcache_range(u64 start, u64 end);
void __asm_invalidate_tlb_all(void);
void __asm_invalidate_icache_all(void);
int __asm_flush_l3_cache(void);
void __asm_switch_ttbr(u64 new_ttbr);
void armv8_switch_to_el2(void);
void armv8_switch_to_el1(void);
void gic_init(void);
void gic_send_sgi(unsigned long sgino);
void wait_for_wakeup(void);
void protect_secure_region(void);
void smp_kick_all_cpus(void);
void flush_l3_cache(void);
/*
 * Issue a hypervisor call in accordance with ARM "SMC Calling convention",
 * DEN0028A
 *
 * @args: input and output arguments
 */
void hvc_call(struct pt_regs *args);
/*
 * Issue a secure monitor call in accordance with ARM "SMC Calling convention",
 * DEN0028A
 *
 * @args: input and output arguments
 */
void smc_call(struct pt_regs *args);
void __noreturn psci_system_reset(bool smc);
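
/*
 * Illustrative sketch (not part of the original header): issuing the
 * PSCI_VERSION query (function ID 0x84000000 per the SMC Calling
 * Convention) through smc_call(), assuming struct pt_regs from
 * asm/ptrace.h exposes the general-purpose registers as regs[]:
 *
 *	struct pt_regs regs = { 0 };
 *
 *	regs.regs[0] = 0x84000000;	- PSCI_VERSION (SMC32)
 *	smc_call(&regs);
 *	- on return, regs.regs[0] holds (major << 16) | minor
 */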
#endif /* __ASSEMBLY__ */
#else /* CONFIG_ARM64 */
#ifdef __KERNEL__
#define CPU_ARCH_UNKNOWN 0
#define CPU_ARCH_ARMv3 1
#define CPU_ARCH_ARMv4 2
#define CPU_ARCH_ARMv4T 3
#define CPU_ARCH_ARMv5 4
#define CPU_ARCH_ARMv5T 5
#define CPU_ARCH_ARMv5TE 6
#define CPU_ARCH_ARMv5TEJ 7
#define CPU_ARCH_ARMv6 8
#define CPU_ARCH_ARMv7 9
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M (1 << 0) /* MMU enable */
#define CR_A (1 << 1) /* Alignment abort enable */
#define CR_C (1 << 2) /* Dcache enable */
#define CR_W (1 << 3) /* Write buffer enable */
#define CR_P (1 << 4) /* 32-bit exception handler */
#define CR_D (1 << 5) /* 32-bit data address range */
#define CR_L (1 << 6) /* Implementation defined */
#define CR_B (1 << 7) /* Big endian */
#define CR_S (1 << 8) /* System MMU protection */
#define CR_R (1 << 9) /* ROM MMU protection */
#define CR_F (1 << 10) /* Implementation defined */
#define CR_Z (1 << 11) /* Implementation defined */
#define CR_I (1 << 12) /* Icache enable */
#define CR_V (1 << 13) /* Vectors relocated to 0xffff0000 */
#define CR_RR (1 << 14) /* Round Robin cache replacement */
#define CR_L4 (1 << 15) /* LDR pc can set T bit */
#define CR_DT (1 << 16)
#define CR_IT (1 << 18)
#define CR_ST (1 << 19)
#define CR_FI (1 << 21) /* Fast interrupt (lower latency mode) */
#define CR_U (1 << 22) /* Unaligned access operation */
#define CR_XP (1 << 23) /* Extended page tables */
#define CR_VE (1 << 24) /* Vectored interrupts */
#define CR_EE (1 << 25) /* Exception (Big) Endian */
#define CR_TRE (1 << 28) /* TEX remap enable */
#define CR_AFE (1 << 29) /* Access flag enable */
#define CR_TE (1 << 30) /* Thumb exception enable */
#if defined(CONFIG_ARMV7_LPAE) && !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE (4096 * 5)
#elif !defined(PGTABLE_SIZE)
#define PGTABLE_SIZE (4096 * 4)
#endif
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked for in some inline assembly sequences. Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y) ".ifnc " x "," y " ; .err ; .endif\n\t"
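
/*
 * Illustrative usage (a sketch): pin operands to specific registers and
 * let __asmeq() verify that the compiler honoured the allocation:
 *
 *	register unsigned long r0 asm("r0") = function_id;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"smc	#0"
 *		: "+r" (r0) : : "memory");
 */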
#ifndef __ASSEMBLY__
/**
 * save_boot_params() - Save boot parameters before starting reset sequence
 *
 * If you provide this function it will be called as soon as U-Boot starts,
 * both for SPL and U-Boot proper.
 *
 * All registers are unchanged from U-Boot entry. No registers need be
 * preserved.
 *
 * This is not a normal C function. There is no stack. Return by branching to
 * save_boot_params_ret.
 *
 * void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3);
 */
/**
 * save_boot_params_ret() - Return from save_boot_params()
 *
 * If you provide save_boot_params(), then you should jump back to this
 * function when done. Try to preserve all registers.
 *
 * If your implementation of save_boot_params() is in C then it is acceptable
 * to simply call save_boot_params_ret() at the end of your function. Since
 * there is no link register set up, you cannot just exit the function. U-Boot
 * will return to the (uninitialised) value of lr, and likely crash/hang.
 *
 * If your implementation of save_boot_params() is in assembler then you
 * should use 'b' or 'bx' to return to save_boot_params_ret.
 */
void save_boot_params_ret(void);
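
/*
 * Hypothetical C implementation in board code, following the notes above
 * (saved_boot_r2 is an invented example variable):
 *
 *	void save_boot_params(u32 r0, u32 r1, u32 r2, u32 r3)
 *	{
 *		saved_boot_r2 = r2;
 *		save_boot_params_ret();
 *	}
 */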
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
#ifdef __ARM_ARCH_7A__
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#else
#define wfi()
#endif
static inline unsigned long get_cpsr(void)
{
	unsigned long cpsr;

	asm volatile("mrs %0, cpsr" : "=r"(cpsr): );
	return cpsr;
}
static inline int is_hyp(void)
{
#ifdef CONFIG_ARMV7_LPAE
	/* HYP mode requires LPAE ... */
	return ((get_cpsr() & 0x1f) == 0x1a);
#else
	/* ... so without LPAE support we can optimize all hyp code away */
	return 0;
#endif
}
static inline unsigned int get_cr(void)
{
	unsigned int val;

	if (is_hyp())
		asm volatile("mrc p15, 4, %0, c1, c0, 0	@ get CR" : "=r" (val)
						  :
						  : "cc");
	else
		asm volatile("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val)
						  :
						  : "cc");
	return val;
}
static inline void set_cr(unsigned int val)
{
	if (is_hyp())
		asm volatile("mcr p15, 4, %0, c1, c0, 0	@ set CR" :
						  : "r" (val)
						  : "cc");
	else
		asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR" :
						  : "r" (val)
						  : "cc");
	isb();
}
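
/*
 * Illustrative usage (a sketch, not part of the original header):
 * get_cr()/set_cr() select the HSCTLR or SCTLR encoding via is_hyp(),
 * so callers only deal in CR_* bits, e.g.:
 *
 *	set_cr(get_cr() & ~CR_M);	- MMU off
 *	set_cr(get_cr() | CR_C);	- dcache on
 */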
static inline unsigned int get_dacr(void)
{
	unsigned int val;

	asm("mrc p15, 0, %0, c3, c0, 0	@ get DACR" : "=r" (val) : : "cc");
	return val;
}
static inline void set_dacr(unsigned int val)
{
	asm volatile("mcr p15, 0, %0, c3, c0, 0	@ set DACR"
	  : : "r" (val) : "cc");
	isb();
}
#ifdef CONFIG_ARMV7_LPAE
/* Long-Descriptor Translation Table Level 1/2 Bits */
#define TTB_SECT_XN_MASK (1ULL << 54)
#define TTB_SECT_NG_MASK (1 << 11)
#define TTB_SECT_AF (1 << 10)
#define TTB_SECT_SH_MASK (3 << 8)
#define TTB_SECT_NS_MASK (1 << 5)
#define TTB_SECT_AP (1 << 6)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_MAIR(x) ((x & 0x7) << 2) /* Index into MAIR */
#define TTB_SECT (1 << 0)
#define TTB_PAGETABLE (3 << 0)
/* TTBCR flags */
#define TTBCR_EAE (1 << 31)
#define TTBCR_T0SZ(x) ((x) << 0)
#define TTBCR_T1SZ(x) ((x) << 16)
#define TTBCR_USING_TTBR0 (TTBCR_T0SZ(0) | TTBCR_T1SZ(0))
#define TTBCR_IRGN0_NC (0 << 8)
#define TTBCR_IRGN0_WBWA (1 << 8)
#define TTBCR_IRGN0_WT (2 << 8)
#define TTBCR_IRGN0_WBNWA (3 << 8)
#define TTBCR_IRGN0_MASK (3 << 8)
#define TTBCR_ORGN0_NC (0 << 10)
#define TTBCR_ORGN0_WBWA (1 << 10)
#define TTBCR_ORGN0_WT (2 << 10)
#define TTBCR_ORGN0_WBNWA (3 << 10)
#define TTBCR_ORGN0_MASK (3 << 10)
#define TTBCR_SHARED_NON (0 << 12)
#define TTBCR_SHARED_OUTER (2 << 12)
#define TTBCR_SHARED_INNER (3 << 12)
#define TTBCR_EPD0 (0 << 7)
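
/*
 * Example (illustrative, not from the original header): a typical LPAE
 * configuration enables the long-descriptor format and walks TTBR0 with
 * write-back write-allocate, inner-shareable attributes:
 *
 *	ttbcr = TTBCR_EAE | TTBCR_ORGN0_WBWA | TTBCR_IRGN0_WBWA |
 *		TTBCR_SHARED_INNER | TTBCR_USING_TTBR0;
 */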
/*
 * Memory types
 */
#define MEMORY_ATTRIBUTES	((0x00 << (0 * 8)) | (0x88 << (1 * 8)) | \
				 (0xcc << (2 * 8)) | (0xff << (3 * 8)))
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT | TTB_SECT_MAIR(0),
	DCACHE_WRITETHROUGH = TTB_SECT | TTB_SECT_MAIR(1),
	DCACHE_WRITEBACK = TTB_SECT | TTB_SECT_MAIR(2),
	DCACHE_WRITEALLOC = TTB_SECT | TTB_SECT_MAIR(3),
};
#elif defined(CONFIG_CPU_V7)
/* Short-Descriptor Translation Table Level 1 Bits */
#define TTB_SECT_NS_MASK (1 << 19)
#define TTB_SECT_NG_MASK (1 << 17)
#define TTB_SECT_S_MASK (1 << 16)
/* Note: TTB AP bits are set elsewhere */
#define TTB_SECT_AP (3 << 10)
#define TTB_SECT_TEX(x) ((x & 0x7) << 12)
#define TTB_SECT_DOMAIN(x) ((x & 0xf) << 5)
#define TTB_SECT_XN_MASK (1 << 4)
#define TTB_SECT_C_MASK (1 << 3)
#define TTB_SECT_B_MASK (1 << 2)
#define TTB_SECT (2 << 0)
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = TTB_SECT_DOMAIN(0) | TTB_SECT_XN_MASK | TTB_SECT,
	DCACHE_WRITETHROUGH = DCACHE_OFF | TTB_SECT_C_MASK,
	DCACHE_WRITEBACK = DCACHE_WRITETHROUGH | TTB_SECT_B_MASK,
	DCACHE_WRITEALLOC = DCACHE_WRITEBACK | TTB_SECT_TEX(1),
};
#else
#define TTB_SECT_AP (3 << 10)
/* options available for data cache on each page */
enum dcache_option {
	DCACHE_OFF = 0x12,
	DCACHE_WRITETHROUGH = 0x1a,
	DCACHE_WRITEBACK = 0x1e,
	DCACHE_WRITEALLOC = 0x16,
};
#endif
/* Size of an MMU section */
enum {
#ifdef CONFIG_ARMV7_LPAE
	MMU_SECTION_SHIFT	= 21, /* 2MB */
#else
	MMU_SECTION_SHIFT	= 20, /* 1MB */
#endif
	MMU_SECTION_SIZE	= 1 << MMU_SECTION_SHIFT,
};
#ifdef CONFIG_CPU_V7
/* TTBR0 bits */
#define TTBR0_BASE_ADDR_MASK 0xFFFFC000
#define TTBR0_RGN_NC (0 << 3)
#define TTBR0_RGN_WBWA (1 << 3)
#define TTBR0_RGN_WT (2 << 3)
#define TTBR0_RGN_WB (3 << 3)
/* TTBR0[6] is IRGN[0] and TTBR0[0] is IRGN[1] */
#define TTBR0_IRGN_NC (0 << 0 | 0 << 6)
#define TTBR0_IRGN_WBWA (0 << 0 | 1 << 6)
#define TTBR0_IRGN_WT (1 << 0 | 0 << 6)
#define TTBR0_IRGN_WB (1 << 0 | 1 << 6)
#endif
/**
 * Register an update to the page tables, and flush the TLB
 *
 * \param start		start address of update in page table
 * \param stop		stop address of update in page table
 */
void mmu_page_table_flush(unsigned long start, unsigned long stop);
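
/*
 * Illustrative usage (page_table/idx are invented names): after rewriting
 * a descriptor, flush the table entries themselves (not the region they
 * map) so the table walker sees the update:
 *
 *	page_table[idx] = phys_addr | DCACHE_WRITEBACK;
 *	mmu_page_table_flush((unsigned long)&page_table[idx],
 *			     (unsigned long)&page_table[idx + 1]);
 */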
#endif /* __ASSEMBLY__ */
#define arch_align_stack(x) (x)
#endif /* __KERNEL__ */
#endif /* CONFIG_ARM64 */
#ifndef __ASSEMBLY__
/**
 * Change the cache settings for a region.
 *
 * \param start		start address of memory region to change
 * \param size		size of memory region to change
 * \param option	dcache option to select
 */
void mmu_set_region_dcache_behaviour(phys_addr_t start, size_t size,
				     enum dcache_option option);
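
/*
 * Example (illustrative; FRAME_BUFFER_BASE/FRAME_BUFFER_SIZE are invented
 * names): mark a framebuffer write-through so CPU writes become visible
 * to the display controller without explicit flushes:
 *
 *	mmu_set_region_dcache_behaviour(FRAME_BUFFER_BASE,
 *					FRAME_BUFFER_SIZE,
 *					DCACHE_WRITETHROUGH);
 *
 * The mapping works at MMU_SECTION_SIZE granularity, so start and size
 * should be section-aligned.
 */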
#ifdef CONFIG_SYS_NONCACHED_MEMORY
void noncached_init(void);
phys_addr_t noncached_alloc(size_t size, size_t align);
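
/*
 * Illustrative sketch (dma_desc/NUM_DESCS are invented names): once the
 * pool has been set up via noncached_init(), a driver can place DMA
 * descriptors in uncached memory instead of cache-flushing around every
 * access:
 *
 *	struct dma_desc *descs;
 *
 *	descs = (struct dma_desc *)noncached_alloc(
 *			NUM_DESCS * sizeof(struct dma_desc),
 *			ARCH_DMA_MINALIGN);
 */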
#endif /* CONFIG_SYS_NONCACHED_MEMORY */
#endif /* __ASSEMBLY__ */
#endif