u-boot/arch/arm/cpu/armv8/cache.S
Rob Herring 7682a99826 remove unnecessary version.h includes
Various files are needlessly rebuilt every time due to the version and
build time changing. As version.h is not actually needed, remove the
include.

Signed-off-by: Rob Herring <robh@kernel.org>
Cc: Albert Aribaud <albert.u.boot@aribaud.net>
Cc: Stefano Babic <sbabic@denx.de>
Cc: Minkyu Kang <mk7.kang@samsung.com>
Cc: Marek Vasut <marex@denx.de>
Cc: Tom Warren <twarren@nvidia.com>
Cc: Michal Simek <monstr@monstr.eu>
Cc: Macpaul Lin <macpaul@andestech.com>
Cc: Wolfgang Denk <wd@denx.de>
Cc: York Sun <yorksun@freescale.com>
Cc: Stefan Roese <sr@denx.de>
Cc: Nobuhiro Iwamatsu <iwamatsu@nigauri.org>
Cc: Simon Glass <sjg@chromium.org>
Cc: Philippe Reynes <tremyfr@yahoo.fr>
Cc: Eric Jarrige <eric.jarrige@armadeus.org>
Cc: "David Müller" <d.mueller@elsoft.ch>
Cc: Phil Edworthy <phil.edworthy@renesas.com>
Cc: Robert Baldyga <r.baldyga@samsung.com>
Cc: Torsten Koschorrek <koschorrek@synertronixx.de>
Cc: Anatolij Gustschin <agust@denx.de>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Reviewed-by: Łukasz Majewski <l.majewski@samsung.com>
2015-03-24 10:50:50 -04:00


/*
* (C) Copyright 2013
* David Feng <fenghua@phytium.com.cn>
*
* This file is based on sample code from ARMv8 ARM.
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <asm-offsets.h>
#include <config.h>
#include <asm/macro.h>
#include <linux/linkage.h>
/*
* void __asm_flush_dcache_level(level, invalidate_only)
*
* Clean & invalidate (or invalidate only) one data cache level by set/way.
*
* x0: cache level
* x1: 0 flush & invalidate, 1 invalidate only
* x2~x9: clobbered
*/
ENTRY(__asm_flush_dcache_level)
lsl x12, x0, #1
msr csselr_el1, x12 /* select cache level */
isb /* sync change of csselr_el1 */
mrs x6, ccsidr_el1 /* read the new ccsidr_el1 */
and x2, x6, #7 /* x2 <- log2(cache line size)-4 */
add x2, x2, #4 /* x2 <- log2(cache line size) */
mov x3, #0x3ff
and x3, x3, x6, lsr #3 /* x3 <- max number of #ways */
clz w5, w3 /* bit position of #ways */
mov x4, #0x7fff
and x4, x4, x6, lsr #13 /* x4 <- max number of #sets */
/* x12 <- cache level << 1 */
/* x2 <- line length offset */
/* x3 <- number of cache ways - 1 */
/* x4 <- number of cache sets - 1 */
/* x5 <- bit position of #ways */
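/*
* Per the ARMv8 ARM set/way operand format, the level goes in bits
* [3:1] (hence level << 1), the way index in the top bits starting at
* bit 32 - log2(#ways) (hence the clz above), and the set index is
* shifted left by log2(line size). The nested loops below visit every
* set/way combination at the selected level.
*/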
loop_set:
mov x6, x3 /* x6 <- working copy of #ways */
loop_way:
lsl x7, x6, x5
orr x9, x12, x7 /* map way and level to cisw value */
lsl x7, x4, x2
orr x9, x9, x7 /* map set number to cisw value */
tbz w1, #0, 1f /* bit 0 clear -> clean & invalidate */
dc isw, x9 /* invalidate by set/way only */
b 2f
1: dc cisw, x9 /* clean & invalidate by set/way */
2: subs x6, x6, #1 /* decrement the way */
b.ge loop_way
subs x4, x4, #1 /* decrement the set */
b.ge loop_set
ret
ENDPROC(__asm_flush_dcache_level)
/*
* void __asm_dcache_all(int invalidate_only)
*
* x0: 0 flush & invalidate, 1 invalidate only
*
* Clean & invalidate (or invalidate only) the entire data cache by set/way.
*/
ENTRY(__asm_dcache_all)
mov x1, x0 /* x1 <- invalidate_only flag */
dsb sy
mrs x10, clidr_el1 /* read clidr_el1 */
lsr x11, x10, #24
and x11, x11, #0x7 /* x11 <- loc */
cbz x11, finished /* if loc is 0, exit */
mov x15, lr
mov x0, #0 /* start flush at cache level 0 */
/* x0 <- cache level */
/* x10 <- clidr_el1 */
/* x11 <- loc */
/* x15 <- return address */
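/*
* CLIDR_EL1 carries a 3-bit cache-type field per level: 0 = no cache,
* 1 = icache only, 2 = dcache only, 3 = separate icache and dcache,
* 4 = unified. Levels without a data cache (type < 2) are skipped.
*/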
loop_level:
lsl x12, x0, #1
add x12, x12, x0 /* x12 <- cache level * 3 */
lsr x12, x10, x12
and x12, x12, #7 /* x12 <- cache type */
cmp x12, #2
b.lt skip /* skip if no cache, or icache only */
bl __asm_flush_dcache_level /* x1 = 0 flush, 1 invalidate */
skip:
add x0, x0, #1 /* increment cache level */
cmp x11, x0
b.gt loop_level
mov x0, #0
msr csselr_el1, x0 /* restore csselr_el1 */
dsb sy
isb
mov lr, x15
finished:
ret
ENDPROC(__asm_dcache_all)
ENTRY(__asm_flush_dcache_all)
mov x16, lr
mov x0, #0
bl __asm_dcache_all
mov lr, x16
ret
ENDPROC(__asm_flush_dcache_all)
ENTRY(__asm_invalidate_dcache_all)
mov x16, lr
mov x0, #0xffff /* only bit 0 is tested: any odd value means invalidate only */
bl __asm_dcache_all
mov lr, x16
ret
ENDPROC(__asm_invalidate_dcache_all)
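/*
* Note: these wrappers save lr in scratch registers (x16 here, x15
* inside __asm_dcache_all) rather than on the stack, so the routines
* remain usable before a stack has been set up.
*/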
/*
* void __asm_flush_dcache_range(start, end)
*
* Clean & invalidate the data cache over [start, end).
*
* x0: start address
* x1: end address
*/
ENTRY(__asm_flush_dcache_range)
mrs x3, ctr_el0 /* read cache type register */
lsr x3, x3, #16
and x3, x3, #0xf /* x3 <- CTR_EL0.DminLine, log2(words per line) */
mov x2, #4
lsl x2, x2, x3 /* cache line size */
/* x2 <- minimal cache line size in cache system */
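/* e.g. DminLine = 4 -> 16 words -> x2 = 4 << 4 = 64-byte lines */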
sub x3, x2, #1 /* x3 <- line size mask */
bic x0, x0, x3 /* align start address down to a line boundary */
1: dc civac, x0 /* clean & invalidate data or unified cache */
add x0, x0, x2
cmp x0, x1
b.lo 1b
dsb sy
ret
ENDPROC(__asm_flush_dcache_range)
/*
* void __asm_invalidate_icache_all(void)
*
* invalidate all icache entries.
*/
ENTRY(__asm_invalidate_icache_all)
ic ialluis /* invalidate all icache, inner shareable, to PoU */
isb sy
ret
ENDPROC(__asm_invalidate_icache_all)
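/*
* Weak no-op stub (see the .weak directive below); platforms with an
* outer/L3 cache can override __asm_flush_l3_cache with their own
* implementation.
*/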
ENTRY(__asm_flush_l3_cache)
mov x0, #0 /* return status as success */
ret
ENDPROC(__asm_flush_l3_cache)
.weak __asm_flush_l3_cache