mirror of https://github.com/AsahiLinux/u-boot
1c64b98c1e
Ensure that every write is flushed to memory and that subsequent reads
come from memory.
The algorithm relies on the fact that accessing non-existent memory
leads to a write at addr / 2; without this change, accesses to aliased
(not physically present) addresses are served from the cache and the
wrong size is returned.
This was discovered while working on a TI AM625 based board where the
cache is normally enabled, see commit c02712a748
("arm: mach-k3: Enable dcache in SPL").
Signed-off-by: Emanuele Ghidoli <emanuele.ghidoli@toradex.com>
Signed-off-by: Francesco Dolcini <francesco.dolcini@toradex.com>
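For context, get_ram_size() is normally called from a board's dram_init()
to probe how much DRAM actually responds. Below is a minimal sketch of such
a caller, assuming a board-specific CFG_SYS_SDRAM_BASE and a probe ceiling
of SZ_2G; both are placeholders here, not taken from this commit.

#include <init.h>
#include <asm/global_data.h>
#include <linux/sizes.h>

DECLARE_GLOBAL_DATA_PTR;

int dram_init(void)
{
	/* Probe at most 2 GiB starting at the SDRAM base; the detected
	 * size is what ends up in gd->ram_size. */
	gd->ram_size = get_ram_size((long *)CFG_SYS_SDRAM_BASE, SZ_2G);

	return 0;
}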
142 lines · 3.3 KiB · C
// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2004
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <init.h>
#include <asm/global_data.h>
#include <cpu_func.h>
#include <stdint.h>

DECLARE_GLOBAL_DATA_PTR;

#ifdef CONFIG_SYS_CACHELINE_SIZE
# define MEMSIZE_CACHELINE_SIZE	CONFIG_SYS_CACHELINE_SIZE
#else
/* Just use the greatest cache flush alignment requirement I'm aware of */
# define MEMSIZE_CACHELINE_SIZE	128
#endif

#ifdef __PPC__
/*
 * At least on G2 PowerPC cores, sequential accesses to non-existent
 * memory must be synchronized.
 */
# include <asm/io.h>	/* for sync() */
#else
# define sync()		/* nothing */
#endif
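
/*
 * Write the cache line holding *p back to memory and then invalidate it,
 * so that the test pattern really reaches (possibly aliased) RAM and the
 * following read of *p is served from memory rather than from the D-cache.
 */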
static void dcache_flush_invalidate(volatile long *p)
{
	uintptr_t start, stop;

	start = ALIGN_DOWN((uintptr_t)p, MEMSIZE_CACHELINE_SIZE);
	stop = start + MEMSIZE_CACHELINE_SIZE;
	flush_dcache_range(start, stop);
	invalidate_dcache_range(start, stop);
}

/*
 * Check memory range for valid RAM. A simple memory test determines
 * the actually available RAM size between addresses `base' and
 * `base + maxsize'.
 */
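/*
 * The test works on power-of-two word offsets: the first (descending) loop
 * saves the original word at base + cnt and replaces it with the pattern
 * ~cnt, then *base itself is set to 0.  If less than maxsize of RAM is
 * present, writes beyond the end of real memory alias back onto lower
 * addresses and clobber the patterns stored there.  The second (ascending)
 * loop restores the saved words and returns the first offset whose pattern
 * no longer reads back as ~cnt -- that offset is the actual RAM size.
 * With the D-cache enabled, each pattern write is flushed and its line
 * invalidated so that aliased accesses are not satisfied from the cache.
 */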
long get_ram_size(long *base, long maxsize)
{
	volatile long *addr;
	long           save[BITS_PER_LONG - 1];
	long           save_base;
	long           cnt;
	long           val;
	long           size;
	int            i = 0;
	int            dcache_en = dcache_status();
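
	/*
	 * Descending pass: remember the original contents at each
	 * power-of-two offset and replace them with the pattern ~cnt,
	 * pushing every write out to memory when the D-cache is on.
	 */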
	for (cnt = (maxsize / sizeof(long)) >> 1; cnt > 0; cnt >>= 1) {
		addr = base + cnt;	/* pointer arith! */
		sync();
		save[i++] = *addr;
		sync();
		*addr = ~cnt;
		if (dcache_en)
			dcache_flush_invalidate(addr);
	}
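
	/* Finally probe the first word itself: save it and write 0. */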
	addr = base;
	sync();
	save_base = *addr;
	sync();
	*addr = 0;

	sync();
	if (dcache_en)
		dcache_flush_invalidate(addr);
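
	/* If the 0 did not stick, there is no working RAM at base at all. */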
	if ((val = *addr) != 0) {
		/* Restore the original data before leaving the function. */
		sync();
		*base = save_base;
		for (cnt = 1; cnt < maxsize / sizeof(long); cnt <<= 1) {
			addr = base + cnt;
			sync();
			*addr = save[--i];
		}
		return (0);
	}
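
	/*
	 * Ascending pass: put every saved word back and check whether its
	 * ~cnt pattern survived.  The first offset that was clobbered marks
	 * the point where the address space wraps, i.e. the end of RAM.
	 */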
	for (cnt = 1; cnt < maxsize / sizeof(long); cnt <<= 1) {
		addr = base + cnt;	/* pointer arith! */
		val = *addr;
		*addr = save[--i];
		if (val != ~cnt) {
			size = cnt * sizeof(long);
			/*
			 * Restore the original data
			 * before leaving the function.
			 */
			for (cnt <<= 1;
			     cnt < maxsize / sizeof(long);
			     cnt <<= 1) {
				addr = base + cnt;
				*addr = save[--i];
			}
			/* warning: don't restore save_base in this case,
			 * it has already been done in this loop, because
			 * base and base+size share the same physical memory
			 * and *base was saved only after *(base+size) had
			 * been modified in the first loop
			 */
			return (size);
		}
	}
	*base = save_base;

	return (maxsize);
}
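
/*
 * Weak default for the effective (usable) memory size: the full
 * gd->ram_size, optionally capped at CFG_MAX_MEM_MAPPED.  Boards with
 * additional mapping constraints can override this.
 */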
phys_size_t __weak get_effective_memsize(void)
{
	phys_size_t ram_size = gd->ram_size;

#ifdef CONFIG_MPC85xx
	/*
	 * Check for overflow and limit the RAM size to a representable value:
	 * ram_base + ram_size must be representable by the phys_size_t type
	 * and must be aligned for direct access, therefore calculate it from
	 * the last 4 kB sector, which should work as alignment on any
	 * platform.
	 */
	if (gd->ram_base + ram_size < gd->ram_base)
		ram_size = ((phys_size_t)~0xfffULL) - gd->ram_base;
#endif

#ifndef CFG_MAX_MEM_MAPPED
	return ram_size;
#else
	/* limit stack to what we can reasonably map */
	return ((ram_size > CFG_MAX_MEM_MAPPED) ?
		CFG_MAX_MEM_MAPPED : ram_size);
#endif
}