// SPDX-License-Identifier: GPL-2.0+
/*
 * (C) Copyright 2004
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 */

#include <common.h>
#include <init.h>
#include <asm/global_data.h>
#include <cpu_func.h>
#include <stdint.h>

DECLARE_GLOBAL_DATA_PTR;
/*
 * Cache-line granularity used by dcache_flush_invalidate() when pushing
 * probe writes out to memory.  Falls back to a conservative 128-byte
 * line when the platform does not define its own.
 */
#ifdef CONFIG_SYS_CACHELINE_SIZE
# define MEMSIZE_CACHELINE_SIZE CONFIG_SYS_CACHELINE_SIZE
#else
/* Just use the greatest cache flush alignment requirement I'm aware of */
# define MEMSIZE_CACHELINE_SIZE 128
#endif
#ifdef __PPC__
/*
 * At least on G2 PowerPC cores, sequential accesses to non-existent
 * memory must be synchronized.
 */
# include <asm/io.h> /* for sync() */
#else
/* Other architectures need no barrier between the probe accesses. */
# define sync() /* nothing */
#endif
2023-05-30 13:33:27 +00:00
|
|
|
static void dcache_flush_invalidate(volatile long *p)
|
|
|
|
{
|
|
|
|
uintptr_t start, stop;
|
|
|
|
start = ALIGN_DOWN((uintptr_t)p, MEMSIZE_CACHELINE_SIZE);
|
|
|
|
stop = start + MEMSIZE_CACHELINE_SIZE;
|
|
|
|
flush_dcache_range(start, stop);
|
|
|
|
invalidate_dcache_range(start, stop);
|
|
|
|
}
|
|
|
|
|
/*
 * Check memory range for valid RAM. A simple memory test determines
 * the actually available RAM size between addresses `base' and
 * `base + maxsize'.
 *
 * The probe writes a distinct pattern (~cnt) at every power-of-two
 * word offset, then reads them back.  If the physical RAM is smaller
 * than maxsize, addresses wrap (alias), so some offset reads back the
 * wrong pattern; the first mismatch gives the real size.  All modified
 * words are restored before returning.  The exact ordering of the
 * volatile accesses and sync() barriers below is load-bearing — do not
 * reorder.
 */
long get_ram_size(long *base, long maxsize)
{
	volatile long *addr;
	/* one saved word per probed power-of-two offset */
	long save[BITS_PER_LONG - 1];
	long save_base;
	long cnt;
	long val;
	long size;
	int i = 0;
	int dcache_en = dcache_status();

	/*
	 * Walk offsets from the largest (maxsize/2 words) down to 1,
	 * saving the original word and planting the ~cnt pattern.
	 */
	for (cnt = (maxsize / sizeof(long)) >> 1; cnt > 0; cnt >>= 1) {
		addr = base + cnt;	/* pointer arith! */
		sync();
		save[i++] = *addr;
		sync();
		*addr = ~cnt;
		/* make sure the pattern reaches memory, not just cache */
		if (dcache_en)
			dcache_flush_invalidate(addr);
	}

	/* Finally probe offset 0 with the pattern 0. */
	addr = base;
	sync();
	save_base = *addr;
	sync();
	*addr = 0;

	sync();
	if (dcache_en)
		dcache_flush_invalidate(addr);

	/*
	 * If even *base does not hold its value, there is no usable RAM
	 * at base at all.  Restore everything and report size 0.
	 */
	if ((val = *addr) != 0) {
		/* Restore the original data before leaving the function. */
		sync();
		*base = save_base;
		for (cnt = 1; cnt < maxsize / sizeof(long); cnt <<= 1) {
			addr = base + cnt;
			sync();
			*addr = save[--i];
		}
		return (0);
	}

	/*
	 * Read the patterns back from smallest to largest offset; note
	 * that save[] is unwound in reverse order of how it was filled.
	 */
	for (cnt = 1; cnt < maxsize / sizeof(long); cnt <<= 1) {
		addr = base + cnt;	/* pointer arith! */
		val = *addr;
		*addr = save[--i];
		if (val != ~cnt) {
			/* first aliased offset found: RAM ends here */
			size = cnt * sizeof(long);
			/*
			 * Restore the original data
			 * before leaving the function.
			 */
			for (cnt <<= 1;
			     cnt < maxsize / sizeof(long);
			     cnt <<= 1) {
				addr = base + cnt;
				*addr = save[--i];
			}
			/* warning: don't restore save_base in this case,
			 * it is already done in the loop because
			 * base and base+size share the same physical memory
			 * and *base is saved after *(base+size) modification
			 * in first loop
			 */
			return (size);
		}
	}
	/* No aliasing detected: the full range is real RAM. */
	*base = save_base;

	return (maxsize);
}
2014-02-11 19:57:26 +00:00
|
|
|
|
|
|
|
phys_size_t __weak get_effective_memsize(void)
|
|
|
|
{
|
2022-09-09 15:32:39 +00:00
|
|
|
phys_size_t ram_size = gd->ram_size;
|
|
|
|
|
2023-01-07 21:55:26 +00:00
|
|
|
#ifdef CONFIG_MPC85xx
|
2022-09-09 15:32:39 +00:00
|
|
|
/*
|
|
|
|
* Check for overflow and limit ram size to some representable value.
|
|
|
|
* It is required that ram_base + ram_size must be representable by
|
|
|
|
* phys_size_t type and must be aligned by direct access, therefore
|
|
|
|
* calculate it from last 4kB sector which should work as alignment
|
|
|
|
* on any platform.
|
|
|
|
*/
|
|
|
|
if (gd->ram_base + ram_size < gd->ram_base)
|
|
|
|
ram_size = ((phys_size_t)~0xfffULL) - gd->ram_base;
|
2023-01-07 21:55:26 +00:00
|
|
|
#endif
|
2022-09-09 15:32:39 +00:00
|
|
|
|
2022-12-04 15:04:50 +00:00
|
|
|
#ifndef CFG_MAX_MEM_MAPPED
|
2022-09-09 15:32:39 +00:00
|
|
|
return ram_size;
|
2014-02-11 19:57:26 +00:00
|
|
|
#else
|
|
|
|
/* limit stack to what we can reasonable map */
|
2022-12-04 15:04:50 +00:00
|
|
|
return ((ram_size > CFG_MAX_MEM_MAPPED) ?
|
|
|
|
CFG_MAX_MEM_MAPPED : ram_size);
|
2014-02-11 19:57:26 +00:00
|
|
|
#endif
|
|
|
|
}
|