mips: octeon: Misc changes required because of the newly added headers
With the newly added headers and their restructuring (which macro is
defined where), some changes in the already existing Octeon files are
necessary. This patch makes the necessary changes.

Signed-off-by: Stefan Roese <sr@denx.de>
parent fe3334d0a3
commit b0f4ba0242

9 changed files with 343 additions and 236 deletions

@@ -25,6 +25,7 @@
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/bootoct_cmd.h>
#include <mach/cvmx-ciu-defs.h>

DECLARE_GLOBAL_DATA_PTR;

@@ -21,12 +21,6 @@

DECLARE_GLOBAL_DATA_PTR;

#define CVMX_MIPS32_SPACE_KSEG0 1L
#define CVMX_MIPS_SPACE_XKPHYS 2LL

#define CVMX_ADD_SEG(seg, add) ((((u64)(seg)) << 62) | (add))
#define CVMX_ADD_SEG32(seg, add) (((u32)(seg) << 31) | (u32)(add))

/**
 * This is the physical location of a struct cvmx_bootmem_desc
 * structure in Octeon's memory. Note that due to addressing

@@ -14,6 +14,7 @@
#include <mach/cvmx-fuse.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/cvmx-ciu-defs.h>

struct cvmx_coremask *get_coremask_override(struct cvmx_coremask *pcm)
{

@@ -9,6 +9,7 @@
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
#include <mach/cvmx-address.h>

/* General defines */
#define CVMX_MAX_CORES 48

@@ -26,48 +27,116 @@

#define MAX_CORE_TADS 8

#define CAST_ULL(v) ((unsigned long long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
#define CAST64(v) ((long long)(long)(v))

/* Regs */
#define CVMX_CIU_PP_RST 0x0001010000000100ULL
#define CVMX_CIU3_NMI 0x0001010000000160ULL
#define CVMX_CIU_FUSE 0x00010100000001a0ULL
#define CVMX_CIU_NMI 0x0001070000000718ULL

#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)

#define CVMX_MIO_BOOT_LOC_ADR 0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)

#define CVMX_MIO_BOOT_LOC_DAT 0x0001180000000098ULL

#define CVMX_MIO_FUS_DAT2 0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)

#define CVMX_MIO_FUS_RCMD 0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)

#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)

#define CVMX_IOBDMA_ORDERED_IO_ADDR 0xffffffffffffa200ull

/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x

#define CVMX_RDHWR(result, regstr) \
	asm volatile("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_RDHWRNV(result, regstr) \
	asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
	asm("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d"(result))
#define CVMX_POP(result, input) \
	asm("pop %[rd],%[rs]" : [rd] "=d"(result) : [rs] "d"(input))

#define CVMX_SYNCW \
	asm volatile ("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCW asm volatile("syncw\nsyncw\n" : : : "memory")
#define CVMX_SYNCS asm volatile("syncs\n" : : : "memory")
#define CVMX_SYNCWS asm volatile("syncws\n" : : : "memory")

#define CVMX_CACHE_LINE_SIZE 128 // In bytes
#define CVMX_CACHE_LINE_MASK (CVMX_CACHE_LINE_SIZE - 1) // In bytes
#define CVMX_CACHE_LINE_ALIGNED __aligned(CVMX_CACHE_LINE_SIZE)

#define CVMX_SYNCIOBDMA asm volatile("synciobdma" : : : "memory")

#define CVMX_MF_CHORD(dest) CVMX_RDHWR(dest, 30)

/*
 * The macros cvmx_likely and cvmx_unlikely use the
 * __builtin_expect GCC operation to control branch
 * probabilities for a conditional. For example, an "if"
 * statement in the code that will almost always be
 * executed should be written as "if (cvmx_likely(...))".
 * If the "else" section of an if statement is more
 * probable, use "if (cvmx_unlikely(...))".
 */
#define cvmx_likely(x) __builtin_expect(!!(x), 1)
#define cvmx_unlikely(x) __builtin_expect(!!(x), 0)

#define CVMX_WAIT_FOR_FIELD64(address, type, field, op, value, to_us) \
	({ \
		int result; \
		do { \
			u64 done = get_timer(0); \
			type c; \
			while (1) { \
				c.u64 = csr_rd(address); \
				if ((c.s.field)op(value)) { \
					result = 0; \
					break; \
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1; \
					break; \
				} else \
					udelay(100); \
			} \
		} while (0); \
		result; \
	})

#define CVMX_WAIT_FOR_FIELD64_NODE(node, address, type, field, op, value, to_us) \
	({ \
		int result; \
		do { \
			u64 done = get_timer(0); \
			type c; \
			while (1) { \
				c.u64 = csr_rd(address); \
				if ((c.s.field)op(value)) { \
					result = 0; \
					break; \
				} else if (get_timer(done) > ((to_us) / 1000)) { \
					result = -1; \
					break; \
				} else \
					udelay(100); \
			} \
		} while (0); \
		result; \
	})
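
/*
 * Usage sketch (illustrative only, not part of this patch): the macros
 * above poll a CSR until a field reaches a value or a timeout expires.
 * The "type" argument must be a register union with a .u64 member and
 * the named field under .s, as the generated cvmx-*-defs.h unions
 * provide. The union and address below are made up for the example:
 *
 *	union my_status {
 *		u64 u64;
 *		struct { u64 reserved_1_63:63; u64 busy:1; } s;
 *	};
 *	#define MY_STATUS_REG 0x0001180000000123ULL
 *
 *	if (CVMX_WAIT_FOR_FIELD64(MY_STATUS_REG, union my_status,
 *				  busy, ==, 0, 10000))
 *		printf("timed out waiting for busy to clear\n");
 */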

/* ToDo: Currently only node = 0 supported */
#define cvmx_get_node_num() 0

static inline u64 csr_rd_node(int node, u64 addr)
{
	void __iomem *base;

@@ -76,11 +145,24 @@ static inline u64 csr_rd_node(int node, u64 addr)
	return ioread64(base);
}

static inline u32 csr_rd32_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread32(base);
}

static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}

static inline u32 csr_rd32(u64 addr)
{
	return csr_rd32_node(0, addr);
}

static inline void csr_wr_node(int node, u64 addr, u64 val)
{
	void __iomem *base;

@@ -89,11 +171,24 @@ static inline void csr_wr_node(int node, u64 addr, u64 val)
	iowrite64(val, base);
}

static inline void csr_wr32_node(int node, u64 addr, u32 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite32(val, base);
}

static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}

static inline void csr_wr32(u64 addr, u32 val)
{
	csr_wr32_node(0, addr, val);
}
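
/*
 * Usage sketch (illustrative only, not part of this patch): the accessors
 * above map a physical CSR address and issue a 64-bit (or 32-bit) read or
 * write. With the CVMX_CIU_PP_RST address defined earlier in this header,
 * a raw read-modify-write looks like this; the cleared bit is only an
 * example, real code would go through the generated register unions:
 *
 *	u64 rst = csr_rd(CVMX_CIU_PP_RST);
 *
 *	csr_wr(CVMX_CIU_PP_RST, rst & ~BIT_ULL(1));
 */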

/*
 * We need to use the volatile access here, otherwise the IO accessor
 * functions might swap the bytes

@@ -103,21 +198,173 @@ static inline u64 cvmx_read64_uint64(u64 addr)
	return *(volatile u64 *)addr;
}

static inline s64 cvmx_read64_int64(u64 addr)
{
	return *(volatile s64 *)addr;
}

static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}

static inline void cvmx_write64_int64(u64 addr, s64 val)
{
	*(volatile s64 *)addr = val;
}

static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}

static inline s32 cvmx_read64_int32(u64 addr)
{
	return *(volatile s32 *)addr;
}

static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}

static inline void cvmx_write64_int32(u64 addr, s32 val)
{
	*(volatile s32 *)addr = val;
}

static inline void cvmx_write64_int16(u64 addr, s16 val)
{
	*(volatile s16 *)addr = val;
}

static inline void cvmx_write64_uint16(u64 addr, u16 val)
{
	*(volatile u16 *)addr = val;
}

static inline void cvmx_write64_int8(u64 addr, int8_t val)
{
	*(volatile int8_t *)addr = val;
}

static inline void cvmx_write64_uint8(u64 addr, u8 val)
{
	*(volatile u8 *)addr = val;
}

static inline s16 cvmx_read64_int16(u64 addr)
{
	return *(volatile s16 *)addr;
}

static inline u16 cvmx_read64_uint16(u64 addr)
{
	return *(volatile u16 *)addr;
}

static inline int8_t cvmx_read64_int8(u64 addr)
{
	return *(volatile int8_t *)addr;
}

static inline u8 cvmx_read64_uint8(u64 addr)
{
	return *(volatile u8 *)addr;
}

static inline void cvmx_send_single(u64 data)
{
	cvmx_write64_uint64(CVMX_IOBDMA_ORDERED_IO_ADDR, data);
}

/**
 * Perform a 64-bit write to an IO address
 *
 * @param io_addr I/O address to write to
 * @param val 64-bit value to write
 */
static inline void cvmx_write_io(u64 io_addr, u64 val)
{
	cvmx_write64_uint64(io_addr, val);
}

/**
 * Builds a memory address for I/O based on the Major and Sub DID.
 *
 * @param major_did 5 bit major did
 * @param sub_did 3 bit sub did
 * @return I/O base address
 */
static inline u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
}
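
/*
 * Worked example (illustrative only, not part of this patch): the RNG
 * helper visible later in this diff reads
 * csr_rd(cvmx_build_io_address(CVMX_OCT_DID_RNG, 0)) with
 * CVMX_OCT_DID_RNG being 8, which expands to
 *
 *	(0x1ull << 48) | (8ull << 43) | (0ull << 40) == 0x0001400000000000
 *
 * i.e. the I/O space address for major DID 8, sub DID 0.
 */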

/**
 * Builds a bit mask given the required size in bits.
 *
 * @param bits Number of bits in the mask
 * @return The mask
 */
static inline u64 cvmx_build_mask(u64 bits)
{
	if (bits == 64)
		return -1;

	return ~((~0x0ull) << bits);
}

/**
 * Extract bits out of a number
 *
 * @param input Number to extract from
 * @param lsb Starting bit, least significant (0-63)
 * @param width Width in bits (1-64)
 *
 * @return Extracted number
 */
static inline u64 cvmx_bit_extract(u64 input, int lsb, int width)
{
	u64 result = input >> lsb;

	result &= cvmx_build_mask(width);

	return result;
}
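
/*
 * Worked example (illustrative only, not part of this patch):
 * cvmx_build_mask(4) == 0xf, so extracting 4 bits starting at bit 8 of
 * 0x1234 gives (0x1234 >> 8) & 0xf == 0x2:
 *
 *	u64 v = cvmx_bit_extract(0x1234, 8, 4);		// v == 0x2
 */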

/**
 * Perform mask and shift to place the supplied value into
 * the supplied bit range.
 *
 * Example: cvmx_build_bits(39,24,value)
 * <pre>
 * 6       5       4       3       3       2       1
 * 3       5       7       9       1       3       5       7      0
 * +-------+-------+-------+-------+-------+-------+-------+------+
 * 000000000000000000000000___________value000000000000000000000000
 * </pre>
 *
 * @param high_bit Highest bit value can occupy (inclusive) 0-63
 * @param low_bit Lowest bit value can occupy inclusive 0-high_bit
 * @param value Value to use
 * @return Value masked and shifted
 */
static inline u64 cvmx_build_bits(u64 high_bit, u64 low_bit, u64 value)
{
	return ((value & cvmx_build_mask(high_bit - low_bit + 1)) << low_bit);
}

static inline u64 cvmx_mask_to_localaddr(u64 addr)
{
	return (addr & 0xffffffffff);
}

static inline u64 cvmx_addr_on_node(u64 node, u64 addr)
{
	return (node << 40) | cvmx_mask_to_localaddr(addr);
}

static inline void *cvmx_phys_to_ptr(u64 addr)
{
	return (void *)CKSEG0ADDR(addr);

@@ -141,4 +388,53 @@ static inline unsigned int cvmx_get_core_num(void)
	return core_num;
}

/**
 * Node-local number of the core on which the program is currently running.
 *
 * @return core number on local node
 */
static inline unsigned int cvmx_get_local_core_num(void)
{
	unsigned int core_num, core_mask;

	CVMX_RDHWRNV(core_num, 0);
	/* note that MAX_CORES may not be power of 2 */
	core_mask = (1 << CVMX_NODE_NO_SHIFT) - 1;

	return core_num & core_mask;
}

/**
 * Returns the number of bits set in the provided value.
 * Simple wrapper for POP instruction.
 *
 * @param val 32 bit value to count set bits in
 *
 * @return Number of bits set
 */
static inline u32 cvmx_pop(u32 val)
{
	u32 pop;

	CVMX_POP(pop, val);

	return pop;
}
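
/*
 * Worked example (illustrative only, not part of this patch):
 *
 *	cvmx_pop(0xf0f0)	returns 8	(eight bits set)
 *	cvmx_pop(0)		returns 0
 */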

#define cvmx_read_csr_node(node, addr) csr_rd(addr)
#define cvmx_write_csr_node(node, addr, val) csr_wr(addr, val)

#define cvmx_printf printf
#define cvmx_vprintf vprintf

#if defined(DEBUG)
void cvmx_warn(const char *format, ...) __printf(1, 2);
#else
void cvmx_warn(const char *format, ...);
#endif

#define cvmx_warn_if(expression, format, ...) \
	if (expression) \
		cvmx_warn(format, ##__VA_ARGS__)

#endif /* __CVMX_REGS_H__ */

@@ -6,6 +6,8 @@
#ifndef __OCTEON_FEATURE_H__
#define __OCTEON_FEATURE_H__

#include "cvmx-fuse.h"

/*
 * Octeon models are declared after the macros in octeon-model.h with the
 * suffix _FEATURE. The individual features are declared with the

@@ -28,6 +28,8 @@
 * use only, and may change without notice.
 */

#include <asm/mipsregs.h>

#define OCTEON_FAMILY_MASK 0x00ffff00
#define OCTEON_PRID_MASK 0x00ffffff

@@ -12,12 +12,8 @@
#include <linux/io.h>
#include <mach/octeon-model.h>
#include <mach/cvmx/cvmx-lmcx-defs.h>

/* Mapping is done starting from 0x11800.80000000 */
#define CVMX_L2C_CTL 0x00800000
#define CVMX_L2C_BIG_CTL 0x00800030
#define CVMX_L2C_TADX_INT(i) (0x00a00028 + (((i) & 7) * 0x40000))
#define CVMX_L2C_MCIX_INT(i) (0x00c00028 + (((i) & 3) * 0x40000))
#include <mach/cvmx-regs.h>
#include <mach/cvmx-l2c-defs.h>

/* Some "external" (non-LMC) registers */
#define CVMX_IPD_CLK_COUNT 0x00014F0000000338

@@ -68,34 +64,6 @@ static inline void l2c_wr(struct ddr_priv *priv, u64 addr, u64 val)
	iowrite64(val, priv->l2c_base + addr);
}

/* Access other CSR registers not located inside the LMC address space */
static inline u64 csr_rd(u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}

static inline void csr_wr(u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return iowrite64(val, base);
}

/* "Normal" access, without any offsets and/or mapping */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return readq((void *)addr);
}

static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	writeq(val, (void *)addr);
}

/* Failsafe mode */
#define FLAG_FAILSAFE_MODE 0x01000
/* Note that the DDR clock initialized flags must be contiguous */

@@ -167,157 +135,6 @@ static inline int ddr_verbose(void)
#define CVMX_DCACHE_INVALIDATE \
	{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }

/**
 * cvmx_l2c_cfg
 *
 * Specify the RSL base addresses for the block
 *
 * L2C_CFG = L2C Configuration
 *
 * Description:
 */
union cvmx_l2c_cfg {
	u64 u64;
	struct cvmx_l2c_cfg_s {
		uint64_t reserved_20_63:44;
		uint64_t bstrun:1;
		uint64_t lbist:1;
		uint64_t xor_bank:1;
		uint64_t dpres1:1;
		uint64_t dpres0:1;
		uint64_t dfill_dis:1;
		uint64_t fpexp:4;
		uint64_t fpempty:1;
		uint64_t fpen:1;
		uint64_t idxalias:1;
		uint64_t mwf_crd:4;
		uint64_t rsp_arb_mode:1;
		uint64_t rfb_arb_mode:1;
		uint64_t lrf_arb_mode:1;
	} s;
};

/**
 * cvmx_l2c_ctl
 *
 * L2C_CTL = L2C Control
 *
 *
 * Notes:
 * (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
 *
 * (2) L2DFDBE and L2DFSBE allow software to generate L2DSBE, L2DDBE, VBFSBE,
 * and VBFDBE errors for the purposes of testing error handling code. When
 * one (or both) of these bits are set a PL2 which misses in the L2 will fill
 * with the appropriate error in the first 2 OWs of the fill. Software can
 * determine which OW pair gets the error by choosing the desired fill order
 * (address<6:5>). A PL2 which hits in the L2 will not inject any errors.
 * Therefore sending a WBIL2 prior to the PL2 is recommended to make a miss
 * likely (if multiple processors are involved software must be careful to be
 * sure no other processor or IO device can bring the block into the L2).
 *
 * To generate a VBFSBE or VBFDBE, software must first get the cache block
 * into the cache with an error using a PL2 which misses the L2. Then a
 * store partial to a portion of the cache block without the error must
 * change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
 * trigger the VBFSBE/VBFDBE error.
 */
union cvmx_l2c_ctl {
	u64 u64;
	struct cvmx_l2c_ctl_s {
		uint64_t reserved_29_63:35;
		uint64_t rdf_fast:1;
		uint64_t disstgl2i:1;
		uint64_t l2dfsbe:1;
		uint64_t l2dfdbe:1;
		uint64_t discclk:1;
		uint64_t maxvab:4;
		uint64_t maxlfb:4;
		uint64_t rsp_arb_mode:1;
		uint64_t xmc_arb_mode:1;
		uint64_t reserved_2_13:12;
		uint64_t disecc:1;
		uint64_t disidxalias:1;
	} s;

	struct cvmx_l2c_ctl_cn73xx {
		uint64_t reserved_32_63:32;
		uint64_t ocla_qos:3;
		uint64_t reserved_28_28:1;
		uint64_t disstgl2i:1;
		uint64_t reserved_25_26:2;
		uint64_t discclk:1;
		uint64_t reserved_16_23:8;
		uint64_t rsp_arb_mode:1;
		uint64_t xmc_arb_mode:1;
		uint64_t rdf_cnt:8;
		uint64_t reserved_4_5:2;
		uint64_t disldwb:1;
		uint64_t dissblkdty:1;
		uint64_t disecc:1;
		uint64_t disidxalias:1;
	} cn73xx;

	struct cvmx_l2c_ctl_cn73xx cn78xx;
};

/**
 * cvmx_l2c_big_ctl
 *
 * L2C_BIG_CTL = L2C Big memory control register
 *
 *
 * Notes:
 * (1) BIGRD interrupts can occur during normal operation as the PP's are
 * allowed to prefetch to non-existent memory locations. Therefore,
 * BIGRD is for informational purposes only.
 *
 * (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB,
 * and L2C_VER_MSC will be loaded just like a store which is blocked by VRTWR.
 * Additionally, L2C_ERR_XMC will be loaded.
 */
union cvmx_l2c_big_ctl {
	u64 u64;
	struct cvmx_l2c_big_ctl_s {
		uint64_t reserved_8_63:56;
		uint64_t maxdram:4;
		uint64_t reserved_0_3:4;
	} s;
	struct cvmx_l2c_big_ctl_cn61xx {
		uint64_t reserved_8_63:56;
		uint64_t maxdram:4;
		uint64_t reserved_1_3:3;
		uint64_t disable:1;
	} cn61xx;
	struct cvmx_l2c_big_ctl_cn61xx cn63xx;
	struct cvmx_l2c_big_ctl_cn61xx cn66xx;
	struct cvmx_l2c_big_ctl_cn61xx cn68xx;
	struct cvmx_l2c_big_ctl_cn61xx cn68xxp1;
	struct cvmx_l2c_big_ctl_cn70xx {
		uint64_t reserved_8_63:56;
		uint64_t maxdram:4;
		uint64_t reserved_1_3:3;
		uint64_t disbig:1;
	} cn70xx;
	struct cvmx_l2c_big_ctl_cn70xx cn70xxp1;
	struct cvmx_l2c_big_ctl_cn70xx cn73xx;
	struct cvmx_l2c_big_ctl_cn70xx cn78xx;
	struct cvmx_l2c_big_ctl_cn70xx cn78xxp1;
	struct cvmx_l2c_big_ctl_cn61xx cnf71xx;
	struct cvmx_l2c_big_ctl_cn70xx cnf75xx;
};

struct rlevel_byte_data {
	int delay;
	int loop_total;
	int loop_count;
	int best;
	u64 bm;
	int bmerrs;
	int sqerrs;
	int bestsq;
};

#define DEBUG_VALIDATE_BITMASK 0
#if DEBUG_VALIDATE_BITMASK
#define debug_bitmask_print printf

@@ -17,14 +17,8 @@

/* Random number generator stuff */

#define CVMX_RNM_CTL_STATUS 0x0001180040000000
#define CVMX_OCT_DID_RNG 8ULL

static u64 cvmx_build_io_address(u64 major_did, u64 sub_did)
{
	return ((0x1ull << 48) | (major_did << 43) | (sub_did << 40));
}

static u64 cvmx_rng_get_random64(void)
{
	return csr_rd(cvmx_build_io_address(CVMX_OCT_DID_RNG, 0));

@@ -285,10 +279,10 @@ static int test_dram_byte64(struct ddr_priv *priv, int lmc, u64 p,
	int node = 0;

	// Force full cacheline write-backs to boost traffic
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL);
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
	saved_dissblkdty = l2c_ctl.cn78xx.dissblkdty;
	l2c_ctl.cn78xx.dissblkdty = 1;
	l2c_wr(priv, CVMX_L2C_CTL, l2c_ctl.u64);
	l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);

	if (octeon_is_cpuid(OCTEON_CN73XX) || octeon_is_cpuid(OCTEON_CNF75XX))
		kbitno = 18;

@@ -489,9 +483,9 @@ static int test_dram_byte64(struct ddr_priv *priv, int lmc, u64 p,
	}

	// Restore original setting that could enable partial cacheline writes
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL);
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
	l2c_ctl.cn78xx.dissblkdty = saved_dissblkdty;
	l2c_wr(priv, CVMX_L2C_CTL, l2c_ctl.u64);
	l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);

	return errors;
}

@@ -6315,17 +6309,17 @@ static void lmc_final(struct ddr_priv *priv)
	lmc_rd(priv, CVMX_LMCX_INT(if_num));

	for (tad = 0; tad < num_tads; tad++) {
		l2c_wr(priv, CVMX_L2C_TADX_INT(tad),
		       l2c_rd(priv, CVMX_L2C_TADX_INT(tad)));
		l2c_wr(priv, CVMX_L2C_TADX_INT_REL(tad),
		       l2c_rd(priv, CVMX_L2C_TADX_INT_REL(tad)));
		debug("%-45s : (%d) 0x%08llx\n", "CVMX_L2C_TAD_INT", tad,
		      l2c_rd(priv, CVMX_L2C_TADX_INT(tad)));
		      l2c_rd(priv, CVMX_L2C_TADX_INT_REL(tad)));
	}

	for (mci = 0; mci < num_mcis; mci++) {
		l2c_wr(priv, CVMX_L2C_MCIX_INT(mci),
		       l2c_rd(priv, CVMX_L2C_MCIX_INT(mci)));
		l2c_wr(priv, CVMX_L2C_MCIX_INT_REL(mci),
		       l2c_rd(priv, CVMX_L2C_MCIX_INT_REL(mci)));
		debug("%-45s : (%d) 0x%08llx\n", "L2C_MCI_INT", mci,
		      l2c_rd(priv, CVMX_L2C_MCIX_INT(mci)));
		      l2c_rd(priv, CVMX_L2C_MCIX_INT_REL(mci)));
	}

	debug("%-45s : 0x%08llx\n", "LMC_INT",

@@ -9827,7 +9821,7 @@ static void cvmx_dram_address_extract_info(struct ddr_priv *priv, u64 address,
		address -= ADDRESS_HOLE;

	/* Determine the LMC controllers */
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL);
	l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);

	/* xbits depends on number of LMCs */
	xbits = cvmx_dram_get_num_lmc(priv) >> 1; // 4->2, 2->1, 1->0

@@ -145,7 +145,7 @@ static void cvmx_l2c_set_big_size(struct ddr_priv *priv, u64 mem_size, int mode)
		big_ctl.u64 = 0;
		big_ctl.s.maxdram = bits - 9;
		big_ctl.cn61xx.disable = mode;
		l2c_wr(priv, CVMX_L2C_BIG_CTL, big_ctl.u64);
		l2c_wr(priv, CVMX_L2C_BIG_CTL_REL, big_ctl.u64);
	}
}

@@ -2274,15 +2274,15 @@ static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
		printf("Disabling L2 ECC based on disable_l2_ecc environment variable\n");
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 1;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disecc = 0;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}

	/*

@@ -2295,17 +2295,17 @@ static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,

		puts("L2 index aliasing disabled.\n");

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 1;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	} else {
		union cvmx_l2c_ctl l2c_val;

		/* Enable L2C index aliasing */

		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_val.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);
		l2c_val.s.disidxalias = 0;
		l2c_wr(priv, CVMX_L2C_CTL, l2c_val.u64);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_val.u64);
	}

	if (OCTEON_IS_OCTEON3()) {

@@ -2321,7 +2321,7 @@ static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
		u64 rdf_cnt;
		char *s;

		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL);
		l2c_ctl.u64 = l2c_rd(priv, CVMX_L2C_CTL_REL);

		/*
		 * It is more convenient to compute the ratio using clock

@@ -2338,7 +2338,7 @@ static int octeon_ddr_initialize(struct ddr_priv *priv, u32 cpu_hertz,
		debug("%-45s : %d, cpu_hertz:%d, ddr_hertz:%d\n",
		      "EARLY FILL COUNT  ", l2c_ctl.cn78xx.rdf_cnt, cpu_hertz,
		      ddr_hertz);
		l2c_wr(priv, CVMX_L2C_CTL, l2c_ctl.u64);
		l2c_wr(priv, CVMX_L2C_CTL_REL, l2c_ctl.u64);
	}

	/* Check for lower DIMM socket populated */