- mips: octeon: add support for DDR4 memory controller

- mips: octeon: add support for DWC3 USB
 - mips: octeon: add support for booting Linux
 -----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCgAdFiEEiQkHUH+J02LLC9InKPlOlyTyXBgFAl9+Ie4ACgkQKPlOlyTy
 XBg5lw//TEk6avR1/G4/SKe2Mj2hDQGm2OKjx5BLG6LC/m+It8lk5hFGCKeRTS3m
 zlQ1z4Rki05Q/HW8fZfUSNZTGwbp1n5WJWMx5PRB2T4zYYeoktxQo6dRLbmR9mxL
 2tiZ3QV3qzJafcwy1xdddr2GRGpRyu/sPHnEf5iHtijfT+6MPqSY5E1An+mAGhGb
 IOjzh/OtmO+4Hu9zlp4G5AhNoQBBRRZg+2Sm+kVrzLyM345byucNpnnFiCT1T54M
 iB3KsijBBvcnZ/bR7WMB+x942uFQi9hWdK0ngQhhOO4W1/Mf2ILst44gcxiJQNaf
 dNIEdQxOeL1JDurQL/GSo4cQIr2mXlt3FtTB8RH8sAf9+aMJjlVqYaX3PGUFQJ5B
 z5hnzKWk+jnxwD2F8fAtjqawQY6cBAaF/BDkxFdZcvhIIp9veUNDnk98YYwL7ZjU
 8zRZQSmwI9zmC0kC9C6mmIN+eAbUhT+XDIlVk/USreujDkq9ESWIvo4SdY0+uXjJ
 EJ1Xhqx/H7Qzem+SdbonJjMIWa/5rBnqT/nvCwSGDx64OdvD+F93RHFeyJvp8p1J
 DWNNRZ4rAlrz3z8hDSuIt0M5ENoGWlHjD3nb11CBK/XnobBLhT+8W/b/ffjGcRnn
 dowofymjvGMnhUUkHIb5heyMZaRCZVquGlEOIjrOdc4GNm6EW5Y=
 =v+p3
 -----END PGP SIGNATURE-----

Merge tag 'mips-pull-2020-10-07' of https://gitlab.denx.de/u-boot/custodians/u-boot-mips

- mips: octeon: add support for DDR4 memory controller
- mips: octeon: add support for DWC3 USB
- mips: octeon: add support for booting Linux
This commit is contained in:
Tom Rini 2020-10-07 17:25:25 -04:00
commit 1c431f118c
37 changed files with 26127 additions and 23 deletions

View file

@ -72,6 +72,23 @@
<0x0300e 4>, <0x0300f 4>;
};
l2c: l2c@1180080000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "cavium,octeon-7xxx-l2c";
reg = <0x11800 0x80000000 0x0 0x01000000>;
u-boot,dm-pre-reloc;
};
lmc: lmc@1180088000000 {
#address-cells = <1>;
#size-cells = <0>;
compatible = "cavium,octeon-7xxx-ddr4";
reg = <0x11800 0x88000000 0x0 0x02000000>; // 2 IFs
u-boot,dm-pre-reloc;
l2c-handle = <&l2c>;
};
reset: reset@1180006001600 {
compatible = "mrvl,cn7xxx-rst";
reg = <0x11800 0x06001600 0x0 0x200>;
@ -126,5 +143,65 @@
spi-max-frequency = <25000000>;
clocks = <&clk OCTEON_CLK_IO>;
};
/* USB 0 */
usb0: uctl@1180068000000 {
compatible = "cavium,octeon-7130-usb-uctl";
reg = <0x11800 0x68000000 0x0 0x100>;
ranges; /* Direct mapping */
#address-cells = <2>;
#size-cells = <2>;
/* Only 100MHz allowed */
refclk-frequency = <100000000>;
/* Only "dlmc_ref_clk0" is supported for 73xx */
refclk-type-ss = "dlmc_ref_clk0";
/* Only "dlmc_ref_clk0" is supported for 73xx */
refclk-type-hs = "dlmc_ref_clk0";
/*
* Power is specified by three parts:
* 1) GPIO handle (must be &gpio)
* 2) GPIO pin number
* 3) Active high (0) or active low (1)
*/
xhci@1680000000000 {
compatible = "cavium,octeon-7130-xhci","synopsys,dwc3","snps,dwc3";
reg = <0x16800 0x00000000 0x10 0x0>;
interrupts = <0x68080 4>; /* UAHC_IMAN, level */
maximum-speed = "super-speed";
dr_mode = "host";
snps,dis_u3_susphy_quirk;
snps,dis_u2_susphy_quirk;
snps,dis_enblslpm_quirk;
};
};
/* USB 1 */
usb1: uctl@1180069000000 {
compatible = "cavium,octeon-7130-usb-uctl";
reg = <0x11800 0x69000000 0x0 0x100>;
ranges; /* Direct mapping */
#address-cells = <2>;
#size-cells = <2>;
/* 50MHz, 100MHz and 125MHz allowed */
refclk-frequency = <100000000>;
/* Either "dlmc_ref_clk0" or "dlmc_ref_clk1" */
refclk-type-ss = "dlmc_ref_clk0";
/* Either "dlmc_ref_clk0" "dlmc_ref_clk1" or "pll_ref_clk" */
refclk-type-hs = "dlmc_ref_clk0";
/*
* Power is specified by three parts:
* 1) GPIO handle (must be &gpio)
* 2) GPIO pin number
* 3) Active high (0) or active low (1)
*/
xhci@1690000000000 {
compatible = "cavium,octeon-7130-xhci","synopsys,dwc3","snps,dwc3";
reg = <0x16900 0x00000000 0x10 0x0>;
interrupts = <0x69080 4>; /* UAHC_IMAN, level */
dr_mode = "host";
};
};
};
};

View file

@ -113,3 +113,27 @@
reg = <0>;
};
};
/* USB 0 */
&usb0 {
status = "okay";
/*
* Power is specified by three parts:
* 1) GPIO handle (must be &gpio)
* 2) GPIO pin number
* 3) Active high (0) or active low (1)
*/
power = <&gpio 20 0>;
};
/* USB 1 */
&usb1 {
status = "okay";
/*
* Power is specified by three parts:
* 1) GPIO handle (must be &gpio)
* 2) GPIO pin number
* 3) Active high (0) or active low (1)
*/
power = <&gpio 21 0>;
};

View file

@ -8,3 +8,6 @@ obj-y += cache.o
obj-y += clock.o
obj-y += cpu.o
obj-y += dram.o
obj-y += cvmx-coremask.o
obj-y += cvmx-bootmem.o
obj-y += bootoctlinux.o

View file

@ -0,0 +1,661 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2020 Stefan Roese <sr@denx.de>
*/
#include <command.h>
#include <config.h>
#include <cpu_func.h>
#include <dm.h>
#include <elf.h>
#include <env.h>
#include <ram.h>
#include <asm/io.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-bootinfo.h>
#include <mach/cvmx-bootmem.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
#include <mach/bootoct_cmd.h>
DECLARE_GLOBAL_DATA_PTR;
/* ToDo: Revisit these settings */
#define OCTEON_RESERVED_LOW_MEM_SIZE (512 * 1024)
#define OCTEON_RESERVED_LOW_BOOT_MEM_SIZE (1024 * 1024)
#define BOOTLOADER_BOOTMEM_DESC_SPACE (1024 * 1024)
/* Default stack and heap sizes, in bytes */
#define DEFAULT_STACK_SIZE (1 * 1024 * 1024)
#define DEFAULT_HEAP_SIZE (3 * 1024 * 1024)
/**
* NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
* octeon-app-init.h file.
*/
enum {
/* If set, core should do app-wide init, only one core per app will have
* this flag set.
*/
BOOT_FLAG_INIT_CORE = 1,
OCTEON_BL_FLAG_DEBUG = 1 << 1,
OCTEON_BL_FLAG_NO_MAGIC = 1 << 2,
/* If set, use uart1 for console */
OCTEON_BL_FLAG_CONSOLE_UART1 = 1 << 3,
OCTEON_BL_FLAG_CONSOLE_PCI = 1 << 4, /* If set, use PCI console */
/* Call exit on break on serial port */
OCTEON_BL_FLAG_BREAK = 1 << 5,
/*
* Be sure to update OCTEON_APP_INIT_H_VERSION when new fields are added
* and to conditionalize the new flag's usage based on the version.
*/
} octeon_boot_descriptor_flag;
/**
* NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
* octeon-app-init.h file.
*/
#ifndef OCTEON_CURRENT_DESC_VERSION
# define OCTEON_CURRENT_DESC_VERSION 7
#endif
/**
* NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
* octeon-app-init.h file.
*/
/* Version 7 changes: Change names of deprecated fields */
#ifndef OCTEON_ARGV_MAX_ARGS
# define OCTEON_ARGV_MAX_ARGS 64
#endif
/**
* NOTE: This must duplicate octeon_boot_descriptor_t in the toolchain
* octeon-app-init.h file.
*/
#ifndef OCTEON_SERIAL_LEN
# define OCTEON_SERIAL_LEN 20
#endif
/**
* Bootloader structure used to pass info to Octeon executive startup code.
* NOTE: all fields are deprecated except for:
* * desc_version
* * desc_size,
* * heap_base
* * heap_end
* * eclock_hz
* * flags
* * argc
* * argv
* * cvmx_desc_vaddr
* * debugger_flags_base_addr
*
* All other fields have been moved to the cvmx_descriptor, and the new
* fields should be added there. They are left as placeholders in this
* structure for binary compatibility.
*
* NOTE: This structure must match what is in the toolchain octeon-app-init.h
* file.
*/
struct octeon_boot_descriptor {
	/* Start of block referenced by assembly code - do not change! */
	u32 desc_version;	/* set to OCTEON_CURRENT_DESC_VERSION */
	u32 desc_size;		/* sizeof(struct octeon_boot_descriptor) */
	u64 stack_top;
	u64 heap_base;
	u64 heap_end;
	u64 deprecated17;
	u64 deprecated16;
	/* End of block referenced by assembly code - do not change! */
	u32 deprecated18;
	u32 deprecated15;
	u32 deprecated14;
	u32 argc;                      /* argc for main() */
	u32 argv[OCTEON_ARGV_MAX_ARGS]; /* argv for main() (32-bit phys addrs) */
	u32 flags;                     /* Flags for application */
	u32 core_mask;                 /* Coremask running this image */
	u32 dram_size;  /* DEPRECATED, DRAM size in megabytes. Used up to SDK 1.8.1 */
	u32 phy_mem_desc_addr;
	u32 debugger_flags_base_addr;  /* used to pass flags from app to debugger. */
	u32 eclock_hz;                 /* CPU clock speed, in hz. */
	u32 deprecated10;
	u32 deprecated9;
	u16 deprecated8;
	u8 deprecated7;
	u8 deprecated6;
	u16 deprecated5;
	u8 deprecated4;
	u8 deprecated3;
	char deprecated2[OCTEON_SERIAL_LEN];
	u8 deprecated1[6];
	u8 deprecated0;
	u64 cvmx_desc_vaddr;           /* Address of cvmx descriptor */
};
static struct octeon_boot_descriptor boot_desc[CVMX_MIPS_MAX_CORES];
static struct cvmx_bootinfo cvmx_bootinfo_array[CVMX_MIPS_MAX_CORES];
/**
 * Programs the boot bus moveable region
 *
 * @param base		base address to place the boot bus moveable region
 *			(bits [31:7])
 * @param region_num	Selects which region, 0 or 1 for node 0,
 *			2 or 3 for node 1
 * @param enable	Set true to enable, false to disable
 * @param data		Pointer to data to put in the region, up to
 *			16 dwords.
 * @param num_words	Number of data dwords (up to 32)
 *
 * @return 0 for success, -1 on error
 */
static int octeon_set_moveable_region(u32 base, int region_num,
				      bool enable, const u64 *data,
				      unsigned int num_words)
{
	int node = region_num >> 1;
	u64 val;
	int i;
	u8 node_mask = 0x01;	/* ToDo: Currently only one node is supported */

	debug("%s(0x%x, %d, %d, %p, %u)\n", __func__, base, region_num, enable,
	      data, num_words);

	if (num_words > 32) {
		printf("%s: Too many words (%d) for region %d\n", __func__,
		       num_words, region_num);
		return -1;
	}

	/* Hardware only honors bits [31:7] of the base address */
	if (base & 0x7f) {
		printf("%s: Error: base address 0x%x must be 128 byte aligned\n",
		       __func__, base);
		return -1;
	}

	if (region_num > (node_mask > 1 ? 3 : 1)) {
		printf("%s: Region number %d out of range\n",
		       __func__, region_num);
		return -1;
	}

	if (!data && num_words > 0) {
		printf("%s: Error: NULL data\n", __func__);
		return -1;
	}

	region_num &= 1;

	/*
	 * Fix: honor the 'enable' parameter.  Previously the EN bit was set
	 * unconditionally, so the region could never be disabled even though
	 * the API documents enable == false as "disable".
	 */
	val = (enable ? MIO_BOOT_LOC_CFG_EN : 0) |
	      FIELD_PREP(MIO_BOOT_LOC_CFG_BASE, base >> 7);
	debug("%s: Setting MIO_BOOT_LOC_CFG(%d) on node %d to 0x%llx\n",
	      __func__, region_num, node, val);
	csr_wr(CVMX_MIO_BOOT_LOC_CFGX(region_num), val);

	/* Point the auto-incrementing data-write address at this region */
	val = FIELD_PREP(MIO_BOOT_LOC_ADR_ADR, (region_num ? 0x80 : 0x00) >> 3);
	debug("%s: Setting MIO_BOOT_LOC_ADR start to 0x%llx\n", __func__, val);
	csr_wr(CVMX_MIO_BOOT_LOC_ADR, val);

	/* Each write to MIO_BOOT_LOC_DAT advances the address automatically */
	for (i = 0; i < num_words; i++) {
		debug(" 0x%02llx: 0x%016llx\n",
		      csr_rd(CVMX_MIO_BOOT_LOC_ADR), data[i]);
		csr_wr(CVMX_MIO_BOOT_LOC_DAT, data[i]);
	}

	return 0;
}
/**
 * Parse comma separated numbers into an array
 *
 * @param[out] values	values read for each node
 * @param[in] str	string to parse
 * @param base		0 for auto, otherwise 8, 10 or 16 for the number base
 *
 * @return number of values read.
 */
static int octeon_parse_nodes(u64 values[CVMX_MAX_NODES],
			      const char *str, int base)
{
	char *end;
	int count = 0;

	/* Read one value per node until the list or the node table ends */
	do {
		debug("Parsing node %d: \"%s\"\n", count, str);
		values[count] = simple_strtoull(str, &end, base);
		debug(" node %d: 0x%llx\n", count, values[count]);
		str = end + 1;
	} while (++count < CVMX_MAX_NODES && *end == ',');

	debug("%s: returning %d\n", __func__, count);
	return count;
}
/**
* Parse command line arguments
*
* @param argc number of arguments
* @param[in] argv array of argument strings
* @param cmd command type
* @param[out] boot_args parsed values
*
* @return number of arguments parsed
*/
int octeon_parse_bootopts(int argc, char *const argv[],
enum octeon_boot_cmd_type cmd,
struct octeon_boot_args *boot_args)
{
u64 node_values[CVMX_MAX_NODES];
int arg, j;
int num_values;
int node;
u8 node_mask = 0x01; /* ToDo: Currently only one node is supported */
debug("%s(%d, %p, %d, %p)\n", __func__, argc, argv, cmd, boot_args);
memset(boot_args, 0, sizeof(*boot_args));
boot_args->stack_size = DEFAULT_STACK_SIZE;
boot_args->heap_size = DEFAULT_HEAP_SIZE;
boot_args->node_mask = 0;
for (arg = 0; arg < argc; arg++) {
debug(" argv[%d]: %s\n", arg, argv[arg]);
if (cmd == BOOTOCT && !strncmp(argv[arg], "stack=", 6)) {
boot_args->stack_size = simple_strtoul(argv[arg] + 6,
NULL, 0);
} else if (cmd == BOOTOCT && !strncmp(argv[arg], "heap=", 5)) {
boot_args->heap_size = simple_strtoul(argv[arg] + 5,
NULL, 0);
} else if (!strncmp(argv[arg], "debug", 5)) {
puts("setting debug flag!\n");
boot_args->boot_flags |= OCTEON_BL_FLAG_DEBUG;
} else if (cmd == BOOTOCT && !strncmp(argv[arg], "break", 5)) {
puts("setting break flag!\n");
boot_args->boot_flags |= OCTEON_BL_FLAG_BREAK;
} else if (!strncmp(argv[arg], "forceboot", 9)) {
boot_args->forceboot = true;
} else if (!strncmp(argv[arg], "nodemask=", 9)) {
boot_args->node_mask = simple_strtoul(argv[arg] + 9,
NULL, 16);
} else if (!strncmp(argv[arg], "numcores=", 9)) {
memset(node_values, 0, sizeof(node_values));
num_values = octeon_parse_nodes(node_values,
argv[arg] + 9, 0);
for (j = 0; j < num_values; j++)
boot_args->num_cores[j] = node_values[j];
boot_args->num_cores_set = true;
} else if (!strncmp(argv[arg], "skipcores=", 10)) {
memset(node_values, 0, sizeof(node_values));
num_values = octeon_parse_nodes(node_values,
argv[arg] + 10, 0);
for (j = 0; j < num_values; j++)
boot_args->num_skipped[j] = node_values[j];
boot_args->num_skipped_set = true;
} else if (!strncmp(argv[arg], "console_uart=", 13)) {
boot_args->console_uart = simple_strtoul(argv[arg] + 13,
NULL, 0);
if (boot_args->console_uart == 1) {
boot_args->boot_flags |=
OCTEON_BL_FLAG_CONSOLE_UART1;
} else if (!boot_args->console_uart) {
boot_args->boot_flags &=
~OCTEON_BL_FLAG_CONSOLE_UART1;
}
} else if (!strncmp(argv[arg], "coremask=", 9)) {
memset(node_values, 0, sizeof(node_values));
num_values = octeon_parse_nodes(node_values,
argv[arg] + 9, 16);
for (j = 0; j < num_values; j++)
cvmx_coremask_set64_node(&boot_args->coremask,
j, node_values[j]);
boot_args->coremask_set = true;
} else if (cmd == BOOTOCTLINUX &&
!strncmp(argv[arg], "namedblock=", 11)) {
boot_args->named_block = argv[arg] + 11;
} else if (!strncmp(argv[arg], "endbootargs", 11)) {
boot_args->endbootargs = 1;
arg++;
if (argc >= arg && cmd != BOOTOCTLINUX)
boot_args->app_name = argv[arg];
break;
} else {
debug(" Unknown argument \"%s\"\n", argv[arg]);
}
}
if (boot_args->coremask_set && boot_args->num_cores_set) {
puts("Warning: both coremask and numcores are set, using coremask.\n");
} else if (!boot_args->coremask_set && !boot_args->num_cores_set) {
cvmx_coremask_set_core(&boot_args->coremask, 0);
boot_args->coremask_set = true;
} else if ((!boot_args->coremask_set) && boot_args->num_cores_set) {
cvmx_coremask_for_each_node(node, node_mask)
cvmx_coremask_set64_node(&boot_args->coremask, node,
((1ull << boot_args->num_cores[node]) - 1) <<
boot_args->num_skipped[node]);
boot_args->coremask_set = true;
}
/* Update the node mask based on the coremask or the number of cores */
for (j = 0; j < CVMX_MAX_NODES; j++) {
if (cvmx_coremask_get64_node(&boot_args->coremask, j))
boot_args->node_mask |= 1 << j;
}
debug("%s: return %d\n", __func__, arg);
return arg;
}
/*
 * U-Boot command handler: load and start a Linux kernel (ELF or raw
 * vmlinux.bin already at the load address) on one or more Octeon cores.
 * Secondary cores are released from reset and then kicked via NMI into
 * a relocated boot stub installed in the boot-bus moveable region.
 */
int do_bootoctlinux(struct cmd_tbl *cmdtp, int flag, int argc,
		    char *const argv[])
{
	/* Kernel entry ABI: a0 = argc, a1 = argv (phys), a2 = init-core
	 * flag, a3 = boot descriptor address
	 */
	typedef void __noreturn (*kernel_entry_t)(int, ulong, ulong, ulong);
	kernel_entry_t kernel;
	struct octeon_boot_args boot_args;
	int arg_start = 1;
	int arg_count;
	u64 addr = 0;		/* Address of the ELF image */
	int arg0;
	u64 arg1;
	u64 arg2;
	u64 arg3;
	int ret;
	struct cvmx_coremask core_mask;
	struct cvmx_coremask coremask_to_run;
	struct cvmx_coremask avail_coremask;
	int first_core;
	int core;
	struct ram_info ram;
	struct udevice *dev;
	const u64 *nmi_code;
	int num_dwords;
	u8 node_mask = 0x01;	/* only one node supported so far */
	int i;

	cvmx_coremask_clear_all(&core_mask);
	cvmx_coremask_clear_all(&coremask_to_run);

	/*
	 * If argv[1] looks like a hex number (with or without "0x"),
	 * treat it as the image load address.
	 */
	if (argc >= 2 && (isxdigit(argv[1][0]) && (isxdigit(argv[1][1]) ||
						   argv[1][1] == 'x' ||
						   argv[1][1] == 'X' ||
						   argv[1][1] == '\0'))) {
		addr = simple_strtoul(argv[1], NULL, 16);
		if (!addr)
			addr = CONFIG_SYS_LOAD_ADDR;
		arg_start++;
	}
	if (addr == 0)
		addr = CONFIG_SYS_LOAD_ADDR;

	debug("%s: arg start: %d\n", __func__, arg_start);
	arg_count = octeon_parse_bootopts(argc - arg_start, argv + arg_start,
					  BOOTOCTLINUX, &boot_args);
	debug("%s:\n"
	      " named block: %s\n"
	      " node mask: 0x%x\n"
	      " stack size: 0x%x\n"
	      " heap size: 0x%x\n"
	      " boot flags: 0x%x\n"
	      " force boot: %s\n"
	      " coremask set: %s\n"
	      " num cores set: %s\n"
	      " num skipped set: %s\n"
	      " endbootargs: %s\n",
	      __func__,
	      boot_args.named_block ? boot_args.named_block : "none",
	      boot_args.node_mask,
	      boot_args.stack_size,
	      boot_args.heap_size,
	      boot_args.boot_flags,
	      boot_args.forceboot ? "true" : "false",
	      boot_args.coremask_set ? "true" : "false",
	      boot_args.num_cores_set ? "true" : "false",
	      boot_args.num_skipped_set ? "true" : "false",
	      boot_args.endbootargs ? "true" : "false");
	debug(" num cores: ");
	for (i = 0; i < CVMX_MAX_NODES; i++)
		debug("%s%d", i > 0 ? ", " : "", boot_args.num_cores[i]);
	debug("\n num skipped: ");
	/*
	 * NOTE(review): the coremask dump sits inside this loop, so it is
	 * emitted once per node — looks like misplaced braces, but it is
	 * debug-only output. Confirm against upstream before changing.
	 */
	for (i = 0; i < CVMX_MAX_NODES; i++) {
		debug("%s%d", i > 0 ? ", " : "", boot_args.num_skipped[i]);
		debug("\n coremask:\n");
		cvmx_coremask_dprint(&boot_args.coremask);
	}

	/* Hide bootloader-consumed options from the kernel's command line */
	if (boot_args.endbootargs) {
		debug("endbootargs set, adjusting argc from %d to %d, arg_count: %d, arg_start: %d\n",
		      argc, argc - (arg_count + arg_start), arg_count,
		      arg_start);
		argc -= (arg_count + arg_start);
		argv += (arg_count + arg_start);
	}

	/*
	 * numcores specification overrides a coremask on the same command line
	 */
	cvmx_coremask_copy(&core_mask, &boot_args.coremask);

	/*
	 * Remove cores from coremask based on environment variable stored in
	 * flash
	 */
	if (validate_coremask(&core_mask) != 0) {
		puts("Invalid coremask.\n");
		return 1;
	} else if (cvmx_coremask_is_empty(&core_mask)) {
		puts("Coremask is empty after coremask_override mask. Nothing to do.\n");
		return 0;
	}

	if (cvmx_coremask_intersects(&core_mask, &coremask_to_run)) {
		puts("ERROR: Can't load code on core twice! Provided coremask:\n");
		cvmx_coremask_print(&core_mask);
		puts("overlaps previously loaded coremask:\n");
		cvmx_coremask_print(&coremask_to_run);
		return -1;
	}

	debug("Setting up boot descriptor block with core mask:\n");
	cvmx_coremask_dprint(&core_mask);

	/*
	 * Add coremask to global mask of cores that have been set up and are
	 * runable
	 */
	cvmx_coremask_or(&coremask_to_run, &coremask_to_run, &core_mask);

	/* Get RAM size */
	ret = uclass_get_device(UCLASS_RAM, 0, &dev);
	if (ret) {
		debug("DRAM init failed: %d\n", ret);
		return ret;
	}

	ret = ram_get_info(dev, &ram);
	if (ret) {
		debug("Cannot get DRAM size: %d\n", ret);
		return ret;
	}

	/*
	 * Load kernel ELF image, or try binary if ELF is not detected.
	 * This way the much smaller vmlinux.bin can also be started but
	 * has to be loaded at the correct address (ep as parameter).
	 */
	if (!valid_elf_image(addr))
		printf("Booting binary image instead (vmlinux.bin)...\n");
	else
		addr = load_elf_image_shdr(addr);

	/* Set kernel entry point */
	kernel = (kernel_entry_t)addr;

	/* Init bootmem list for Linux kernel booting */
	if (!cvmx_bootmem_phy_mem_list_init(
		    ram.size, OCTEON_RESERVED_LOW_MEM_SIZE,
		    (void *)CKSEG0ADDR(BOOTLOADER_BOOTMEM_DESC_SPACE))) {
		printf("FATAL: Error initializing free memory list\n");
		return 0;
	}

	first_core = cvmx_coremask_get_first_core(&coremask_to_run);

	/*
	 * Fill one boot descriptor + bootinfo record per core; only the
	 * first core gets BOOT_FLAG_INIT_CORE.
	 */
	cvmx_coremask_for_each_core(core, &coremask_to_run) {
		debug("%s: Activating core %d\n", __func__, core);

		cvmx_bootinfo_array[core].core_mask =
			cvmx_coremask_get32(&coremask_to_run);
		cvmx_coremask_copy(&cvmx_bootinfo_array[core].ext_core_mask,
				   &coremask_to_run);

		if (core == first_core)
			cvmx_bootinfo_array[core].flags |= BOOT_FLAG_INIT_CORE;

		cvmx_bootinfo_array[core].dram_size = ram.size / (1024 * 1024);

		cvmx_bootinfo_array[core].dclock_hz = gd->mem_clk * 1000000;
		cvmx_bootinfo_array[core].eclock_hz = gd->cpu_clk;

		cvmx_bootinfo_array[core].led_display_base_addr = 0;
		cvmx_bootinfo_array[core].phy_mem_desc_addr =
			((u32)(u64)__cvmx_bootmem_internal_get_desc_ptr()) &
			0x7ffffff;

		cvmx_bootinfo_array[core].major_version = CVMX_BOOTINFO_MAJ_VER;
		cvmx_bootinfo_array[core].minor_version = CVMX_BOOTINFO_MIN_VER;
		cvmx_bootinfo_array[core].fdt_addr = virt_to_phys(gd->fdt_blob);

		boot_desc[core].dram_size = gd->ram_size / (1024 * 1024);
		boot_desc[core].cvmx_desc_vaddr =
			virt_to_phys(&cvmx_bootinfo_array[core]);

		boot_desc[core].desc_version = OCTEON_CURRENT_DESC_VERSION;
		boot_desc[core].desc_size = sizeof(boot_desc[0]);

		boot_desc[core].flags = cvmx_bootinfo_array[core].flags;
		boot_desc[core].eclock_hz = cvmx_bootinfo_array[core].eclock_hz;

		boot_desc[core].argc = argc;
		for (i = 0; i < argc; i++)
			boot_desc[core].argv[i] = (u32)virt_to_phys(argv[i]);
	}

	core = 0;
	arg0 = argc;
	arg1 = (u64)argv;
	arg2 = 0x1;		/* Core 0 sets init core for Linux */
	arg3 = XKPHYS | virt_to_phys(&boot_desc[core]);

	debug("## Transferring control to Linux (at address %p) ...\n", kernel);

	/*
	 * Flush cache before jumping to application. Let's flush the
	 * whole SDRAM area, since we don't know the size of the image
	 * that was loaded.
	 */
	flush_cache(gd->ram_base, gd->ram_top - gd->ram_base);

	/* Take all cores out of reset */
	csr_wr(CVMX_CIU_PP_RST, 0);
	sync();

	/* Wait a short while for the other cores... */
	mdelay(100);

	/* Install boot code into moveable bus for NMI (other cores) */
	nmi_code = (const u64 *)nmi_bootvector;
	/* round up to whole dwords */
	num_dwords = (((u64)&nmi_handler_para[0] - (u64)nmi_code) + 7) / 8;

	ret = octeon_set_moveable_region(0x1fc00000, 0, true, nmi_code,
					 num_dwords);
	if (ret) {
		printf("Error installing NMI handler for SMP core startup\n");
		return 0;
	}

	/* Write NMI handler parameters for Linux kernel booting */
	nmi_handler_para[0] = (u64)kernel;
	nmi_handler_para[1] = arg0;
	nmi_handler_para[2] = arg1;
	nmi_handler_para[3] = 0; /* Don't set init core for secondary cores */
	nmi_handler_para[4] = arg3;
	sync();

	/* Wait a short while for the other cores... */
	mdelay(100);

	/*
	 * Cores have already been taken out of reset to conserve power.
	 * We need to send a NMI to get the cores out of their wait loop
	 */
	octeon_get_available_coremask(&avail_coremask);
	debug("Available coremask:\n");
	cvmx_coremask_dprint(&avail_coremask);
	debug("Starting coremask:\n");
	cvmx_coremask_dprint(&coremask_to_run);
	debug("Sending NMIs to other cores\n");
	if (octeon_has_feature(OCTEON_FEATURE_CIU3)) {
		u64 avail_cm;
		int node;

		cvmx_coremask_for_each_node(node, node_mask) {
			avail_cm = cvmx_coremask_get64_node(&avail_coremask,
							    node);

			if (avail_cm != 0) {
				debug("Sending NMI to node %d, coremask=0x%llx, CIU3_NMI=0x%llx\n",
				      node, avail_cm,
				      (node > 0 ? -1ull : -2ull) & avail_cm);
				/* mask off core 0 on node 0 (this core) */
				csr_wr(CVMX_CIU3_NMI,
				       (node > 0 ? -1ull : -2ull) & avail_cm);
			}
		}
	} else {
		/* -2ull masks off core 0 (this core) */
		csr_wr(CVMX_CIU_NMI,
		       -2ull & cvmx_coremask_get64(&avail_coremask));
	}
	debug("Done sending NMIs\n");

	/* Wait a short while for the other cores... */
	mdelay(100);

	/*
	 * pass address parameter as argv[0] (aka command name),
	 * and all remaining args
	 * a0 = argc
	 * a1 = argv (32 bit physical addresses, not pointers)
	 * a2 = init core
	 * a3 = boot descriptor address
	 * a4/t0 = entry point (only used by assembly stub)
	 */
	kernel(arg0, arg1, arg2, arg3);

	return 0;
}
/* Command registration; help text fix: "commnad" -> "command" */
U_BOOT_CMD(bootoctlinux, 32, 0, do_bootoctlinux,
	   "Boot from a linux ELF image in memory",
	   "elf_address [coremask=mask_to_run | numcores=core_cnt_to_run] "
	   "[forceboot] [skipcores=core_cnt_to_skip] [namedblock=name] [endbootargs] [app_args ...]\n"
	   "elf_address - address of ELF image to load. If 0, default load address\n"
	   " is used.\n"
	   "coremask - mask of cores to run on. Anded with coremask_override\n"
	   " environment variable to ensure only working cores are used\n"
	   "numcores - number of cores to run on. Runs on specified number of cores,\n"
	   " taking into account the coremask_override.\n"
	   "skipcores - only meaningful with numcores. Skips this many cores\n"
	   " (starting from 0) when loading the numcores cores.\n"
	   " For example, setting skipcores to 1 will skip core 0\n"
	   " and load the application starting at the next available core.\n"
	   "forceboot - if set, boots application even if core 0 is not in mask\n"
	   "namedblock - specifies a named block to load the kernel\n"
	   "endbootargs - if set, bootloader does not process any further arguments and\n"
	   " only passes the arguments that follow to the kernel.\n"
	   " If not set, the kernel gets the entire command line as\n"
	   " arguments.\n" "\n");

View file

@ -5,14 +5,13 @@
#include <cpu_func.h>
/*
* The Octeon platform is cache coherent and cache flushes and invalidates
* are not needed. Define some platform specific empty flush_foo()
* functions here to overwrite the _weak common function as a no-op.
* This effectively disables all cache operations.
*/
/* Octeon memory write barrier */
#define CVMX_SYNCW asm volatile ("syncw\nsyncw\n" : : : "memory")

/*
 * No cache maintenance is required on the coherent Octeon platform;
 * only drain the CPU write buffer so pending stores reach memory.
 */
void flush_dcache_range(ulong start_addr, ulong stop)
{
	/* Flush all pending writes */
	CVMX_SYNCW;
}
void flush_cache(ulong start_addr, ulong size)
@ -21,4 +20,5 @@ void flush_cache(ulong start_addr, ulong size)
/* Intentional no-op: Octeon caches are hardware-coherent (see note above) */
void invalidate_dcache_range(ulong start_addr, ulong stop)
{
	/* Don't need to do anything for OCTEON */
}

View file

@ -13,6 +13,27 @@
DECLARE_GLOBAL_DATA_PTR;
/*
 * TRUE for devices having registers with little-endian byte
 * order, FALSE for registers with native-endian byte order.
 * PCI mandates little-endian, USB and SATA are configurable,
 * but we chose little-endian for these.
 *
 * This table will be referenced in the Octeon platform specific
 * mangle-port.h header.  Indexed by the upper address bits
 * identifying the device region.
 */
const bool octeon_should_swizzle_table[256] = {
	[0x00] = true,	/* bootbus/CF */
	[0x1b] = true,	/* PCI mmio window */
	[0x1c] = true,	/* PCI mmio window */
	[0x1d] = true,	/* PCI mmio window */
	[0x1e] = true,	/* PCI mmio window */
	[0x68] = true,	/* OCTEON III USB */
	[0x69] = true,	/* OCTEON III USB */
	[0x6c] = true,	/* OCTEON III SATA */
	[0x6f] = true,	/* OCTEON II USB */
};
static int get_clocks(void)
{
const u64 ref_clock = PLL_REF_CLK;

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,366 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2018-2020 Marvell International Ltd.
*/
#include <env.h>
#include <errno.h>
#include <linux/compat.h>
#include <linux/ctype.h>
#include <mach/cvmx-regs.h>
#include <mach/cvmx-coremask.h>
#include <mach/cvmx-fuse.h>
#include <mach/octeon-model.h>
#include <mach/octeon-feature.h>
struct cvmx_coremask *get_coremask_override(struct cvmx_coremask *pcm)
{
struct cvmx_coremask pcm_override = CVMX_COREMASK_MAX;
char *cptr;
/* The old code sets the number of cores to be to 16 in this case. */
cvmx_coremask_set_cores(pcm, 0, 16);
if (OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3())
cvmx_coremask_copy(pcm, &pcm_override);
cptr = env_get("coremask_override");
if (cptr) {
if (cvmx_coremask_str2bmp(pcm, cptr) < 0)
return NULL;
}
return pcm;
}
/* Validate the coremask that is passed to a boot* function.
 *
 * Returns 0 when @pcm is bootable (subset of the cores present in the
 * fuses), -1 on error (coremask_override unparsable or non-existent
 * cores requested).
 */
int validate_coremask(struct cvmx_coremask *pcm)
{
	struct cvmx_coremask coremask_override;
	struct cvmx_coremask fuse_coremask;

	/* Fails only if env var "coremask_override" cannot be parsed */
	if (!get_coremask_override(&coremask_override))
		return -1;

	octeon_get_available_coremask(&fuse_coremask);

	/* Every requested core must physically exist */
	if (!cvmx_coremask_is_subset(&fuse_coremask, pcm)) {
		puts("ERROR: Can't boot cores that don't exist!\n");
		puts("Available coremask:\n");
		cvmx_coremask_print(&fuse_coremask);
		return -1;
	}

	if (!cvmx_coremask_is_subset(&coremask_override, pcm)) {
		struct cvmx_coremask print_cm;

		/*
		 * NOTE(review): the intersection below is only computed into
		 * print_cm for display — pcm itself is NOT updated, although
		 * the caller's comment says the override "removes" cores.
		 * Confirm intended behavior before relying on it.
		 */
		puts("Notice: coremask changed from:\n");
		cvmx_coremask_print(pcm);
		puts("based on coremask_override of:\n");
		cvmx_coremask_print(&coremask_override);
		cvmx_coremask_and(&print_cm, pcm, &coremask_override);
		puts("to:\n");
		cvmx_coremask_print(&print_cm);
	}

	return 0;
}
/**
 * In CIU_FUSE for the 78XX, odd and even cores are separated out.
 * For example, a CIU_FUSE value of 0xfffffefffffe indicates that bits 0 and 1
 * are set.
 * This function converts the bit number in the CIU_FUSE register to a
 * physical core number.
 */
static int convert_ciu_fuse_to_physical_core(int core, int max_cores)
{
	int half = max_cores / 2;

	/* Only CIU3-based CN78XX interleaves even/odd cores in CIU_FUSE */
	if (!octeon_has_feature(OCTEON_FEATURE_CIU3))
		return core;
	if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
		return core;

	/* Lower half of the fuse bits -> even cores, upper half -> odd */
	if (core < half)
		return core * 2;

	return (core - half) * 2 + 1;
}
/**
 * Get the total number of fuses blown as well as the number blown per tad.
 *
 * @param coremask		fuse coremask
 * @param[out] tad_blown_count	number of cores blown for each tad
 * @param num_tads		number of tads
 * @param max_cores		maximum number of cores
 *
 * @return void
 */
void fill_tad_corecount(u64 coremask, int tad_blown_count[], int num_tads,
			int max_cores)
{
	int bit;

	for (bit = 0; bit < max_cores; bit++) {
		int phys, tad;

		/* A set bit means the core exists (fuse not blown) */
		if (coremask & (1ULL << bit))
			continue;

		phys = convert_ciu_fuse_to_physical_core(bit, max_cores);
		tad = phys % num_tads;
		tad_blown_count[tad]++;
	}
}
/*
 * Build a bitmask selecting one core per tad column: bit 0 plus every
 * num_tads-th bit, repeated for the cores_per_tad rows of the part.
 */
u64 get_core_pattern(int num_tads, int max_cores)
{
	int cores_per_tad = max_cores / num_tads;
	u64 pattern = 1ULL;
	int row;

	for (row = 1; row < cores_per_tad; row++)
		pattern |= pattern << num_tads;

	return pattern;
}
/**
 * For CN78XX and CN68XX this function returns the logical coremask from the
 * CIU_FUSE register value. For other models there is no difference.
 *
 * @param ciu_fuse_value	fuse value from CIU_FUSE register
 * @return logical coremask of CIU_FUSE value.
 */
u64 get_logical_coremask(u64 ciu_fuse_value)
{
	int tad_blown_count[MAX_CORE_TADS] = {0};
	int num_tads, max_cores;
	u64 result = 0;
	u64 pattern;
	int tad;

	if (OCTEON_IS_MODEL(OCTEON_CN78XX)) {
		num_tads = 8;
		max_cores = 48;
	} else if (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		   OCTEON_IS_MODEL(OCTEON_CNF75XX)) {
		num_tads = 4;
		max_cores = 16;
	} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
		num_tads = 4;
		max_cores = 32;
	} else {
		/* Most Octeon devices don't need any mapping. */
		return ciu_fuse_value;
	}

	pattern = get_core_pattern(num_tads, max_cores);
	fill_tad_corecount(ciu_fuse_value, tad_blown_count,
			   num_tads, max_cores);

	/* Shift each tad's column down by the cores blown in that tad */
	for (tad = 0; tad < num_tads; tad++)
		result |= (pattern << tad) >>
			  (tad_blown_count[tad] * num_tads);

	return result;
}
/**
 * Returns the available coremask either from env or fuses.
 * If the fuses are blown and locked, they are the definitive coremask.
 *
 * @param pcm	pointer to coremask to fill in
 * @return pointer to coremask
 */
struct cvmx_coremask *octeon_get_available_coremask(struct cvmx_coremask *pcm)
{
	u8 node_mask = 0x01;	/* ToDo: Currently only one node is supported */
	u64 ciu_fuse;
	u64 cores;

	cvmx_coremask_clear_all(pcm);

	/* CIU3 parts: read the fuse mask for each node directly */
	if (octeon_has_feature(OCTEON_FEATURE_CIU3)) {
		int node;

		cvmx_coremask_for_each_node(node, node_mask) {
			ciu_fuse = csr_rd(CVMX_CIU_FUSE) &
				   0x0000FFFFFFFFFFFFULL;
			ciu_fuse = get_logical_coremask(ciu_fuse);
			cvmx_coremask_set64_node(pcm, node, ciu_fuse);
		}

		return pcm;
	}

	ciu_fuse = csr_rd(CVMX_CIU_FUSE) & 0x0000FFFFFFFFFFFFULL;
	ciu_fuse = get_logical_coremask(ciu_fuse);

	if (OCTEON_IS_MODEL(OCTEON_CN68XX))
		cvmx_coremask_set64(pcm, ciu_fuse);

	/* Get number of cores from fuse register, convert to coremask */
	cores = __builtin_popcountll(ciu_fuse);
	cvmx_coremask_set_cores(pcm, 0, cores);

	return pcm;
}
/*
 * Parse a hex string ("-1", optional "0x" prefix) into a coremask bitmap.
 * The string is consumed 16 hex digits (one u64 holder) at a time from the
 * least-significant end; @hexstr is temporarily modified (a NUL is spliced
 * in at each chunk boundary) and restored afterwards, so it must be
 * writable.  Returns 0 on success, -1 if too long, -2 on bad input.
 */
int cvmx_coremask_str2bmp(struct cvmx_coremask *pcm, char *hexstr)
{
	int i, j;
	int l; /* length of the hexstr in characters */
	int lb; /* number of bits taken by hexstr */
	int hldr_offset;/* holder's offset within the coremask */
	int hldr_xsz; /* holder's size in the number of hex digits */
	u64 h;
	char c;

#define MINUS_ONE (hexstr[0] == '-' && hexstr[1] == '1' && hexstr[2] == 0)
	/* "-1" is shorthand for all cores */
	if (MINUS_ONE) {
		cvmx_coremask_set_all(pcm);
		return 0;
	}

	/* Skip '0x' from hexstr */
	if (hexstr[0] == '0' && (hexstr[1] == 'x' || hexstr[1] == 'X'))
		hexstr += 2;

	if (!strlen(hexstr)) {
		printf("%s: Error: hex string is empty\n", __func__);
		return -2;
	}

	/* Trim leading zeros */
	while (*hexstr == '0')
		hexstr++;

	cvmx_coremask_clear_all(pcm);
	l = strlen(hexstr);

	/* If length is 0 then the hex string must be all zeros */
	if (l == 0)
		return 0;

	/* Validate every remaining character before parsing */
	for (i = 0; i < l; i++) {
		if (isxdigit((int)hexstr[i]) == 0) {
			printf("%s: Non-hex digit within hexstr\n", __func__);
			return -2;
		}
	}

	/* Count significant bits: 4 per digit, minus the leading digit's
	 * unused high bits
	 */
	lb = (l - 1) * 4;
	if (hexstr[0] > '7')
		lb += 4;
	else if (hexstr[0] > '3')
		lb += 3;
	else if (hexstr[0] > '1')
		lb += 2;
	else
		lb += 1;

	if (lb > CVMX_MIPS_MAX_CORES) {
		printf("%s: hexstr (%s) is too long\n", __func__, hexstr);
		return -1;
	}

	hldr_offset = 0;
	hldr_xsz = 2 * sizeof(u64); /* 16 hex digits per 64-bit holder */
	for (i = l; i > 0; i -= hldr_xsz) {
		/* Temporarily terminate the chunk, parse, then restore */
		c = hexstr[i];
		hexstr[i] = 0;
		j = i - hldr_xsz;
		if (j < 0)
			j = 0;
		h = simple_strtoull(&hexstr[j], NULL, 16);
		/*
		 * NOTE(review): simple_strtoull does not appear to set
		 * errno; this check looks ineffective — confirm.
		 */
		if (errno == EINVAL) {
			printf("%s: strtou returns w/ EINVAL\n", __func__);
			return -2;
		}
		pcm->coremask_bitmap[hldr_offset] = h;
		hexstr[i] = c;
		hldr_offset++;
	}

	return 0;
}
/*
 * Pretty-print a coremask as 64-bit hex words, most-significant word
 * first, skipping leading zero words.  Multi-node parts get one line per
 * node; "<EMPTY>" is printed when no bit is set.
 */
void cvmx_coremask_print(const struct cvmx_coremask *pcm)
{
	int i, j;
	int start;
	int found = 0;

	/*
	 * Print one node per line. Since the bitmap is stored LSB to MSB
	 * we reverse the order when printing.
	 */
	if (!octeon_has_feature(OCTEON_FEATURE_MULTINODE)) {
		/* Single-node: walk the words of node 0 from MSB down */
		start = 0;
		for (j = CVMX_COREMASK_MAX_CORES_PER_NODE -
			     CVMX_COREMASK_HLDRSZ;
		     j >= 0; j -= CVMX_COREMASK_HLDRSZ) {
			/* Suppress leading zero words */
			if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
				start = 1;
			if (start) {
				printf(" 0x%llx",
				       (u64)pcm->coremask_bitmap[j /
						CVMX_COREMASK_HLDRSZ]);
			}
		}
		if (start)
			found = 1;
		/*
		 * If the coremask is empty print <EMPTY> so it is not
		 * confusing
		 */
		if (!found)
			printf("<EMPTY>");
		printf("\n");
		return;
	}

	/* Multi-node: one line per node, words MSB-first within each node */
	for (i = 0; i < CVMX_MAX_USED_CORES_BMP;
	     i += CVMX_COREMASK_MAX_CORES_PER_NODE) {
		printf("%s node %d:", i > 0 ? "\n" : "",
		       cvmx_coremask_core_to_node(i));
		start = 0;
		for (j = i + CVMX_COREMASK_MAX_CORES_PER_NODE -
			     CVMX_COREMASK_HLDRSZ;
		     j >= i;
		     j -= CVMX_COREMASK_HLDRSZ) {
			/* Don't start printing until we get a non-zero word. */
			if (pcm->coremask_bitmap[j / CVMX_COREMASK_HLDRSZ] != 0)
				start = 1;
			if (start) {
				printf(" 0x%llx", (u64)pcm->coremask_bitmap[j /
						CVMX_COREMASK_HLDRSZ]);
			}
		}
		if (start)
			found = 1;
	}

	/* Report any set bits beyond the per-node words covered above */
	i /= CVMX_COREMASK_HLDRSZ;
	for (; i < CVMX_COREMASK_BMPSZ; i++) {
		if (pcm->coremask_bitmap[i]) {
			printf(" EXTRA GARBAGE[%i]: %016llx\n", i,
			       (u64)pcm->coremask_bitmap[i]);
		}
	}

	/* If the coremask is empty print <EMPTY> so it is not confusing */
	if (!found)
		printf("<EMPTY>");
	printf("\n");
}

View file

@ -1,28 +1,84 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2020 Stefan Roese <sr@denx.de>
*/
#include <config.h>
#include <dm.h>
#include <ram.h>
#include <asm/global_data.h>
#include <linux/compat.h>
#include <display_options.h>
DECLARE_GLOBAL_DATA_PTR;
#define UBOOT_RAM_SIZE_MAX 0x10000000ULL
/**
 * dram_init() - Detect and report DRAM size to U-Boot
 *
 * With CONFIG_RAM_OCTEON enabled, probe the RAM uclass driver and
 * report the detected size, capped at UBOOT_RAM_SIZE_MAX for U-Boot's
 * own use. Without it, DDR is not initialized yet and U-Boot runs in
 * L2 cache, so a fixed 4 MiB is reported.
 *
 * Fix: the scraped merge left the old L2-cache fallback pasted before
 * the if/else, so gd->ram_size was unconditionally set to 4 MiB (and
 * gd->bd dereferenced before board init); the stale lines are removed.
 *
 * Return: 0 on success, negative error code from the RAM driver on
 *         failure
 */
int dram_init(void)
{
	if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
		struct ram_info ram;
		struct udevice *dev;
		int ret;

		ret = uclass_get_device(UCLASS_RAM, 0, &dev);
		if (ret) {
			debug("DRAM init failed: %d\n", ret);
			return ret;
		}

		ret = ram_get_info(dev, &ram);
		if (ret) {
			debug("Cannot get DRAM size: %d\n", ret);
			return ret;
		}

		/* U-Boot itself only maps a bounded amount of DRAM */
		gd->ram_size = min_t(size_t, ram.size, UBOOT_RAM_SIZE_MAX);
		debug("SDRAM base=%lx, size=%lx\n",
		      (unsigned long)ram.base, (unsigned long)ram.size);
	} else {
		/*
		 * No DDR init yet -> run in L2 cache
		 */
		gd->ram_size = (4 << 20);
		gd->bd->bi_dram[0].size = gd->ram_size;
		gd->bd->bi_dram[1].size = 0;
	}

	return 0;
}
void board_add_ram_info(int use_default)
{
if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
struct ram_info ram;
struct udevice *dev;
int ret;
ret = uclass_get_device(UCLASS_RAM, 0, &dev);
if (ret) {
debug("DRAM init failed: %d\n", ret);
return;
}
ret = ram_get_info(dev, &ram);
if (ret) {
debug("Cannot get DRAM size: %d\n", ret);
return;
}
printf(" (");
print_size(ram.size, " total)");
}
}
/**
 * board_get_usable_ram_top() - Return the highest usable RAM address
 *
 * With CONFIG_RAM_OCTEON, map at most UBOOT_RAM_SIZE_MAX (256 MiB)
 * above the SDRAM base; otherwise return the previously computed
 * gd->ram_top.
 *
 * Fix: the scraped merge left the old body's "return gd->ram_top;"
 * before the new if-block, making the RAM_OCTEON path unreachable;
 * the stray early return is removed.
 *
 * @total_size: requested size (unused)
 * Return: highest usable RAM address (an address, not a size)
 */
ulong board_get_usable_ram_top(ulong total_size)
{
	if (IS_ENABLED(CONFIG_RAM_OCTEON)) {
		/* Map a maximum of 256MiB - return not size but address */
		return CONFIG_SYS_SDRAM_BASE + min(gd->ram_size,
						   UBOOT_RAM_SIZE_MAX);
	}

	return gd->ram_top;
}

View file

@ -0,0 +1,54 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __BOOTOCT_CMD_H__
#define __BOOTOCT_CMD_H__
#include "cvmx-coremask.h"
enum octeon_boot_cmd_type {
BOOTOCT,
BOOTOCTLINUX,
BOOTOCTELF
};
/** Structure to contain results of command line argument parsing */
struct octeon_boot_args {
	/*
	 * Trailing member comments use Doxygen's "/**<" form so they
	 * attach to the member they follow (plain "/**" would document
	 * the NEXT member).
	 */
	struct cvmx_coremask coremask;	/**< Parsed coremask */
	int num_cores[CVMX_MAX_NODES];	/**< number of cores */
	int num_skipped[CVMX_MAX_NODES];/**< number of skipped cores */
	const char *app_name;		/**< Application name */
	const char *named_block;	/**< Named block to load Linux into */
	u32 stack_size;			/**< stack size */
	u32 heap_size;			/**< heap size */
	u32 boot_flags;			/**< boot flags */
	int node_mask;			/**< Node mask to use */
	int console_uart;		/**< serial console number */
	bool forceboot;			/**< force booting if core 0 not set */
	bool coremask_set;		/**< set if coremask was set */
	bool num_cores_set;		/**< Set if num_cores was set */
	bool num_skipped_set;		/**< Set if num_skipped was set */
	/** Set if endbootargs parameter was passed. */
	bool endbootargs;
};
/**
* Parse command line arguments
*
* @param argc number of arguments
* @param[in] argv array of argument strings
* @param cmd command type
* @param[out] boot_args parsed values
*
* @return number of arguments parsed
*/
int octeon_parse_bootopts(int argc, char *const argv[],
enum octeon_boot_cmd_type cmd,
struct octeon_boot_args *boot_args);
void nmi_bootvector(void);
extern u64 nmi_handler_para[];
#endif /* __BOOTOCT_CMD_H__ */

View file

@ -0,0 +1,350 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
/*
* Header file containing the ABI with the bootloader.
*/
#ifndef __CVMX_BOOTINFO_H__
#define __CVMX_BOOTINFO_H__
#include "cvmx-coremask.h"
/*
* Current major and minor versions of the CVMX bootinfo block that is
* passed from the bootloader to the application. This is versioned
* so that applications can properly handle multiple bootloader
* versions.
*/
#define CVMX_BOOTINFO_MAJ_VER 1
#define CVMX_BOOTINFO_MIN_VER 4
#if (CVMX_BOOTINFO_MAJ_VER == 1)
#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
/*
* This structure is populated by the bootloader. For binary
* compatibility the only changes that should be made are
* adding members to the end of the structure, and the minor
* version should be incremented at that time.
* If an incompatible change is made, the major version
* must be incremented, and the minor version should be reset
* to 0.
*/
/*
 * Boot information handed from the bootloader to the application.
 * This is a binary ABI: only append members and bump the minor
 * version; never reorder or resize existing members.
 */
struct cvmx_bootinfo {
	/* ABI version of this block (CVMX_BOOTINFO_MAJ/MIN_VER) */
	u32 major_version;
	u32 minor_version;
	u64 stack_top;
	u64 heap_base;
	u64 heap_end;
	u64 desc_vaddr;
	u32 exception_base_addr;
	u32 stack_size;
	u32 flags;
	/* 32-bit coremask; superseded by ext_core_mask (min ver >= 4) */
	u32 core_mask;
	/* DRAM size in megabytes */
	u32 dram_size;
	/* physical address of free memory descriptor block*/
	u32 phy_mem_desc_addr;
	/* used to pass flags from app to debugger */
	u32 debugger_flags_base_addr;
	/* CPU clock speed, in hz */
	u32 eclock_hz;
	/* DRAM clock speed, in hz */
	u32 dclock_hz;
	u32 reserved0;
	u16 board_type;
	u8 board_rev_major;
	u8 board_rev_minor;
	u16 reserved1;
	u8 reserved2;
	u8 reserved3;
	char board_serial_number[CVMX_BOOTINFO_OCTEON_SERIAL_LEN];
	u8 mac_addr_base[6];
	u8 mac_addr_count;
#if (CVMX_BOOTINFO_MIN_VER >= 1)
	/*
	 * Several boards support compact flash on the Octeon boot
	 * bus. The CF memory spaces may be mapped to different
	 * addresses on different boards. These are the physical
	 * addresses, so care must be taken to use the correct
	 * XKPHYS/KSEG0 addressing depending on the application's
	 * ABI. These values will be 0 if CF is not present.
	 */
	u64 compact_flash_common_base_addr;
	u64 compact_flash_attribute_base_addr;
	/*
	 * Base address of the LED display (as on EBT3000 board)
	 * This will be 0 if LED display not present.
	 */
	u64 led_display_base_addr;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 2)
	/* DFA reference clock in hz (if applicable)*/
	u32 dfa_ref_clock_hz;
	/*
	 * flags indicating various configuration options. These
	 * flags supercede the 'flags' variable and should be used
	 * instead if available.
	 */
	u32 config_flags;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 3)
	/*
	 * Address of the OF Flattened Device Tree structure
	 * describing the board.
	 */
	u64 fdt_addr;
#endif
#if (CVMX_BOOTINFO_MIN_VER >= 4)
	/*
	 * Coremask used for processors with more than 32 cores
	 * or with OCI. This replaces core_mask.
	 */
	struct cvmx_coremask ext_core_mask;
#endif
};
#define CVMX_BOOTINFO_CFG_FLAG_PCI_HOST (1ull << 0)
#define CVMX_BOOTINFO_CFG_FLAG_PCI_TARGET (1ull << 1)
#define CVMX_BOOTINFO_CFG_FLAG_DEBUG (1ull << 2)
#define CVMX_BOOTINFO_CFG_FLAG_NO_MAGIC (1ull << 3)
/*
* This flag is set if the TLB mappings are not contained in the
* 0x10000000 - 0x20000000 boot bus region.
*/
#define CVMX_BOOTINFO_CFG_FLAG_OVERSIZE_TLB_MAPPING (1ull << 4)
#define CVMX_BOOTINFO_CFG_FLAG_BREAK (1ull << 5)
#endif /* (CVMX_BOOTINFO_MAJ_VER == 1) */
/* Type defines for board and chip types */
/*
 * Board type identifiers. Numeric values are part of the bootloader
 * ABI and must not be changed; see the reserved ranges noted below.
 */
enum cvmx_board_types_enum {
	CVMX_BOARD_TYPE_NULL = 0,
	CVMX_BOARD_TYPE_SIM = 1,
	CVMX_BOARD_TYPE_EBT3000 = 2,
	CVMX_BOARD_TYPE_KODAMA = 3,
	CVMX_BOARD_TYPE_NIAGARA = 4,
	CVMX_BOARD_TYPE_NAC38 = 5, /* formerly NAO38 */
	CVMX_BOARD_TYPE_THUNDER = 6,
	CVMX_BOARD_TYPE_TRANTOR = 7,
	CVMX_BOARD_TYPE_EBH3000 = 8,
	CVMX_BOARD_TYPE_EBH3100 = 9,
	CVMX_BOARD_TYPE_HIKARI = 10,
	CVMX_BOARD_TYPE_CN3010_EVB_HS5 = 11,
	CVMX_BOARD_TYPE_CN3005_EVB_HS5 = 12,
	CVMX_BOARD_TYPE_KBP = 13,
	/* Deprecated, CVMX_BOARD_TYPE_CN3010_EVB_HS5 supports the CN3020 */
	CVMX_BOARD_TYPE_CN3020_EVB_HS5 = 14,
	CVMX_BOARD_TYPE_EBT5800 = 15,
	CVMX_BOARD_TYPE_NICPRO2 = 16,
	CVMX_BOARD_TYPE_EBH5600 = 17,
	CVMX_BOARD_TYPE_EBH5601 = 18,
	CVMX_BOARD_TYPE_EBH5200 = 19,
	CVMX_BOARD_TYPE_BBGW_REF = 20,
	CVMX_BOARD_TYPE_NIC_XLE_4G = 21,
	CVMX_BOARD_TYPE_EBT5600 = 22,
	CVMX_BOARD_TYPE_EBH5201 = 23,
	CVMX_BOARD_TYPE_EBT5200 = 24,
	CVMX_BOARD_TYPE_CB5600 = 25,
	CVMX_BOARD_TYPE_CB5601 = 26,
	CVMX_BOARD_TYPE_CB5200 = 27,
	/* Special 'generic' board type, supports many boards */
	CVMX_BOARD_TYPE_GENERIC = 28,
	CVMX_BOARD_TYPE_EBH5610 = 29,
	CVMX_BOARD_TYPE_LANAI2_A = 30,
	CVMX_BOARD_TYPE_LANAI2_U = 31,
	CVMX_BOARD_TYPE_EBB5600 = 32,
	CVMX_BOARD_TYPE_EBB6300 = 33,
	CVMX_BOARD_TYPE_NIC_XLE_10G = 34,
	CVMX_BOARD_TYPE_LANAI2_G = 35,
	CVMX_BOARD_TYPE_EBT5810 = 36,
	CVMX_BOARD_TYPE_NIC10E = 37,
	CVMX_BOARD_TYPE_EP6300C = 38,
	CVMX_BOARD_TYPE_EBB6800 = 39,
	CVMX_BOARD_TYPE_NIC4E = 40,
	CVMX_BOARD_TYPE_NIC2E = 41,
	CVMX_BOARD_TYPE_EBB6600 = 42,
	CVMX_BOARD_TYPE_REDWING = 43,
	CVMX_BOARD_TYPE_NIC68_4 = 44,
	CVMX_BOARD_TYPE_NIC10E_66 = 45,
	CVMX_BOARD_TYPE_MAX,
	/*
	 * The range from CVMX_BOARD_TYPE_MAX to
	 * CVMX_BOARD_TYPE_CUST_DEFINED_MIN is reserved for future
	 * SDK use.
	 */
	/*
	 * Set aside a range for customer boards. These numbers are managed
	 * by Cavium.
	 */
	CVMX_BOARD_TYPE_CUST_DEFINED_MIN = 10000,
	CVMX_BOARD_TYPE_CUST_WSX16 = 10001,
	CVMX_BOARD_TYPE_CUST_NS0216 = 10002,
	CVMX_BOARD_TYPE_CUST_NB5 = 10003,
	CVMX_BOARD_TYPE_CUST_WMR500 = 10004,
	CVMX_BOARD_TYPE_CUST_ITB101 = 10005,
	CVMX_BOARD_TYPE_CUST_NTE102 = 10006,
	CVMX_BOARD_TYPE_CUST_AGS103 = 10007,
	CVMX_BOARD_TYPE_CUST_GST104 = 10008,
	CVMX_BOARD_TYPE_CUST_GCT105 = 10009,
	CVMX_BOARD_TYPE_CUST_AGS106 = 10010,
	CVMX_BOARD_TYPE_CUST_SGM107 = 10011,
	CVMX_BOARD_TYPE_CUST_GCT108 = 10012,
	CVMX_BOARD_TYPE_CUST_AGS109 = 10013,
	CVMX_BOARD_TYPE_CUST_GCT110 = 10014,
	CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER = 10015,
	CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER = 10016,
	CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX = 10017,
	CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX = 10018,
	CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX = 10019,
	CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX = 10020,
	CVMX_BOARD_TYPE_CUST_L2_ZINWELL = 10021,
	CVMX_BOARD_TYPE_CUST_DEFINED_MAX = 20000,
	/*
	 * Set aside a range for customer private use. The SDK won't
	 * use any numbers in this range.
	 */
	CVMX_BOARD_TYPE_CUST_PRIVATE_MIN = 20001,
	CVMX_BOARD_TYPE_UBNT_E100 = 20002,
	CVMX_BOARD_TYPE_CUST_DSR1000N = 20006,
	CVMX_BOARD_TYPE_KONTRON_S1901 = 21901,
	CVMX_BOARD_TYPE_CUST_PRIVATE_MAX = 30000,
	/* The remaining range is reserved for future use. */
};
/** Chip type identifiers (values are part of the bootloader ABI) */
enum cvmx_chip_types_enum {
	CVMX_CHIP_TYPE_NULL = 0,
	CVMX_CHIP_SIM_TYPE_DEPRECATED = 1,
	CVMX_CHIP_TYPE_OCTEON_SAMPLE = 2,
	CVMX_CHIP_TYPE_MAX,
};
/*
* Compatibility alias for NAC38 name change, planned to be removed
* from SDK 1.7
*/
#define CVMX_BOARD_TYPE_NAO38 CVMX_BOARD_TYPE_NAC38
/* Functions to return string based on type */
/*
 * Expands to a case label returning the enumerator's name with the
 * 16-character "CVMX_BOARD_TYPE_" prefix stripped via pointer
 * arithmetic on the stringized literal.
 */
#define ENUM_BRD_TYPE_CASE(x) \
	case x: \
		return(#x + 16)	/* Skip CVMX_BOARD_TYPE_ */

/**
 * Convert a board type enumerator to its short name string.
 *
 * @param type  board type to convert
 *
 * @return pointer to a static name string, or NULL for values not
 *         listed in the switch
 */
static inline const char *cvmx_board_type_to_string(enum
						    cvmx_board_types_enum type)
{
	switch (type) {
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NULL);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_SIM);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT3000);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KODAMA);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIAGARA);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NAC38);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_THUNDER);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_TRANTOR);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3000);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH3100);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_HIKARI);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3010_EVB_HS5);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3005_EVB_HS5);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KBP);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CN3020_EVB_HS5);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5800);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NICPRO2);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5600);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5601);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5200);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_BBGW_REF);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_4G);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5600);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5201);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5200);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5600);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5601);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CB5200);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_GENERIC);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBH5610);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_A);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_U);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB5600);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6300);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC_XLE_10G);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_LANAI2_G);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBT5810);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EP6300C);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6800);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC4E);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC2E);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_EBB6600);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_REDWING);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC68_4);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_NIC10E_66);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_MAX);
		/* Customer boards listed here */
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MIN);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WSX16);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NS0216);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NB5);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_WMR500);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_ITB101);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_NTE102);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS103);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GST104);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT105);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS106);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_SGM107);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT108);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_AGS109);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_GCT110);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_SENDER);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_AIR_RECEIVER);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_TX);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ACCTON2_RX);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_TX);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_WSTRNSNIC_RX);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_L2_ZINWELL);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DEFINED_MAX);
		/* Customer private range */
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MIN);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_UBNT_E100);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_DSR1000N);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901);
		ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX);
	}
	return NULL;
}
/*
 * Case label returning the enumerator's name with the first 15
 * characters ("CVMX_CHIP_TYPE_") stripped. Note that
 * CVMX_CHIP_SIM_TYPE_DEPRECATED does not share this prefix, so its
 * returned string is truncated mid-name ("YPE_DEPRECATED").
 */
#define ENUM_CHIP_TYPE_CASE(x) \
	case x: \
		return(#x + 15)	/* Skip CVMX_CHIP_TYPE */

/**
 * Convert a chip type enumerator to its short name string.
 *
 * @param type  chip type to convert
 *
 * @return pointer to a static name string, or "Unsupported Chip" for
 *         unknown values
 */
static inline const char *cvmx_chip_type_to_string(enum
						   cvmx_chip_types_enum type)
{
	switch (type) {
		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_NULL);
		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_SIM_TYPE_DEPRECATED);
		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_OCTEON_SAMPLE);
		ENUM_CHIP_TYPE_CASE(CVMX_CHIP_TYPE_MAX);
	}
	return "Unsupported Chip";
}
#endif /* __CVMX_BOOTINFO_H__ */

View file

@ -0,0 +1,533 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
/**
* @file
* Simple allocate only memory allocator. Used to allocate memory at application
* start time.
*/
#ifndef __CVMX_BOOTMEM_H__
#define __CVMX_BOOTMEM_H__
/* Must be multiple of 8, changing breaks ABI */
#define CVMX_BOOTMEM_NAME_LEN 128
/* Can change without breaking ABI */
#define CVMX_BOOTMEM_NUM_NAMED_BLOCKS 64
/* minimum alignment of bootmem alloced blocks */
#define CVMX_BOOTMEM_ALIGNMENT_SIZE (16ull)
/* Flags for cvmx_bootmem_phy_mem* functions */
/* Allocate from end of block instead of beginning */
#define CVMX_BOOTMEM_FLAG_END_ALLOC (1 << 0)
#define CVMX_BOOTMEM_FLAG_NO_LOCKING (1 << 1) /* Don't do any locking. */
/* Real physical addresses of memory regions */
#define OCTEON_DDR0_BASE (0x0ULL)
#define OCTEON_DDR0_SIZE (0x010000000ULL)
#define OCTEON_DDR1_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
? 0x20000000ULL : 0x410000000ULL)
#define OCTEON_DDR1_SIZE (0x010000000ULL)
#define OCTEON_DDR2_BASE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
? 0x30000000ULL : 0x20000000ULL)
#define OCTEON_DDR2_SIZE ((OCTEON_IS_OCTEON2() || OCTEON_IS_OCTEON3()) \
? 0x7d0000000ULL : 0x3e0000000ULL)
#define OCTEON_MAX_PHY_MEM_SIZE ((OCTEON_IS_MODEL(OCTEON_CN68XX)) \
? 128 * 1024 * 1024 * 1024ULL \
: (OCTEON_IS_OCTEON2()) \
? 32 * 1024 * 1024 * 1024ull \
: (OCTEON_IS_OCTEON3()) \
? 512 * 1024 * 1024 * 1024ULL \
: 16 * 1024 * 1024 * 1024ULL)
/*
* First bytes of each free physical block of memory contain this structure,
* which is used to maintain the free memory list. Since the bootloader is
* only 32 bits, there is a union providing 64 and 32 bit versions. The
* application init code converts addresses to 64 bit addresses before the
* application starts.
*/
/*
 * Header stored in the first bytes of each free physical memory
 * block; links the blocks into the free list (see file comment above).
 */
struct cvmx_bootmem_block_header {
	/* Note: these are referenced from assembly routines in the bootloader,
	 * so this structure should not be changed without changing those
	 * routines as well.
	 */
	u64 next_block_addr;	/* address of the next free block, 0 at end */
	u64 size;		/* size of this free block in bytes */
};
/*
* Structure for named memory blocks
* Number of descriptors
* available can be changed without affecting compatibility,
* but name length changes require a bump in the bootmem
* descriptor version
* Note: This structure must be naturally 64 bit aligned, as a single
* memory image will be used by both 32 and 64 bit programs.
*/
/* Descriptor for one named memory block (see notes above the struct) */
struct cvmx_bootmem_named_block_desc {
	u64 base_addr;		/* Base address of named block */
	/*
	 * Size actually allocated for named block (may differ from requested)
	 */
	u64 size;
	char name[CVMX_BOOTMEM_NAME_LEN];	/* name of named block */
};
/* Current descriptor versions */
/* CVMX bootmem descriptor major version */
#define CVMX_BOOTMEM_DESC_MAJ_VER 3
/* CVMX bootmem descriptor minor version */
#define CVMX_BOOTMEM_DESC_MIN_VER 0
/*
* First three members of cvmx_bootmem_desc_t are left in original
* positions for backwards compatibility.
*/
/*
 * Top-level bootmem descriptor shared between the bootloader and
 * applications (32- and 64-bit); see version macros above.
 */
struct cvmx_bootmem_desc {
	/*
	 * Linux compatible proxy for __BIG_ENDIAN
	 * NOTE(review): comment appears left over from endian-specific
	 * padding in the original source — confirm against upstream.
	 */
	u32 lock;		/* spinlock to control access to list */
	u32 flags;		/* flags for indicating various conditions */
	u64 head_addr;		/* address of first free-list block header */
	/* incremented changed when incompatible changes made */
	u32 major_version;
	/*
	 * incremented changed when compatible changes made, reset to
	 * zero when major incremented
	 */
	u32 minor_version;
	u64 app_data_addr;
	u64 app_data_size;
	/* number of elements in named blocks array */
	u32 named_block_num_blocks;
	/* length of name array in bootmem blocks */
	u32 named_block_name_len;
	/* address of named memory block descriptors */
	u64 named_block_array_addr;
};
/**
* Initialize the boot alloc memory structures. This is
* normally called inside of cvmx_user_app_init()
*
* @param mem_desc_addr Address of the free memory list
* @return
*/
int cvmx_bootmem_init(u64 mem_desc_addr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader.
* This is an allocate-only algorithm, so freeing memory is not possible.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc(u64 size, u64 alignment);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader from a specific node.
* This is an allocate-only algorithm, so freeing memory is not possible.
*
* @param node The node to allocate memory from
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_node(u64 node, u64 size, u64 alignment);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader at a specific
* address. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated at the specified address.
*
* @param size Size in bytes of block to allocate
* @param address Physical address to allocate memory at. If this
* memory is not available, the allocation fails.
* @param alignment Alignment required - must be power of 2
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_address(u64 size, u64 address,
u64 alignment);
/**
* Allocate a block of memory from the free list that was
* passed to the application by the bootloader within a specified
* address range. This is an allocate-only algorithm, so
* freeing memory is not possible. Allocation will fail if
* memory cannot be allocated in the requested range.
*
* @param size Size in bytes of block to allocate
* @param min_addr defines the minimum address of the range
* @param max_addr defines the maximum address of the range
* @param alignment Alignment required - must be power of 2
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_range(u64 size, u64 alignment,
u64 min_addr, u64 max_addr);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_named(u64 size, u64 alignment,
const char *name);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param alignment Alignment required - must be power of 2
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
* @param flags Flags to control options for the allocation.
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_named_flags(u64 size, u64 alignment,
const char *name, u32 flags);
/**
* Allocate a block of memory from the free list that was passed
* to the application by the bootloader, and assign it a name in the
* global named block table. (part of the cvmx_bootmem_descriptor_t structure)
* Named blocks can later be freed.
*
* @param size Size in bytes of block to allocate
* @param address Physical address to allocate memory at. If this
* memory is not available, the allocation fails.
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_named_address(u64 size, u64 address,
const char *name);
/**
* Allocate a block of memory from a specific range of the free list
* that was passed to the application by the bootloader, and assign it
* a name in the global named block table. (part of the
* cvmx_bootmem_descriptor_t structure) Named blocks can later be
* freed. If request cannot be satisfied within the address range
* specified, NULL is returned
*
* @param size Size in bytes of block to allocate
* @param min_addr minimum address of range
* @param max_addr maximum address of range
* @param align Alignment of memory to be allocated. (must be a power of 2)
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_named_range(u64 size, u64 min_addr,
u64 max_addr, u64 align,
const char *name);
/**
* Allocate if needed a block of memory from a specific range of the
* free list that was passed to the application by the bootloader, and
* assign it a name in the global named block table. (part of the
* cvmx_bootmem_descriptor_t structure) Named blocks can later be
* freed. If the requested name block is already allocated, return
* the pointer to block of memory. If request cannot be satisfied
* within the address range specified, NULL is returned
*
* @param size Size in bytes of block to allocate
* @param min_addr minimum address of range
* @param max_addr maximum address of range
* @param align Alignment of memory to be allocated. (must be a power of 2)
* @param name name of block - must be less than CVMX_BOOTMEM_NAME_LEN bytes
* @param init Initialization function
*
* The initialization function is optional, if omitted the named block
* is initialized to all zeros when it is created, i.e. once.
*
* @return pointer to block of memory, NULL on error
*/
void *cvmx_bootmem_alloc_named_range_once(u64 size,
u64 min_addr,
u64 max_addr,
u64 align,
const char *name,
void (*init)(void *));
/**
* Allocate all free memory starting at the start address. This is used to
* prevent any free blocks from later being allocated within the reserved space.
* Note that any memory allocated with this function cannot be later freed.
*
* @param start_addr Starting address to reserve
* @param size Size in bytes to reserve starting at start_addr
* @param name Name to assign to reserved blocks
* @param flags Flags to use when reserving memory
*
* @return 0 on failure,
* !0 on success
*/
int cvmx_bootmem_reserve_memory(u64 start_addr, u64 size,
const char *name, u32 flags);
/**
* Frees a previously allocated named bootmem block.
*
* @param name name of block to free
*
* @return 0 on failure,
* !0 on success
*/
int cvmx_bootmem_free_named(const char *name);
/**
* Finds a named bootmem block by name.
*
* @param name name of block to free
*
* @return pointer to named block descriptor on success
* 0 on failure
*/
const struct cvmx_bootmem_named_block_desc *
cvmx_bootmem_find_named_block(const char *name);
/**
* Returns the size of available memory in bytes, only
* counting blocks that are at least as big as the minimum block
* size.
*
* @param min_block_size
* Minimum block size to count in total.
*
* @return Number of bytes available for allocation that meet the
* block size requirement
*/
u64 cvmx_bootmem_available_mem(u64 min_block_size);
/**
* Prints out the list of named blocks that have been allocated
* along with their addresses and sizes.
* This is primarily used for debugging purposes
*/
void cvmx_bootmem_print_named(void);
/**
* Allocates a block of physical memory from the free list, at
* (optional) requested address and alignment.
*
* @param req_size size of region to allocate. All requests are
* rounded up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
*
* @param address_min Minimum address that block can occupy.
*
* @param address_max Specifies the maximum address_min (inclusive)
* that the allocation can use.
*
* @param alignment Requested alignment of the block. If this
* alignment cannot be met, the allocation fails.
* This must be a power of 2. (Note: Alignment of
* CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
* internally enforced. Requested alignments of less
* than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
* CVMX_BOOTMEM_ALIGNMENT_SIZE.)
* @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
s64 cvmx_bootmem_phy_alloc(u64 req_size, u64 address_min, u64 address_max,
u64 alignment, u32 flags);
/**
* Allocates a named block of physical memory from the free list, at
* (optional) requested address and alignment.
*
* @param size size of region to allocate. All requests are rounded
* up to be a multiple CVMX_BOOTMEM_ALIGNMENT_SIZE bytes size
*
* @param min_addr Minimum address that block can occupy.
*
* @param max_addr Specifies the maximum address_min (inclusive) that
* the allocation can use.
*
* @param alignment Requested alignment of the block. If this
* alignment cannot be met, the allocation fails.
* This must be a power of 2. (Note: Alignment of
* CVMX_BOOTMEM_ALIGNMENT_SIZE bytes is required, and
* internally enforced. Requested alignments of less
* than CVMX_BOOTMEM_ALIGNMENT_SIZE are set to
* CVMX_BOOTMEM_ALIGNMENT_SIZE.)
*
* @param name name to assign to named block
*
* @param flags Flags to control options for the allocation.
*
* @return physical address of block allocated, or -1 on failure
*/
s64 cvmx_bootmem_phy_named_block_alloc(u64 size, u64 min_addr, u64 max_addr,
u64 alignment, const char *name,
u32 flags);
/**
* Finds a named memory block by name.
* Also used for finding an unused entry in the named block table.
*
* @param name Name of memory block to find. If NULL pointer given,
* then finds unused descriptor, if available.
*
* @param flags Flags to control options for the allocation.
*
* @return Physical address of the memory block descriptor, zero if not
* found. If zero returned when name parameter is NULL, then no
* memory block descriptors are available.
*/
u64 cvmx_bootmem_phy_named_block_find(const char *name, u32 flags);
/**
* Returns the size of available memory in bytes, only
* counting blocks that are at least as big as the minimum block
* size.
*
* @param min_block_size
* Minimum block size to count in total.
*
* @return Number of bytes available for allocation that meet the
* block size requirement
*/
u64 cvmx_bootmem_phy_available_mem(u64 min_block_size);
/**
* Frees a named block.
*
* @param name name of block to free
* @param flags flags for passing options
*
* @return 0 on failure
* 1 on success
*/
int cvmx_bootmem_phy_named_block_free(const char *name, u32 flags);
/**
* Frees a block to the bootmem allocator list. This must
* be used with care, as the size provided must match the size
* of the block that was allocated, or the list will become
* corrupted.
*
* IMPORTANT: This is only intended to be used as part of named block
 * frees and initial population of the free memory list.
 *
*
* @param phy_addr physical address of block
* @param size size of block in bytes.
* @param flags flags for passing options
*
* @return 1 on success,
* 0 on failure
*/
int __cvmx_bootmem_phy_free(u64 phy_addr, u64 size, u32 flags);
/**
* Prints the list of currently allocated named blocks
*
*/
void cvmx_bootmem_phy_named_block_print(void);
/**
* Prints the list of available memory.
*
*/
void cvmx_bootmem_phy_list_print(void);
/**
* This function initializes the free memory list used by cvmx_bootmem.
* This must be called before any allocations can be done.
*
* @param mem_size Total memory available, in bytes
*
* @param low_reserved_bytes Number of bytes to reserve (leave out of
* free list) at address 0x0.
*
* @param desc_buffer Buffer for the bootmem descriptor. This must be
* a 32 bit addressable address.
*
* @return 1 on success
* 0 on failure
*/
s64 cvmx_bootmem_phy_mem_list_init(u64 mem_size, u32 low_reserved_bytes,
struct cvmx_bootmem_desc *desc_buffer);
/**
* This function initializes the free memory list used by cvmx_bootmem.
* This must be called before any allocations can be done.
*
* @param nodemask Nodemask - one bit per node (bit0->node0, bit1->node1,...)
*
* @param mem_size[] Array of memory sizes in MBytes per node ([0]->node0,...)
*
* @param low_reserved_bytes Number of bytes to reserve (leave out of
* free list) at address 0x0.
*
* @param desc_buffer Buffer for the bootmem descriptor. This must be
* a 32 bit addressable address.
*
* @return 1 on success
* 0 on failure
*/
s64 cvmx_bootmem_phy_mem_list_init_multi(u8 nodemask, u32 mem_size[],
u32 low_reserved_bytes,
struct cvmx_bootmem_desc *desc_buffer);
/**
* Locks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_lock(void);
/**
* Unlocks the bootmem allocator. This is useful in certain situations
* where multiple allocations must be made without being interrupted.
* This should be used with the CVMX_BOOTMEM_FLAG_NO_LOCKING flag.
*
*/
void cvmx_bootmem_unlock(void);
/**
* Internal use function to get the current descriptor pointer
*/
void *__cvmx_bootmem_internal_get_desc_ptr(void);
/**
 * Internal use. This is used to get a pointer to a physical
 * address. For Linux n32 the physical address is mmapped to a virtual
 * address and the virtual address is returned. For n64 the address
 * is converted to an xkphys address and the xkphys address is
 * returned.
 */
void *__cvmx_phys_addr_to_ptr(u64 phys, int size);
const struct cvmx_bootmem_named_block_desc *
__cvmx_bootmem_find_named_block_flags(const char *name, u32 flags);
void *cvmx_bootmem_alloc_named_range_flags(u64 size, u64 min_addr,
u64 max_addr, u64 align,
const char *name, u32 flags);
u64 cvmx_bootmem_phy_alloc_range(u64 size, u64 alignment,
u64 min_addr, u64 max_addr);
#endif /* __CVMX_BOOTMEM_H__ */

View file

@ -0,0 +1,752 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
/**
* Module to support operations on bitmap of cores. Coremask can be used to
* select a specific core, a group of cores, or all available cores, for
* initialization and differentiation of roles within a single shared binary
* executable image.
*
* The core numbers used in this file are the same value as what is found in
* the COP0_EBASE register and the rdhwr 0 instruction.
*
* For the CN78XX and other multi-node environments the core numbers are not
* contiguous. The core numbers for the CN78XX are as follows:
*
* Node 0: Cores 0 - 47
* Node 1: Cores 128 - 175
* Node 2: Cores 256 - 303
* Node 3: Cores 384 - 431
*
* The coremask environment generally tries to be node agnostic in order to
* provide future compatibility if more cores are added to future processors
* or more nodes are supported.
*/
#ifndef __CVMX_COREMASK_H__
#define __CVMX_COREMASK_H__
#include "cvmx-regs.h"
/* bits per holder */
#define CVMX_COREMASK_HLDRSZ ((int)(sizeof(u64) * 8))
/** Maximum allowed cores per node */
#define CVMX_COREMASK_MAX_CORES_PER_NODE (1 << CVMX_NODE_NO_SHIFT)
/** Maximum number of bits actually used in the coremask */
#define CVMX_MAX_USED_CORES_BMP (1 << (CVMX_NODE_NO_SHIFT + CVMX_NODE_BITS))
/* the number of valid bits in and the mask of the most significant holder */
#define CVMX_COREMASK_MSHLDR_NBITS \
(CVMX_MIPS_MAX_CORES % CVMX_COREMASK_HLDRSZ)
#define CVMX_COREMASK_MSHLDR_MASK \
((CVMX_COREMASK_MSHLDR_NBITS) ? \
(((u64)1 << CVMX_COREMASK_MSHLDR_NBITS) - 1) : \
((u64)-1))
/* cvmx_coremask size in u64 */
#define CVMX_COREMASK_BMPSZ \
((int)(CVMX_MIPS_MAX_CORES / CVMX_COREMASK_HLDRSZ + \
(CVMX_COREMASK_MSHLDR_NBITS != 0)))
#define CVMX_COREMASK_USED_BMPSZ \
(CVMX_MAX_USED_CORES_BMP / CVMX_COREMASK_HLDRSZ)
#define CVMX_COREMASK_BMP_NODE_CORE_IDX(node, core) \
((((node) << CVMX_NODE_NO_SHIFT) + (core)) / CVMX_COREMASK_HLDRSZ)
/**
* Maximum available coremask.
*/
#define CVMX_COREMASK_MAX \
{ { \
0x0000FFFFFFFFFFFF, 0, \
0x0000FFFFFFFFFFFF, 0, \
0x0000FFFFFFFFFFFF, 0, \
0x0000FFFFFFFFFFFF, 0, \
0, 0, \
0, 0, \
0, 0, \
0, 0} }
/**
* Empty coremask
*/
#define CVMX_COREMASK_EMPTY \
{ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} }
/*
 * Bitmap of cores, one bit per core across all nodes.
 * Bit ((node << CVMX_NODE_NO_SHIFT) + core) lives in holder
 * coremask_bitmap[bit / CVMX_COREMASK_HLDRSZ].
 */
struct cvmx_coremask {
	/* One u64 "holder" per CVMX_COREMASK_HLDRSZ (64) core bits */
	u64 coremask_bitmap[CVMX_COREMASK_BMPSZ];
};
/**
* Macro to iterate through all available cores in a coremask
*
* @param core - core variable to use to iterate
* @param pcm - pointer to core mask
*
* Use this like a for statement
*/
#define cvmx_coremask_for_each_core(core, pcm) \
for ((core) = -1; \
(core) = cvmx_coremask_next_core((core), pcm), \
(core) >= 0;)
/**
 * Given a node and node mask, return the next available node.
 *
 * @param node starting node number
 * @param node_mask node mask to use to find the next node
 *
 * @return next node number or -1 if no more nodes are available
 */
static inline int cvmx_coremask_next_node(int node, u8 node_mask)
{
	int ffs_pos;

	/* Only consider mask bits above the current node */
	ffs_pos = __builtin_ffs(node_mask >> (node + 1));
	if (!ffs_pos)
		return -1;

	return node + ffs_pos;
}
/**
* Iterate through all nodes in a node mask
*
* @param node node iterator variable
* @param node_mask mask to use for iterating
*
* Use this like a for statement
*/
#define cvmx_coremask_for_each_node(node, node_mask) \
for ((node) = __builtin_ffs(node_mask) - 1; \
(node) >= 0 && (node) < CVMX_MAX_NODES; \
(node) = cvmx_coremask_next_node(node, node_mask))
/**
 * Test whether ``core'' is set in the coremask.
 *
 * @param pcm is the pointer to the coremask.
 * @param core core number to test
 * @return 1 if core is set and 0 if not.
 */
static inline int cvmx_coremask_is_core_set(const struct cvmx_coremask *pcm,
					    int core)
{
	int bit = core % CVMX_COREMASK_HLDRSZ;
	int idx = core / CVMX_COREMASK_HLDRSZ;

	return !!(pcm->coremask_bitmap[idx] & ((u64)1 << bit));
}
/**
 * Is ``current core'' set in the coremask?
 *
 * Convenience wrapper that tests the bit of the core executing this
 * code (as reported by cvmx_get_core_num()).
 *
 * @param pcm is the pointer to the coremask.
 * @return 1 if core is set and 0 if not.
 */
static inline int cvmx_coremask_is_self_set(const struct cvmx_coremask *pcm)
{
	return cvmx_coremask_is_core_set(pcm, (int)cvmx_get_core_num());
}
/**
 * Is coremask empty?
 * @param pcm is the pointer to the coremask.
 * @return 1 if *pcm is empty (all zeros), 0 if not empty.
 */
static inline int cvmx_coremask_is_empty(const struct cvmx_coremask *pcm)
{
	int idx;

	/* Only the used holders need to be inspected */
	for (idx = 0; idx < CVMX_COREMASK_USED_BMPSZ; idx++) {
		if (pcm->coremask_bitmap[idx])
			return 0;
	}

	return 1;
}
/**
* Set ``core'' in the coremask.
*
* @param pcm is the pointer to the coremask.
* @param core
* @return 0.
*/
static inline int cvmx_coremask_set_core(struct cvmx_coremask *pcm, int core)
{
int n, i;
n = core % CVMX_COREMASK_HLDRSZ;
i = core / CVMX_COREMASK_HLDRSZ;
pcm->coremask_bitmap[i] |= ((u64)1 << n);
return 0;
}
/**
 * Set ``current core'' in the coremask.
 *
 * Convenience wrapper that sets the bit of the core executing this
 * code (as reported by cvmx_get_core_num()).
 *
 * @param pcm is the pointer to the coremask.
 * @return 0.
 */
static inline int cvmx_coremask_set_self(struct cvmx_coremask *pcm)
{
	return cvmx_coremask_set_core(pcm, (int)cvmx_get_core_num());
}
/**
 * Clear ``core'' from the coremask.
 *
 * @param pcm is the pointer to the coremask.
 * @param core core number to clear
 * @return 0.
 */
static inline int cvmx_coremask_clear_core(struct cvmx_coremask *pcm, int core)
{
	int bit = core % CVMX_COREMASK_HLDRSZ;
	int idx = core / CVMX_COREMASK_HLDRSZ;

	pcm->coremask_bitmap[idx] &= ~((u64)1 << bit);

	return 0;
}
/**
 * Clear ``current core'' from the coremask.
 *
 * Convenience wrapper that clears the bit of the core executing this
 * code (as reported by cvmx_get_core_num()).
 *
 * @param pcm is the pointer to the coremask.
 * @return 0.
 */
static inline int cvmx_coremask_clear_self(struct cvmx_coremask *pcm)
{
	return cvmx_coremask_clear_core(pcm, cvmx_get_core_num());
}
/**
* Toggle ``core'' in the coremask.
*
* @param pcm is the pointer to the coremask.
* @param core
* @return 0.
*/
static inline int cvmx_coremask_toggle_core(struct cvmx_coremask *pcm, int core)
{
int n, i;
n = core % CVMX_COREMASK_HLDRSZ;
i = core / CVMX_COREMASK_HLDRSZ;
pcm->coremask_bitmap[i] ^= ((u64)1 << n);
return 0;
}
/**
 * Toggle ``current core'' in the coremask.
 *
 * Convenience wrapper that toggles the bit of the core executing this
 * code (as reported by cvmx_get_core_num()).
 *
 * @param pcm is the pointer to the coremask.
 * @return 0.
 */
static inline int cvmx_coremask_toggle_self(struct cvmx_coremask *pcm)
{
	return cvmx_coremask_toggle_core(pcm, cvmx_get_core_num());
}
/**
 * Set the lower 64-bit of the coremask.
 * @param pcm pointer to coremask
 * @param coremask_64 64-bit coremask to apply to the first node (0)
 */
static inline void cvmx_coremask_set64(struct cvmx_coremask *pcm,
				       u64 coremask_64)
{
	/* Holder 0 always covers node 0, cores 0-63 */
	pcm->coremask_bitmap[0] = coremask_64;
}
/**
 * Set the 64-bit of the coremask for a particular node.
 * @param pcm pointer to coremask
 * @param node node to set
 * @param coremask_64 64-bit coremask to apply to the specified node
 */
static inline void cvmx_coremask_set64_node(struct cvmx_coremask *pcm,
					    u8 node,
					    u64 coremask_64)
{
	/* Overwrites the node's first holder (cores 0-63 of that node) */
	pcm->coremask_bitmap[CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0)] =
		coremask_64;
}
/**
 * Gets the lower 64-bits of the coremask
 *
 * @param[in] pcm - pointer to coremask
 * @return 64-bit coremask for the first node
 */
static inline u64 cvmx_coremask_get64(const struct cvmx_coremask *pcm)
{
	/* Holder 0 always covers node 0, cores 0-63 */
	return pcm->coremask_bitmap[0];
}
/**
 * Gets the lower 64-bits of the coremask for the specified node
 *
 * @param[in] pcm - pointer to coremask
 * @param node - node to get coremask for
 * @return 64-bit coremask for the specified node
 */
static inline u64 cvmx_coremask_get64_node(const struct cvmx_coremask *pcm,
					   u8 node)
{
	return pcm->coremask_bitmap[CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0)];
}
/**
 * Gets the lower 32-bits of the coremask for compatibility
 *
 * @param[in] pcm - pointer to coremask
 * @return 32-bit coremask for the first node
 * @deprecated This function is to maintain compatibility with older
 * SDK applications and may disappear at some point.
 * This function is not compatible with the CN78XX or any other
 * Octeon device with more than 32 cores.
 */
static inline u32 cvmx_coremask_get32(const struct cvmx_coremask *pcm)
{
	/* Truncates to cores 0-31 of node 0 */
	return pcm->coremask_bitmap[0] & 0xffffffff;
}
/*
* cvmx_coremask_cmp() returns an integer less than, equal to, or
* greater than zero if *pcm1 is found, respectively, to be less than,
* to match, or be greater than *pcm2.
*/
static inline int cvmx_coremask_cmp(const struct cvmx_coremask *pcm1,
const struct cvmx_coremask *pcm2)
{
int i;
/* Start from highest node for arithemtically correct result */
for (i = CVMX_COREMASK_USED_BMPSZ - 1; i >= 0; i--)
if (pcm1->coremask_bitmap[i] != pcm2->coremask_bitmap[i]) {
return (pcm1->coremask_bitmap[i] >
pcm2->coremask_bitmap[i]) ? 1 : -1;
}
return 0;
}
/*
 * cvmx_coremask_OPx(pcm1, pcm2[, pcm3]), where OPx can be
 * - and
 * - or
 * - xor
 * - not
 * ...
 * For binary operators, pcm1 <-- pcm2 OPx pcm3.
 * For unaries, pcm1 <-- OPx pcm2.
 */
/* Generates a binary bitwise operation over all used holders */
#define CVMX_COREMASK_BINARY_DEFUN(binary_op, op)		\
	static inline int cvmx_coremask_##binary_op(		\
		struct cvmx_coremask *pcm1,			\
		const struct cvmx_coremask *pcm2,		\
		const struct cvmx_coremask *pcm3)		\
	{							\
		int i;						\
								\
		for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)	\
			pcm1->coremask_bitmap[i] =		\
				pcm2->coremask_bitmap[i]	\
				op				\
				pcm3->coremask_bitmap[i];	\
								\
		return 0;					\
	}

/* Generates a unary (prefix) bitwise operation over all used holders */
#define CVMX_COREMASK_UNARY_DEFUN(unary_op, op)			\
	static inline int cvmx_coremask_##unary_op(		\
		struct cvmx_coremask *pcm1,			\
		const struct cvmx_coremask *pcm2)		\
	{							\
		int i;						\
								\
		for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)	\
			pcm1->coremask_bitmap[i] =		\
				op				\
				pcm2->coremask_bitmap[i];	\
								\
		return 0;					\
	}

/* cvmx_coremask_and(pcm1, pcm2, pcm3): pcm1 = pcm2 & pcm3 */
CVMX_COREMASK_BINARY_DEFUN(and, &)
/* cvmx_coremask_or(pcm1, pcm2, pcm3): pcm1 = pcm2 | pcm3 */
CVMX_COREMASK_BINARY_DEFUN(or, |)
/* cvmx_coremask_xor(pcm1, pcm2, pcm3): pcm1 = pcm2 ^ pcm3 */
CVMX_COREMASK_BINARY_DEFUN(xor, ^)
/* cvmx_coremask_maskoff(pcm1, pcm2, pcm3): pcm1 = pcm2 & ~pcm3 */
CVMX_COREMASK_BINARY_DEFUN(maskoff, & ~)
/* cvmx_coremask_not(pcm1, pcm2): pcm1 = ~pcm2 */
CVMX_COREMASK_UNARY_DEFUN(not, ~)
/* cvmx_coremask_fill(pcm1, pcm2): pcm1 = -1 (pcm2 is ignored) */
CVMX_COREMASK_UNARY_DEFUN(fill, -1 |)
/* cvmx_coremask_clear(pcm1, pcm2): pcm1 = 0 (pcm2 is ignored) */
CVMX_COREMASK_UNARY_DEFUN(clear, 0 &)
/* cvmx_coremask_dup(pcm1, pcm2): pcm1 = pcm2 */
CVMX_COREMASK_UNARY_DEFUN(dup, +)
/*
* Macros using the unary functions defined w/
* CVMX_COREMASK_UNARY_DEFUN
* - set *pcm to its complement
* - set all bits in *pcm to 0
* - set all (valid) bits in *pcm to 1
*/
#define cvmx_coremask_complement(pcm) cvmx_coremask_not(pcm, pcm)
/* On clear, even clear the unused bits */
#define cvmx_coremask_clear_all(pcm) \
*(pcm) = (struct cvmx_coremask)CVMX_COREMASK_EMPTY
#define cvmx_coremask_set_all(pcm) cvmx_coremask_fill(pcm, NULL)
/*
* convert a string of hex digits to struct cvmx_coremask
*
* @param pcm
* @param hexstr can be
* - "[1-9A-Fa-f][0-9A-Fa-f]*", or
* - "-1" to set the bits for all the cores.
* return
* 0 for success,
* -1 for string too long (i.e., hexstr takes more bits than
* CVMX_MIPS_MAX_CORES),
* -2 for conversion problems from hex string to an unsigned
* long long, e.g., non-hex char in hexstr, and
* -3 for hexstr starting with '0'.
* NOTE:
* This function clears the bitmask in *pcm before the conversion.
*/
int cvmx_coremask_str2bmp(struct cvmx_coremask *pcm, char *hexstr);
/*
* convert a struct cvmx_coremask to a string of hex digits
*
* @param pcm
* @param hexstr is "[1-9A-Fa-f][0-9A-Fa-f]*"
*
* return 0.
*/
int cvmx_coremask_bmp2str(const struct cvmx_coremask *pcm, char *hexstr);
/*
 * Returns the index of the lowest bit in a coremask holder.
 *
 * NOTE: __builtin_ctzll(0) is undefined; callers must pass h != 0.
 */
static inline int cvmx_coremask_lowest_bit(u64 h)
{
	return __builtin_ctzll(h);
}
/*
 * Returns the 0-based index of the highest bit in a coremask holder.
 *
 * NOTE: __builtin_clzll(0) is undefined; callers must pass h != 0.
 */
static inline int cvmx_coremask_highest_bit(u64 h)
{
	return (64 - __builtin_clzll(h) - 1);
}
/**
 * Returns the last core within the coremask and -1 when the coremask
 * is empty.
 *
 * @param[in] pcm - pointer to coremask
 * @returns last core set in the coremask or -1 if all clear
 *
 */
static inline int cvmx_coremask_get_last_core(const struct cvmx_coremask *pcm)
{
	int idx;

	/* Scan holders top-down; the first non-zero one holds the last core */
	for (idx = CVMX_COREMASK_USED_BMPSZ - 1; idx >= 0; idx--) {
		if (pcm->coremask_bitmap[idx])
			return idx * CVMX_COREMASK_HLDRSZ +
				cvmx_coremask_highest_bit(pcm->coremask_bitmap[idx]);
	}

	return -1;
}
/**
 * Returns the first core within the coremask and -1 when the coremask
 * is empty.
 *
 * @param[in] pcm - pointer to coremask
 * @returns first core set in the coremask or -1 if all clear
 *
 */
static inline int cvmx_coremask_get_first_core(const struct cvmx_coremask *pcm)
{
	int idx;

	/* Scan holders bottom-up; the first non-zero one holds the first core */
	for (idx = 0; idx < CVMX_COREMASK_USED_BMPSZ; idx++) {
		if (pcm->coremask_bitmap[idx])
			return idx * CVMX_COREMASK_HLDRSZ +
				cvmx_coremask_lowest_bit(pcm->coremask_bitmap[idx]);
	}

	return -1;
}
/**
 * Given a core and coremask, return the next available core in the coremask
 * or -1 if none are available.
 *
 * @param core - starting core to check (can be -1 for core 0)
 * @param pcm - pointer to coremask to check for the next core.
 *
 * @return next core following the core parameter or -1 if no more cores.
 */
static inline int cvmx_coremask_next_core(int core,
					  const struct cvmx_coremask *pcm)
{
	int n, i;

	core++;
	n = core % CVMX_COREMASK_HLDRSZ;
	i = core / CVMX_COREMASK_HLDRSZ;

	/*
	 * Guard against reading past the used holders: without this,
	 * core == CVMX_COREMASK_USED_BMPSZ * CVMX_COREMASK_HLDRSZ - 1
	 * indexes beyond the used region (and beyond the array for
	 * core >= CVMX_MIPS_MAX_CORES - 1).
	 */
	if (i >= CVMX_COREMASK_USED_BMPSZ)
		return -1;

	/* Finish scanning the holder the starting core falls into */
	if (pcm->coremask_bitmap[i] != 0) {
		for (; n < CVMX_COREMASK_HLDRSZ; n++)
			if (pcm->coremask_bitmap[i] & (1ULL << n))
				return ((i * CVMX_COREMASK_HLDRSZ) + n);
	}

	/* Then take the lowest bit of the next non-zero holder, if any */
	for (i = i + 1; i < CVMX_COREMASK_USED_BMPSZ; i++) {
		if (pcm->coremask_bitmap[i] != 0)
			return (i * CVMX_COREMASK_HLDRSZ) +
				cvmx_coremask_lowest_bit(pcm->coremask_bitmap[i]);
	}

	return -1;
}
/**
 * Compute coremask for count cores starting with start_core.
 * Note that the coremask for multi-node processors may have
 * gaps.
 *
 * @param[out] pcm pointer to core mask data structure
 * @param start_core starting core number
 * @param count number of cores
 *
 */
static inline void cvmx_coremask_set_cores(struct cvmx_coremask *pcm,
					   unsigned int start_core,
					   unsigned int count)
{
	int node;
	int core;	/** Current core in node */
	int cores_in_node;
	int i;

	/* A node's cores must fit in a single holder for the logic below */
	assert(CVMX_MAX_CORES < CVMX_COREMASK_HLDRSZ);
	/* Split global core number into node and core-within-node */
	node = start_core >> CVMX_NODE_NO_SHIFT;
	core = start_core & ((1 << CVMX_NODE_NO_SHIFT) - 1);
	assert(core < CVMX_MAX_CORES);

	cvmx_coremask_clear_all(pcm);
	/* Fill node by node, wrapping to core 0 of the next node */
	while (count > 0) {
		if (count + core > CVMX_MAX_CORES)
			cores_in_node = CVMX_MAX_CORES - core;
		else
			cores_in_node = count;

		i = CVMX_COREMASK_BMP_NODE_CORE_IDX(node, core);
		/* Contiguous run of cores_in_node bits starting at ``core'' */
		pcm->coremask_bitmap[i] = ((1ULL << cores_in_node) - 1) << core;
		count -= cores_in_node;
		core = 0;
		node++;
	}
}
/**
 * Makes a copy of a coremask
 *
 * @param[out] dest - pointer to destination coremask
 * @param[in] src - pointer to source coremask
 */
static inline void cvmx_coremask_copy(struct cvmx_coremask *dest,
				      const struct cvmx_coremask *src)
{
	/* Copies all CVMX_COREMASK_BMPSZ holders, including unused ones */
	memcpy(dest, src, sizeof(*dest));
}
/**
 * Test to see if the specified core is first core in coremask.
 *
 * @param[in] pcm pointer to the coremask to test against
 * @param[in] core core to check
 *
 * @return 1 if the core is first core in the coremask, 0 otherwise
 *
 */
static inline int cvmx_coremask_is_core_first_core(const struct cvmx_coremask *pcm,
						   unsigned int core)
{
	int n, i;

	n = core / CVMX_COREMASK_HLDRSZ;
	/* Any set bit in a lower holder means a lower-numbered core exists */
	for (i = 0; i < n; i++)
		if (pcm->coremask_bitmap[i] != 0)
			return 0;

	/* From now on we only care about the core number within an entry */
	core &= (CVMX_COREMASK_HLDRSZ - 1);
	/* ffsll is 1-based; a lower set bit, or none at all, fails the test */
	if (__builtin_ffsll(pcm->coremask_bitmap[n]) < (core + 1))
		return 0;

	return (__builtin_ffsll(pcm->coremask_bitmap[n]) == core + 1);
}
/*
 * NOTE:
 * cvmx_coremask_is_first_core() was retired due to improper usage.
 * For inquiring about the current core being the initializing
 * core for an application, use cvmx_is_init_core().
 * For simply inquiring if the current core is numerically
 * lowest in a given mask, use:
 *	cvmx_coremask_is_core_first_core(pcm, cvmx_get_core_num())
 */
/**
 * Returns the number of 1 bits set in a coremask
 *
 * @param[in] pcm - pointer to core mask
 *
 * @return number of bits set in the coremask
 */
static inline int cvmx_coremask_get_core_count(const struct cvmx_coremask *pcm)
{
	int idx;
	int total = 0;

	/* Sum the population count of every used holder */
	for (idx = 0; idx < CVMX_COREMASK_USED_BMPSZ; idx++)
		total += __builtin_popcountll(pcm->coremask_bitmap[idx]);

	return total;
}
/**
 * For multi-node systems, return the node a core belongs to.
 *
 * @param core - core number (0-1023)
 *
 * @return node number core belongs to
 */
static inline int cvmx_coremask_core_to_node(int core)
{
	/* Node bits sit above CVMX_NODE_NO_SHIFT in the global core number */
	return (core >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
}
/**
 * Given a core number on a multi-node system, return the core number for a
 * particular node.
 *
 * @param core - global core number
 *
 * @returns core number local to the node.
 */
static inline int cvmx_coremask_core_on_node(int core)
{
	/* Strip the node bits, keeping only the core-within-node bits */
	return (core & ((1 << CVMX_NODE_NO_SHIFT) - 1));
}
/**
* Returns if one coremask is a subset of another coremask
*
* @param main - main coremask to test
* @param subset - subset coremask to test
*
* @return 0 if the subset contains cores not in the main coremask or 1 if
* the subset is fully contained in the main coremask.
*/
static inline int cvmx_coremask_is_subset(const struct cvmx_coremask *main,
const struct cvmx_coremask *subset)
{
int i;
for (i = 0; i < CVMX_COREMASK_USED_BMPSZ; i++)
if ((main->coremask_bitmap[i] & subset->coremask_bitmap[i]) !=
subset->coremask_bitmap[i])
return 0;
return 1;
}
/**
 * Returns if one coremask intersects another coremask
 *
 * @param c1 - main coremask to test
 * @param c2 - subset coremask to test
 *
 * @return 1 if coremask c1 intersects coremask c2, 0 if they are exclusive
 */
static inline int cvmx_coremask_intersects(const struct cvmx_coremask *c1,
					   const struct cvmx_coremask *c2)
{
	int idx;

	/* A single common bit in any holder is enough */
	for (idx = 0; idx < CVMX_COREMASK_USED_BMPSZ; idx++) {
		if (c1->coremask_bitmap[idx] & c2->coremask_bitmap[idx])
			return 1;
	}

	return 0;
}
/**
 * Masks a single node of a coremask
 *
 * Clears every holder below and above the given node, leaving only
 * that node's bits in place.
 *
 * @param pcm - coremask to mask [inout]
 * @param node - node number to mask against
 */
static inline void cvmx_coremask_mask_node(struct cvmx_coremask *pcm, int node)
{
	int i;

	/* Clear holders belonging to nodes below ``node'' */
	for (i = 0; i < CVMX_COREMASK_BMP_NODE_CORE_IDX(node, 0); i++)
		pcm->coremask_bitmap[i] = 0;

	/* Clear holders belonging to nodes above ``node'' */
	for (i = CVMX_COREMASK_BMP_NODE_CORE_IDX(node + 1, 0);
	     i < CVMX_COREMASK_USED_BMPSZ; i++)
		pcm->coremask_bitmap[i] = 0;
}
/**
* Prints out a coremask in the form of node X: 0x... 0x...
*
* @param[in] pcm - pointer to core mask
*
* @return nothing
*/
void cvmx_coremask_print(const struct cvmx_coremask *pcm);
/* Debug-only variant of cvmx_coremask_print(); no-op unless DEBUG is set */
static inline void cvmx_coremask_dprint(const struct cvmx_coremask *pcm)
{
	if (IS_ENABLED(DEBUG))
		cvmx_coremask_print(pcm);
}
struct cvmx_coremask *octeon_get_available_coremask(struct cvmx_coremask *pcm);
int validate_coremask(struct cvmx_coremask *pcm);
#endif /* __CVMX_COREMASK_H__ */

View file

@ -0,0 +1,71 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __CVMX_FUSE_H__
#define __CVMX_FUSE_H__
/**
 * Read a byte of fuse data
 * @param node node to read from
 * @param byte_addr address to read
 *
 * @return byte of fuse data (eight fuse bits, extracted from
 *         MIO_FUS_RCMD[DAT])
 */
static inline u8 cvmx_fuse_read_byte_node(u8 node, int byte_addr)
{
	u64 val;

	/* Kick off the fuse read by setting the address and PEND bit */
	val = FIELD_PREP(MIO_FUS_RCMD_ADDR, byte_addr) | MIO_FUS_RCMD_PEND;
	csr_wr_node(node, CVMX_MIO_FUS_RCMD, val);

	/*
	 * Poll until the controller clears PEND.
	 * NOTE(review): no timeout — this spins forever if the read
	 * never completes.
	 */
	do {
		val = csr_rd_node(node, CVMX_MIO_FUS_RCMD);
	} while (val & MIO_FUS_RCMD_PEND);

	return FIELD_GET(MIO_FUS_RCMD_DAT, val);
}
/**
 * Read a byte of fuse data
 * @param byte_addr address to read
 *
 * @return byte of fuse data (eight fuse bits), read from node 0
 */
static inline u8 cvmx_fuse_read_byte(int byte_addr)
{
	return cvmx_fuse_read_byte_node(0, byte_addr);
}
/**
 * Read a single fuse bit
 *
 * @param node Node number
 * @param fuse Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1
 */
static inline int cvmx_fuse_read_node(u8 node, int fuse)
{
	/* Fetch the byte holding the fuse, then extract the wanted bit */
	return (cvmx_fuse_read_byte_node(node, fuse >> 3) >> (fuse & 0x7)) & 1;
}
/**
 * Read a single fuse bit
 *
 * @param fuse Fuse number (0-1024)
 *
 * @return fuse value: 0 or 1, read from node 0
 */
static inline int cvmx_fuse_read(int fuse)
{
	return cvmx_fuse_read_node(0, fuse);
}
/*
 * Reads fuse 123, which per the function name indicates whether the
 * part is locked — NOTE(review): confirm the fuse-map meaning of bit 123.
 */
static inline int cvmx_octeon_fuse_locked(void)
{
	return cvmx_fuse_read(123);
}
#endif /* __CVMX_FUSE_H__ */

View file

@ -0,0 +1,144 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2020 Stefan Roese <sr@denx.de>
*/
#ifndef __CVMX_REGS_H__
#define __CVMX_REGS_H__
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/io.h>
/* General defines */
#define CVMX_MAX_CORES 48
/* Maximum # of bits to define core in node */
#define CVMX_NODE_NO_SHIFT 7
#define CVMX_NODE_BITS 2 /* Number of bits to define a node */
#define CVMX_MAX_NODES (1 << CVMX_NODE_BITS)
#define CVMX_NODE_MASK (CVMX_MAX_NODES - 1)
#define CVMX_NODE_IO_SHIFT 36
#define CVMX_NODE_MEM_SHIFT 40
#define CVMX_NODE_IO_MASK ((u64)CVMX_NODE_MASK << CVMX_NODE_IO_SHIFT)
#define CVMX_MIPS_MAX_CORE_BITS 10 /* Maximum # of bits to define cores */
#define CVMX_MIPS_MAX_CORES (1 << CVMX_MIPS_MAX_CORE_BITS)
#define MAX_CORE_TADS 8
#define CAST_ULL(v) ((unsigned long long)(v))
#define CASTPTR(type, v) ((type *)(long)(v))
/* Regs */
#define CVMX_CIU_PP_RST 0x0001010000000100ULL
#define CVMX_CIU3_NMI 0x0001010000000160ULL
#define CVMX_CIU_FUSE 0x00010100000001a0ULL
#define CVMX_CIU_NMI 0x0001070000000718ULL
#define CVMX_MIO_BOOT_LOC_CFGX(x) (0x0001180000000080ULL + ((x) & 1) * 8)
#define MIO_BOOT_LOC_CFG_BASE GENMASK_ULL(27, 3)
#define MIO_BOOT_LOC_CFG_EN BIT_ULL(31)
#define CVMX_MIO_BOOT_LOC_ADR 0x0001180000000090ULL
#define MIO_BOOT_LOC_ADR_ADR GENMASK_ULL(7, 3)
#define CVMX_MIO_BOOT_LOC_DAT 0x0001180000000098ULL
#define CVMX_MIO_FUS_DAT2 0x0001180000001410ULL
#define MIO_FUS_DAT2_NOCRYPTO BIT_ULL(26)
#define MIO_FUS_DAT2_NOMUL BIT_ULL(27)
#define MIO_FUS_DAT2_DORM_CRYPTO BIT_ULL(34)
#define CVMX_MIO_FUS_RCMD 0x0001180000001500ULL
#define MIO_FUS_RCMD_ADDR GENMASK_ULL(7, 0)
#define MIO_FUS_RCMD_PEND BIT_ULL(12)
#define MIO_FUS_RCMD_DAT GENMASK_ULL(23, 16)
#define CVMX_RNM_CTL_STATUS 0x0001180040000000ULL
#define RNM_CTL_STATUS_EER_VAL BIT_ULL(9)
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
#define CVMX_RDHWRNV(result, regstr) \
asm volatile ("rdhwr %[rt],$" CVMX_TMP_STR(regstr) : [rt] "=d" (result))
#define CVMX_SYNCW \
asm volatile ("syncw\nsyncw\n" : : : "memory")
/* ToDo: Currently only node = 0 supported */
/**
 * Read a 64-bit CSR.
 *
 * @param node CPU node (currently ignored, see ToDo above)
 * @param addr physical CSR address
 * @return register value
 *
 * NOTE(review): the mapping from ioremap_nocache() is never unmapped;
 * presumably harmless in U-Boot where the mapping is trivial — confirm.
 */
static inline u64 csr_rd_node(int node, u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}
/* Read a 64-bit CSR on node 0 */
static inline u64 csr_rd(u64 addr)
{
	return csr_rd_node(0, addr);
}
/**
 * Write a 64-bit CSR.
 *
 * @param node CPU node (currently ignored — only node 0 is supported)
 * @param addr physical CSR address
 * @param val value to write
 *
 * NOTE(review): the mapping from ioremap_nocache() is never unmapped;
 * presumably harmless in U-Boot where the mapping is trivial — confirm.
 */
static inline void csr_wr_node(int node, u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite64(val, base);
}
/* Write a 64-bit CSR on node 0 */
static inline void csr_wr(u64 addr, u64 val)
{
	csr_wr_node(0, addr, val);
}
/*
 * We need to use the volatile access here, otherwise the IO accessor
 * functions might swap the bytes
 */
/* Raw 64-bit read from a physical address, no byte swapping */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return *(volatile u64 *)addr;
}

/* Raw 64-bit write to a physical address, no byte swapping */
static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	*(volatile u64 *)addr = val;
}

/* Raw 32-bit read from a physical address, no byte swapping */
static inline u32 cvmx_read64_uint32(u64 addr)
{
	return *(volatile u32 *)addr;
}

/* Raw 32-bit write to a physical address, no byte swapping */
static inline void cvmx_write64_uint32(u64 addr, u32 val)
{
	*(volatile u32 *)addr = val;
}
/* Convert a physical address to a cached (KSEG0) virtual pointer */
static inline void *cvmx_phys_to_ptr(u64 addr)
{
	return (void *)CKSEG0ADDR(addr);
}

/* Convert a virtual pointer back to its physical address */
static inline u64 cvmx_ptr_to_phys(void *ptr)
{
	return virt_to_phys(ptr);
}
/**
 * Number of the Core on which the program is currently running.
 *
 * @return core number
 */
static inline unsigned int cvmx_get_core_num(void)
{
	unsigned int core_num;

	/* rdhwr $0 reads the hardware CPU number register */
	CVMX_RDHWRNV(core_num, 0);

	return core_num;
}
#endif /* __CVMX_REGS_H__ */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,442 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OCTEON_FEATURE_H__
#define __OCTEON_FEATURE_H__
/*
* Octeon models are declared after the macros in octeon-model.h with the
* suffix _FEATURE. The individual features are declared with the
* _FEATURE_ infix.
*/
enum octeon_feature {
	/*
	 * Checks on the critical path are moved to the top (8 positions)
	 * so that the compiler generates one less insn than for the rest
	 * of the checks.
	 */
	OCTEON_FEATURE_PKND, /* CN68XX uses port kinds for packet interface */
	/* CN68XX has different fields in word0 - word2 */
	OCTEON_FEATURE_CN68XX_WQE,

	/*
	 * Features
	 */
	/*
	 * Octeon models in the CN5XXX family and higher support atomic
	 * add instructions to memory (saa/saad)
	 */
	OCTEON_FEATURE_SAAD,
	/* Does this Octeon support the ZIP offload engine? */
	OCTEON_FEATURE_ZIP,
	/* Does this Octeon support crypto acceleration using COP2? */
	OCTEON_FEATURE_CRYPTO,
	/* Can crypto be enabled by calling cvmx_crypto_dormant_enable()? */
	OCTEON_FEATURE_DORM_CRYPTO,
	OCTEON_FEATURE_PCIE, /* Does this Octeon support PCI express? */
	OCTEON_FEATURE_SRIO, /* Does this Octeon support SRIO */
	OCTEON_FEATURE_ILK, /* Does this Octeon support Interlaken */
	/*
	 * Some Octeon models support internal memory for storing
	 * cryptographic keys
	 */
	OCTEON_FEATURE_KEY_MEMORY,
	/* Octeon has a LED controller for banks of external LEDs */
	OCTEON_FEATURE_LED_CONTROLLER,
	OCTEON_FEATURE_TRA, /* Octeon has a trace buffer */
	OCTEON_FEATURE_MGMT_PORT, /* Octeon has a management port */
	OCTEON_FEATURE_RAID, /* Octeon has a raid unit */
	OCTEON_FEATURE_USB, /* Octeon has a builtin USB */
	/* Octeon IPD can run without using work queue entries */
	OCTEON_FEATURE_NO_WPTR,
	OCTEON_FEATURE_DFA, /* Octeon has DFA state machines */
	/*
	 * Octeon MDIO block supports clause 45 transactions for
	 * 10 Gig support
	 */
	OCTEON_FEATURE_MDIO_CLAUSE_45,
	/*
	 * CN52XX and CN56XX used a block named NPEI for PCIe access.
	 * Newer chips replaced this with SLI+DPI
	 */
	OCTEON_FEATURE_NPEI,
	OCTEON_FEATURE_HFA, /* Octeon has DFA/HFA */
	OCTEON_FEATURE_DFM, /* Octeon has DFM */
	OCTEON_FEATURE_CIU2, /* Octeon has CIU2 */
	/* Octeon has DMA Instruction Completion Interrupt mode */
	OCTEON_FEATURE_DICI_MODE,
	/* Octeon has Bit Select Extractor scheduler */
	OCTEON_FEATURE_BIT_EXTRACTOR,
	OCTEON_FEATURE_NAND, /* Octeon has NAND */
	OCTEON_FEATURE_MMC, /* Octeon has built-in MMC support */
	OCTEON_FEATURE_ROM, /* Octeon has built-in ROM support */
	OCTEON_FEATURE_AUTHENTIK, /* Octeon has Authentik ROM support */
	OCTEON_FEATURE_MULTICAST_TIMER, /* Octeon has multi_cast timer */
	OCTEON_FEATURE_MULTINODE, /* Octeon has node support */
	OCTEON_FEATURE_CIU3, /* Octeon has CIU3 */
	OCTEON_FEATURE_FPA3, /* Octeon has FPA first seen on 78XX */
	/* CN78XX has different fields in word0 - word2 */
	OCTEON_FEATURE_CN78XX_WQE,
	OCTEON_FEATURE_PKO3, /* Octeon has enhanced PKO block */
	OCTEON_FEATURE_SPI, /* Octeon supports SPI interfaces */
	OCTEON_FEATURE_ZIP3, /* Octeon has zip first seen on 78XX */
	OCTEON_FEATURE_BCH, /* Octeon supports BCH ECC */
	OCTEON_FEATURE_PKI, /* Octeon has PKI block */
	OCTEON_FEATURE_OCLA, /* Octeon has OCLA */
	OCTEON_FEATURE_FAU, /* Octeon has FAU */
	OCTEON_FEATURE_BGX, /* Octeon has BGX */
	OCTEON_FEATURE_BGX_MIX, /* One of the BGX is used for MIX */
	OCTEON_FEATURE_HNA, /* Octeon has HNA */
	OCTEON_FEATURE_BGX_XCV, /* Octeon has BGX XCV RGMII support */
	OCTEON_FEATURE_TSO, /* Octeon has tcp segmentation offload */
	OCTEON_FEATURE_TDM, /* Octeon has PCM/TDM support */
	OCTEON_FEATURE_PTP, /* Octeon has PTP support */
	OCTEON_MAX_FEATURE
};
/* saa/saad atomic-add instructions are present on all supported parts */
static inline int octeon_has_feature_OCTEON_FEATURE_SAAD(void)
{
	return true;
}

/* ZIP engine: absent on CNF71XX/CN70XX/CNF75XX, fuse-gated elsewhere */
static inline int octeon_has_feature_OCTEON_FEATURE_ZIP(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
	    OCTEON_IS_MODEL(OCTEON_CN70XX) || OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return 0;
	else
		/* presumably fuse 121 is the ZIP-disable fuse — confirm */
		return !cvmx_fuse_read(121);
}

/* ZIPv3 block, first seen on CN78XX */
static inline int octeon_has_feature_OCTEON_FEATURE_ZIP3(void)
{
	return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
		OCTEON_IS_MODEL(OCTEON_CN73XX));
}

/* BCH ECC engine */
static inline int octeon_has_feature_OCTEON_FEATURE_BCH(void)
{
	return (OCTEON_IS_MODEL(OCTEON_CN70XX) ||
		OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
		OCTEON_IS_MODEL(OCTEON_CN73XX));
}
/* COP2 crypto: gated by fuses, with a dormant-crypto unlock path */
static inline int octeon_has_feature_OCTEON_FEATURE_CRYPTO(void)
{
	/* OCTEON II and later */
	u64 val;

	val = csr_rd(CVMX_MIO_FUS_DAT2);
	/* Crypto is unavailable if the no-crypto or no-multiplier fuse is blown */
	if (val & MIO_FUS_DAT2_NOCRYPTO || val & MIO_FUS_DAT2_NOMUL)
		return 0;
	else if (!(val & MIO_FUS_DAT2_DORM_CRYPTO))
		return 1;

	/* Dormant crypto: available only once the EER key is validated */
	val = csr_rd(CVMX_RNM_CTL_STATUS);
	return val & RNM_CTL_STATUS_EER_VAL;
}

/* Dormant crypto: crypto fuses intact but feature ships disabled */
static inline int octeon_has_feature_OCTEON_FEATURE_DORM_CRYPTO(void)
{
	/* OCTEON II and later */
	u64 val;

	val = csr_rd(CVMX_MIO_FUS_DAT2);
	return !(val & MIO_FUS_DAT2_NOCRYPTO) && !(val & MIO_FUS_DAT2_NOMUL) &&
		(val & MIO_FUS_DAT2_DORM_CRYPTO);
}

static inline int octeon_has_feature_OCTEON_FEATURE_PCIE(void)
{
	/* OCTEON II and later have PCIe */
	return true;
}
/* SRIO: fuse-gated on CNF75XX, model-fixed on CN63XX/CN66XX */
static inline int octeon_has_feature_OCTEON_FEATURE_SRIO(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CNF75XX))
		return cvmx_fuse_read(1601) != 0;

	return OCTEON_IS_MODEL(OCTEON_CN63XX) ||
	       OCTEON_IS_MODEL(OCTEON_CN66XX);
}
/* Interlaken interface */
static inline int octeon_has_feature_OCTEON_FEATURE_ILK(void)
{
	return (OCTEON_IS_MODEL(OCTEON_CN68XX) ||
		OCTEON_IS_MODEL(OCTEON_CN78XX));
}

static inline int octeon_has_feature_OCTEON_FEATURE_KEY_MEMORY(void)
{
	/* OCTEON II or later */
	return true;
}

static inline int octeon_has_feature_OCTEON_FEATURE_LED_CONTROLLER(void)
{
	return false;
}

/* Trace buffer: present on everything before OCTEON III */
static inline int octeon_has_feature_OCTEON_FEATURE_TRA(void)
{
	return !OCTEON_IS_OCTEON3();
}

static inline int octeon_has_feature_OCTEON_FEATURE_MGMT_PORT(void)
{
	/* OCTEON II or later */
	return true;
}

/* RAID unit: present on all parts except CNF75XX */
static inline int octeon_has_feature_OCTEON_FEATURE_RAID(void)
{
	return !OCTEON_IS_MODEL(OCTEON_CNF75XX);
}

static inline int octeon_has_feature_OCTEON_FEATURE_USB(void)
{
	return true;
}

static inline int octeon_has_feature_OCTEON_FEATURE_NO_WPTR(void)
{
	return true;
}
/* Plain DFA block: not present on any supported part (HFA supersedes it) */
static inline int octeon_has_feature_OCTEON_FEATURE_DFA(void)
{
	return 0;
}
/* HFA: absent on CNF75XX, fuse-gated (fuse 90) everywhere else */
static inline int octeon_has_feature_OCTEON_FEATURE_HFA(void)
{
	return OCTEON_IS_MODEL(OCTEON_CNF75XX) ? 0 : !cvmx_fuse_read(90);
}
/* HNA: CN78XX/CN73XX only, gated by fuse 134 */
static inline int octeon_has_feature_OCTEON_FEATURE_HNA(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN78XX) || OCTEON_IS_MODEL(OCTEON_CN73XX))
		return !cvmx_fuse_read(134);
	else
		return 0;
}
/* DFM: only CN63XX/CN66XX have the block, gated by fuse 90 */
static inline int octeon_has_feature_OCTEON_FEATURE_DFM(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN63XX) || OCTEON_IS_MODEL(OCTEON_CN66XX))
		return !cvmx_fuse_read(90);

	return 0;
}
static inline int octeon_has_feature_OCTEON_FEATURE_MDIO_CLAUSE_45(void)
{
	return true;
}

/* NPEI was replaced by SLI+DPI after CN52XX/CN56XX; never present here */
static inline int octeon_has_feature_OCTEON_FEATURE_NPEI(void)
{
	return false;
}

/* Port kinds (PKND) for the packet interface */
static inline int octeon_has_feature_OCTEON_FEATURE_PKND(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN68XX) ||
	       OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
	       OCTEON_IS_MODEL(OCTEON_CN73XX) ||
	       OCTEON_IS_MODEL(OCTEON_CN78XX);
}
/* CN68XX-specific work queue entry layout */
static inline int octeon_has_feature_OCTEON_FEATURE_CN68XX_WQE(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN68XX);
}

static inline int octeon_has_feature_OCTEON_FEATURE_CIU2(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN68XX);
}

static inline int octeon_has_feature_OCTEON_FEATURE_CIU3(void)
{
	return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
		OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
		OCTEON_IS_MODEL(OCTEON_CN73XX));
}

/* FPA3 free-pool allocator, first seen on CN78XX */
static inline int octeon_has_feature_OCTEON_FEATURE_FPA3(void)
{
	return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
		OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
		OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_NAND(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN63XX) ||
OCTEON_IS_MODEL(OCTEON_CN66XX) ||
OCTEON_IS_MODEL(OCTEON_CN68XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN70XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_DICI_MODE(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
OCTEON_IS_MODEL(OCTEON_CN70XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_BIT_EXTRACTOR(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN68XX_PASS2_X) ||
OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
OCTEON_IS_MODEL(OCTEON_CN70XX));
}
/* MMC/eMMC controller: CN61XX, CNF71XX and all Octeon III models. */
static inline int octeon_has_feature_OCTEON_FEATURE_MMC(void)
{
	if (OCTEON_IS_MODEL(OCTEON_CN61XX))
		return 1;
	if (OCTEON_IS_MODEL(OCTEON_CNF71XX))
		return 1;

	return OCTEON_IS_OCTEON3();
}
static inline int octeon_has_feature_OCTEON_FEATURE_ROM(void)
{
return OCTEON_IS_MODEL(OCTEON_CN66XX) ||
OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX);
}
/*
 * Authentik crypto variant: only possible on CN66XX/CN61XX/CNF71XX/
 * CN70XX; decided by the MIO_FUS_DAT2 fuses (standard crypto blown
 * away, dormant crypto present).
 */
static inline int octeon_has_feature_OCTEON_FEATURE_AUTHENTIK(void)
{
	u64 fus;

	if (!(OCTEON_IS_MODEL(OCTEON_CN66XX) ||
	      OCTEON_IS_MODEL(OCTEON_CN61XX) ||
	      OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
	      OCTEON_IS_MODEL(OCTEON_CN70XX)))
		return 0;

	fus = csr_rd(CVMX_MIO_FUS_DAT2);

	return (fus & MIO_FUS_DAT2_NOCRYPTO) &&
		(fus & MIO_FUS_DAT2_DORM_CRYPTO);
}
static inline int octeon_has_feature_OCTEON_FEATURE_MULTICAST_TIMER(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN66XX_PASS1_2) ||
OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
OCTEON_IS_MODEL(OCTEON_CN70XX));
}
/*
 * Multi-node support: full CN78XX parts only. CN76XX is a CN78XX
 * submodel, so it must be excluded explicitly before the family match.
 */
static inline int octeon_has_feature_OCTEON_FEATURE_MULTINODE(void)
{
	return (!OCTEON_IS_MODEL(OCTEON_CN76XX) &&
		OCTEON_IS_MODEL(OCTEON_CN78XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_CN78XX_WQE(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_SPI(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN66XX) ||
OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX) || OCTEON_IS_OCTEON3());
}
static inline int octeon_has_feature_OCTEON_FEATURE_PKI(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_PKO3(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_OCLA(void)
{
return OCTEON_IS_OCTEON3();
}
/*
 * Fetch-and-add unit (FAU): present everywhere except the newer
 * CN78XX/CNF75XX/CN73XX parts.
 */
static inline int octeon_has_feature_OCTEON_FEATURE_FAU(void)
{
	return !(OCTEON_IS_MODEL(OCTEON_CN78XX) ||
		 OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
		 OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_BGX(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_BGX_MIX(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN78XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN73XX));
}
static inline int octeon_has_feature_OCTEON_FEATURE_BGX_XCV(void)
{
return OCTEON_IS_MODEL(OCTEON_CN73XX);
}
static inline int octeon_has_feature_OCTEON_FEATURE_TSO(void)
{
return (OCTEON_IS_MODEL(OCTEON_CN73XX) ||
OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
OCTEON_IS_MODEL(OCTEON_CN78XX_PASS2_X));
}
static inline int octeon_has_feature_OCTEON_FEATURE_TDM(void)
{
return OCTEON_IS_MODEL(OCTEON_CN61XX) ||
OCTEON_IS_MODEL(OCTEON_CNF71XX) ||
OCTEON_IS_MODEL(OCTEON_CN70XX);
}
/*
 * PTP timestamping: all CN6XXX/CNF7XXX, plus CN73XX, CNF75XX and
 * CN78XX pass 2.x (note: pass 1.x CN78XX is deliberately excluded).
 */
static inline int octeon_has_feature_OCTEON_FEATURE_PTP(void)
{
	return OCTEON_IS_MODEL(OCTEON_CN6XXX) ||
		OCTEON_IS_MODEL(OCTEON_CNF7XXX) ||
		OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		OCTEON_IS_MODEL(OCTEON_CNF75XX) ||
		OCTEON_IS_MODEL(OCTEON_CN78XX_PASS2_X);
}
/*
* Answer ``Is the bit for feature set in the bitmap?''
* @param feature
* @return 1 when the feature is present and 0 otherwise, -1 in case of error.
*/
#define octeon_has_feature(feature_x) octeon_has_feature_##feature_x()
#endif /* __OCTEON_FEATURE_H__ */

View file

@ -0,0 +1,317 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OCTEON_MODEL_H__
#define __OCTEON_MODEL_H__
/*
* NOTE: These must match what is checked in common-config.mk
* Defines to represent the different versions of Octeon.
*
* IMPORTANT: When the default pass is updated for an Octeon Model,
* the corresponding change must also be made in the oct-sim script.
*
* The defines below should be used with the OCTEON_IS_MODEL() macro to
* determine what model of chip the software is running on. Models ending
* in 'XX' match multiple models (families), while specific models match only
* that model. If a pass (revision) is specified, then only that revision
* will be matched. Care should be taken when checking for both specific
* models and families that the specific models are checked for first.
* While these defines are similar to the processor ID, they are not intended
* to be used by anything other that the OCTEON_IS_MODEL framework, and
* the values are subject to change at anytime without notice.
*
* NOTE: only the OCTEON_IS_MODEL() macro/function and the OCTEON_CN* macros
* should be used outside of this file. All other macros are for internal
* use only, and may change without notice.
*/
#define OCTEON_FAMILY_MASK 0x00ffff00
#define OCTEON_PRID_MASK 0x00ffffff
/* Flag bits in top byte */
/* Ignores revision in model checks */
#define OM_IGNORE_REVISION 0x01000000
/* Check submodels */
#define OM_CHECK_SUBMODEL 0x02000000
/* Match all models previous than the one specified */
#define OM_MATCH_PREVIOUS_MODELS 0x04000000
/* Ignores the minor revision on newer parts */
#define OM_IGNORE_MINOR_REVISION 0x08000000
#define OM_FLAG_MASK 0xff000000
/* Match all cn5XXX Octeon models. */
#define OM_MATCH_5XXX_FAMILY_MODELS 0x20000000
/* Match all cn6XXX Octeon models. */
#define OM_MATCH_6XXX_FAMILY_MODELS 0x40000000
/* Match all cnf7XXX Octeon models. */
#define OM_MATCH_F7XXX_FAMILY_MODELS 0x80000000
/* Match all cn7XXX Octeon models. */
#define OM_MATCH_7XXX_FAMILY_MODELS 0x10000000
#define OM_MATCH_FAMILY_MODELS (OM_MATCH_5XXX_FAMILY_MODELS | \
OM_MATCH_6XXX_FAMILY_MODELS | \
OM_MATCH_F7XXX_FAMILY_MODELS | \
OM_MATCH_7XXX_FAMILY_MODELS)
/*
* CN7XXX models with new revision encoding
*/
#define OCTEON_CNF75XX_PASS1_0 0x000d9800
#define OCTEON_CNF75XX_PASS1_2 0x000d9802
#define OCTEON_CNF75XX_PASS1_3 0x000d9803
#define OCTEON_CNF75XX (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CNF75XX_PASS1_X \
(OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN73XX_PASS1_0 0x000d9700
#define OCTEON_CN73XX_PASS1_1 0x000d9701
#define OCTEON_CN73XX_PASS1_2 0x000d9702
#define OCTEON_CN73XX_PASS1_3 0x000d9703
#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN73XX_PASS1_X \
(OCTEON_CN73XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN72XX OCTEON_CN73XX
#define OCTEON_CN23XX OCTEON_CN73XX
#define OCTEON_CN23XX_PASS1_2 OCTEON_CN73XX_PASS1_2
#define OCTEON_CN23XX_PASS1_3 OCTEON_CN73XX_PASS1_3
#define OCTEON_CN70XX_PASS1_0 0x000d9600
#define OCTEON_CN70XX_PASS1_1 0x000d9601
#define OCTEON_CN70XX_PASS1_2 0x000d9602
#define OCTEON_CN70XX_PASS2_0 0x000d9608
#define OCTEON_CN70XX (OCTEON_CN70XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN70XX_PASS1_X \
(OCTEON_CN70XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN70XX_PASS2_X \
(OCTEON_CN70XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN71XX OCTEON_CN70XX
#define OCTEON_CN78XX_PASS1_0 0x000d9500
#define OCTEON_CN78XX_PASS1_1 0x000d9501
#define OCTEON_CN78XX_PASS2_0 0x000d9508
#define OCTEON_CN78XX (OCTEON_CN78XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN78XX_PASS1_X \
(OCTEON_CN78XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN78XX_PASS2_X \
(OCTEON_CN78XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN76XX (0x000d9540 | OM_CHECK_SUBMODEL)
/*
* CNF7XXX models with new revision encoding
*/
#define OCTEON_CNF71XX_PASS1_0 0x000d9400
#define OCTEON_CNF71XX_PASS1_1 0x000d9401
#define OCTEON_CNF71XX (OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CNF71XX_PASS1_X \
(OCTEON_CNF71XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/*
* CN6XXX models with new revision encoding
*/
#define OCTEON_CN68XX_PASS1_0 0x000d9100
#define OCTEON_CN68XX_PASS1_1 0x000d9101
#define OCTEON_CN68XX_PASS2_0 0x000d9108
#define OCTEON_CN68XX_PASS2_1 0x000d9109
#define OCTEON_CN68XX_PASS2_2 0x000d910a
#define OCTEON_CN68XX (OCTEON_CN68XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN68XX_PASS1_X \
(OCTEON_CN68XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS2_X \
(OCTEON_CN68XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN68XX_PASS1 OCTEON_CN68XX_PASS1_X
#define OCTEON_CN68XX_PASS2 OCTEON_CN68XX_PASS2_X
#define OCTEON_CN66XX_PASS1_0 0x000d9200
#define OCTEON_CN66XX_PASS1_2 0x000d9202
#define OCTEON_CN66XX (OCTEON_CN66XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN66XX_PASS1_X \
(OCTEON_CN66XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN63XX_PASS1_0 0x000d9000
#define OCTEON_CN63XX_PASS1_1 0x000d9001
#define OCTEON_CN63XX_PASS1_2 0x000d9002
#define OCTEON_CN63XX_PASS2_0 0x000d9008
#define OCTEON_CN63XX_PASS2_1 0x000d9009
#define OCTEON_CN63XX_PASS2_2 0x000d900a
#define OCTEON_CN63XX (OCTEON_CN63XX_PASS2_0 | OM_IGNORE_REVISION)
#define OCTEON_CN63XX_PASS1_X \
(OCTEON_CN63XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
#define OCTEON_CN63XX_PASS2_X \
(OCTEON_CN63XX_PASS2_0 | OM_IGNORE_MINOR_REVISION)
/* CN62XX is same as CN63XX with 1 MB cache */
#define OCTEON_CN62XX OCTEON_CN63XX
#define OCTEON_CN61XX_PASS1_0 0x000d9300
#define OCTEON_CN61XX_PASS1_1 0x000d9301
#define OCTEON_CN61XX (OCTEON_CN61XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN61XX_PASS1_X \
(OCTEON_CN61XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
/* CN60XX is same as CN61XX with 512 KB cache */
#define OCTEON_CN60XX OCTEON_CN61XX
/* This matches the complete family of CN6XXX CPUs, and not subsequent models */
#define OCTEON_CN6XXX \
(OCTEON_CN63XX_PASS1_0 | OM_MATCH_6XXX_FAMILY_MODELS)
#define OCTEON_CNF7XXX \
(OCTEON_CNF71XX_PASS1_0 | OM_MATCH_F7XXX_FAMILY_MODELS)
#define OCTEON_CN7XXX \
(OCTEON_CN78XX_PASS1_0 | OM_MATCH_7XXX_FAMILY_MODELS)
/*
* The revision byte (low byte) has two different encodings.
* CN3XXX:
*
* bits
* <7:5>: reserved (0)
* <4>: alternate package
* <3:0>: revision
*
 * CN5XXX and newer models:
*
* bits
* <7>: reserved (0)
* <6>: alternate package
* <5:3>: major revision
* <2:0>: minor revision
*/
/* Masks used for the various types of model/family/revision matching */
#define OCTEON_38XX_FAMILY_MASK 0x00ffff00
#define OCTEON_38XX_FAMILY_REV_MASK 0x00ffff0f
#define OCTEON_38XX_MODEL_MASK 0x00ffff10
#define OCTEON_38XX_MODEL_REV_MASK \
(OCTEON_38XX_FAMILY_REV_MASK | OCTEON_38XX_MODEL_MASK)
/* CN5XXX and later use different layout of bits in the revision ID field */
#define OCTEON_58XX_FAMILY_MASK OCTEON_38XX_FAMILY_MASK
#define OCTEON_58XX_FAMILY_REV_MASK 0x00ffff3f
#define OCTEON_58XX_MODEL_MASK 0x00ffff40
#define OCTEON_58XX_MODEL_REV_MASK \
(OCTEON_58XX_FAMILY_REV_MASK | OCTEON_58XX_MODEL_MASK)
#define OCTEON_58XX_MODEL_MINOR_REV_MASK \
(OCTEON_58XX_MODEL_REV_MASK & 0x00ffff38)
#define OCTEON_5XXX_MODEL_MASK 0x00ff0fc0
#define __OCTEON_MATCH_MASK__(X, Y, Z) \
({ \
typeof(X) x = (X); \
typeof(Y) y = (Y); \
typeof(Z) z = (Z); \
(x & z) == (y & z); \
})
/*
* __OCTEON_IS_MODEL_COMPILE__(arg_model, chip_model)
* returns true if chip_model is identical or belong to the OCTEON
* model group specified in arg_model.
*/
/* Helper macros to make to following macro compacter */
#define OM_MASK OM_FLAG_MASK
#define OM_MATCH_MASK __OCTEON_MATCH_MASK__
#define OM_MATCH_PREVIOUS OM_MATCH_PREVIOUS_MODELS
#define __OCTEON_IS_MODEL_COMPILE__(A, B) \
({ \
typeof(A) a = (A); \
typeof(B) b = (B); \
(((((((a) & OM_MASK) == (OM_IGNORE_REVISION | OM_CHECK_SUBMODEL)) && \
OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MASK)) || \
((((a) & OM_MASK) == 0) && \
OM_MATCH_MASK((b), (a), OCTEON_58XX_FAMILY_REV_MASK)) || \
((((a) & OM_MASK) == OM_IGNORE_MINOR_REVISION) && \
OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MINOR_REV_MASK)) || \
((((a) & OM_MASK) == OM_CHECK_SUBMODEL) && \
OM_MATCH_MASK((b), (a), OCTEON_58XX_MODEL_MASK)) || \
((((a) & OM_MASK) == OM_IGNORE_REVISION) && \
OM_MATCH_MASK((b), (a), OCTEON_58XX_FAMILY_MASK)) || \
((((a) & (OM_MATCH_5XXX_FAMILY_MODELS)) == \
OM_MATCH_5XXX_FAMILY_MODELS) && \
((b & OCTEON_PRID_MASK) < OCTEON_CN63XX_PASS1_0)) || \
((((a) & (OM_MATCH_6XXX_FAMILY_MODELS)) == \
OM_MATCH_6XXX_FAMILY_MODELS) && \
((b & OCTEON_PRID_MASK) >= OCTEON_CN63XX_PASS1_0) && \
((b & OCTEON_PRID_MASK) < OCTEON_CNF71XX_PASS1_0)) || \
((((a) & (OM_MATCH_F7XXX_FAMILY_MODELS)) == \
OM_MATCH_F7XXX_FAMILY_MODELS) && \
((b & OCTEON_PRID_MASK) >= OCTEON_CNF71XX_PASS1_0) && \
((b & OCTEON_PRID_MASK) < OCTEON_CN78XX_PASS1_0)) || \
((((a) & (OM_MATCH_7XXX_FAMILY_MODELS)) == \
OM_MATCH_7XXX_FAMILY_MODELS) && ((b & OCTEON_PRID_MASK) >= \
OCTEON_CN78XX_PASS1_0)) || \
((((a) & (OM_MATCH_PREVIOUS)) == OM_MATCH_PREVIOUS) && \
(((b) & OCTEON_58XX_MODEL_MASK) < ((a) & OCTEON_58XX_MODEL_MASK))) \
))); \
})
#ifndef __ASSEMBLY__
#ifndef OCTEON_IS_MODEL
/*
 * Compare the running CPU's PrID (coprocessor 0) against @model using
 * the model/family matching rules of __OCTEON_IS_MODEL_COMPILE__().
 */
static inline int __octeon_is_model_runtime_internal__(u32 model)
{
	u32 prid = read_c0_prid();

	return __OCTEON_IS_MODEL_COMPILE__(model, prid);
}
/* Thin wrapper around the internal runtime model check. */
static inline int __octeon_is_model_runtime__(u32 model)
{
	return __octeon_is_model_runtime_internal__(model);
}
/*
* The OCTEON_IS_MODEL macro should be used for all Octeon model checking done
* in a program.
* This should be kept runtime if at all possible and must be conditionalized
* with OCTEON_IS_COMMON_BINARY() if runtime checking support is required.
*
* Use of the macro in preprocessor directives ( #if OCTEON_IS_MODEL(...) )
* is NOT SUPPORTED, and should be replaced with CVMX_COMPILED_FOR()
* I.e.:
* #if OCTEON_IS_MODEL(OCTEON_CN56XX) -> #if CVMX_COMPILED_FOR(OCTEON_CN56XX)
*/
#define OCTEON_IS_MODEL(x) __octeon_is_model_runtime__(x)
#define OCTEON_IS_COMMON_BINARY() 1
#undef OCTEON_MODEL
#endif
#define OCTEON_IS_OCTEON2() \
(OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
#define OCTEON_IS_OCTEON3() OCTEON_IS_MODEL(OCTEON_CN7XXX)
const char *octeon_model_get_string(u32 chip_id);
const char *octeon_model_get_string_buffer(u32 chip_id, char *buffer);
/**
* Return the octeon family, i.e., ProcessorID of the PrID register.
*
* @return the octeon family on success, ((u32)-1) on error.
*/
static inline u32 cvmx_get_octeon_family(void)
{
	/* Mask off the revision byte; keep only the family field of PrID */
	return (read_c0_prid() & OCTEON_FAMILY_MASK);
}
#endif /* __ASSEMBLY__ */
#endif /* __OCTEON_MODEL_H__ */

View file

@ -0,0 +1,982 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#ifndef __OCTEON_DDR_H_
#define __OCTEON_DDR_H_
#include <env.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <mach/octeon-model.h>
#include <mach/cvmx/cvmx-lmcx-defs.h>
/* Mapping is done starting from 0x11800.80000000 */
#define CVMX_L2C_CTL 0x00800000
#define CVMX_L2C_BIG_CTL 0x00800030
#define CVMX_L2C_TADX_INT(i) (0x00a00028 + (((i) & 7) * 0x40000))
#define CVMX_L2C_MCIX_INT(i) (0x00c00028 + (((i) & 3) * 0x40000))
/* Some "external" (non-LMC) registers */
#define CVMX_IPD_CLK_COUNT 0x00014F0000000338
#define CVMX_FPA_CLK_COUNT 0x00012800000000F0
#define CVMX_NODE_MEM_SHIFT 40
#define DDR_INTERFACE_MAX 4
/* Private data struct */
/* Private data struct */
struct ddr_priv {
	void __iomem *lmc_base;		/* mapped base of LMC CSR window */
	void __iomem *l2c_base;		/* mapped base of L2C CSR window */
	/* One "clock already set up" flag per DDR interface */
	bool ddr_clock_initialized[DDR_INTERFACE_MAX];
	bool ddr_memory_preserved;	/* keep existing RAM contents */
	u32 flags;			/* FLAG_xxx bits defined below */
	struct ram_info info;		/* RAM base/size handed to U-Boot */
};
/* Short cut to convert a number to megabytes */
#define MB(X) ((u64)(X) * (u64)(1024 * 1024))
#define octeon_is_cpuid(x) (__OCTEON_IS_MODEL_COMPILE__(x, read_c0_prid()))
#define strtoull simple_strtoull
/* Access LMC registers */
/* Read a 64-bit LMC CSR at byte offset @addr from the mapped LMC base */
static inline u64 lmc_rd(struct ddr_priv *priv, u64 addr)
{
	return ioread64(priv->lmc_base + addr);
}
/* Write a 64-bit LMC CSR at byte offset @addr from the mapped LMC base */
static inline void lmc_wr(struct ddr_priv *priv, u64 addr, u64 val)
{
	iowrite64(val, priv->lmc_base + addr);
}
/* Access L2C registers */
/* Read a 64-bit L2C CSR at byte offset @addr from the mapped L2C base */
static inline u64 l2c_rd(struct ddr_priv *priv, u64 addr)
{
	return ioread64(priv->l2c_base + addr);
}
/* Write a 64-bit L2C CSR at byte offset @addr from the mapped L2C base */
static inline void l2c_wr(struct ddr_priv *priv, u64 addr, u64 val)
{
	iowrite64(val, priv->l2c_base + addr);
}
/* Access other CSR registers not located inside the LMC address space */
/*
 * Read a CSR that lies outside the LMC/L2C windows, e.g.
 * CVMX_IPD_CLK_COUNT. The address is ioremapped on every call and
 * never unmapped — NOTE(review): harmless only if ioremap_nocache()
 * is a pure address translation on this platform; confirm.
 */
static inline u64 csr_rd(u64 addr)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	return ioread64(base);
}
/*
 * Write a CSR that lies outside the LMC/L2C windows. As with csr_rd(),
 * the mapping is created per call and never released.
 *
 * Fix: the original did "return iowrite64(...)" — returning a void
 * expression from a void function is a C constraint violation
 * (C11 6.8.6.4), accepted only as a compiler extension.
 */
static inline void csr_wr(u64 addr, u64 val)
{
	void __iomem *base;

	base = ioremap_nocache(addr, 0x100);
	iowrite64(val, base);
}
/* "Normal" access, without any offsets and/or mapping */
/* Plain 64-bit load through a direct pointer cast — no mapping/offset */
static inline u64 cvmx_read64_uint64(u64 addr)
{
	return readq((void *)addr);
}
/* Plain 64-bit store through a direct pointer cast — no mapping/offset */
static inline void cvmx_write64_uint64(u64 addr, u64 val)
{
	writeq(val, (void *)addr);
}
/* Failsafe mode */
#define FLAG_FAILSAFE_MODE 0x01000
/* Note that the DDR clock initialized flags must be contiguous */
/* Clock for DDR 0 initialized */
#define FLAG_DDR0_CLK_INITIALIZED 0x02000
/* Clock for DDR 1 initialized */
#define FLAG_DDR1_CLK_INITIALIZED 0x04000
/* Clock for DDR 2 initialized */
#define FLAG_DDR2_CLK_INITIALIZED 0x08000
/* Clock for DDR 3 initialized */
#define FLAG_DDR3_CLK_INITIALIZED 0x10000
/* Loaded into RAM externally */
#define FLAG_RAM_RESIDENT 0x20000
/* Verbose DDR information */
#define FLAG_DDR_VERBOSE 0x40000
/* Check env. for DDR variables */
#define FLAG_DDR_DEBUG 0x80000
#define FLAG_DDR_TRACE_INIT 0x100000
#define FLAG_MEMORY_PRESERVED 0x200000
#define FLAG_DFM_VERBOSE 0x400000
#define FLAG_DFM_TRACE_INIT 0x800000
/* DFM memory clock initialized */
#define FLAG_DFM_CLK_INITIALIZED 0x1000000
/* EEPROM clock descr. missing */
#define FLAG_CLOCK_DESC_MISSING 0x2000000
/* EEPROM board descr. missing */
#define FLAG_BOARD_DESC_MISSING 0x4000000
#define FLAG_DDR_PROMPT 0x8000000
#ifndef DDR_NO_DEBUG
/* Nonzero when verbose DDR init output was requested via priv->flags */
static inline int ddr_verbose(struct ddr_priv *priv)
{
	return (priv->flags & FLAG_DDR_VERBOSE) != 0;
}
static inline char *ddr_getenv_debug(struct ddr_priv *priv, char *name)
{
if (priv->flags & FLAG_FAILSAFE_MODE)
return NULL;
if (priv->flags & FLAG_DDR_DEBUG)
return env_get(name);
return NULL;
}
#else
/*
 * DDR_NO_DEBUG stub: always quiet. NOTE(review): this variant takes no
 * argument while the debug variant takes struct ddr_priv * — callers
 * must compile against only one of the two; confirm call sites.
 */
static inline int ddr_verbose(void)
{
	return 0;
}
#endif
/* turn the variable name into a string */
#define CVMX_TMP_STR(x) CVMX_TMP_STR2(x)
#define CVMX_TMP_STR2(x) #x
#define CVMX_SYNC asm volatile ("sync" : : : "memory")
#define CVMX_CACHE(op, address, offset) \
asm volatile ("cache " CVMX_TMP_STR(op) ", " \
CVMX_TMP_STR(offset) "(%[rbase])" \
: : [rbase] "d" (address))
/* unlock the state */
#define CVMX_CACHE_WBIL2(address, offset) \
CVMX_CACHE(23, address, offset)
/* complete prefetches, invalidate entire dcache */
#define CVMX_DCACHE_INVALIDATE \
{ CVMX_SYNC; asm volatile ("cache 9, 0($0)" : : ); }
/**
* cvmx_l2c_cfg
*
* Specify the RSL base addresses for the block
*
* L2C_CFG = L2C Configuration
*
* Description:
*/
union cvmx_l2c_cfg {
u64 u64;
struct cvmx_l2c_cfg_s {
uint64_t reserved_20_63:44;
uint64_t bstrun:1;
uint64_t lbist:1;
uint64_t xor_bank:1;
uint64_t dpres1:1;
uint64_t dpres0:1;
uint64_t dfill_dis:1;
uint64_t fpexp:4;
uint64_t fpempty:1;
uint64_t fpen:1;
uint64_t idxalias:1;
uint64_t mwf_crd:4;
uint64_t rsp_arb_mode:1;
uint64_t rfb_arb_mode:1;
uint64_t lrf_arb_mode:1;
} s;
};
/**
* cvmx_l2c_ctl
*
* L2C_CTL = L2C Control
*
*
* Notes:
* (1) If MAXVAB is != 0, VAB_THRESH should be less than MAXVAB.
*
* (2) L2DFDBE and L2DFSBE allows software to generate L2DSBE, L2DDBE, VBFSBE,
* and VBFDBE errors for the purposes of testing error handling code. When
* one (or both) of these bits are set a PL2 which misses in the L2 will fill
* with the appropriate error in the first 2 OWs of the fill. Software can
* determine which OW pair gets the error by choosing the desired fill order
* (address<6:5>). A PL2 which hits in the L2 will not inject any errors.
* Therefore sending a WBIL2 prior to the PL2 is recommended to make a miss
* likely (if multiple processors are involved software must be careful to be
* sure no other processor or IO device can bring the block into the L2).
*
* To generate a VBFSBE or VBFDBE, software must first get the cache block
* into the cache with an error using a PL2 which misses the L2. Then a
* store partial to a portion of the cache block without the error must
* change the block to dirty. Then, a subsequent WBL2/WBIL2/victim will
* trigger the VBFSBE/VBFDBE error.
*/
union cvmx_l2c_ctl {
u64 u64;
struct cvmx_l2c_ctl_s {
uint64_t reserved_29_63:35;
uint64_t rdf_fast:1;
uint64_t disstgl2i:1;
uint64_t l2dfsbe:1;
uint64_t l2dfdbe:1;
uint64_t discclk:1;
uint64_t maxvab:4;
uint64_t maxlfb:4;
uint64_t rsp_arb_mode:1;
uint64_t xmc_arb_mode:1;
uint64_t reserved_2_13:12;
uint64_t disecc:1;
uint64_t disidxalias:1;
} s;
struct cvmx_l2c_ctl_cn73xx {
uint64_t reserved_32_63:32;
uint64_t ocla_qos:3;
uint64_t reserved_28_28:1;
uint64_t disstgl2i:1;
uint64_t reserved_25_26:2;
uint64_t discclk:1;
uint64_t reserved_16_23:8;
uint64_t rsp_arb_mode:1;
uint64_t xmc_arb_mode:1;
uint64_t rdf_cnt:8;
uint64_t reserved_4_5:2;
uint64_t disldwb:1;
uint64_t dissblkdty:1;
uint64_t disecc:1;
uint64_t disidxalias:1;
} cn73xx;
struct cvmx_l2c_ctl_cn73xx cn78xx;
};
/**
* cvmx_l2c_big_ctl
*
* L2C_BIG_CTL = L2C Big memory control register
*
*
* Notes:
* (1) BIGRD interrupts can occur during normal operation as the PP's are
* allowed to prefetch to non-existent memory locations. Therefore,
* BIGRD is for informational purposes only.
*
* (2) When HOLEWR/BIGWR blocks a store L2C_VER_ID, L2C_VER_PP, L2C_VER_IOB,
* and L2C_VER_MSC will be loaded just like a store which is blocked by VRTWR.
* Additionally, L2C_ERR_XMC will be loaded.
*/
union cvmx_l2c_big_ctl {
u64 u64;
struct cvmx_l2c_big_ctl_s {
uint64_t reserved_8_63:56;
uint64_t maxdram:4;
uint64_t reserved_0_3:4;
} s;
struct cvmx_l2c_big_ctl_cn61xx {
uint64_t reserved_8_63:56;
uint64_t maxdram:4;
uint64_t reserved_1_3:3;
uint64_t disable:1;
} cn61xx;
struct cvmx_l2c_big_ctl_cn61xx cn63xx;
struct cvmx_l2c_big_ctl_cn61xx cn66xx;
struct cvmx_l2c_big_ctl_cn61xx cn68xx;
struct cvmx_l2c_big_ctl_cn61xx cn68xxp1;
struct cvmx_l2c_big_ctl_cn70xx {
uint64_t reserved_8_63:56;
uint64_t maxdram:4;
uint64_t reserved_1_3:3;
uint64_t disbig:1;
} cn70xx;
struct cvmx_l2c_big_ctl_cn70xx cn70xxp1;
struct cvmx_l2c_big_ctl_cn70xx cn73xx;
struct cvmx_l2c_big_ctl_cn70xx cn78xx;
struct cvmx_l2c_big_ctl_cn70xx cn78xxp1;
struct cvmx_l2c_big_ctl_cn61xx cnf71xx;
struct cvmx_l2c_big_ctl_cn70xx cnf75xx;
};
struct rlevel_byte_data {
int delay;
int loop_total;
int loop_count;
int best;
u64 bm;
int bmerrs;
int sqerrs;
int bestsq;
};
#define DEBUG_VALIDATE_BITMASK 0
#if DEBUG_VALIDATE_BITMASK
#define debug_bitmask_print printf
#else
#define debug_bitmask_print(...)
#endif
#define RLEVEL_BITMASK_TRAILING_BITS_ERROR 5
// FIXME? now less than TOOLONG
#define RLEVEL_BITMASK_BUBBLE_BITS_ERROR 11
#define RLEVEL_BITMASK_NARROW_ERROR 6
#define RLEVEL_BITMASK_BLANK_ERROR 100
#define RLEVEL_BITMASK_TOOLONG_ERROR 12
#define RLEVEL_NONSEQUENTIAL_DELAY_ERROR 50
#define RLEVEL_ADJACENT_DELAY_ERROR 30
/*
* Apply a filter to the BITMASK results returned from Octeon
* read-leveling to determine the most likely delay result. This
* computed delay may be used to qualify the delay result returned by
* Octeon. Accumulate an error penalty for invalid characteristics of
* the bitmask so that they can be used to select the most reliable
* results.
*
* The algorithm searches for the largest contiguous MASK within a
* maximum RANGE of bits beginning with the MSB.
*
* 1. a MASK with a WIDTH less than 4 will be penalized
* 2. Bubbles in the bitmask that occur before or after the MASK
* will be penalized
* 3. If there are no trailing bubbles then extra bits that occur
* beyond the maximum RANGE will be penalized.
*
* +++++++++++++++++++++++++++++++++++++++++++++++++++
* + +
* + e.g. bitmask = 27B00 +
* + +
* + 63 +--- mstart 0 +
* + | | | +
* + | +---------+ +--- fb | +
* + | | range | | | +
* + V V V V V +
* + +
* + 0 0 ... 1 0 0 1 1 1 1 0 1 1 0 0 0 0 0 0 0 0 +
* + +
* + ^ ^ ^ +
* + | | mask| +
* + lb ---+ +-----+ +
* + width +
* + +
* +++++++++++++++++++++++++++++++++++++++++++++++++++
*/
struct rlevel_bitmask {
u64 bm;
u8 mstart;
u8 width;
int errs;
};
#define MASKRANGE_BITS 6
#define MASKRANGE ((1 << MASKRANGE_BITS) - 1)
/* data field addresses in the DDR2 SPD eeprom */
enum ddr2_spd_addrs {
DDR2_SPD_BYTES_PROGRAMMED = 0,
DDR2_SPD_TOTAL_BYTES = 1,
DDR2_SPD_MEM_TYPE = 2,
DDR2_SPD_NUM_ROW_BITS = 3,
DDR2_SPD_NUM_COL_BITS = 4,
DDR2_SPD_NUM_RANKS = 5,
DDR2_SPD_CYCLE_CLX = 9,
DDR2_SPD_CONFIG_TYPE = 11,
DDR2_SPD_REFRESH = 12,
DDR2_SPD_SDRAM_WIDTH = 13,
DDR2_SPD_BURST_LENGTH = 16,
DDR2_SPD_NUM_BANKS = 17,
DDR2_SPD_CAS_LATENCY = 18,
DDR2_SPD_DIMM_TYPE = 20,
DDR2_SPD_CYCLE_CLX1 = 23,
DDR2_SPD_CYCLE_CLX2 = 25,
DDR2_SPD_TRP = 27,
DDR2_SPD_TRRD = 28,
DDR2_SPD_TRCD = 29,
DDR2_SPD_TRAS = 30,
DDR2_SPD_TWR = 36,
DDR2_SPD_TWTR = 37,
DDR2_SPD_TRFC_EXT = 40,
DDR2_SPD_TRFC = 42,
DDR2_SPD_CHECKSUM = 63,
DDR2_SPD_MFR_ID = 64
};
/* data field addresses in the DDR3 SPD eeprom */
enum ddr3_spd_addrs {
DDR3_SPD_BYTES_PROGRAMMED = 0,
DDR3_SPD_REVISION = 1,
DDR3_SPD_KEY_BYTE_DEVICE_TYPE = 2,
DDR3_SPD_KEY_BYTE_MODULE_TYPE = 3,
DDR3_SPD_DENSITY_BANKS = 4,
DDR3_SPD_ADDRESSING_ROW_COL_BITS = 5,
DDR3_SPD_NOMINAL_VOLTAGE = 6,
DDR3_SPD_MODULE_ORGANIZATION = 7,
DDR3_SPD_MEMORY_BUS_WIDTH = 8,
DDR3_SPD_FINE_TIMEBASE_DIVIDEND_DIVISOR = 9,
DDR3_SPD_MEDIUM_TIMEBASE_DIVIDEND = 10,
DDR3_SPD_MEDIUM_TIMEBASE_DIVISOR = 11,
DDR3_SPD_MINIMUM_CYCLE_TIME_TCKMIN = 12,
DDR3_SPD_CAS_LATENCIES_LSB = 14,
DDR3_SPD_CAS_LATENCIES_MSB = 15,
DDR3_SPD_MIN_CAS_LATENCY_TAAMIN = 16,
DDR3_SPD_MIN_WRITE_RECOVERY_TWRMIN = 17,
DDR3_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 18,
DDR3_SPD_MIN_ROW_ACTIVE_DELAY_TRRDMIN = 19,
DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 20,
DDR3_SPD_UPPER_NIBBLES_TRAS_TRC = 21,
DDR3_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 22,
DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 23,
DDR3_SPD_MIN_REFRESH_RECOVERY_LSB_TRFCMIN = 24,
DDR3_SPD_MIN_REFRESH_RECOVERY_MSB_TRFCMIN = 25,
DDR3_SPD_MIN_INTERNAL_WRITE_READ_CMD_TWTRMIN = 26,
DDR3_SPD_MIN_INTERNAL_READ_PRECHARGE_CMD_TRTPMIN = 27,
DDR3_SPD_UPPER_NIBBLE_TFAW = 28,
DDR3_SPD_MIN_FOUR_ACTIVE_WINDOW_TFAWMIN = 29,
DDR3_SPD_SDRAM_OPTIONAL_FEATURES = 30,
DDR3_SPD_SDRAM_THERMAL_REFRESH_OPTIONS = 31,
DDR3_SPD_MODULE_THERMAL_SENSOR = 32,
DDR3_SPD_SDRAM_DEVICE_TYPE = 33,
DDR3_SPD_MINIMUM_CYCLE_TIME_FINE_TCKMIN = 34,
DDR3_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 35,
DDR3_SPD_MIN_RAS_CAS_DELAY_FINE_TRCDMIN = 36,
DDR3_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 37,
DDR3_SPD_MIN_ACTIVE_REFRESH_LSB_FINE_TRCMIN = 38,
DDR3_SPD_REFERENCE_RAW_CARD = 62,
DDR3_SPD_ADDRESS_MAPPING = 63,
DDR3_SPD_REGISTER_MANUFACTURER_ID_LSB = 65,
DDR3_SPD_REGISTER_MANUFACTURER_ID_MSB = 66,
DDR3_SPD_REGISTER_REVISION_NUMBER = 67,
DDR3_SPD_MODULE_SERIAL_NUMBER = 122,
DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
DDR3_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
DDR3_SPD_MODULE_PART_NUMBER = 128
};
/* data field addresses in the DDR4 SPD eeprom */
enum ddr4_spd_addrs {
DDR4_SPD_BYTES_PROGRAMMED = 0,
DDR4_SPD_REVISION = 1,
DDR4_SPD_KEY_BYTE_DEVICE_TYPE = 2,
DDR4_SPD_KEY_BYTE_MODULE_TYPE = 3,
DDR4_SPD_DENSITY_BANKS = 4,
DDR4_SPD_ADDRESSING_ROW_COL_BITS = 5,
DDR4_SPD_PACKAGE_TYPE = 6,
DDR4_SPD_OPTIONAL_FEATURES = 7,
DDR4_SPD_THERMAL_REFRESH_OPTIONS = 8,
DDR4_SPD_OTHER_OPTIONAL_FEATURES = 9,
DDR4_SPD_SECONDARY_PACKAGE_TYPE = 10,
DDR4_SPD_MODULE_NOMINAL_VOLTAGE = 11,
DDR4_SPD_MODULE_ORGANIZATION = 12,
DDR4_SPD_MODULE_MEMORY_BUS_WIDTH = 13,
DDR4_SPD_MODULE_THERMAL_SENSOR = 14,
DDR4_SPD_RESERVED_BYTE15 = 15,
DDR4_SPD_RESERVED_BYTE16 = 16,
DDR4_SPD_TIMEBASES = 17,
DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN = 18,
DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX = 19,
DDR4_SPD_CAS_LATENCIES_BYTE0 = 20,
DDR4_SPD_CAS_LATENCIES_BYTE1 = 21,
DDR4_SPD_CAS_LATENCIES_BYTE2 = 22,
DDR4_SPD_CAS_LATENCIES_BYTE3 = 23,
DDR4_SPD_MIN_CAS_LATENCY_TAAMIN = 24,
DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN = 25,
DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN = 26,
DDR4_SPD_UPPER_NIBBLES_TRAS_TRC = 27,
DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN = 28,
DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN = 29,
DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN = 30,
DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN = 31,
DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC2MIN = 32,
DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC2MIN = 33,
DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC4MIN = 34,
DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC4MIN = 35,
DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_MSN_TFAWMIN = 36,
DDR4_SPD_MIN_FOUR_ACTIVE_WINDOW_LSB_TFAWMIN = 37,
DDR4_SPD_MIN_ROW_ACTIVE_DELAY_SAME_TRRD_SMIN = 38,
DDR4_SPD_MIN_ROW_ACTIVE_DELAY_DIFF_TRRD_LMIN = 39,
DDR4_SPD_MIN_CAS_TO_CAS_DELAY_TCCD_LMIN = 40,
DDR4_SPD_MIN_CAS_TO_CAS_DELAY_FINE_TCCD_LMIN = 117,
DDR4_SPD_MIN_ACT_TO_ACT_DELAY_SAME_FINE_TRRD_LMIN = 118,
DDR4_SPD_MIN_ACT_TO_ACT_DELAY_DIFF_FINE_TRRD_SMIN = 119,
DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN = 120,
DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN = 121,
DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN = 122,
DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN = 123,
DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX = 124,
DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN = 125,
DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_LOWER_NIBBLE = 126,
DDR4_SPD_CYCLICAL_REDUNDANCY_CODE_UPPER_NIBBLE = 127,
DDR4_SPD_REFERENCE_RAW_CARD = 130,
DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE = 131,
DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB = 133,
DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB = 134,
DDR4_SPD_REGISTER_REVISION_NUMBER = 135,
DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM = 136,
DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CTL = 137,
DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK = 138,
};
#define SPD_EEPROM_SIZE (DDR4_SPD_RDIMM_REGISTER_DRIVE_STRENGTH_CK + 1)
struct impedence_values {
unsigned char *rodt_ohms;
unsigned char *rtt_nom_ohms;
unsigned char *rtt_nom_table;
unsigned char *rtt_wr_ohms;
unsigned char *dic_ohms;
short *drive_strength;
short *dqx_strength;
};
#define RODT_OHMS_COUNT 8
#define RTT_NOM_OHMS_COUNT 8
#define RTT_NOM_TABLE_COUNT 8
#define RTT_WR_OHMS_COUNT 8
#define DIC_OHMS_COUNT 3
#define DRIVE_STRENGTH_COUNT 15
/*
* Structure that provides DIMM information, either in the form of an SPD
* TWSI address, or a pointer to an array that contains SPD data. One of
* the two fields must be valid.
*/
struct dimm_config {
u16 spd_addrs[2]; /* TWSI address of SPD, 0 if not used */
u8 *spd_ptrs[2]; /* pointer to SPD data array, NULL if not used */
int spd_cached[2];
u8 spd_data[2][SPD_EEPROM_SIZE];
};
struct dimm_odt_config {
u8 odt_ena; /* FIX: dqx_ctl for Octeon 3 DDR4 */
u64 odt_mask; /* FIX: wodt_mask for Octeon 3 */
union cvmx_lmcx_modereg_params1 modereg_params1;
union cvmx_lmcx_modereg_params2 modereg_params2;
u8 qs_dic; /* FIX: rodt_ctl for Octeon 3 */
u64 rodt_ctl; /* FIX: rodt_mask for Octeon 3 */
u8 dic;
};
/*
 * Board-level signal delay settings for one DIMM flavor (unbuffered or
 * registered).  Field meanings are inferred from the names --
 * NOTE(review): verify exact units/semantics against the LMC HRM.
 */
struct ddr_delay_config {
	u32 ddr_board_delay;	/* overall board trace delay */
	u8 lmc_delay_clk;	/* delay applied to clock signals */
	u8 lmc_delay_cmd;	/* delay applied to command/address signals */
	u8 lmc_delay_dq;	/* delay applied to data (DQ) signals */
};
/*
* The parameters below make up the custom_lmc_config data structure.
* This structure is used to customize the way that the LMC DRAM
* Controller is configured for a particular board design.
*
* The HRM describes LMC Read Leveling which supports automatic
* selection of per byte-lane delays. When measuring the read delays
* the LMC configuration software sweeps through a range of settings
* for LMC0_COMP_CTL2[RODT_CTL], the Octeon II on-die-termination
* resistance and LMC0_MODEREG_PARAMS1[RTT_NOM_XX], the DRAM
* on-die-termination resistance. The minimum and maximum parameters
* for rtt_nom_idx and rodt_ctl listed below determine the ranges of
* ODT settings used for the measurements. Note that for rtt_nom an
* index is used into a sorted table rather than the direct csr setting
* in order to optimize the sweep.
*
* .min_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
* .max_rtt_nom_idx: 1=120ohms, 2=60ohms, 3=40ohms, 4=30ohms, 5=20ohms
* .min_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
* .max_rodt_ctl: 1=20ohms, 2=30ohms, 3=40ohms, 4=60ohms, 5=120ohms
*
* The settings below control the Octeon II drive strength for the CK,
* ADD/CMD, and DQ/DQS signals. 1=24ohms, 2=26.67ohms, 3=30ohms,
 * 4=34.3ohms, 5=40ohms, 6=48ohms, 7=60ohms.
*
* .dqx_ctl: Drive strength control for DDR_DQX/DDR_DQS_X_P/N drivers.
* .ck_ctl: Drive strength control for
* DDR_CK_X_P/DDR_DIMMX_CSX_L/DDR_DIMMX_ODT_X drivers.
* .cmd_ctl: Drive strength control for CMD/A/RESET_L/CKEX drivers.
*
* The LMC controller software selects the most optimal CAS Latency
* that complies with the appropriate SPD values and the frequency
* that the DRAMS are being operated. When operating the DRAMs at
* frequencies substantially lower than their rated frequencies it
* might be necessary to limit the minimum CAS Latency the LMC
* controller software is allowed to select in order to make the DRAM
* work reliably.
*
* .min_cas_latency: Minimum allowed CAS Latency
*
* The value used for LMC0_RLEVEL_CTL[OFFSET_EN] determine how the
* read-leveling information that the Octeon II gathers is interpreted
* to determine the per-byte read delays.
*
* .offset_en: Value used for LMC0_RLEVEL_CTL[OFFSET_EN].
* .offset_udimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for UDIMMS.
* .offset_rdimm: Value used for LMC0_RLEVEL_CTL[OFFSET] for RDIMMS.
*
* The LMC configuration software sweeps through a range of ODT
* settings while measuring the per-byte read delays. During those
* measurements the software makes an assessment of the quality of the
* measurements in order to determine which measurements provide the
* most accurate delays. The automatic settings provide the option to
* allow that same assessment to determine the most optimal RODT_CTL
* and/or RTT_NOM settings.
*
* The automatic approach might provide the best means to determine
* the settings used for initial poweron of a new design. However,
* the final settings should be determined by board analysis, testing,
* and experience.
*
* .ddr_rtt_nom_auto: 1 means automatically set RTT_NOM value.
* .ddr_rodt_ctl_auto: 1 means automatically set RODT_CTL value.
*
* .rlevel_compute: Enables software interpretation of per-byte read
* delays using the measurements collected by the
* Octeon II rather than completely relying on the
* Octeon II to determine the delays. 1=software
 *                    computation is recommended since a more complete
* analysis is implemented in software.
*
* .rlevel_comp_offset: Set to 2 unless instructed differently by Cavium.
*
* .rlevel_average_loops: Determines the number of times the read-leveling
* sequence is run for each rank. The results is
* then averaged across the number of loops. The
* default setting is 1.
*
* .ddr2t_udimm:
* .ddr2t_rdimm: Turn on the DDR 2T mode. 2-cycle window for CMD and
* address. This mode helps relieve setup time pressure
* on the address and command bus. Please refer to
* Micron's tech note tn_47_01 titled DDR2-533 Memory
* Design Guide for Two Dimm Unbuffered Systems for
* physical details.
*
* .disable_sequential_delay_check: As result of the flyby topology
* prescribed in the JEDEC specifications the byte delays should
* maintain a consistent increasing or decreasing trend across
 *     the bytes on standard dimms. This setting can be used to disable
* that check for unusual circumstances where the check is not
* useful.
*
* .maximum_adjacent_rlevel_delay_increment: An additional sequential
* delay check for the delays that result from the flyby
* topology. This value specifies the maximum difference between
* the delays of adjacent bytes. A value of 0 disables this
* check.
*
* .fprch2 Front Porch Enable: When set, the turn-off
* time for the default DDR_DQ/DQS drivers is FPRCH2 CKs earlier.
* 00 = 0 CKs
* 01 = 1 CKs
* 10 = 2 CKs
*
* .parity: The parity input signal PAR_IN on each dimm must be
* strapped high or low on the board. This bit is programmed
* into LMC0_DIMM_CTL[PARITY] and it must be set to match the
* board strapping. This signal is typically strapped low.
*
* .mode32b: Enable 32-bit datapath mode. Set to 1 if only 32 DQ pins
* are used. (cn61xx, cn71xx)
*
* .measured_vref: Set to 1 to measure VREF; set to 0 to compute VREF.
*
* .dram_connection: Set to 1 if discrete DRAMs; set to 0 if using DIMMs.
* This changes the algorithms used to compute VREF.
*
* .dll_write_offset: FIXME: Add description
* .dll_read_offset: FIXME: Add description
*/
/*
 * Pre-recorded read-leveling results for a known DIMM part at a given
 * speed, used instead of (or to check) measured values.
 * NOTE(review): rl_rank[4][4] index order (interface vs rank) is not
 * visible here -- confirm against the consumer code.
 */
struct rlevel_table {
	const char part[20];	/* DIMM part-number string to match */
	int speed;		/* data rate this entry applies to -- TODO confirm units */
	u64 rl_rank[4][4];	/* stored LMC(x)_RLEVEL_RANK(x) register values */
};
/*
 * Board-specific LMC DRAM-controller tuning parameters.  The large block
 * comment above documents most fields; the few it does not cover are
 * annotated below.
 */
struct ddr3_custom_config {
	u8 min_rtt_nom_idx;
	u8 max_rtt_nom_idx;
	u8 min_rodt_ctl;
	u8 max_rodt_ctl;
	u8 dqx_ctl;
	u8 ck_ctl;
	u8 cmd_ctl;
	u8 ctl_ctl;		/* drive strength for control signals -- TODO confirm */
	u8 min_cas_latency;
	u8 offset_en;
	u8 offset_udimm;
	u8 offset_rdimm;
	u8 rlevel_compute;
	u8 ddr_rtt_nom_auto;
	u8 ddr_rodt_ctl_auto;
	u8 rlevel_comp_offset_udimm;
	u8 rlevel_comp_offset_rdimm;
	int8_t ptune_offset;	/* signed DLL p-tune offset -- NOTE(review): verify semantics */
	int8_t ntune_offset;	/* signed DLL n-tune offset -- NOTE(review): verify semantics */
	u8 rlevel_average_loops;
	u8 ddr2t_udimm;
	u8 ddr2t_rdimm;
	u8 disable_sequential_delay_check;
	u8 maximum_adjacent_rlevel_delay_increment;
	u8 parity;
	u8 fprch2;
	u8 mode32b;
	u8 measured_vref;
	u8 dram_connection;
	const int8_t *dll_write_offset;	/* presumably per-byte-lane offsets; see process_custom_dll_offsets() */
	const int8_t *dll_read_offset;	/* presumably per-byte-lane offsets; see process_custom_dll_offsets() */
	struct rlevel_table *rl_tbl;	/* optional pre-recorded read-level results, NULL if unused */
};
/* Capacity of dimm_config_table[], including the terminator entry */
#define DDR_CFG_T_MAX_DIMMS 5

/* Complete DDR configuration for one LMC interface */
struct ddr_conf {
	struct dimm_config dimm_config_table[DDR_CFG_T_MAX_DIMMS]; /* DIMM_CONFIG_TERMINATOR-ended list */
	struct dimm_odt_config odt_1rank_config[4];	/* ODT settings for single-rank DIMMs */
	struct dimm_odt_config odt_2rank_config[4];	/* ODT settings for dual-rank DIMMs */
	struct dimm_odt_config odt_4rank_config[4];	/* ODT settings for quad-rank DIMMs */
	struct ddr_delay_config unbuffered;	/* board delays for unbuffered DIMMs */
	struct ddr_delay_config registered;	/* board delays for registered DIMMs */
	struct ddr3_custom_config custom_lmc_config; /* board-specific LMC tuning */
};
/* Divide dividend by divisor, rounding the result to the nearest integer. */
static inline u64 divide_nint(u64 dividend, u64 divisor)
{
	u64 quotient = dividend / divisor;
	u64 remainder = dividend % divisor;

	/* Round up when the remainder is at least half the divisor */
	return quotient + (2 * remainder >= divisor ? 1 : 0);
}
/*
 * Divide and round results up to the next higher integer.
 *
 * Implemented as quotient-plus-carry rather than the common
 * (dividend + divisor - 1) / divisor form, because the latter silently
 * wraps (unsigned overflow) for dividends near U64_MAX and returns a
 * wrong result.
 */
static inline u64 divide_roundup(u64 dividend, u64 divisor)
{
	return (dividend / divisor) + (dividend % divisor != 0);
}
/* DRAM technology generation; enumerator value equals the DDR generation */
enum ddr_type {
	DDR3_DRAM = 3,
	DDR4_DRAM = 4,
};
#define rttnom_none 0 /* Rtt_Nom disabled */
#define rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
#define rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
#define rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
#define rttnom_20ohm 4 /* RZQ/12 = 240/12 = 20 ohms */
#define rttnom_30ohm 5 /* RZQ/8 = 240/8 = 30 ohms */
#define rttnom_rsrv1 6 /* Reserved */
#define rttnom_rsrv2 7 /* Reserved */
#define rttwr_none 0 /* Dynamic ODT off */
#define rttwr_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
#define rttwr_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
#define rttwr_rsrv1 3 /* Reserved */
#define dic_40ohm 0 /* RZQ/6 = 240/6 = 40 ohms */
#define dic_34ohm 1 /* RZQ/7 = 240/7 = 34 ohms */
#define driver_24_ohm 1
#define driver_27_ohm 2
#define driver_30_ohm 3
#define driver_34_ohm 4
#define driver_40_ohm 5
#define driver_48_ohm 6
#define driver_60_ohm 7
#define rodt_ctl_none 0
#define rodt_ctl_20_ohm 1
#define rodt_ctl_30_ohm 2
#define rodt_ctl_40_ohm 3
#define rodt_ctl_60_ohm 4
#define rodt_ctl_120_ohm 5
#define ddr4_rttnom_none 0 /* Rtt_Nom disabled */
#define ddr4_rttnom_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
#define ddr4_rttnom_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
#define ddr4_rttnom_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
#define ddr4_rttnom_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
#define ddr4_rttnom_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
#define ddr4_rttnom_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
#define ddr4_rttnom_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
#define ddr4_rttwr_none 0 /* Dynamic ODT off */
#define ddr4_rttwr_120ohm 1 /* RZQ/2 = 240/2 = 120 ohms */
#define ddr4_rttwr_240ohm 2 /* RZQ/1 = 240/1 = 240 ohms */
#define ddr4_rttwr_hiz 3 /* HiZ */
/* This setting is available for cn78xx pass 2, and cn73xx & cnf75xx pass 1 */
#define ddr4_rttwr_80ohm 4 /* RZQ/3 = 240/3 = 80 ohms */
#define ddr4_dic_34ohm 0 /* RZQ/7 = 240/7 = 34 ohms */
#define ddr4_dic_48ohm 1 /* RZQ/5 = 240/5 = 48 ohms */
#define ddr4_rttpark_none 0 /* Rtt_Park disabled */
#define ddr4_rttpark_60ohm 1 /* RZQ/4 = 240/4 = 60 ohms */
#define ddr4_rttpark_120ohm 2 /* RZQ/2 = 240/2 = 120 ohms */
#define ddr4_rttpark_40ohm 3 /* RZQ/6 = 240/6 = 40 ohms */
#define ddr4_rttpark_240ohm 4 /* RZQ/1 = 240/1 = 240 ohms */
#define ddr4_rttpark_48ohm 5 /* RZQ/5 = 240/5 = 48 ohms */
#define ddr4_rttpark_80ohm 6 /* RZQ/3 = 240/3 = 80 ohms */
#define ddr4_rttpark_34ohm 7 /* RZQ/7 = 240/7 = 34 ohms */
#define ddr4_driver_26_ohm 2
#define ddr4_driver_30_ohm 3
#define ddr4_driver_34_ohm 4
#define ddr4_driver_40_ohm 5
#define ddr4_driver_48_ohm 6
#define ddr4_dqx_driver_24_ohm 1
#define ddr4_dqx_driver_27_ohm 2
#define ddr4_dqx_driver_30_ohm 3
#define ddr4_dqx_driver_34_ohm 4
#define ddr4_dqx_driver_40_ohm 5
#define ddr4_dqx_driver_48_ohm 6
#define ddr4_dqx_driver_60_ohm 7
#define ddr4_rodt_ctl_none 0
#define ddr4_rodt_ctl_40_ohm 1
#define ddr4_rodt_ctl_60_ohm 2
#define ddr4_rodt_ctl_80_ohm 3
#define ddr4_rodt_ctl_120_ohm 4
#define ddr4_rodt_ctl_240_ohm 5
#define ddr4_rodt_ctl_34_ohm 6
#define ddr4_rodt_ctl_48_ohm 7
#define DIMM_CONFIG_TERMINATOR { {0, 0}, {NULL, NULL} }
#define SET_DDR_DLL_CTL3(field, expr) \
do { \
if (octeon_is_cpuid(OCTEON_CN66XX) || \
octeon_is_cpuid(OCTEON_CN63XX)) \
ddr_dll_ctl3.cn63xx.field = (expr); \
else if (octeon_is_cpuid(OCTEON_CN68XX) || \
octeon_is_cpuid(OCTEON_CN61XX) || \
octeon_is_cpuid(OCTEON_CNF71XX)) \
ddr_dll_ctl3.cn61xx.field = (expr); \
else if (octeon_is_cpuid(OCTEON_CN70XX) || \
octeon_is_cpuid(OCTEON_CN78XX)) \
ddr_dll_ctl3.cn70xx.field = (expr); \
else if (octeon_is_cpuid(OCTEON_CN73XX) || \
octeon_is_cpuid(OCTEON_CNF75XX)) \
ddr_dll_ctl3.cn73xx.field = (expr); \
else \
debug("%s(): " #field \
"not set for unknown chip\n", \
__func__); \
} while (0)
#define ENCODE_DLL90_BYTE_SEL(byte_sel) \
(octeon_is_cpuid(OCTEON_CN70XX) ? ((9 + 7 - (byte_sel)) % 9) : \
((byte_sel) + 1))
/**
* If debugging is disabled the ddr_print macro is not compatible
* with this macro.
*/
# define GET_DDR_DLL_CTL3(field) \
((octeon_is_cpuid(OCTEON_CN66XX) || \
octeon_is_cpuid(OCTEON_CN63XX)) ? \
ddr_dll_ctl3.cn63xx.field : \
(octeon_is_cpuid(OCTEON_CN68XX) || \
octeon_is_cpuid(OCTEON_CN61XX) || \
octeon_is_cpuid(OCTEON_CNF71XX)) ? \
ddr_dll_ctl3.cn61xx.field : \
(octeon_is_cpuid(OCTEON_CN70XX) || \
octeon_is_cpuid(OCTEON_CN78XX)) ? \
ddr_dll_ctl3.cn70xx.field : \
(octeon_is_cpuid(OCTEON_CN73XX) || \
octeon_is_cpuid(OCTEON_CNF75XX)) ? \
ddr_dll_ctl3.cn73xx.field : 0)
extern const char *ddr3_dimm_types[];
extern const char *ddr4_dimm_types[];
extern const struct dimm_odt_config disable_odt_config[];
#define RLEVEL_BYTE_BITS 6
#define RLEVEL_BYTE_MSK ((1ULL << 6) - 1)
/* Prototypes */
int get_ddr_type(struct dimm_config *dimm_config, int upper_dimm);
int get_dimm_module_type(struct dimm_config *dimm_config, int upper_dimm,
int ddr_type);
int read_spd(struct dimm_config *dimm_config, int dimm_index, int spd_field);
int read_spd_init(struct dimm_config *dimm_config, int dimm_index);
void report_dimm(struct dimm_config *dimm_config, int upper_dimm,
int dimm, int if_num);
int validate_dimm(struct ddr_priv *priv, struct dimm_config *dimm_config,
int dimm_index);
char *printable_rank_spec(char *buffer, int num_ranks, int dram_width,
int spd_package);
bool ddr_memory_preserved(struct ddr_priv *priv);
int get_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte);
int get_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte);
void upd_wl_rank(union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte,
int delay);
void upd_rl_rank(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank, int byte,
int delay);
int compute_ddr3_rlevel_delay(u8 mstart, u8 width,
union cvmx_lmcx_rlevel_ctl rlevel_ctl);
int encode_row_lsb_ddr3(int row_lsb);
int encode_pbank_lsb_ddr3(int pbank_lsb);
int initialize_ddr_clock(struct ddr_priv *priv, struct ddr_conf *ddr_conf,
u32 cpu_hertz, u32 ddr_hertz, u32 ddr_ref_hertz,
int if_num, u32 if_mask);
void process_custom_dll_offsets(struct ddr_priv *priv, int if_num,
const char *enable_str,
const int8_t *offsets, const char *byte_str,
int mode);
int nonseq_del(struct rlevel_byte_data *rlevel_byte, int start, int end,
int max_adj_delay_inc);
int roundup_ddr3_wlevel_bitmask(int bitmask);
void oct3_ddr3_seq(struct ddr_priv *priv, int rank_mask, int if_num,
int sequence);
void ddr_init_seq(struct ddr_priv *priv, int rank_mask, int if_num);
void rlevel_to_wlevel(union cvmx_lmcx_rlevel_rankx *lmc_rlevel_rank,
union cvmx_lmcx_wlevel_rankx *lmc_wlevel_rank, int byte);
int validate_ddr3_rlevel_bitmask(struct rlevel_bitmask *rlevel_bitmask_p,
int ddr_type);
void change_dll_offset_enable(struct ddr_priv *priv, int if_num, int change);
unsigned short load_dll_offset(struct ddr_priv *priv, int if_num,
int dll_offset_mode,
int byte_offset, int byte);
u64 lmc_ddr3_rl_dbg_read(struct ddr_priv *priv, int if_num, int idx);
u64 lmc_ddr3_wl_dbg_read(struct ddr_priv *priv, int if_num, int idx);
void cvmx_maybe_tune_node(struct ddr_priv *priv, u32 ddr_speed);
void cvmx_dbi_switchover(struct ddr_priv *priv);
int init_octeon3_ddr3_interface(struct ddr_priv *priv,
struct ddr_conf *ddr_conf,
u32 ddr_hertz, u32 cpu_hertz, u32 ddr_ref_hertz,
int if_num, u32 if_mask);
char *lookup_env(struct ddr_priv *priv, const char *format, ...);
char *lookup_env_ull(struct ddr_priv *priv, const char *format, ...);
/* Each board provides a board-specific config table via this function */
struct ddr_conf *octeon_ddr_conf_table_get(int *count, int *def_ddr_freq);
#endif /* __OCTEON_DDR_H_ */

View file

@ -0,0 +1,56 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2003, 2004 Ralf Baechle
*/
#ifndef __ASM_MACH_GENERIC_MANGLE_PORT_H
#define __ASM_MACH_GENERIC_MANGLE_PORT_H
#include <asm/byteorder.h>
#ifdef __BIG_ENDIAN
/*
 * Big-endian build only: decide whether the data of an I/O access needs
 * byte-swizzling.  The per-device answer is looked up by device ID in
 * octeon_should_swizzle_table[], which is defined elsewhere.
 */
static inline bool __should_swizzle_bits(volatile void *a)
{
	extern const bool octeon_should_swizzle_table[];
	/* Address bits 47:40 carry the I/O device ID (DID) */
	u64 did = ((u64)(uintptr_t)a >> 40) & 0xff;

	return octeon_should_swizzle_table[did];
}
# define __swizzle_addr_b(port) (port)
# define __swizzle_addr_w(port) (port)
# define __swizzle_addr_l(port) (port)
# define __swizzle_addr_q(port) (port)
#else /* __LITTLE_ENDIAN */
#define __should_swizzle_bits(a) false
/*
 * Little-endian build only: address swizzling (the XORs applied by the
 * __swizzle_addr_* macros below) is needed only for boot-bus accesses,
 * i.e. device ID 0 in address bits 47:40.
 */
static inline bool __should_swizzle_addr(u64 p)
{
	/* boot bus? */
	return ((p >> 40) & 0xff) == 0;
}
# define __swizzle_addr_b(port) \
(__should_swizzle_addr(port) ? (port) ^ 7 : (port))
# define __swizzle_addr_w(port) \
(__should_swizzle_addr(port) ? (port) ^ 6 : (port))
# define __swizzle_addr_l(port) \
(__should_swizzle_addr(port) ? (port) ^ 4 : (port))
# define __swizzle_addr_q(port) (port)
#endif /* __BIG_ENDIAN */
# define ioswabb(a, x) (x)
# define __mem_ioswabb(a, x) (x)
# define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
# define __mem_ioswabw(a, x) (x)
# define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
# define __mem_ioswabl(a, x) (x)
# define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
# define __mem_ioswabq(a, x) (x)
#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */

View file

@ -10,10 +10,36 @@
#include <asm/mipsregs.h>
#include <asm/addrspace.h>
#include <asm/asm.h>
#include <mach/octeon-model.h>
#define COP0_CVMCTL_REG $9,7 /* Cavium control */
#define COP0_CVMMEMCTL_REG $11,7 /* Cavium memory control */
#define COP0_PROC_ID_REG $15,0
.set noreorder
LEAF(lowlevel_init)

	/* Set LMEMSZ in CVMMEMCTL register: first clear bits 8:0 */
	dmfc0	a0, COP0_CVMMEMCTL_REG
	dins	a0, zero, 0, 9

	mfc0	a4, COP0_PROC_ID_REG
	li	a5, OCTEON_CN63XX_PASS1_0 /* Octeon cn63xx pass1 chip id */
	/* Chips older than cn63xx pass1 skip the WBTHRESH workaround */
	bgt	a5, a4, 2f
	ori	a0, 0x104 /* setup 4 lines of scratch (runs in the delay slot) */
	ori	a6, a5, 8 /* Octeon cn63xx pass2 chip id */
	/* cn63xx pass2 and newer do not need the workaround either */
	bge	a4, a6, 2f
	nop

	li	a6, 4
	ins	a0, a6, 11, 4 /* Set WBTHRESH=4 as per Core-14752 errata */
2:
	dmtc0	a0, COP0_CVMMEMCTL_REG

	/* Set REPUN bit in CVMCTL register */
	dmfc0	a0, COP0_CVMCTL_REG
	ori	a0, 1<<14 /* enable fixup of unaligned mem access */
	dmtc0	a0, COP0_CVMCTL_REG

	jr	ra
	nop
	END(lowlevel_init)
@ -67,3 +93,53 @@ __dummy:
nop
END(mips_mach_early_init)
LEAF(nmi_bootvector)

	/*
	 * From Marvell original bootvector setup
	 */
	mfc0	k0, CP0_STATUS
	/* Enable 64-bit addressing, set ERL (should already be set) */
	ori	k0, 0x84
	mtc0	k0, CP0_STATUS
	/* Core-14345, clear L1 Dcache virtual tags if the core hit an NMI */
	cache	17, 0($0)
	/*
	 * Needed for Linux kernel booting, otherwise it hangs while
	 * zero'ing all of CVMSEG
	 */
	dmfc0	a0, COP0_CVMMEMCTL_REG
	dins	a0, zero, 0, 9
	ori	a0, 0x104 /* setup 4 lines of scratch */
	dmtc0	a0, COP0_CVMMEMCTL_REG
	/*
	 * Load entry point and four arguments from nmi_handler_para[]
	 * (presumably filled in by the code that arms this vector --
	 * not visible here)
	 */
	PTR_LA	t9, nmi_handler_para
	sync
	ld	s0, 0x00(t9)
	ld	a0, 0x08(t9)
	ld	a1, 0x10(t9)
	ld	a2, 0x18(t9)
	ld	a3, 0x20(t9)
	/* Finally jump to entry point (start kernel etc) */
	j	s0
	nop
	END(nmi_bootvector)

/*
 * Add here some space for the NMI parameters (entry point and args):
 * one 64-bit entry point followed by four 64-bit arguments.
 */
	.globl nmi_handler_para
nmi_handler_para:
	.dword	0 // entry-point
	.dword	0 // arg0
	.dword	0 // arg1
	.dword	0 // arg2
	.dword	0 // arg3

View file

@ -3,7 +3,24 @@
* Copyright (C) 2020 Stefan Roese <sr@denx.de>
*/
/*
* Nothing included right now. Code will be added in follow-up
* patches.
*/
#include <common.h>
#include <dm.h>
#include <ram.h>
#include <mach/octeon_ddr.h>
#include "board_ddr.h"
#define EBB7304_DEF_DRAM_FREQ 800
static struct ddr_conf board_ddr_conf[] = {
OCTEON_EBB7304_DDR_CONFIGURATION
};
struct ddr_conf *octeon_ddr_conf_table_get(int *count, int *def_ddr_freq)
{
*count = ARRAY_SIZE(board_ddr_conf);
*def_ddr_freq = EBB7304_DEF_DRAM_FREQ;
return board_ddr_conf;
}

View file

@ -0,0 +1,447 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2020 Marvell International Ltd.
*
* https://spdx.org/licenses
*/
#ifndef __BOARD_DDR_H__
#define __BOARD_DDR_H__
#define OCTEON_EBB7304_DRAM_SOCKET_CONFIGURATION0 \
{ {0x1050, 0x0}, {NULL, NULL} }, { {0x1051, 0x0}, {NULL, NULL} }
#define OCTEON_EBB7304_DRAM_SOCKET_CONFIGURATION1 \
{ {0x1052, 0x0}, {NULL, NULL} }, { {0x1053, 0x0}, {NULL, NULL} }
#define OCTEON_EBB7304_BOARD_EEPROM_TWSI_ADDR 0x56
/*
* Local copy of these parameters to allow for customization for this
* board design. The generic version resides in lib_octeon_shared.h.
*/
/* LMC0_MODEREG_PARAMS1 */
#define OCTEON_EBB7304_MODEREG_PARAMS1_1RANK_1SLOT \
{ \
.cn78xx = { \
.pasr_00 = 0, \
.asr_00 = 0, \
.srt_00 = 0, \
.rtt_wr_00 = ddr4_rttwr_80ohm & 3, \
.rtt_wr_00_ext = (ddr4_rttwr_80ohm >> 2) & 1, \
.dic_00 = ddr4_dic_34ohm, \
.rtt_nom_00 = 0, \
.pasr_01 = 0, \
.asr_01 = 0, \
.srt_01 = 0, \
.rtt_wr_01 = 0, \
.dic_01 = ddr4_dic_34ohm, \
.rtt_nom_01 = 0, \
.pasr_10 = 0, \
.asr_10 = 0, \
.srt_10 = 0, \
.rtt_wr_10 = 0, \
.dic_10 = ddr4_dic_34ohm, \
.rtt_nom_10 = 0, \
.pasr_11 = 0, \
.asr_11 = 0, \
.srt_11 = 0, \
.rtt_wr_11 = 0, \
.dic_11 = ddr4_dic_34ohm, \
.rtt_nom_11 = 0, \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS1_1RANK_2SLOT \
{ \
.cn78xx = { \
.pasr_00 = 0, \
.asr_00 = 0, \
.srt_00 = 0, \
.rtt_wr_00 = ddr4_rttwr_80ohm & 3, \
.rtt_wr_00_ext = (ddr4_rttwr_80ohm >> 2) & 1, \
.dic_00 = ddr4_dic_34ohm, \
.rtt_nom_00 = 0, \
.pasr_01 = 0, \
.asr_01 = 0, \
.srt_01 = 0, \
.rtt_wr_01 = 0, \
.dic_01 = ddr4_dic_34ohm, \
.rtt_nom_01 = 0, \
.pasr_10 = 0, \
.asr_10 = 0, \
.srt_10 = 0, \
.rtt_wr_10 = ddr4_rttwr_80ohm & 3, \
.rtt_wr_10_ext = (ddr4_rttwr_80ohm >> 2) & 1, \
.dic_10 = ddr4_dic_34ohm, \
.rtt_nom_10 = 0, \
.pasr_11 = 0, \
.asr_11 = 0, \
.srt_11 = 0, \
.rtt_wr_11 = 0, \
.dic_11 = ddr4_dic_34ohm, \
.rtt_nom_11 = 0 \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS1_2RANK_1SLOT \
{ \
.cn78xx = { \
.pasr_00 = 0, \
.asr_00 = 0, \
.srt_00 = 0, \
.rtt_wr_00 = ddr4_rttwr_240ohm, \
.dic_00 = ddr4_dic_34ohm, \
.rtt_nom_00 = 0, \
.pasr_01 = 0, \
.asr_01 = 0, \
.srt_01 = 0, \
.rtt_wr_01 = ddr4_rttwr_240ohm, \
.dic_01 = ddr4_dic_34ohm, \
.rtt_nom_01 = 0, \
.pasr_10 = 0, \
.asr_10 = 0, \
.srt_10 = 0, \
.dic_10 = ddr4_dic_34ohm, \
.rtt_nom_10 = 0, \
.pasr_11 = 0, \
.asr_11 = 0, \
.srt_11 = 0, \
.rtt_wr_11 = 0, \
.dic_11 = ddr4_dic_34ohm, \
.rtt_nom_11 = 0, \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS1_2RANK_2SLOT \
{ \
.cn78xx = { \
.pasr_00 = 0, \
.asr_00 = 0, \
.srt_00 = 0, \
.rtt_wr_00 = ddr4_rttwr_240ohm, \
.dic_00 = ddr4_dic_34ohm, \
.rtt_nom_00 = ddr4_rttnom_120ohm, \
.pasr_01 = 0, \
.asr_01 = 0, \
.srt_01 = 0, \
.rtt_wr_01 = ddr4_rttwr_240ohm, \
.dic_01 = ddr4_dic_34ohm, \
.rtt_nom_01 = ddr4_rttnom_120ohm, \
.pasr_10 = 0, \
.asr_10 = 0, \
.srt_10 = 0, \
.rtt_wr_10 = ddr4_rttwr_240ohm, \
.dic_10 = ddr4_dic_34ohm, \
.rtt_nom_10 = ddr4_rttnom_120ohm, \
.pasr_11 = 0, \
.asr_11 = 0, \
.srt_11 = 0, \
.rtt_wr_11 = ddr4_rttwr_240ohm, \
.dic_11 = ddr4_dic_34ohm, \
.rtt_nom_11 = ddr4_rttnom_120ohm, \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS1_4RANK_1SLOT \
{ \
.cn78xx = { \
.pasr_00 = 0, \
.asr_00 = 0, \
.srt_00 = 0, \
.rtt_wr_00 = rttwr_60ohm, \
.dic_00 = dic_34ohm, \
.rtt_nom_00 = rttnom_20ohm, \
.pasr_01 = 0, \
.asr_01 = 0, \
.srt_01 = 0, \
.rtt_wr_01 = rttwr_60ohm, \
.dic_01 = dic_34ohm, \
.rtt_nom_01 = rttnom_none, \
.pasr_10 = 0, \
.asr_10 = 0, \
.srt_10 = 0, \
.rtt_wr_10 = rttwr_60ohm, \
.dic_10 = dic_34ohm, \
.rtt_nom_10 = rttnom_20ohm, \
.pasr_11 = 0, \
.asr_11 = 0, \
.srt_11 = 0, \
.rtt_wr_11 = rttwr_60ohm, \
.dic_11 = dic_34ohm, \
.rtt_nom_11 = rttnom_none, \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS2_1RANK_1SLOT \
{ \
.cn78xx = { \
.rtt_park_00 = ddr4_rttpark_60ohm, \
.vref_value_00 = 0x22, \
.vref_range_00 = 0, \
.rtt_park_01 = 0, \
.vref_value_01 = 0, \
.vref_range_01 = 0, \
.rtt_park_10 = 0, \
.vref_value_10 = 0, \
.vref_range_10 = 0, \
.rtt_park_11 = 0, \
.vref_value_11 = 0, \
.vref_range_11 = 0 \
} \
}
/* FIX */
#define OCTEON_EBB7304_MODEREG_PARAMS2_1RANK_2SLOT \
{ \
.cn78xx = { \
.rtt_park_00 = ddr4_rttpark_48ohm, \
.vref_value_00 = 0x1f, \
.vref_range_00 = 0, \
.rtt_park_01 = 0, \
.vref_value_01 = 0, \
.vref_range_01 = 0, \
.rtt_park_10 = ddr4_rttpark_48ohm, \
.vref_value_10 = 0x1f, \
.vref_range_10 = 0, \
.rtt_park_11 = 0, \
.vref_value_11 = 0, \
.vref_range_11 = 0 \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS2_2RANK_1SLOT \
{ \
.cn78xx = { \
.rtt_park_00 = ddr4_rttpark_120ohm, \
.vref_value_00 = 0x19, \
.vref_range_00 = 0, \
.rtt_park_01 = ddr4_rttpark_120ohm, \
.vref_value_01 = 0x19, \
.vref_range_01 = 0, \
.rtt_park_10 = 0, \
.vref_value_10 = 0, \
.vref_range_10 = 0, \
.rtt_park_11 = 0, \
.vref_value_11 = 0, \
.vref_range_11 = 0 \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS2_2RANK_2SLOT \
{ \
.cn78xx = { \
.rtt_park_00 = ddr4_rttpark_60ohm, \
.vref_value_00 = 0x19, \
.vref_range_00 = 0, \
.rtt_park_01 = ddr4_rttpark_60ohm, \
.vref_value_01 = 0x19, \
.vref_range_01 = 0, \
.rtt_park_10 = ddr4_rttpark_60ohm, \
.vref_value_10 = 0x19, \
.vref_range_10 = 0, \
.rtt_park_11 = ddr4_rttpark_60ohm, \
.vref_value_11 = 0x19, \
.vref_range_11 = 0 \
} \
}
#define OCTEON_EBB7304_MODEREG_PARAMS2_4RANK_1SLOT \
{ \
.cn78xx = { \
.rtt_park_00 = ddr4_rttpark_80ohm, \
.vref_value_00 = 0x1f, \
.vref_range_00 = 0, \
.rtt_park_01 = ddr4_rttpark_80ohm, \
.vref_value_01 = 0x1f, \
.vref_range_01 = 0, \
.rtt_park_10 = 0, \
.vref_value_10 = 0, \
.vref_range_10 = 0, \
.rtt_park_11 = 0, \
.vref_value_11 = 0, \
.vref_range_11 = 0 \
} \
}
#define OCTEON_EBB7304_CN78XX_DRAM_ODT_1RANK_CONFIGURATION \
/* 1 */ \
{ \
ddr4_dqx_driver_34_ohm, \
0x00000000ULL, \
OCTEON_EBB7304_MODEREG_PARAMS1_1RANK_1SLOT, \
OCTEON_EBB7304_MODEREG_PARAMS2_1RANK_1SLOT, \
ddr4_rodt_ctl_48_ohm, \
0x00000000ULL, \
0 \
}, \
/* 2 */ \
{ \
ddr4_dqx_driver_34_ohm, \
0x00000000ULL, \
OCTEON_EBB7304_MODEREG_PARAMS1_1RANK_2SLOT, \
OCTEON_EBB7304_MODEREG_PARAMS2_1RANK_2SLOT, \
ddr4_rodt_ctl_80_ohm, \
0x00000000ULL, \
0 \
}
#define OCTEON_EBB7304_CN78XX_DRAM_ODT_2RANK_CONFIGURATION \
/* 1 */ \
{ \
ddr4_dqx_driver_34_ohm, \
0x00000000ULL, \
OCTEON_EBB7304_MODEREG_PARAMS1_2RANK_1SLOT, \
OCTEON_EBB7304_MODEREG_PARAMS2_2RANK_1SLOT, \
ddr4_rodt_ctl_80_ohm, \
0x00000000ULL, \
0 \
}, \
/* 2 */ \
{ \
ddr4_dqx_driver_34_ohm, \
0x0c0c0303ULL, \
OCTEON_EBB7304_MODEREG_PARAMS1_2RANK_2SLOT, \
OCTEON_EBB7304_MODEREG_PARAMS2_2RANK_2SLOT, \
ddr4_rodt_ctl_48_ohm, \
0x04080102ULL, \
0 \
}
#define OCTEON_EBB7304_CN78XX_DRAM_ODT_4RANK_CONFIGURATION \
/* 1 */ \
{ \
ddr4_dqx_driver_34_ohm, \
0x01030203ULL, \
OCTEON_EBB7304_MODEREG_PARAMS1_4RANK_1SLOT, \
OCTEON_EBB7304_MODEREG_PARAMS2_4RANK_1SLOT, \
ddr4_rodt_ctl_48_ohm, \
0x01010202ULL, \
0 \
}
/*
* Construct a static initializer for the ddr_configuration_t variable that
* holds (almost) all of the information required for DDR initialization.
*/
/*
* The parameters below make up the custom_lmc_config data structure.
* This structure is used to customize the way that the LMC DRAM
* Controller is configured for a particular board design.
*
* Refer to the file lib_octeon_board_table_entry.h for a description
* of the custom board settings. It is usually kept in the following
* location... arch/mips/include/asm/arch-octeon/
*
*/
#define OCTEON_EBB7304_DDR_CONFIGURATION \
/* Interface 0 */ \
{ \
.custom_lmc_config = { \
.min_rtt_nom_idx = 1, \
.max_rtt_nom_idx = 7, \
.min_rodt_ctl = 1, \
.max_rodt_ctl = 7, \
.ck_ctl = ddr4_driver_34_ohm, \
.cmd_ctl = ddr4_driver_34_ohm, \
.ctl_ctl = ddr4_driver_34_ohm, \
.min_cas_latency = 0, \
.offset_en = 1, \
.offset_udimm = 2, \
.offset_rdimm = 2, \
.ddr_rtt_nom_auto = 0, \
.ddr_rodt_ctl_auto = 0, \
.rlevel_comp_offset_udimm = 0, \
.rlevel_comp_offset_rdimm = 0, \
.rlevel_compute = 0, \
.ddr2t_udimm = 1, \
.ddr2t_rdimm = 1, \
.maximum_adjacent_rlevel_delay_increment = 2, \
.fprch2 = 2, \
.dll_write_offset = NULL, \
.dll_read_offset = NULL, \
.parity = 0 \
}, \
.dimm_config_table = { \
OCTEON_EBB7304_DRAM_SOCKET_CONFIGURATION0, \
DIMM_CONFIG_TERMINATOR \
}, \
.unbuffered = { \
.ddr_board_delay = 0, \
.lmc_delay_clk = 0, \
.lmc_delay_cmd = 0, \
.lmc_delay_dq = 0 \
}, \
.registered = { \
.ddr_board_delay = 0, \
.lmc_delay_clk = 0, \
.lmc_delay_cmd = 0, \
.lmc_delay_dq = 0 \
}, \
.odt_1rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_1RANK_CONFIGURATION \
}, \
.odt_2rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_2RANK_CONFIGURATION \
}, \
.odt_4rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_4RANK_CONFIGURATION \
} \
}, \
/* Interface 1 */ \
{ \
.custom_lmc_config = { \
.min_rtt_nom_idx = 1, \
.max_rtt_nom_idx = 7, \
.min_rodt_ctl = 1, \
.max_rodt_ctl = 7, \
.ck_ctl = ddr4_driver_34_ohm, \
.cmd_ctl = ddr4_driver_34_ohm, \
.ctl_ctl = ddr4_driver_34_ohm, \
.min_cas_latency = 0, \
.offset_en = 1, \
.offset_udimm = 2, \
.offset_rdimm = 2, \
.ddr_rtt_nom_auto = 0, \
.ddr_rodt_ctl_auto = 0, \
.rlevel_comp_offset_udimm = 0, \
.rlevel_comp_offset_rdimm = 0, \
.rlevel_compute = 0, \
.ddr2t_udimm = 1, \
.ddr2t_rdimm = 1, \
.maximum_adjacent_rlevel_delay_increment = 2, \
.fprch2 = 2, \
.dll_write_offset = NULL, \
.dll_read_offset = NULL, \
.parity = 0 \
}, \
.dimm_config_table = { \
OCTEON_EBB7304_DRAM_SOCKET_CONFIGURATION1, \
DIMM_CONFIG_TERMINATOR \
}, \
.unbuffered = { \
.ddr_board_delay = 0, \
.lmc_delay_clk = 0, \
.lmc_delay_cmd = 0, \
.lmc_delay_dq = 0 \
}, \
.registered = { \
.ddr_board_delay = 0, \
.lmc_delay_clk = 0, \
.lmc_delay_cmd = 0, \
.lmc_delay_dq = 0 \
}, \
.odt_1rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_1RANK_CONFIGURATION \
}, \
.odt_2rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_2RANK_CONFIGURATION \
}, \
.odt_4rank_config = { \
OCTEON_EBB7304_CN78XX_DRAM_ODT_4RANK_CONFIGURATION \
} \
},
#endif /* __BOARD_DDR_H__ */

View file

@ -15,12 +15,19 @@ CONFIG_HUSH_PARSER=y
CONFIG_CMD_GPIO=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MTD=y
CONFIG_CMD_PART=y
CONFIG_CMD_PCI=y
CONFIG_CMD_USB=y
CONFIG_CMD_DHCP=y
CONFIG_CMD_PING=y
CONFIG_CMD_TIME=y
CONFIG_CMD_EXT4=y
CONFIG_CMD_FAT=y
CONFIG_CMD_FS_GENERIC=y
# CONFIG_DOS_PARTITION is not set
CONFIG_ENV_IS_IN_FLASH=y
CONFIG_ENV_ADDR=0x1FBFE000
CONFIG_BLK=y
CONFIG_CLK=y
# CONFIG_INPUT is not set
CONFIG_MTD=y
@ -38,6 +45,9 @@ CONFIG_SPI_FLASH_STMICRO=y
# CONFIG_NETDEVICES is not set
CONFIG_PCI=y
CONFIG_DM_PCI=y
CONFIG_RAM=y
CONFIG_RAM_OCTEON=y
CONFIG_RAM_OCTEON_DDR4=y
CONFIG_DEBUG_UART_SHIFT=3
CONFIG_DEBUG_UART_ANNOUNCE=y
CONFIG_SYS_NS16550=y
@ -45,4 +55,14 @@ CONFIG_SPI=y
CONFIG_OCTEON_SPI=y
CONFIG_SYSRESET=y
CONFIG_SYSRESET_OCTEON=y
CONFIG_USB=y
CONFIG_DM_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_XHCI_DWC3=y
CONFIG_USB_HOST_ETHER=y
CONFIG_USB_ETHER_ASIX=y
CONFIG_USB_ETHER_ASIX88179=y
CONFIG_USB_ETHER_MCS7830=y
CONFIG_USB_ETHER_RTL8152=y
CONFIG_USB_ETHER_SMSC95XX=y
CONFIG_HEXDUMP=y

View file

@ -76,3 +76,4 @@ config IMXRT_SDRAM
source "drivers/ram/rockchip/Kconfig"
source "drivers/ram/sifive/Kconfig"
source "drivers/ram/stm32mp1/Kconfig"
source "drivers/ram/octeon/Kconfig"

View file

@ -19,3 +19,5 @@ obj-$(CONFIG_K3_J721E_DDRSS) += k3-j721e/
obj-$(CONFIG_IMXRT_SDRAM) += imxrt_sdram.o
obj-$(CONFIG_RAM_SIFIVE) += sifive/
obj-$(CONFIG_ARCH_OCTEON) += octeon/

View file

@ -0,0 +1,17 @@
config RAM_OCTEON
bool "Ram drivers for Octeon SoCs"
depends on RAM && ARCH_OCTEON
default n
help
This enables support for RAM drivers for Octeon SoCs.
if RAM_OCTEON
config RAM_OCTEON_DDR4
bool "Octeon III DDR4 RAM support"
default n
help
	  This enables support for DDR4 RAM on Octeon III. This does
not include support for Octeon CN70XX.
endif # RAM_OCTEON

View file

@ -0,0 +1,8 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2020 Marvell, Inc.
#
obj-$(CONFIG_RAM_OCTEON_DDR4) += octeon_ddr.o
obj-$(CONFIG_RAM_OCTEON_DDR4) += octeon3_lmc.o
obj-y += dimm_spd_eeprom.o

View file

@ -0,0 +1,407 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) 2020 Marvell International Ltd.
*/
#include <i2c.h>
#include <ram.h>
#include <mach/octeon_ddr.h>
#define DEVICE_TYPE DDR4_SPD_KEY_BYTE_DEVICE_TYPE // same for DDR3 and DDR4
#define MODULE_TYPE DDR4_SPD_KEY_BYTE_MODULE_TYPE // same for DDR3 and DDR4
#define BUS_WIDTH(t) (((t) == DDR4_DRAM) ? \
DDR4_SPD_MODULE_MEMORY_BUS_WIDTH : \
DDR3_SPD_MEMORY_BUS_WIDTH)
/*
* Allow legacy code to encode bus number in the upper bits of the address
* These are only supported in read_spd()
*/
#define OCTEON_TWSI_BUS_IN_ADDR_BIT 12
#define OCTEON_TWSI_BUS_IN_ADDR_MASK (15 << OCTEON_TWSI_BUS_IN_ADDR_BIT)
#define OCTEON_TWSI_GET_BUS(addr) \
(((addr) >> OCTEON_TWSI_BUS_IN_ADDR_BIT) & 0xf)
/*
 * Printable DDR3 module-type names, indexed by the SPD module-type
 * nibble (binary value in the comments).
 */
const char *ddr3_dimm_types[] = {
	/* 0000 */ "Undefined",
	/* 0001 */ "RDIMM",
	/* 0010 */ "UDIMM",
	/* 0011 */ "SO-DIMM",
	/* 0100 */ "Micro-DIMM",
	/* 0101 */ "Mini-RDIMM",
	/* 0110 */ "Mini-UDIMM",
	/* 0111 */ "Mini-CDIMM",
	/* 1000 */ "72b-SO-UDIMM",
	/* 1001 */ "72b-SO-RDIMM",
	/*
	 * A missing comma after the next entry previously concatenated it
	 * with "LRDIMM" into one literal ("72b-SO-CDIMMLRDIMM"), shifting
	 * every later entry down by one and leaving the array with only
	 * 15 elements.
	 */
	/* 1010 */ "72b-SO-CDIMM",
	/* 1011 */ "LRDIMM",
	/* 1100 */ "16b-SO-DIMM",
	/* 1101 */ "32b-SO-DIMM",
	/* 1110 */ "Reserved",
	/* 1111 */ "Reserved"
};
/*
 * Printable DDR4 module-type names, indexed by the SPD module-type
 * nibble.
 */
const char *ddr4_dimm_types[] = {
	[0x0] = "Extended",
	[0x1] = "RDIMM",
	[0x2] = "UDIMM",
	[0x3] = "SO-DIMM",
	[0x4] = "LRDIMM",
	[0x5] = "Mini-RDIMM",
	[0x6] = "Mini-UDIMM",
	[0x7] = "Reserved",
	[0x8] = "72b-SO-RDIMM",
	[0x9] = "72b-SO-UDIMM",
	[0xa] = "Reserved",
	[0xb] = "Reserved",
	[0xc] = "16b-SO-DIMM",
	[0xd] = "32b-SO-DIMM",
	[0xe] = "Reserved",
	[0xf] = "Reserved",
};
/*
 * CRC-16 over count bytes of ptr, per the DDR3 SPD specification
 * (MSB-first, polynomial 0x1021, initial value 0).
 */
static u16 ddr3_crc16(u8 *ptr, int count)
{
	int crc = 0;

	for (int i = 0; i < count; i++) {
		crc ^= (int)ptr[i] << 8;
		for (int bit = 0; bit < 8; bit++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021 : crc << 1;
	}

	return crc & 0xFFFF;
}
/*
 * Verify the DDR4 SPD CRC stored in bytes 126/127 (little-endian).
 *
 * SPD byte 0 bit 7 selects whether the CRC covers bytes 0-116 or
 * 0-125. Returns 1 when the stored CRC matches the computed one,
 * 0 otherwise; a mismatch is reported unless @silent is set.
 */
static int validate_spd_checksum_ddr4(struct dimm_config *dimm_config,
				      int dimm_index, int twsi_addr, int silent)
{
	u8 *spd_data = dimm_config->spd_data[dimm_index];
	int crc_bytes;
	u16 crc_comp;

	/* Byte 0 bit 7 tells how many bytes the checksum covers */
	crc_bytes = (spd_data[0] & 0x80) ? 117 : 126;
	crc_comp = ddr3_crc16(spd_data, crc_bytes);

	if (spd_data[126] == (crc_comp & 0xff) &&
	    spd_data[127] == (crc_comp >> 8))
		return 1;

	if (!silent) {
		printf("DDR4 SPD CRC error, spd addr: 0x%x, calculated crc: 0x%04x, read crc: 0x%02x%02x\n",
		       twsi_addr, crc_comp, spd_data[127], spd_data[126]);
	}

	return 0;
}
/*
 * Dispatch SPD checksum validation based on the memory-type byte @rv.
 *
 * Only DDR4 (0x0C) is actually validated; DDR2/DDR3 encodings report
 * that their support is compiled out. Returns 1 for a valid DDR4
 * checksum, 0 in every other case.
 */
static int validate_spd_checksum(struct ddr_priv *priv,
				 struct dimm_config *dimm_config,
				 int dimm_index, int twsi_addr,
				 int silent, u8 rv)
{
	if (ddr_verbose(priv))
		debug("Validating DIMM at address 0x%x\n", twsi_addr);

	switch (rv) {
	case 0x8:
	case 0x9:
	case 0xA:
		printf("%s: Error: DDR2 support disabled\n", __func__);
		break;
	case 0xB:
		printf("%s: Error: DDR3 support disabled\n", __func__);
		break;
	case 0xC:
		return validate_spd_checksum_ddr4(dimm_config, dimm_index,
						  twsi_addr, silent);
	default:
		break;
	}

	if (!silent) {
		printf("Unrecognized DIMM type: 0x%x at spd address: 0x%x\n",
		       rv, twsi_addr);
	}

	return 0;
}
/*
 * Read one DIMM SPD byte, either from a compiled-in array
 * (spd_ptrs) or from the cached EEPROM contents (spd_data) filled in
 * by read_spd_init().
 *
 * Returns the byte value (0-255), or -1 when no SPD source is
 * configured for this DIMM or @spd_field is out of range.
 */
int read_spd(struct dimm_config *dimm_config, int dimm_index, int spd_field)
{
	dimm_index = !!dimm_index;

	if (spd_field >= SPD_EEPROM_SIZE) {
		printf("ERROR: Trying to read unsupported SPD EEPROM value %d\n",
		       spd_field);
		/* Fix: bail out instead of indexing past the SPD buffer */
		return -1;
	}

	/*
	 * If pointer to data is provided, use it, otherwise read from SPD
	 * over twsi
	 */
	if (dimm_config->spd_ptrs[dimm_index])
		return dimm_config->spd_ptrs[dimm_index][spd_field];
	else if (dimm_config->spd_addrs[dimm_index])
		return dimm_config->spd_data[dimm_index][spd_field];

	return -1;
}
int read_spd_init(struct dimm_config *dimm_config, int dimm_index)
{
u8 busno = OCTEON_TWSI_GET_BUS(dimm_config->spd_addrs[dimm_index]);
u8 cmdno = dimm_config->spd_addrs[dimm_index];
struct udevice *dev_i2c;
u8 *spd_data;
int ret;
if (dimm_config->spd_cached[dimm_index])
return 0;
dimm_config->spd_cached[dimm_index] = 1;
spd_data = dimm_config->spd_data[dimm_index];
ret = i2c_get_chip_for_busnum(busno, cmdno, 2, &dev_i2c);
if (ret) {
debug("Cannot find SPL EEPROM: %d\n", ret);
return -ENODEV;
}
ret = dm_i2c_read(dev_i2c, 0, spd_data, SPD_EEPROM_SIZE);
return ret;
}
/*
 * Probe the density/addressing SPD bytes of a DIMM to decide whether
 * its SPD contents look readable and usable.
 * Returns 1 when usable, 0 on a read error or blank (all-0xff) SPD.
 * Shared by the DDR3 and DDR4 arms of validate_dimm().
 */
static int check_spd_readable(struct ddr_priv *priv,
			      struct dimm_config *dimm_config,
			      int dimm_index, int density_field,
			      int addr_field)
{
	int val0 = read_spd(dimm_config, dimm_index, density_field);
	int val1 = read_spd(dimm_config, dimm_index, addr_field);

	if (val0 < 0 && val1 < 0) {
		if (ddr_verbose(priv))
			printf("Error reading SPD for DIMM %d\n",
			       dimm_index);
		return 0;	/* Failed to read dimm */
	}

	if (val0 == 0xff && val1 == 0xff) {
		if (ddr_verbose(priv))
			printf("Blank or unreadable SPD for DIMM %d\n",
			       dimm_index);
		/* Blank SPD or otherwise unreadable device */
		return 0;
	}

	return 1;
}

/*
 * Validate one DIMM slot. Compiled-in SPD data (spd_ptrs) is trusted
 * as-is; a real EEPROM is read and sanity-checked by memory type.
 * Bad checksums are reported but not treated as fatal.
 *
 * Returns 1 when the DIMM can be used, 0 when it is absent, blank,
 * unreadable or of an unsupported type.
 */
int validate_dimm(struct ddr_priv *priv, struct dimm_config *dimm_config,
		  int dimm_index)
{
	int spd_addr;
	int dimm_type;

	dimm_index = !!dimm_index;	/* Normalize to 0/1 */
	spd_addr = dimm_config->spd_addrs[dimm_index];

	debug("Validating dimm %d, spd addr: 0x%02x spd ptr: %p\n",
	      dimm_index,
	      dimm_config->spd_addrs[dimm_index],
	      dimm_config->spd_ptrs[dimm_index]);

	/* Only validate 'real' dimms, assume compiled in values are OK */
	if (dimm_config->spd_ptrs[dimm_index])
		return 1;

	if (read_spd_init(dimm_config, dimm_index))
		return 0;

	dimm_type = read_spd(dimm_config, dimm_index,
			     DDR2_SPD_MEM_TYPE) & 0xff;
	switch (dimm_type) {
	case 0x0B:		/* DDR3 */
		if (ddr_verbose(priv))
			printf("Validating DDR3 DIMM %d\n", dimm_index);

		if (!check_spd_readable(priv, dimm_config, dimm_index,
					DDR3_SPD_DENSITY_BANKS,
					DDR3_SPD_ADDRESSING_ROW_COL_BITS))
			return 0;

		/* Don't treat bad checksums as fatal */
		validate_spd_checksum(priv, dimm_config, dimm_index,
				      spd_addr, 0, dimm_type);
		break;

	case 0x0C:		/* DDR4 */
		if (ddr_verbose(priv))
			printf("Validating DDR4 DIMM %d\n", dimm_index);

		if (!check_spd_readable(priv, dimm_config, dimm_index,
					DDR4_SPD_DENSITY_BANKS,
					DDR4_SPD_ADDRESSING_ROW_COL_BITS))
			return 0;

		/* Don't treat bad checksums as fatal */
		validate_spd_checksum(priv, dimm_config, dimm_index,
				      spd_addr, 0, dimm_type);
		break;

	case 0x00:
		/* Terminator detected. Fail silently. */
		return 0;

	default:
		debug("Unknown DIMM type 0x%x for DIMM %d @ 0x%x\n",
		      dimm_type, dimm_index,
		      dimm_config->spd_addrs[dimm_index]);
		return 0;	/* Failed to read dimm */
	}

	return 1;
}
/*
 * Classify a DIMM from its SPD memory-type byte.
 * Only two outcomes are possible: DDR4_DRAM for type 0x0C, otherwise
 * DDR3_DRAM.
 */
int get_ddr_type(struct dimm_config *dimm_config, int upper_dimm)
{
	int spd_ddr_type = read_spd(dimm_config, upper_dimm, DEVICE_TYPE);

	debug("%s:%d spd_ddr_type=0x%02x\n", __func__, __LINE__,
	      spd_ddr_type);

	/* we return only DDR4 or DDR3 */
	if (spd_ddr_type == 0x0C)
		return DDR4_DRAM;

	return DDR3_DRAM;
}
/*
 * Return 1 when the DIMM's SPD bus-width byte flags an ECC extension
 * (bit 3 set), 0 otherwise.
 */
static int get_dimm_ecc(struct dimm_config *dimm_config, int upper_dimm,
			int ddr_type)
{
	int bus_width = read_spd(dimm_config, upper_dimm,
				 BUS_WIDTH(ddr_type));

	return (bus_width & 8) != 0;
}
/*
 * Return the module-type encoding from the SPD key byte (low nibble),
 * usable as an index into ddr3_dimm_types[]/ddr4_dimm_types[].
 */
int get_dimm_module_type(struct dimm_config *dimm_config, int upper_dimm,
			 int ddr_type)
{
	int key_byte = read_spd(dimm_config, upper_dimm, MODULE_TYPE);

	return key_byte & 0x0f;
}
/*
 * Format a human-readable rank/width spec ("2Rx8", "2S4Rx4",
 * "2DRx8", ...) into @buffer and return it.
 *
 * @spd_package bit 7 marks a non-monolithic package; bits 6:4 encode
 * die count minus one, bits 1:0 distinguish 3DS (2) from multi-load
 * stacks. Caller provides a buffer of at least 8 bytes.
 */
char *printable_rank_spec(char *buffer, int num_ranks, int dram_width,
			  int spd_package)
{
	int die_count = ((spd_package >> 4) & 7) + 1;

	if (!(spd_package & 0x80)) {
		/* Monolithic package */
		sprintf(buffer, "%dRx%d", num_ranks, dram_width);
	} else if ((spd_package & 3) == 2) {
		/* 3DS stack: include the die count */
		sprintf(buffer, "%dS%dRx%d", num_ranks, die_count,
			dram_width);
	} else {
		/* Multi-load stack: D = dual-die, Q = more */
		sprintf(buffer, "%d%cRx%d", num_ranks,
			(die_count == 2) ? 'D' : 'Q', dram_width);
	}

	return buffer;
}
/*
 * Print the one-line summary shared by the DDR3/DDR4 reporters:
 * "LMC<n>.DIMM<m>: DDR<t> <module type> <rank spec> <ECC|non-ECC>, <V>"
 */
static void report_common_dimm(struct dimm_config *dimm_config, int upper_dimm,
			       int dimm, const char **dimm_types, int ddr_type,
			       char *volt_str, int if_num,
			       int num_ranks, int dram_width, int spd_package)
{
	unsigned int module_type;
	char rank_spec[8];
	int ecc;

	module_type = get_dimm_module_type(dimm_config, upper_dimm, ddr_type);
	ecc = get_dimm_ecc(dimm_config, upper_dimm, ddr_type);
	printable_rank_spec(rank_spec, num_ranks, dram_width, spd_package);

	printf("LMC%d.DIMM%d: DDR%d %s %s %s, %s\n",
	       if_num, dimm, ddr_type, dimm_types[module_type],
	       rank_spec, ecc ? "ECC" : "non-ECC", volt_str);
}
/*
 * Decode organization and nominal-voltage SPD bytes of a DDR3 DIMM and
 * print its summary line via report_common_dimm().
 */
static void report_ddr3_dimm(struct dimm_config *dimm_config, int upper_dimm,
			     int dimm, int if_num)
{
	int spd_voltage;
	/*
	 * Fix: volt_str was read uninitialized when none of the voltage
	 * bits below matched (e.g. SPD byte 8); default to the DDR3
	 * nominal 1.5V.
	 */
	char *volt_str = "1.5V";
	int spd_org = read_spd(dimm_config, upper_dimm,
			       DDR3_SPD_MODULE_ORGANIZATION);
	int num_ranks = 1 + ((spd_org >> 3) & 0x7);
	int dram_width = 4 << ((spd_org >> 0) & 0x7);

	spd_voltage = read_spd(dimm_config, upper_dimm,
			       DDR3_SPD_NOMINAL_VOLTAGE);
	/* Overlapping checks: the lowest supported voltage wins */
	if (spd_voltage == 0 || spd_voltage & 3)
		volt_str = "1.5V";
	if (spd_voltage & 2)
		volt_str = "1.35V";
	if (spd_voltage & 4)
		volt_str = "1.2xV";

	report_common_dimm(dimm_config, upper_dimm, dimm, ddr3_dimm_types,
			   DDR3_DRAM, volt_str, if_num,
			   num_ranks, dram_width, /*spd_package*/0);
}
/*
 * Decode package, organization and nominal-voltage SPD bytes of a DDR4
 * DIMM and print its summary line via report_common_dimm().
 */
static void report_ddr4_dimm(struct dimm_config *dimm_config, int upper_dimm,
			     int dimm, int if_num)
{
	int spd_voltage;
	/*
	 * Fix: volt_str was read uninitialized when none of the voltage
	 * bits below matched (e.g. spd_voltage == 0); default to the
	 * DDR4 nominal 1.2V.
	 */
	char *volt_str = "1.2V";
	int spd_package = 0xff & read_spd(dimm_config, upper_dimm,
					  DDR4_SPD_PACKAGE_TYPE);
	int spd_org = 0xff & read_spd(dimm_config, upper_dimm,
				      DDR4_SPD_MODULE_ORGANIZATION);
	int num_ranks = 1 + ((spd_org >> 3) & 0x7);
	int dram_width = 4 << ((spd_org >> 0) & 0x7);

	spd_voltage = read_spd(dimm_config, upper_dimm,
			       DDR4_SPD_MODULE_NOMINAL_VOLTAGE);
	if (spd_voltage == 0x01 || spd_voltage & 0x02)
		volt_str = "1.2V";
	if (spd_voltage == 0x04 || spd_voltage & 0x08)
		volt_str = "TBD1 V";
	if (spd_voltage == 0x10 || spd_voltage & 0x20)
		volt_str = "TBD2 V";

	report_common_dimm(dimm_config, upper_dimm, dimm, ddr4_dimm_types,
			   DDR4_DRAM, volt_str, if_num,
			   num_ranks, dram_width, spd_package);
}
/*
 * Print the summary line for one DIMM, dispatching to the DDR3 or DDR4
 * reporter based on the SPD memory-type byte.
 */
void report_dimm(struct dimm_config *dimm_config, int upper_dimm,
		 int dimm, int if_num)
{
	int ddr_type;

	/* ddr_type only indicates DDR4 or DDR3 */
	ddr_type = get_ddr_type(dimm_config, upper_dimm);

	/*
	 * Fix: upper_dimm was determined for the type lookup above but a
	 * hard-coded 0 was passed to the reporters, so an upper DIMM was
	 * always described using the lower DIMM's SPD data.
	 */
	if (ddr_type == DDR4_DRAM)
		report_ddr4_dimm(dimm_config, upper_dimm, dimm, if_num);
	else
		report_ddr3_dimm(dimm_config, upper_dimm, dimm, if_num);
}

File diff suppressed because it is too large Load diff

File diff suppressed because it is too large Load diff

View file

@ -46,6 +46,15 @@ config USB_XHCI_MVEBU
SoCs, which includes Armada8K, Armada3700 and other Armada
family SoCs.
config USB_XHCI_OCTEON
bool "Support for Marvell Octeon family on-chip xHCI USB controller"
depends on ARCH_OCTEON
default y
help
Enables support for the on-chip xHCI controller on Marvell Octeon
family SoCs. This is a driver for the dwc3 to provide the glue logic
to configure the controller.
config USB_XHCI_PCI
bool "Support for PCI-based xHCI USB controller"
depends on DM_USB

View file

@ -56,6 +56,7 @@ obj-$(CONFIG_USB_XHCI_OMAP) += xhci-omap.o
obj-$(CONFIG_USB_XHCI_PCI) += xhci-pci.o
obj-$(CONFIG_USB_XHCI_RCAR) += xhci-rcar.o
obj-$(CONFIG_USB_XHCI_STI) += dwc3-sti-glue.o
obj-$(CONFIG_USB_XHCI_OCTEON) += dwc3-octeon-glue.o
# designware
obj-$(CONFIG_USB_DWC2) += dwc2.o

View file

@ -0,0 +1,393 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Octeon family DWC3 specific glue layer
*
* Copyright (C) 2020 Stefan Roese <sr@denx.de>
*
* The low-level init code is based on the Linux driver octeon-usb.c by
* David Daney <david.daney@cavium.com>, which is:
* Copyright (C) 2010-2017 Cavium Networks
*/
#include <dm.h>
#include <errno.h>
#include <usb.h>
#include <asm/io.h>
#include <dm/lists.h>
#include <dm/of_access.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/usb/dwc3.h>
#include <linux/usb/otg.h>
#include <mach/octeon-model.h>
DECLARE_GLOBAL_DATA_PTR;
#define CVMX_GPIO_BIT_CFGX(i) (0x0001070000000900ull + ((i) * 8))
#define CVMX_GPIO_XBIT_CFGX(i) (0x0001070000000900ull + \
((i) & 31) * 8 - 8 * 16)
#define GPIO_BIT_CFG_TX_OE BIT_ULL(0)
#define GPIO_BIT_CFG_OUTPUT_SEL GENMASK_ULL(20, 16)
#define UCTL_CTL_UCTL_RST BIT_ULL(0)
#define UCTL_CTL_UAHC_RST BIT_ULL(1)
#define UCTL_CTL_UPHY_RST BIT_ULL(2)
#define UCTL_CTL_DRD_MODE BIT_ULL(3)
#define UCTL_CTL_SCLK_EN BIT_ULL(4)
#define UCTL_CTL_HS_POWER_EN BIT_ULL(12)
#define UCTL_CTL_SS_POWER_EN BIT_ULL(14)
#define UCTL_CTL_H_CLKDIV_SEL GENMASK_ULL(26, 24)
#define UCTL_CTL_H_CLKDIV_RST BIT_ULL(28)
#define UCTL_CTL_H_CLK_EN BIT_ULL(30)
#define UCTL_CTL_REF_CLK_FSEL GENMASK_ULL(37, 32)
#define UCTL_CTL_REF_CLK_DIV2 BIT_ULL(38)
#define UCTL_CTL_REF_SSP_EN BIT_ULL(39)
#define UCTL_CTL_MPLL_MULTIPLIER GENMASK_ULL(46, 40)
#define UCTL_CTL_SSC_EN BIT_ULL(59)
#define UCTL_CTL_REF_CLK_SEL GENMASK_ULL(61, 60)
#define UCTL_HOST_CFG 0xe0
#define UCTL_HOST_CFG_PPC_ACTIVE_HIGH_EN BIT_ULL(24)
#define UCTL_HOST_CFG_PPC_EN BIT_ULL(25)
#define UCTL_SHIM_CFG 0xe8
#define UCTL_SHIM_CFG_CSR_ENDIAN_MODE GENMASK_ULL(1, 0)
#define UCTL_SHIM_CFG_DMA_ENDIAN_MODE GENMASK_ULL(9, 8)
#define OCTEON_H_CLKDIV_SEL 8
#define OCTEON_MIN_H_CLK_RATE 150000000
#define OCTEON_MAX_H_CLK_RATE 300000000
#define CLOCK_50MHZ 50000000
#define CLOCK_100MHZ 100000000
#define CLOCK_125MHZ 125000000
/* HCLK divider options, indexed by the UCTL_CTL[H_CLKDIV_SEL] encoding */
static u8 clk_div[OCTEON_H_CLKDIV_SEL] = {1, 2, 4, 6, 8, 16, 24, 32};
/*
 * Configure VBUS power control for one UCTL instance.
 *
 * Parses the optional "power" device-tree property (2 or 3 u32 cells;
 * cell 1 is the GPIO number, cell 2 bit 0 marks active-low), routes the
 * USB power-control signal onto that GPIO, and enables port-power
 * control with the matching polarity. Without a "power" property,
 * port-power control is disabled.
 *
 * Returns 0 on success, -EINVAL when the "power" property is malformed.
 */
static int dwc3_octeon_config_power(struct udevice *dev, void __iomem *base)
{
	u64 uctl_host_cfg;
	u64 gpio_bit;
	u32 gpio_pwr[3];
	int gpio, len, power_active_low;
	const struct device_node *node = dev_np(dev);
	/* Bit 24 of the UCTL base distinguishes the two controllers */
	int index = ((u64)base >> 24) & 1;
	void __iomem *gpio_bit_cfg;

	if (of_find_property(node, "power", &len)) {
		if (len == 12) {
			dev_read_u32_array(dev, "power", gpio_pwr, 3);
			power_active_low = gpio_pwr[2] & 0x01;
			gpio = gpio_pwr[1];
		} else if (len == 8) {
			dev_read_u32_array(dev, "power", gpio_pwr, 2);
			power_active_low = 0;
			gpio = gpio_pwr[1];
		} else {
			/* Fix: message wrongly claimed a clock init failure */
			printf("Invalid \"power\" device tree property\n");
			return -EINVAL;
		}

		/*
		 * Select the USB power output function on the GPIO; the
		 * mux value depends on SoC model, GPIO bank and which
		 * controller (index) is being set up.
		 */
		gpio_bit_cfg = ioremap(CVMX_GPIO_BIT_CFGX(gpio), 0);

		if ((OCTEON_IS_MODEL(OCTEON_CN73XX) ||
		     OCTEON_IS_MODEL(OCTEON_CNF75XX)) && gpio <= 31) {
			gpio_bit = ioread64(gpio_bit_cfg);
			gpio_bit |= GPIO_BIT_CFG_TX_OE;
			gpio_bit &= ~GPIO_BIT_CFG_OUTPUT_SEL;
			gpio_bit |= FIELD_PREP(GPIO_BIT_CFG_OUTPUT_SEL,
					       index == 0 ? 0x14 : 0x15);
			iowrite64(gpio_bit, gpio_bit_cfg);
		} else if (gpio <= 15) {
			gpio_bit = ioread64(gpio_bit_cfg);
			gpio_bit |= GPIO_BIT_CFG_TX_OE;
			gpio_bit &= ~GPIO_BIT_CFG_OUTPUT_SEL;
			gpio_bit |= FIELD_PREP(GPIO_BIT_CFG_OUTPUT_SEL,
					       index == 0 ? 0x14 : 0x19);
			iowrite64(gpio_bit, gpio_bit_cfg);
		} else {
			/* High GPIOs live in the XBIT_CFG register bank */
			gpio_bit_cfg = ioremap(CVMX_GPIO_XBIT_CFGX(gpio), 0);
			gpio_bit = ioread64(gpio_bit_cfg);
			gpio_bit |= GPIO_BIT_CFG_TX_OE;
			gpio_bit &= ~GPIO_BIT_CFG_OUTPUT_SEL;
			gpio_bit |= FIELD_PREP(GPIO_BIT_CFG_OUTPUT_SEL,
					       index == 0 ? 0x14 : 0x19);
			iowrite64(gpio_bit, gpio_bit_cfg);
		}

		/* Enable XHCI power control and set if active high or low. */
		uctl_host_cfg = ioread64(base + UCTL_HOST_CFG);
		uctl_host_cfg |= UCTL_HOST_CFG_PPC_EN;
		if (power_active_low)
			uctl_host_cfg &= ~UCTL_HOST_CFG_PPC_ACTIVE_HIGH_EN;
		else
			uctl_host_cfg |= UCTL_HOST_CFG_PPC_ACTIVE_HIGH_EN;
		iowrite64(uctl_host_cfg, base + UCTL_HOST_CFG);

		/* Wait for power to stabilize */
		mdelay(10);
	} else {
		/* Disable XHCI power control and set if active high. */
		uctl_host_cfg = ioread64(base + UCTL_HOST_CFG);
		uctl_host_cfg &= ~UCTL_HOST_CFG_PPC_EN;
		uctl_host_cfg &= ~UCTL_HOST_CFG_PPC_ACTIVE_HIGH_EN;
		iowrite64(uctl_host_cfg, base + UCTL_HOST_CFG);
		/* Fix: message wrongly reported a clock init failure */
		dev_warn(dev, "No power GPIO, port power control disabled\n");
	}

	return 0;
}
/*
 * Bring the UCTL/UAHC clocks and PHY power up, following the numbered
 * init sequence from the hardware manual.
 *
 * Reads "refclk-frequency", "refclk-type-ss" and "refclk-type-hs" from
 * the device tree to pick the reference-clock mux (REF_CLK_SEL), FSEL
 * and MPLL multiplier, and derives the controller clock divider from
 * the coprocessor clock (gd->bus_clk).
 *
 * Returns 0 on success, -EINVAL on missing/invalid DT properties or
 * when the controller clock fails to come up.
 */
static int dwc3_octeon_clocks_start(struct udevice *dev, void __iomem *base)
{
	u64 uctl_ctl;
	int ref_clk_sel = 2;
	/*
	 * Fix: 'div' was u64, so the 'div >= 0' loop condition below was
	 * always true; when no divider matched, the decrement wrapped and
	 * clk_div[] was indexed out of bounds. A signed int terminates.
	 */
	int div;
	u32 clock_rate;
	int mpll_mul;
	int ret;
	u64 h_clk_rate;
	void __iomem *uctl_ctl_reg = base;
	const char *ss_clock_type;
	const char *hs_clock_type;

	ret = dev_read_u32(dev, "refclk-frequency", &clock_rate);
	if (ret) {
		printf("No UCTL \"refclk-frequency\"\n");
		return -EINVAL;
	}

	ss_clock_type = dev_read_string(dev, "refclk-type-ss");
	if (!ss_clock_type) {
		printf("No UCTL \"refclk-type-ss\"\n");
		return -EINVAL;
	}

	hs_clock_type = dev_read_string(dev, "refclk-type-hs");
	if (!hs_clock_type) {
		printf("No UCTL \"refclk-type-hs\"\n");
		return -EINVAL;
	}

	/* Map the SS/HS clock-source names onto the REF_CLK_SEL encoding */
	if (strcmp("dlmc_ref_clk0", ss_clock_type) == 0) {
		if (strcmp(hs_clock_type, "dlmc_ref_clk0") == 0) {
			ref_clk_sel = 0;
		} else if (strcmp(hs_clock_type, "pll_ref_clk") == 0) {
			ref_clk_sel = 2;
		} else {
			printf("Invalid HS clock type %s, using pll_ref_clk\n",
			       hs_clock_type);
		}
	} else if (strcmp(ss_clock_type, "dlmc_ref_clk1") == 0) {
		if (strcmp(hs_clock_type, "dlmc_ref_clk1") == 0) {
			ref_clk_sel = 1;
		} else if (strcmp(hs_clock_type, "pll_ref_clk") == 0) {
			ref_clk_sel = 3;
		} else {
			printf("Invalid HS clock type %s, using pll_ref_clk\n",
			       hs_clock_type);
			ref_clk_sel = 3;
		}
	} else {
		printf("Invalid SS clock type %s, using dlmc_ref_clk0\n",
		       ss_clock_type);
	}

	if ((ref_clk_sel == 0 || ref_clk_sel == 1) &&
	    clock_rate != CLOCK_100MHZ)
		printf("Invalid UCTL clock rate of %u\n", clock_rate);

	/*
	 * Step 1: Wait for all voltages to be stable...that surely
	 * happened before this driver is started. SKIP
	 */

	/* Step 2: Select GPIO for overcurrent indication, if desired. SKIP */

	/* Step 3: Assert all resets. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl |= UCTL_CTL_UCTL_RST | UCTL_CTL_UAHC_RST | UCTL_CTL_UPHY_RST;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 4a: Reset the clock dividers. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl |= UCTL_CTL_H_CLKDIV_RST;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 4b: Select the largest divider that keeps HCLK in range. */
	for (div = ARRAY_SIZE(clk_div) - 1; div >= 0; div--) {
		h_clk_rate = gd->bus_clk / clk_div[div];
		if (h_clk_rate <= OCTEON_MAX_H_CLK_RATE &&
		    h_clk_rate >= OCTEON_MIN_H_CLK_RATE)
			break;
	}
	if (div < 0) {
		/* No divider yields a usable controller clock */
		printf("dwc3 controller clock init failure\n");
		return -EINVAL;
	}

	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_H_CLKDIV_SEL;
	uctl_ctl |= FIELD_PREP(UCTL_CTL_H_CLKDIV_SEL, div);
	uctl_ctl |= UCTL_CTL_H_CLK_EN;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Read back to confirm the divider took and the clock is enabled */
	uctl_ctl = ioread64(uctl_ctl_reg);
	if (div != FIELD_GET(UCTL_CTL_H_CLKDIV_SEL, uctl_ctl) ||
	    !(uctl_ctl & UCTL_CTL_H_CLK_EN)) {
		printf("dwc3 controller clock init failure\n");
		return -EINVAL;
	}

	/* Step 4c: Deassert the controller clock divider reset. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_H_CLKDIV_RST;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 5a: Reference clock configuration. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_REF_CLK_SEL;
	uctl_ctl |= FIELD_PREP(UCTL_CTL_REF_CLK_SEL, ref_clk_sel);
	uctl_ctl &= ~UCTL_CTL_REF_CLK_FSEL;
	uctl_ctl |= FIELD_PREP(UCTL_CTL_REF_CLK_FSEL, 0x07);
	uctl_ctl &= ~UCTL_CTL_REF_CLK_DIV2;

	switch (clock_rate) {
	default:
		/*
		 * Fix: the printf arguments were swapped, reporting the
		 * 100 MHz fallback as the invalid rate and vice versa.
		 */
		printf("Invalid ref_clk %u, using %u instead\n", clock_rate,
		       CLOCK_100MHZ);
		fallthrough;
	case CLOCK_100MHZ:
		mpll_mul = 0x19;
		if (ref_clk_sel < 2) {
			uctl_ctl &= ~UCTL_CTL_REF_CLK_FSEL;
			uctl_ctl |= FIELD_PREP(UCTL_CTL_REF_CLK_FSEL, 0x27);
		}
		break;
	case CLOCK_50MHZ:
		mpll_mul = 0x32;
		break;
	case CLOCK_125MHZ:
		mpll_mul = 0x28;
		break;
	}
	uctl_ctl &= ~UCTL_CTL_MPLL_MULTIPLIER;
	uctl_ctl |= FIELD_PREP(UCTL_CTL_MPLL_MULTIPLIER, mpll_mul);

	/* Step 5b: Configure and enable spread-spectrum for SuperSpeed. */
	uctl_ctl |= UCTL_CTL_SSC_EN;

	/* Step 5c: Enable SuperSpeed. */
	uctl_ctl |= UCTL_CTL_REF_SSP_EN;

	/* Step 5d: Configure PHYs. SKIP */

	/* Step 6a & 6b: Power up PHYs. */
	uctl_ctl |= UCTL_CTL_HS_POWER_EN;
	uctl_ctl |= UCTL_CTL_SS_POWER_EN;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 7: Wait 10 controller-clock cycles to take effect. */
	udelay(10);

	/* Step 8a: Deassert UCTL reset signal. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_UCTL_RST;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 8b: Wait 10 controller-clock cycles. */
	udelay(10);

	/* Step 8c: Setup power-power control. */
	if (dwc3_octeon_config_power(dev, base)) {
		printf("Error configuring power\n");
		return -EINVAL;
	}

	/* Step 8d: Deassert UAHC reset signal. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_UAHC_RST;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 8e: Wait 10 controller-clock cycles. */
	udelay(10);

	/* Step 9: Enable conditional coprocessor clock of UCTL. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl |= UCTL_CTL_SCLK_EN;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	/* Step 10: Set for host mode only. */
	uctl_ctl = ioread64(uctl_ctl_reg);
	uctl_ctl &= ~UCTL_CTL_DRD_MODE;
	iowrite64(uctl_ctl, uctl_ctl_reg);

	return 0;
}
/*
 * Program endian mode 1 into both the CSR and DMA endian fields of
 * UCTL_SHIM_CFG so shim accesses match the CPU's byte order.
 */
static void dwc3_octeon_set_endian_mode(void __iomem *base)
{
	u64 val;

	val = ioread64(base + UCTL_SHIM_CFG);
	val &= ~(UCTL_SHIM_CFG_CSR_ENDIAN_MODE |
		 UCTL_SHIM_CFG_DMA_ENDIAN_MODE);
	val |= FIELD_PREP(UCTL_SHIM_CFG_CSR_ENDIAN_MODE, 1) |
	       FIELD_PREP(UCTL_SHIM_CFG_DMA_ENDIAN_MODE, 1);
	iowrite64(val, base + UCTL_SHIM_CFG);
}
/* Release the UPHY reset in UCTL_CTL, bringing the PHY out of reset. */
static void dwc3_octeon_phy_reset(void __iomem *base)
{
	u64 val = ioread64(base);

	iowrite64(val & ~UCTL_CTL_UPHY_RST, base);
}
static int octeon_dwc3_glue_probe(struct udevice *dev)
{
void __iomem *base;
base = dev_remap_addr(dev);
if (IS_ERR(base))
return PTR_ERR(base);
dwc3_octeon_clocks_start(dev, base);
dwc3_octeon_set_endian_mode(base);
dwc3_octeon_phy_reset(base);
return 0;
}
/*
 * Bind: require at least one "snps,dwc3"-compatible subnode, then let
 * the DM core bind all children from the device tree.
 */
static int octeon_dwc3_glue_bind(struct udevice *dev)
{
	ofnode subnode;
	int found = 0;

	/* Scan the children for a dwc3 core node */
	ofnode_for_each_subnode(subnode, dev->node) {
		if (ofnode_device_is_compatible(subnode, "snps,dwc3"))
			found = 1;
	}

	if (!found) {
		printf("Can't find dwc3 subnode for %s\n", dev->name);
		return -ENODEV;
	}

	return dm_scan_fdt_dev(dev);
}
/* Device-tree compatible string matched against the USB UCTL node */
static const struct udevice_id octeon_dwc3_glue_ids[] = {
	{ .compatible = "cavium,octeon-7130-usb-uctl" },
	{ }
};

/*
 * Glue driver registration: a UCLASS_NOP device that initializes the
 * UCTL wrapper in probe() and binds the child dwc3 core from the DT.
 */
U_BOOT_DRIVER(dwc3_octeon_glue) = {
	.name = "dwc3_octeon_glue",
	.id = UCLASS_NOP,
	.of_match = octeon_dwc3_glue_ids,
	.probe = octeon_dwc3_glue_probe,
	.bind = octeon_dwc3_glue_bind,
	.flags = DM_FLAG_ALLOC_PRIV_DMA,
};

View file

@ -122,7 +122,7 @@ static int xhci_dwc3_probe(struct udevice *dev)
u32 reg;
int ret;
hccr = (struct xhci_hccr *)((uintptr_t)dev_read_addr(dev));
hccr = (struct xhci_hccr *)((uintptr_t)dev_remap_addr(dev));
hcor = (struct xhci_hcor *)((uintptr_t)hccr +
HC_LENGTH(xhci_readl(&(hccr)->cr_capbase)));

View file

@ -722,8 +722,6 @@ int xhci_bulk_tx(struct usb_device *udev, unsigned long pipe,
BUG_ON(TRB_TO_SLOT_ID(field) != slot_id);
BUG_ON(TRB_TO_EP_INDEX(field) != ep_index);
BUG_ON(*(void **)(uintptr_t)le64_to_cpu(event->trans_event.buffer) -
buffer > (size_t)length);
record_transfer_result(udev, event, length);
xhci_acknowledge_event(ctrl);

View file

@ -7,13 +7,20 @@
#ifndef __OCTEON_COMMON_H__
#define __OCTEON_COMMON_H__
/* No DDR init yet -> run in L2 cache with limited resources */
#if defined(CONFIG_RAM_OCTEON)
#define CONFIG_SYS_MALLOC_LEN (16 << 20)
#define CONFIG_SYS_INIT_SP_OFFSET 0x20100000
#else
/* No DDR init -> run in L2 cache with limited resources */
#define CONFIG_SYS_MALLOC_LEN (256 << 10)
#define CONFIG_SYS_INIT_SP_OFFSET 0x00180000
#endif
#define CONFIG_SYS_SDRAM_BASE 0xffffffff80000000
#define CONFIG_SYS_MONITOR_BASE CONFIG_SYS_TEXT_BASE
#define CONFIG_SYS_LOAD_ADDR (CONFIG_SYS_SDRAM_BASE + (1 << 20))
#define CONFIG_SYS_INIT_SP_OFFSET 0x180000
#define CONFIG_SYS_BOOTM_LEN (64 << 20) /* 64M */
#endif /* __OCTEON_COMMON_H__ */