Merge git://git.denx.de/u-boot-marvell

This commit is contained in:
Tom Rini 2018-05-14 08:52:48 -04:00
commit ca70cbabdc
89 changed files with 9745 additions and 6294 deletions

View file

@ -87,6 +87,7 @@ dtb-$(CONFIG_TEGRA) += tegra20-harmony.dtb \
dtb-$(CONFIG_ARCH_MVEBU) += \
armada-3720-db.dtb \
armada-3720-espressobin.dtb \
armada-3720-turris-mox.dtb \
armada-375-db.dtb \
armada-388-clearfog.dtb \
armada-388-gp.dtb \

View file

@ -0,0 +1,132 @@
// SPDX-License-Identifier: GPL-2.0+ or X11
/*
* Device Tree file for CZ.NIC Turris Mox Board
* 2018 by Marek Behun <marek.behun@nic.cz>
*
* Based on armada-3720-espressobin.dts by:
* Gregory CLEMENT <gregory.clement@free-electrons.com>
* Konstantin Porotchkin <kostap@marvell.com>
*/
/dts-v1/;
#include <dt-bindings/gpio/gpio.h>
#include "armada-372x.dtsi"
/ {
model = "CZ.NIC Turris Mox Board";
compatible = "cznic,turris-mox", "marvell,armada3720",
"marvell,armada3710";
chosen {
stdout-path = "serial0:115200n8";
};
aliases {
ethernet0 = &eth0;
i2c0 = &i2c0;
spi0 = &spi0;
};
memory {
device_type = "memory";
reg = <0x00000000 0x00000000 0x00000000 0x20000000>;
};
reg_usb3_vbus: usb3_vbus@0 {
compatible = "regulator-fixed";
regulator-name = "usb3-vbus";
regulator-min-microvolt = <5000000>;
regulator-max-microvolt = <5000000>;
shutdown-delay-us = <1000000>;
gpio = <&gpiosb 0 GPIO_ACTIVE_HIGH>;
regulator-boot-on;
};
mdio {
eth_phy1: ethernet-phy@1 {
reg = <1>;
};
};
};
&comphy {
max-lanes = <3>;
phy0 {
phy-type = <PHY_TYPE_SGMII1>;
phy-speed = <PHY_SPEED_3_125G>;
};
phy1 {
phy-type = <PHY_TYPE_PEX0>;
phy-speed = <PHY_SPEED_2_5G>;
};
phy2 {
phy-type = <PHY_TYPE_USB3_HOST0>;
phy-speed = <PHY_SPEED_5G>;
};
};
&eth0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&rgmii_pins>, <&smi_pins>;
phy-mode = "rgmii";
phy = <&eth_phy1>;
};
&i2c0 {
pinctrl-names = "default";
pinctrl-0 = <&i2c1_pins>;
status = "okay";
};
&sdhci1 {
bus-width = <4>;
status = "okay";
};
&pinctrl_nb {
spi_cs1_pins: spi-cs1-pins {
groups = "spi_cs1";
function = "spi";
};
};
&pinctrl_sb {
smi_pins: smi-pins {
groups = "smi";
function = "smi";
};
};
&spi0 {
status = "okay";
pinctrl-names = "default";
pinctrl-0 = <&spi_cs1_pins>;
spi-flash@0 {
#address-cells = <1>;
#size-cells = <1>;
compatible = "st,s25fl064l", "spi-flash";
reg = <0>;
spi-max-frequency = <20000000>;
m25p,fast-read;
};
};
&uart0 {
pinctrl-names = "default";
pinctrl-0 = <&uart1_pins>;
status = "okay";
};
&usb2 {
status = "okay";
};
&usb3 {
vbus-supply = <&reg_usb3_vbus>;
status = "okay";
};

View file

@ -107,6 +107,32 @@
status = "disabled";
};
wdt: watchdog-timer@8300 {
compatible = "marvell,armada-3700-wdt";
reg = <0xd064 0x4>,
<0x8300 0x40>;
};
nb_periph_clk: nb-periph-clk@13000 {
compatible = "marvell,armada-3700-periph-clock-nb";
reg = <0x13000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>;
#clock-cells = <1>;
};
sb_periph_clk: sb-periph-clk@18000 {
compatible = "marvell,armada-3700-periph-clock-sb";
reg = <0x18000 0x100>;
clocks = <&tbg 0>, <&tbg 1>, <&tbg 2>, <&tbg 3>;
#clock-cells = <1>;
};
tbg: tbg@13200 {
compatible = "marvell,armada-3700-tbg-clock";
reg = <0x13200 0x100>;
#clock-cells = <1>;
};
pinctrl_nb: pinctrl-nb@13800 {
compatible = "marvell,armada3710-nb-pinctrl",
"syscon", "simple-mfd";
@ -281,8 +307,8 @@
#address-cells = <1>;
#size-cells = <0>;
#clock-cells = <0>;
clock-frequency = <160000>;
spi-max-frequency = <40000>;
spi-max-frequency = <50000000>;
clocks = <&nb_periph_clk 7>;
status = "disabled";
};
@ -290,8 +316,9 @@
compatible = "marvell,mvebu-comphy", "marvell,comphy-armada-3700";
reg = <0x18300 0x28>,
<0x1f300 0x3d000>;
mux-bitcount = <1>;
max-lanes = <2>;
mux-bitcount = <4>;
mux-lane-order = <1 0 2>;
max-lanes = <3>;
};
};

View file

@ -96,6 +96,10 @@ config TARGET_TURRIS_OMNIA
bool "Support Turris Omnia"
select 88F6820
config TARGET_TURRIS_MOX
bool "Support Turris Mox"
select ARMADA_3700
config TARGET_MVEBU_ARMADA_8K
bool "Support Armada 7k/8k platforms"
select ARMADA_8K
@ -133,6 +137,7 @@ config SYS_BOARD
default "db-88f6820-gp" if TARGET_DB_88F6820_GP
default "db-88f6820-amc" if TARGET_DB_88F6820_AMC
default "turris_omnia" if TARGET_TURRIS_OMNIA
default "turris_mox" if TARGET_TURRIS_MOX
default "mvebu_armada-8k" if TARGET_MVEBU_ARMADA_8K
default "db-mv784mp-gp" if TARGET_DB_MV784MP_GP
default "ds414" if TARGET_DS414
@ -151,6 +156,7 @@ config SYS_CONFIG_NAME
default "maxbcm" if TARGET_MAXBCM
default "theadorable" if TARGET_THEADORABLE
default "turris_omnia" if TARGET_TURRIS_OMNIA
default "turris_mox" if TARGET_TURRIS_MOX
config SYS_VENDOR
default "Marvell" if TARGET_DB_MV784MP_GP
@ -162,6 +168,7 @@ config SYS_VENDOR
default "solidrun" if TARGET_CLEARFOG
default "Synology" if TARGET_DS414
default "CZ.NIC" if TARGET_TURRIS_OMNIA
default "CZ.NIC" if TARGET_TURRIS_MOX
config SYS_SOC
default "mvebu"

View file

@ -7,7 +7,7 @@ ifdef CONFIG_ARM64
obj-$(CONFIG_ARMADA_3700) += armada3700/
obj-$(CONFIG_ARMADA_8K) += armada8k/
obj-y += arm64-common.o
obj-y += sata.o
obj-$(CONFIG_AHCI) += sata.o
else # CONFIG_ARM64

View file

@ -46,6 +46,7 @@
/* Controller revision info */
#define PEX_DEVICE_AND_VENDOR_ID 0x000
#define PEX_CFG_DIRECT_ACCESS(if, reg) (PEX_IF_REGS_BASE(if) + (reg))
/* PCI Express Configuration Address Register */
#define PXCAR_REG_NUM_OFFS 2

View file

@ -12,8 +12,6 @@
#include "seq_exec.h"
#include "sys_env_lib.h"
#include "../../../drivers/ddr/marvell/a38x/ddr3_a38x.h"
#ifdef CONFIG_ARMADA_38X
enum unit_id sys_env_soc_unit_nums[MAX_UNITS_ID][MAX_DEV_ID_NUM] = {
/* 6820 6810 6811 6828 */
@ -234,3 +232,27 @@ u32 sys_env_device_id_get(void)
return g_dev_id;
}
/*
 * sys_env_device_rev_get - Get Marvell controller device revision number
 *
 * DESCRIPTION:
 *       This function returns an 8-bit value describing the device revision
 *       as defined in the Revision ID Register.
 *
 * INPUT:
 *       None.
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       8-bit value describing the Marvell controller revision number
 */
u8 sys_env_device_rev_get(void)
{
	/* Read the version/ID register and extract the revision field */
	u32 dev_version = reg_read(DEV_VERSION_ID_REG);

	return (dev_version & REVISON_ID_MASK) >> REVISON_ID_OFFS;
}

View file

@ -7,7 +7,6 @@
#define _SYS_ENV_LIB_H
#include "../../../drivers/ddr/marvell/a38x/ddr3_init.h"
#include "../../../drivers/ddr/marvell/a38x/ddr3_hws_hw_training.h"
/* Serdes definitions */
#define COMMON_PHY_BASE_ADDR 0x18300

View file

@ -0,0 +1,6 @@
TURRIS MOX BOARD
M: Marek Behún <marek.behun@nic.cz>
S: Maintained
F: board/CZ.NIC/turris_mox/
F: include/configs/turris_mox.h
F: configs/turris_mox_defconfig

View file

@ -0,0 +1,5 @@
# SPDX-License-Identifier: GPL-2.0+
#
# Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
obj-y := turris_mox.o

View file

@ -0,0 +1,127 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
*/
#include <common.h>
#include <dm.h>
#include <clk.h>
#include <spi.h>
#include <linux/string.h>
#ifdef CONFIG_WDT_ARMADA_3720
#include <wdt.h>
#endif
DECLARE_GLOBAL_DATA_PTR;
#ifdef CONFIG_WDT_ARMADA_3720
static struct udevice *watchdog_dev;
/*
 * watchdog_reset() - kick the board watchdog, rate-limited.
 *
 * Does nothing until the watchdog device has been found in board_init().
 * Pings the device at most once per 100 ms so that frequent calls from
 * the generic watchdog hook stay cheap.
 */
void watchdog_reset(void)
{
	static ulong deadline;	/* earliest time (us) the next ping may go out */
	ulong now_us;

	if (!watchdog_dev)
		return;

	now_us = timer_get_us();
	if (now_us <= deadline)
		return;		/* pinged recently enough, skip this one */

	wdt_reset(watchdog_dev);
	deadline = now_us + 100000;
}
#endif
/*
 * board_init() - board-level init: boot-param address and watchdog start.
 *
 * Stores the boot parameter address for the OS and, when the Armada 3720
 * watchdog driver is enabled, looks up the watchdog device and starts it
 * with a 3-minute (180000 ms) timeout.
 */
int board_init(void)
{
#ifdef CONFIG_WDT_ARMADA_3720
	int ret;
#endif

	/* address of boot parameters */
	gd->bd->bi_boot_params = CONFIG_SYS_SDRAM_BASE + 0x100;

#ifdef CONFIG_WDT_ARMADA_3720
	ret = uclass_get_device(UCLASS_WDT, 0, &watchdog_dev);
	if (ret) {
		printf("Cannot find Armada 3720 watchdog!\n");
	} else {
		printf("Enabling Armada 3720 watchdog (3 minutes timeout).\n");
		wdt_start(watchdog_dev, 180000, 0);
	}
#endif

	return 0;
}
/*
 * last_stage_init() - detect attached Mox extension modules over SPI.
 *
 * Performs one 80-bit SPI transfer (bus 0, chip-select 1, 20 MHz, CPHA)
 * reading up to 10 bytes.  The first byte must read back as 0x00 or 0xff;
 * each following byte carries a module ID in its low nibble, with 0xff
 * terminating the chain.  The detected topology is printed and exported
 * in the "module_topology" environment variable as a '-'-separated list
 * of module names (e.g. "sfp-pci").
 *
 * Returns 0 on success, or the negative error from the failing SPI call.
 */
int last_stage_init(void)
{
	struct spi_slave *slave;
	struct udevice *dev;
	u8 din[10], dout[10];
	int ret, i;
	size_t len = 0;
	char module_topology[128];

	ret = spi_get_bus_and_cs(0, 1, 20000000, SPI_CPHA, "spi_generic_drv",
				 "mox-modules@1", &dev, &slave);
	if (ret)
		goto fail;

	ret = spi_claim_bus(slave);
	if (ret)
		goto fail_free;

	memset(din, 0, 10);
	memset(dout, 0, 10);

	/* single 80-bit transfer reads the whole module chain */
	ret = spi_xfer(slave, 80, dout, din, SPI_XFER_ONCE);
	if (ret)
		goto fail_release;

	/*
	 * Sanity check: the first byte must be 0x00 or 0xff, otherwise the
	 * data is treated as invalid (note: ret is still 0 here, so this
	 * path returns success without printing the topology).
	 */
	if (din[0] != 0x00 && din[0] != 0xff)
		goto fail_release;

	printf("Module Topology:\n");
	for (i = 1; i < 10 && din[i] != 0xff; ++i) {
		u8 mid = din[i] & 0xf;	/* module ID is in the low nibble */
		size_t mlen;
		const char *mname = "";

		switch (mid) {
		case 0x1:
			mname = "sfp-";
			printf("% 4i: SFP Module\n", i);
			break;
		case 0x2:
			mname = "pci-";
			printf("% 4i: Mini-PCIe Module\n", i);
			break;
		case 0x3:
			mname = "topaz-";
			printf("% 4i: Topaz Switch Module\n", i);
			break;
		default:
			printf("% 4i: unknown (ID %i)\n", i, mid);
		}

		/* append the module name only if it still fits the buffer */
		mlen = strlen(mname);
		if (len + mlen < sizeof(module_topology)) {
			strcpy(module_topology + len, mname);
			len += mlen;
		}
	}
	printf("\n");

	/* each name ends with '-'; truncate at len-1 to drop the last one */
	module_topology[len > 0 ? len - 1 : 0] = '\0';

	env_set("module_topology", module_topology);

fail_release:
	spi_release_bus(slave);
fail_free:
	spi_free_slave(slave);
fail:
	if (ret)
		printf("Cannot read module topology!\n");

	return ret;
}

View file

@ -27,7 +27,7 @@
# include <wdt.h>
#endif
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -200,7 +200,8 @@ static bool omnia_read_eeprom(struct omnia_eeprom *oep)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map_1g = {
static struct mv_ddr_topology_map board_topology_map_1g = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -209,17 +210,20 @@ static struct hws_topology_map board_topology_map_1g = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_NORMAL, /* temperature */
HWS_TIM_2T} }, /* timing (force 2t) */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_NORMAL, /* temperature */
MV_DDR_TIM_2T} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
static struct hws_topology_map board_topology_map_2g = {
static struct mv_ddr_topology_map board_topology_map_2g = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -228,17 +232,19 @@ static struct hws_topology_map board_topology_map_2g = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_8G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_8GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_NORMAL, /* temperature */
HWS_TIM_2T} }, /* timing (force 2t) */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_NORMAL, /* temperature */
MV_DDR_TIM_2T} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
static int mem = 0;
struct omnia_eeprom oep;

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -55,7 +55,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -64,17 +65,19 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1866L, /* speed_bin */
BUS_WIDTH_8, /* memory_width */
MEM_2G, /* mem_size */
MV_DDR_DEV_WIDTH_8BIT, /* memory_width */
MV_DDR_DIE_CAP_2GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW, /* temperature */
MV_DDR_TIM_DEFAULT} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -76,7 +76,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -85,17 +86,19 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1866L, /* speed_bin */
BUS_WIDTH_8, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_8BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW, /* temperature */
MV_DDR_TIM_DEFAULT} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -12,7 +12,7 @@
#include <asm/arch/cpu.h>
#include <asm-generic/gpio.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include "../arch/arm/mach-mvebu/serdes/a38x/high_speed_env_spec.h"
#include "keyprogram.h"
@ -39,7 +39,8 @@ DECLARE_GLOBAL_DATA_PTR;
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map ddr_topology_map = {
static struct mv_ddr_topology_map ddr_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -48,14 +49,17 @@ static struct hws_topology_map ddr_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_533, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW, /* temperature */
MV_DDR_TIM_DEFAULT} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
static struct serdes_map serdes_topology_map[] = {
@ -121,7 +125,7 @@ void board_pex_config(void)
#endif
}
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
return &ddr_topology_map;
}

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -69,7 +69,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -78,17 +79,19 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW, /* temperature */
MV_DDR_TIM_DEFAULT} }, /* timing */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -36,6 +36,9 @@ CONFIG_DM_GPIO=y
# CONFIG_MVEBU_GPIO is not set
CONFIG_DM_I2C=y
CONFIG_MISC=y
CONFIG_CLK=y
CONFIG_CLK_MVEBU=y
CONFIG_CLK_ARMADA_3720=y
CONFIG_DM_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_SDMA=y

View file

@ -35,6 +35,9 @@ CONFIG_BLOCK_CACHE=y
CONFIG_DM_GPIO=y
CONFIG_DM_I2C=y
CONFIG_MISC=y
CONFIG_CLK=y
CONFIG_CLK_MVEBU=y
CONFIG_CLK_ARMADA_3720=y
CONFIG_DM_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_SDMA=y

View file

@ -0,0 +1,76 @@
CONFIG_ARM=y
CONFIG_ARCH_MVEBU=y
CONFIG_SYS_TEXT_BASE=0x00000000
CONFIG_SYS_MALLOC_F_LEN=0x2000
CONFIG_TARGET_TURRIS_MOX=y
CONFIG_DEFAULT_DEVICE_TREE="armada-3720-turris-mox"
CONFIG_DEBUG_UART=y
CONFIG_DISTRO_DEFAULTS=y
# CONFIG_SYS_MALLOC_CLEAR_ON_INIT is not set
CONFIG_SYS_CONSOLE_INFO_QUIET=y
# CONFIG_DISPLAY_CPUINFO is not set
# CONFIG_DISPLAY_BOARDINFO is not set
CONFIG_ARCH_EARLY_INIT_R=y
# CONFIG_CMD_FLASH is not set
CONFIG_CMD_CLK=y
CONFIG_CMD_I2C=y
CONFIG_CMD_MMC=y
CONFIG_CMD_SF=y
CONFIG_CMD_SPI=y
CONFIG_CMD_USB=y
# CONFIG_CMD_SETEXPR is not set
CONFIG_CMD_TFTPPUT=y
CONFIG_CMD_CACHE=y
CONFIG_CMD_TIME=y
CONFIG_CMD_MVEBU_BUBT=y
CONFIG_CMD_BTRFS=y
CONFIG_CMD_EXT4_WRITE=y
CONFIG_MAC_PARTITION=y
CONFIG_ENV_IS_IN_SPI_FLASH=y
CONFIG_BLOCK_CACHE=y
CONFIG_DM_I2C=y
CONFIG_MISC=y
CONFIG_CLK=y
CONFIG_CLK_MVEBU=y
CONFIG_CLK_ARMADA_3720=y
CONFIG_DM_MMC=y
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_SDMA=y
CONFIG_MMC_SDHCI_XENON=y
CONFIG_WDT=y
CONFIG_WDT_ARMADA_37XX=y
CONFIG_DM_GPIO=y
# CONFIG_MVEBU_GPIO is not set
CONFIG_PINCTRL=y
CONFIG_PINCTRL_GENERIC=y
CONFIG_PINMUX=y
CONFIG_PINCTRL_ARMADA_37XX=y
CONFIG_SPI_FLASH=y
CONFIG_SPI_FLASH_MACRONIX=y
CONFIG_SPI_FLASH_SPANSION=y
CONFIG_SPI_FLASH_STMICRO=y
CONFIG_SPI_FLASH_WINBOND=y
CONFIG_PHYLIB=y
CONFIG_PHY_GIGE=y
CONFIG_MVNETA=y
CONFIG_MVEBU_COMPHY_SUPPORT=y
# CONFIG_SPL_SERIAL_PRESENT is not set
CONFIG_DEBUG_MVEBU_A3700_UART=y
CONFIG_DEBUG_UART_BASE=0xd0012000
CONFIG_DEBUG_UART_CLOCK=25804800
CONFIG_DEBUG_UART_SHIFT=2
CONFIG_DEBUG_UART_ANNOUNCE=y
CONFIG_MVEBU_A3700_UART=y
CONFIG_MVEBU_A3700_SPI=y
CONFIG_USB=y
CONFIG_DM_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_USB_HOST_ETHER=y
CONFIG_USB_ETHER_ASIX=y
CONFIG_USB_ETHER_MCS7830=y
CONFIG_USB_ETHER_RTL8152=y
CONFIG_USB_ETHER_SMSC95XX=y
CONFIG_SHA1=y
CONFIG_SHA256=y

View file

@ -88,6 +88,7 @@ source "drivers/clk/uniphier/Kconfig"
source "drivers/clk/exynos/Kconfig"
source "drivers/clk/at91/Kconfig"
source "drivers/clk/renesas/Kconfig"
source "drivers/clk/mvebu/Kconfig"
config ICS8N3QV01
bool "Enable ICS8N3QV01 VCXO driver"

View file

@ -11,6 +11,7 @@ obj-y += tegra/
obj-$(CONFIG_ARCH_ASPEED) += aspeed/
obj-$(CONFIG_ARCH_ROCKCHIP) += rockchip/
obj-$(CONFIG_CLK_AT91) += at91/
obj-$(CONFIG_CLK_MVEBU) += mvebu/
obj-$(CONFIG_CLK_BCM6345) += clk_bcm6345.o
obj-$(CONFIG_CLK_BOSTON) += clk_boston.o
obj-$(CONFIG_CLK_EXYNOS) += exynos/

11
drivers/clk/mvebu/Kconfig Normal file
View file

@ -0,0 +1,11 @@
config CLK_MVEBU
bool "MVEBU clock drivers"
depends on CLK && ARCH_MVEBU
help
Enable support for clock present on Marvell MVEBU SoCs.
config CLK_ARMADA_3720
bool "Marvell Armada 3720 clock driver"
depends on CLK_MVEBU && ARM64
help
Enable this to support the clocks on Marvell Armada 3720 SoC.

View file

@ -0,0 +1 @@
obj-$(CONFIG_CLK_ARMADA_3720) += armada-37xx-periph.o armada-37xx-tbg.o

View file

@ -0,0 +1,499 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Peripheral clocks
*
* Marek Behun <marek.behun@nic.cz>
*
* Based on Linux driver by:
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*/
#include <common.h>
#include <malloc.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#define TBG_SEL 0x0
#define DIV_SEL0 0x4
#define DIV_SEL1 0x8
#define DIV_SEL2 0xC
#define CLK_SEL 0x10
#define CLK_DIS 0x14
enum a37xx_periph_parent {
TBG_A_P = 0,
TBG_B_P = 1,
TBG_A_S = 2,
TBG_B_S = 3,
MAX_TBG_PARENTS = 4,
XTAL = 4,
MAX_PARENTS = 5,
};
static const struct {
const char *name;
enum a37xx_periph_parent parent;
} a37xx_periph_parent_names[] = {
{ "TBG-A-P", TBG_A_P },
{ "TBG-B-P", TBG_B_P },
{ "TBG-A-S", TBG_A_S },
{ "TBG-B-S", TBG_B_S },
{ "xtal", XTAL },
};
struct clk_periph;
struct a37xx_periphclk {
void __iomem *reg;
ulong parents[MAX_PARENTS];
const struct clk_periph *clks;
bool clk_has_periph_parent[16];
int clk_parent[16];
int count;
};
struct clk_div_table {
u32 div;
u32 val;
};
struct clk_periph {
const char *name;
const char *parent_name;
u32 disable_bit;
int mux_shift;
const struct clk_div_table *div_table[2];
s32 div_reg_off[2];
u32 div_mask[2];
int div_shift[2];
unsigned can_gate : 1;
unsigned can_mux : 1;
unsigned dividers : 2;
};
static const struct clk_div_table div_table1[] = {
{ 1, 1 },
{ 2, 2 },
{ 0, 0 },
};
static const struct clk_div_table div_table2[] = {
{ 2, 1 },
{ 4, 2 },
{ 0, 0 },
};
static const struct clk_div_table div_table6[] = {
{ 1, 1 },
{ 2, 2 },
{ 3, 3 },
{ 4, 4 },
{ 5, 5 },
{ 6, 6 },
{ 0, 0 },
};
#define CLK_FULL_DD(_n, _d, _mux, _r0, _r1, _s0, _s1) \
{ \
.name = #_n, \
.disable_bit = BIT(_d), \
.mux_shift = _mux, \
.div_table[0] = div_table6, \
.div_table[1] = div_table6, \
.div_reg_off[0] = _r0, \
.div_reg_off[1] = _r1, \
.div_shift[0] = _s0, \
.div_shift[1] = _s1, \
.div_mask[0] = 7, \
.div_mask[1] = 7, \
.can_gate = 1, \
.can_mux = 1, \
.dividers = 2, \
}
#define CLK_FULL(_n, _d, _mux, _r, _s, _m, _t) \
{ \
.name = #_n, \
.disable_bit = BIT(_d), \
.mux_shift = _mux, \
.div_table[0] = _t, \
.div_reg_off[0] = _r, \
.div_shift[0] = _s, \
.div_mask[0] = _m, \
.can_gate = 1, \
.can_mux = 1, \
.dividers = 1, \
}
#define CLK_GATE_DIV(_n, _d, _r, _s, _m, _t, _p) \
{ \
.name = #_n, \
.parent_name = _p, \
.disable_bit = BIT(_d), \
.div_table[0] = _t, \
.div_reg_off[0] = _r, \
.div_shift[0] = _s, \
.div_mask[0] = _m, \
.can_gate = 1, \
.dividers = 1, \
}
#define CLK_GATE(_n, _d, _p) \
{ \
.name = #_n, \
.parent_name = _p, \
.disable_bit = BIT(_d), \
.can_gate = 1, \
}
#define CLK_MUX_DIV(_n, _mux, _r, _s, _m, _t) \
{ \
.name = #_n, \
.mux_shift = _mux, \
.div_table[0] = _t, \
.div_reg_off[0] = _r, \
.div_shift[0] = _s, \
.div_mask[0] = _m, \
.can_mux = 1, \
.dividers = 1, \
}
#define CLK_MUX_DD(_n, _mux, _r0, _r1, _s0, _s1) \
{ \
.name = #_n, \
.mux_shift = _mux, \
.div_table[0] = div_table6, \
.div_table[1] = div_table6, \
.div_reg_off[0] = _r0, \
.div_reg_off[1] = _r1, \
.div_shift[0] = _s0, \
.div_shift[1] = _s1, \
.div_mask[0] = 7, \
.div_mask[1] = 7, \
.can_mux = 1, \
.dividers = 2, \
}
/* NB periph clocks */
static const struct clk_periph clks_nb[] = {
CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13),
CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7),
CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0),
CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6),
CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12),
CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, 7, div_table6),
CLK_GATE(avs, 11, "xtal"),
CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24),
CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0),
CLK_GATE(i2c_2, 16, "xtal"),
CLK_GATE(i2c_1, 17, "xtal"),
CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, 1, div_table2, "TBG-A-S"),
CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12),
CLK_FULL(trace, 22, 18, DIV_SEL0, 20, 7, div_table6),
CLK_FULL(counter, 23, 20, DIV_SEL0, 23, 7, div_table6),
CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19),
CLK_MUX_DIV(cpu, 22, DIV_SEL0, 28, 7, div_table6),
{ },
};
/* SB periph clocks */
static const struct clk_periph clks_sb[] = {
CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9),
CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21),
CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9),
CLK_GATE(gbe1_50, 0, "gbe_50"),
CLK_GATE(gbe0_50, 1, "gbe_50"),
CLK_GATE(gbe1_125, 2, "gbe_125"),
CLK_GATE(gbe0_125, 3, "gbe_125"),
CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, 1, div_table1, "gbe_core"),
CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, 1, div_table1, "gbe_core"),
CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, 1, div_table1, "gbe_core"),
CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6),
CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12),
CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18),
{ },
};
static inline int get_mux(struct a37xx_periphclk *priv, int shift)
{
return (readl(priv->reg + TBG_SEL) >> shift) & 3;
}
static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id);
/*
 * get_parent_rate() - resolve the parent rate of periph clock @id.
 *
 * The parent is either one of the four TBG clocks (selected by a mux
 * field), another periph clock in the same table, or a fixed TBG/XTAL
 * index assigned at probe time.  Returns -EINVAL on an out-of-range
 * parent index.
 */
static ulong get_parent_rate(struct a37xx_periphclk *priv, int id)
{
	const struct clk_periph *clk = &priv->clks[id];

	if (clk->can_mux) {
		/* parent is whichever TBG the mux currently selects */
		return priv->parents[get_mux(priv, clk->mux_shift)];
	}

	if (priv->clk_has_periph_parent[id]) {
		/* parent is another clock from this periph table */
		if (priv->clk_parent[id] >= priv->count)
			return -EINVAL;
		return periph_clk_get_rate(priv, priv->clk_parent[id]);
	}

	/* otherwise parent is a fixed TBG or the XTAL */
	if (priv->clk_parent[id] >= MAX_PARENTS)
		return -EINVAL;
	return priv->parents[priv->clk_parent[id]];
}
/*
 * get_div() - read divider stage @idx of @clk and translate the register
 * field into an actual divisor via the clock's div_table.  Returns 0 when
 * the field value has no table entry (caller treats this as "no rate").
 */
static ulong get_div(struct a37xx_periphclk *priv,
		     const struct clk_periph *clk, int idx)
{
	const struct clk_div_table *entry;
	u32 field;

	field = readl(priv->reg + clk->div_reg_off[idx]);
	field = (field >> clk->div_shift[idx]) & clk->div_mask[idx];

	/* look up the divisor matching the register field value */
	for (entry = clk->div_table[idx]; entry && entry->div; ++entry)
		if (entry->val == field)
			return entry->div;

	return 0;
}
/*
 * periph_clk_get_rate() - compute the rate of periph clock @id as the
 * parent rate divided by the product of its (0-2) divider stages.
 * Returns -EINVAL if the parent cannot be resolved, 0 if any divider
 * field decodes to zero.
 */
static ulong periph_clk_get_rate(struct a37xx_periphclk *priv, int id)
{
	const struct clk_periph *clk = &priv->clks[id];
	ulong parent_rate, divisor;
	int stage;

	parent_rate = get_parent_rate(priv, id);
	if (parent_rate == -EINVAL)
		return -EINVAL;

	/* multiply out the divider chain */
	divisor = 1;
	for (stage = 0; stage < clk->dividers; ++stage)
		divisor *= get_div(priv, clk, stage);

	if (!divisor)
		return 0;

	return DIV_ROUND_UP(parent_rate, divisor);
}
/* clk_ops .get_rate hook: validate the clock ID, then delegate. */
static ulong armada_37xx_periph_clk_get_rate(struct clk *clk)
{
	struct a37xx_periphclk *priv = dev_get_priv(clk->dev);

	return clk->id < priv->count ? periph_clk_get_rate(priv, clk->id)
				     : -EINVAL;
}
/*
 * periph_clk_enable() - gate or ungate periph clock @clk->id.
 *
 * @enable: non-zero clears the clock's bit in CLK_DIS (ungate), zero
 *          sets it (gate).
 *
 * Returns 0 on success, -EINVAL for an out-of-range clock ID and
 * -ENOTSUPP for clocks without a gate bit.
 *
 * Fix: the original computed &priv->clks[clk->id] before validating
 * clk->id; forming the address of an out-of-range array element is
 * undefined behaviour in C, so the lookup now happens after the check.
 */
static int periph_clk_enable(struct clk *clk, int enable)
{
	struct a37xx_periphclk *priv = dev_get_priv(clk->dev);
	const struct clk_periph *periph_clk;

	if (clk->id >= priv->count)
		return -EINVAL;

	/* index the table only after clk->id has been validated */
	periph_clk = &priv->clks[clk->id];

	if (!periph_clk->can_gate)
		return -ENOTSUPP;

	if (enable)
		clrbits_le32(priv->reg + CLK_DIS, periph_clk->disable_bit);
	else
		setbits_le32(priv->reg + CLK_DIS, periph_clk->disable_bit);

	return 0;
}
/* clk_ops .enable hook: ungate the clock. */
static int armada_37xx_periph_clk_enable(struct clk *clk)
{
	return periph_clk_enable(clk, 1);
}

/* clk_ops .disable hook: gate the clock. */
static int armada_37xx_periph_clk_disable(struct clk *clk)
{
	return periph_clk_enable(clk, 0);
}
#if defined(CONFIG_CMD_CLK) && defined(CONFIG_CLK_ARMADA_3720)
/* Print the name and current rate of every clock handled by @dev. */
static int armada_37xx_periph_clk_dump(struct udevice *dev)
{
	struct a37xx_periphclk *priv = dev_get_priv(dev);
	int id;

	if (!priv)
		return -ENODEV;

	for (id = 0; id < priv->count; ++id)
		printf(" %s at %lu Hz\n", priv->clks[id].name,
		       periph_clk_get_rate(priv, id));
	printf("\n");

	return 0;
}
/*
 * clk_dump() - look up the clock device called @name and run @func on it.
 * Returns -ENODEV when the device is missing, otherwise @func's result.
 */
static int clk_dump(const char *name, int (*func)(struct udevice *))
{
	struct udevice *dev;
	int ret;

	ret = uclass_get_device_by_name(UCLASS_CLK, name, &dev);
	if (ret) {
		printf("Cannot find device %s\n", name);
		return -ENODEV;
	}

	return func(dev);
}
int armada_37xx_tbg_clk_dump(struct udevice *);
/*
 * soc_clk_dump() - implement the "clk dump" command: print the xtal rate
 * followed by the TBG, north-bridge and south-bridge peripheral clocks.
 * Returns 1 as soon as any clock device cannot be dumped.
 */
int soc_clk_dump(void)
{
	static const struct {
		const char *name;
		int (*dump)(struct udevice *);
	} devices[] = {
		{ "tbg@13200", armada_37xx_tbg_clk_dump },
		{ "nb-periph-clk@13000", armada_37xx_periph_clk_dump },
		{ "sb-periph-clk@18000", armada_37xx_periph_clk_dump },
	};
	int i;

	printf(" xtal at %u000000 Hz\n\n", get_ref_clk());

	for (i = 0; i < ARRAY_SIZE(devices); ++i)
		if (clk_dump(devices[i].name, devices[i].dump))
			return 1;

	return 0;
}
#endif
/*
 * armada_37xx_periph_clk_probe() - initialise one periph clock controller.
 *
 * Takes the static clk_periph table attached as driver data, counts its
 * entries, resolves each named parent either to a TBG/XTAL index or to
 * another clock in the same table, and caches the rates of the four TBG
 * parents plus the xtal so later rate queries need no clk lookups.
 *
 * Returns 0 on success, -ENODEV when driver data, the register address
 * or a DT parent clock is missing, -EINVAL on an unknown parent name.
 */
static int armada_37xx_periph_clk_probe(struct udevice *dev)
{
	struct a37xx_periphclk *priv = dev_get_priv(dev);
	const struct clk_periph *clks;
	int ret, i;

	clks = (const struct clk_periph *)dev_get_driver_data(dev);
	if (!clks)
		return -ENODEV;

	priv->reg = dev_read_addr_ptr(dev);
	if (!priv->reg) {
		dev_err(dev, "no io address\n");
		return -ENODEV;
	}

	/* count clk_periph nodes (table is terminated by a NULL name) */
	priv->count = 0;
	while (clks[priv->count].name)
		priv->count++;

	priv->clks = clks;

	/* assign parent IDs to nodes which have non-NULL parent_name */
	for (i = 0; i < priv->count; ++i) {
		int j;

		if (!clks[i].parent_name)
			continue;

		/* first try if parent_name is one of TBGs or XTAL */
		for (j = 0; j < MAX_PARENTS; ++j)
			if (!strcmp(clks[i].parent_name,
				    a37xx_periph_parent_names[j].name))
				break;

		if (j < MAX_PARENTS) {
			priv->clk_has_periph_parent[i] = false;
			priv->clk_parent[i] =
				a37xx_periph_parent_names[j].parent;
			continue;
		}

		/* else parent_name should be one of other periph clocks */
		for (j = 0; j < priv->count; ++j) {
			if (!strcmp(clks[i].parent_name, clks[j].name))
				break;
		}

		if (j < priv->count) {
			priv->clk_has_periph_parent[i] = true;
			priv->clk_parent[i] = j;
			continue;
		}

		/* parent name matched neither a TBG/XTAL nor a sibling */
		dev_err(dev, "undefined parent %s\n", clks[i].parent_name);
		return -EINVAL;
	}

	/* cache parent rates: xtal is computed, TBGs come from DT clocks */
	for (i = 0; i < MAX_PARENTS; ++i) {
		struct clk clk;

		if (i == XTAL) {
			priv->parents[i] = get_ref_clk() * 1000000;
			continue;
		}

		ret = clk_get_by_index(dev, i, &clk);
		if (ret) {
			dev_err(dev, "one of parent clocks (%i) missing: %i\n",
				i, ret);
			return -ENODEV;
		}

		priv->parents[i] = clk_get_rate(&clk);
		clk_free(&clk);
	}

	return 0;
}
static const struct clk_ops armada_37xx_periph_clk_ops = {
.get_rate = armada_37xx_periph_clk_get_rate,
.enable = armada_37xx_periph_clk_enable,
.disable = armada_37xx_periph_clk_disable,
};
static const struct udevice_id armada_37xx_periph_clk_ids[] = {
{
.compatible = "marvell,armada-3700-periph-clock-nb",
.data = (ulong)clks_nb,
},
{
.compatible = "marvell,armada-3700-periph-clock-sb",
.data = (ulong)clks_sb,
},
{}
};
U_BOOT_DRIVER(armada_37xx_periph_clk) = {
.name = "armada_37xx_periph_clk",
.id = UCLASS_CLK,
.of_match = armada_37xx_periph_clk_ids,
.ops = &armada_37xx_periph_clk_ops,
.priv_auto_alloc_size = sizeof(struct a37xx_periphclk),
.probe = armada_37xx_periph_clk_probe,
};

View file

@ -0,0 +1,154 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Time Base Generator clocks
*
* Marek Behun <marek.behun@nic.cz>
*
* Based on Linux driver by:
* Gregory CLEMENT <gregory.clement@free-electrons.com>
*/
#include <common.h>
#include <clk-uclass.h>
#include <clk.h>
#include <dm.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#define NUM_TBG 4
#define TBG_CTRL0 0x4
#define TBG_CTRL1 0x8
#define TBG_CTRL7 0x20
#define TBG_CTRL8 0x30
#define TBG_DIV_MASK 0x1FF
#define TBG_A_REFDIV 0
#define TBG_B_REFDIV 16
#define TBG_A_FBDIV 2
#define TBG_B_FBDIV 18
#define TBG_A_VCODIV_SE 0
#define TBG_B_VCODIV_SE 16
#define TBG_A_VCODIV_DIFF 1
#define TBG_B_VCODIV_DIFF 17
/* Per-TBG description: where each divider field lives in the registers */
struct tbg_def {
	const char *name;	/* clock name, e.g. "TBG-A-P" */
	u32 refdiv_offset;	/* bit offset of REFDIV field in TBG_CTRL7 */
	u32 fbdiv_offset;	/* bit offset of FBDIV field in TBG_CTRL0 */
	u32 vcodiv_reg;		/* register holding this TBG's VCO divider */
	u32 vcodiv_offset;	/* bit offset of the VCO divider field */
};
/* The four TBG channels: A/B, each with a -P and an -S output variant */
static const struct tbg_def tbg[NUM_TBG] = {
	{"TBG-A-P", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL8, TBG_A_VCODIV_DIFF},
	{"TBG-B-P", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL8, TBG_B_VCODIV_DIFF},
	{"TBG-A-S", TBG_A_REFDIV, TBG_A_FBDIV, TBG_CTRL1, TBG_A_VCODIV_SE},
	{"TBG-B-S", TBG_B_REFDIV, TBG_B_FBDIV, TBG_CTRL1, TBG_B_VCODIV_SE},
};
/* Driver private state, filled once at probe time */
struct a37xx_tbgclk {
	ulong rates[NUM_TBG];		/* cached output rate of each TBG, Hz */
	unsigned int mult[NUM_TBG];	/* NOTE(review): not populated in this file */
	unsigned int div[NUM_TBG];	/* NOTE(review): not populated in this file */
};
/*
 * Return the PLL multiplier of one TBG: the 9-bit FBDIV field from
 * TBG_CTRL0, scaled by 4.
 */
static unsigned int tbg_get_mult(void __iomem *reg, const struct tbg_def *ptbg)
{
	u32 ctrl0 = readl(reg + TBG_CTRL0);

	return ((ctrl0 >> ptbg->fbdiv_offset) & TBG_DIV_MASK) << 2;
}
/*
 * Return the total divider of one TBG: the REFDIV field from TBG_CTRL7
 * (0 is treated as 1) multiplied by 2^VCODIV.
 */
static unsigned int tbg_get_div(void __iomem *reg, const struct tbg_def *ptbg)
{
	unsigned int refdiv, vcodiv;

	refdiv = (readl(reg + TBG_CTRL7) >> ptbg->refdiv_offset) &
		 TBG_DIV_MASK;
	if (!refdiv)
		refdiv = 1;

	vcodiv = (readl(reg + ptbg->vcodiv_reg) >> ptbg->vcodiv_offset) &
		 TBG_DIV_MASK;

	return refdiv * (1 << vcodiv);
}
/* Return the cached rate of TBG clk->id, or -ENODEV for an invalid id. */
static ulong armada_37xx_tbg_clk_get_rate(struct clk *clk)
{
	struct a37xx_tbgclk *priv = dev_get_priv(clk->dev);

	return clk->id < NUM_TBG ? priv->rates[clk->id] : -ENODEV;
}
#if defined(CONFIG_CMD_CLK) && defined(CONFIG_CLK_ARMADA_3720)
/* Print the rate of every TBG, for the 'clk' shell command. */
int armada_37xx_tbg_clk_dump(struct udevice *dev)
{
	struct a37xx_tbgclk *priv = dev_get_priv(dev);
	int i;
	for (i = 0; i < NUM_TBG; ++i)
		printf(" %s at %lu Hz\n", tbg[i].name,
		       priv->rates[i]);
	printf("\n");
	return 0;
}
#endif
static int armada_37xx_tbg_clk_probe(struct udevice *dev)
{
struct a37xx_tbgclk *priv = dev_get_priv(dev);
void __iomem *reg;
ulong xtal;
int i;
reg = dev_read_addr_ptr(dev);
if (!reg) {
dev_err(dev, "no io address\n");
return -ENODEV;
}
xtal = (ulong)get_ref_clk() * 1000000;
for (i = 0; i < NUM_TBG; ++i) {
unsigned int mult, div;
mult = tbg_get_mult(reg, &tbg[i]);
div = tbg_get_div(reg, &tbg[i]);
priv->rates[i] = (xtal * mult) / div;
}
return 0;
}
/* TBG rates are fixed and cached at probe; only get_rate is provided */
static const struct clk_ops armada_37xx_tbg_clk_ops = {
	.get_rate = armada_37xx_tbg_clk_get_rate,
};
/* Device-tree match table for the TBG clock block */
static const struct udevice_id armada_37xx_tbg_clk_ids[] = {
	{ .compatible = "marvell,armada-3700-tbg-clock" },
	{}
};
/* Driver-model binding for the Armada 37xx TBG clocks */
U_BOOT_DRIVER(armada_37xx_tbg_clk) = {
	.name = "armada_37xx_tbg_clk",
	.id = UCLASS_CLK,
	.of_match = armada_37xx_tbg_clk_ids,
	.ops = &armada_37xx_tbg_clk_ops,
	.priv_auto_alloc_size = sizeof(struct a37xx_tbgclk),
	.probe = armada_37xx_tbg_clk_probe,
};

View file

@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0+
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x.o
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x_training.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_plat.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_sys_env_lib.o
obj-$(CONFIG_SPL_BUILD) += ddr3_debug.o
obj-$(CONFIG_SPL_BUILD) += ddr3_hws_hw_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_init.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_bist.o
@ -13,5 +12,8 @@ obj-$(CONFIG_SPL_BUILD) += ddr3_training_hw_algo.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_ip_engine.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_leveling.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_pbs.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_static.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_build_message.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_common.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_spd.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_topology.o
obj-$(CONFIG_SPL_BUILD) += xor.o

View file

@ -1,736 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define A38X_NUMBER_OF_INTERFACES 5
#define SAR_DEV_ID_OFFS 27
#define SAR_DEV_ID_MASK 0x7
/* Thermal Sensor Registers */
#define TSEN_STATE_REG 0xe4070
#define TSEN_STATE_OFFSET 31
#define TSEN_STATE_MASK (0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG 0xe4074
#define TSEN_CONF_RST_OFFSET 8
#define TSEN_CONF_RST_MASK (0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG 0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET 10
#define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET 0
#define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
static struct dfx_access interface_map[] = {
/* Pipe Client */
{ 0, 17 },
{ 1, 7 },
{ 1, 11 },
{ 0, 3 },
{ 1, 25 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 }
};
/* This array hold the board round trip delay (DQ and CK) per <interface,bus> */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
/* 1st board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 }, /* ECC PUP */
/* 2nd board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 } /* ECC PUP */
};
#ifdef STATIC_ALGO_SUPPORT
/* package trace */
static struct trip_delay_element a38x_package_round_trip_delay_array[] = {
/* IF BUS DQ_DELAY CK_DELAY */
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 }
};
static int a38x_silicon_delay_offset[] = {
/* board 0 */
0,
/* board 1 */
0,
/* board 2 */
0
};
#endif
static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
0x3, /* DDR_FREQ_100 */
0x4, /* DDR_FREQ_400 */
0x4, /* DDR_FREQ_533 */
0x5, /* DDR_FREQ_667 */
0x5, /* DDR_FREQ_800 */
0x5, /* DDR_FREQ_933 */
0x5, /* DDR_FREQ_1066 */
0x3, /* DDR_FREQ_311 */
0x3, /* DDR_FREQ_333 */
0x4, /* DDR_FREQ_467 */
0x5, /* DDR_FREQ_850 */
0x5, /* DDR_FREQ_600 */
0x3, /* DDR_FREQ_300 */
0x5, /* DDR_FREQ_900 */
0x3, /* DDR_FREQ_360 */
0x5 /* DDR_FREQ_1000 */
};
static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
/*TBD*/ 0x1, /* DDR_FREQ_100 */
0x2, /* DDR_FREQ_400 */
0x2, /* DDR_FREQ_533 */
0x2, /* DDR_FREQ_667 */
0x2, /* DDR_FREQ_800 */
0x3, /* DDR_FREQ_933 */
0x3, /* DDR_FREQ_1066 */
0x1, /* DDR_FREQ_311 */
0x1, /* DDR_FREQ_333 */
0x2, /* DDR_FREQ_467 */
0x2, /* DDR_FREQ_850 */
0x2, /* DDR_FREQ_600 */
0x1, /* DDR_FREQ_300 */
0x2, /* DDR_FREQ_900 */
0x1, /* DDR_FREQ_360 */
0x2 /* DDR_FREQ_1000 */
};
static u16 a38x_vco_freq_per_sar[] = {
666, /* 0 */
1332,
800,
1600,
1066,
2132,
1200,
2400,
1332,
1332,
1500,
1500,
1600, /* 12 */
1600,
1700,
1700,
1866,
1866,
1800, /* 18 */
2000,
2000,
4000,
2132,
2132,
2300,
2300,
2400,
2400,
2500,
2500,
800
};
u32 pipe_multicast_mask;
u32 dq_bit_map_2_phy_pin[] = {
1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
};
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
enum hws_ddr_freq freq);
/*
* Read temperature TJ value
*/
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;
	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
		reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
	/* give the sensor time to settle after a possible reset */
	mdelay(10);
	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;	/* 0 doubles as the "not ready" result */
	}
	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
	/* sensor transfer function: scale the raw count and subtract offset */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
/*
* Name: ddr3_tip_a38x_get_freq_config.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
/*
 * Name:	ddr3_tip_a38x_get_freq_config.
 * Desc:	Fill in bus width / rate training parameters for 'freq'.
 * Args:	dev_num - device number (unused)
 *		freq - requested DDR frequency
 *		freq_config_info - out: per-frequency configuration
 * Notes:	Validates the output pointer and the frequency range before
 *		'freq' is used as a table index.
 * Returns:	MV_OK if success, other error code if fail.
 */
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
				  struct hws_tip_freq_config_info
				  *freq_config_info)
{
	if (freq_config_info == NULL)
		return MV_BAD_PARAM;

	/* guard the table lookups below against an out-of-range index */
	if (freq >= DDR_FREQ_LIMIT)
		return MV_BAD_PARAM;

	if (a38x_bw_per_freq[freq] == 0xff)
		return MV_NOT_SUPPORTED;

	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
	freq_config_info->is_supported = 1;

	return MV_OK;
}
/*
* Name: ddr3_tip_a38x_pipe_enable.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
			      u32 if_id, int enable)
{
	u32 data_value, pipe_enable_mask = 0;
	if (enable == 0) {
		pipe_enable_mask = 0;
	} else {
		/* multicast enables every pipe in the precomputed mask */
		if (interface_access == ACCESS_TYPE_MULTICAST)
			pipe_enable_mask = pipe_multicast_mask;
		else
			pipe_enable_mask = (1 << interface_map[if_id].pipe);
	}
	/* read-modify-write: only the low 8 pipe-enable bits are replaced */
	CHECK_STATUS(ddr3_tip_reg_read
		     (dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
	data_value = (data_value & (~0xff)) | pipe_enable_mask;
	CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));
	return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_write.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
			   u32 if_id, u32 reg_addr, u32 data_value,
			   u32 mask)
{
	u32 ui_data_read;
	/* partial-mask write: merge new bits with the current register value */
	if (mask != MASK_ALL_BITS) {
		CHECK_STATUS(ddr3_tip_a38x_if_read
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
			      &ui_data_read, MASK_ALL_BITS));
		data_value = (ui_data_read & (~mask)) | (data_value & mask);
	}
	reg_write(reg_addr, data_value);
	return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_read.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
/* Read a register and return only the bits selected by 'mask'. */
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
			  u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
	*data = reg_read(reg_addr) & mask;
	return MV_OK;
}
/*
* Name: ddr3_tip_a38x_select_ddr_controller.
* Desc: Enable/Disable access to Marvell's server.
* Args: dev_num - device number
* enable - whether to enable or disable the server
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
/*
 * Enable/disable access to Marvell's training server by toggling bit 6
 * of the CS enable register.
 */
int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
	u32 val = reg_read(CS_ENABLE_REG);

	if (enable)
		val |= 1 << 6;
	else
		val &= ~(1 << 6);

	reg_write(CS_ENABLE_REG, val);

	return MV_OK;
}
/*
* Name: ddr3_tip_init_a38x_silicon.
* Desc: init Training SW DB.
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;
	enum hws_ddr_freq ddr_freq;
	int status;
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/* new read leveling version */
	/* register the A38x register-access callbacks with the training IP */
	config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
	config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	ddr3_tip_init_config_func(dev_num, &config_func);
	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
#ifdef STATIC_ALGO_SUPPORT
	{
		struct hws_tip_static_config_info static_config;
		u32 board_offset =
		    board_id * A38X_NUMBER_OF_INTERFACES *
		    tm->num_of_bus_per_interface;
		static_config.silicon_delay =
			a38x_silicon_delay_offset[board_id];
		static_config.package_trace_arr =
			a38x_package_round_trip_delay_array;
		static_config.board_trace_arr =
			&a38x_board_round_trip_delay_array[board_offset];
		ddr3_tip_init_static_config_db(dev_num, &static_config);
	}
#endif
	status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
	if (MV_OK != status) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("DDR3 silicon get target frequency - FAILED 0x%x\n",
				       status));
		return status;
	}
	rl_version = 1;
	/* default set of training stages to run */
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  /* LOAD_PATTERN_2_MASK_BIT | */
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;
	/* at 333/400 MHz the target frequency IS the low/medium one,
	 * so run a reduced stage set */
	if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}
	/* Supplementary not supported for ECC modes */
	if (1 == ddr3_if_ecc_enabled()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}
	/* -1 means "not set"; fall back to the default of 160 */
	if (ck_delay == -1)
		ck_delay = 160;
	if (ck_delay_16 == -1)
		ck_delay_16 = 160;
	ca_delay = 0;
	delay_enable = 1;
	calibration_update_control = 1;
	init_freq = tm->interface_params[first_active_if].memory_freq;
	/* NOTE(review): return status of get_medium_freq is ignored here */
	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
	return MV_OK;
}
/* Refresh interface 0's frequency from sample-at-reset, then reload the map. */
int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
	u32 if_id = 0;
	enum hws_ddr_freq freq;
	ddr3_tip_a38x_get_init_freq(dev_num, &freq);
	tm->interface_params[if_id].memory_freq = freq;
	/*
	 * re-calc topology parameters according to topology updates
	 * (if needed)
	 */
	CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));
	return MV_OK;
}
/*
 * Name:	ddr3_tip_init_a38x
 * Desc:	Top-level A38x init: update the topology map, then init the
 *		training SW database for this silicon.
 * Args:	dev_num - device number
 *		board_id - board identifier
 * Notes:	Sub-step failures are now propagated to the caller instead
 *		of being silently discarded.
 * Returns:	MV_OK if success, other error code if fail.
 */
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
{
	int status;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	if (NULL == tm)
		return MV_FAIL;

	status = ddr3_a38x_update_topology_map(dev_num, tm);
	if (MV_OK != status)
		return status;

	status = ddr3_tip_init_a38x_silicon(dev_num, board_id);
	if (MV_OK != status)
		return status;

	return MV_OK;
}
/* Map the sample-at-reset clock-select field to the target DDR frequency. */
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
{
	u32 reg;
	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
	switch (reg) {
	case 0x0:
	case 0x1:
		*freq = DDR_FREQ_333;
		break;
	case 0x2:
	case 0x3:
		*freq = DDR_FREQ_400;
		break;
	case 0x4:
	case 0xd:
		*freq = DDR_FREQ_533;
		break;
	case 0x6:
		*freq = DDR_FREQ_600;
		break;
	case 0x8:
	case 0x11:
	case 0x14:
		*freq = DDR_FREQ_667;
		break;
	case 0xc:
	case 0x15:
	case 0x1b:
		*freq = DDR_FREQ_800;
		break;
	case 0x10:
		*freq = DDR_FREQ_933;
		break;
	case 0x12:
		*freq = DDR_FREQ_900;
		break;
	/* NOTE(review): 0x12 and 0x13 both map to 900 MHz — confirm intended */
	case 0x13:
		*freq = DDR_FREQ_900;
		break;
	default:
		*freq = 0;
		return MV_NOT_SUPPORTED;
	}
	return MV_OK;
}
/* Map the sample-at-reset clock-select field to the medium (training) freq. */
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
	u32 reg;
	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
	switch (reg) {
	case 0x0:
	case 0x1:
		/* Medium is same as TF to run PBS in this freq */
		*freq = DDR_FREQ_333;
		break;
	case 0x2:
	case 0x3:
		/* Medium is same as TF to run PBS in this freq */
		*freq = DDR_FREQ_400;
		break;
	case 0x4:
	case 0xd:
		*freq = DDR_FREQ_533;
		break;
	case 0x8:
	case 0x11:
	case 0x14:
		*freq = DDR_FREQ_333;
		break;
	case 0xc:
	case 0x15:
	case 0x1b:
		*freq = DDR_FREQ_400;
		break;
	case 0x6:
		*freq = DDR_FREQ_300;
		break;
	case 0x12:
		*freq = DDR_FREQ_360;
		break;
	case 0x13:
		*freq = DDR_FREQ_400;
		break;
	default:
		*freq = 0;
		return MV_NOT_SUPPORTED;
	}
	return MV_OK;
}
/*
 * Legacy wrapper: return the init frequency for device 0.
 * NOTE(review): the status of ddr3_tip_a38x_get_init_freq() is ignored;
 * on an unsupported SAR value this returns 0.
 */
u32 ddr3_tip_get_init_freq(void)
{
	enum hws_ddr_freq freq;
	ddr3_tip_a38x_get_init_freq(0, &freq);
	return freq;
}
/*
 * Reprogram the CPU PLL clock divider so the DDR interface runs at
 * 'frequency'. Only interface 0 exists on A38x. The register writes below
 * follow a fixed unlock/program/relock sequence — order matters.
 */
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum hws_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val;
	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}
	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
	/* divider = VCO frequency / requested DDR frequency */
	divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];
	/* Set Sync mode */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
		      0x1000));
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
		      0x200));
	/* cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
		      0xff));
	/* cpupll_clkdiv_reload_smooth */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 8), (0xff << 8)));
	/* cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
		      (0x2 << 24), (0xff << 24)));
	/* write the divider */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
		      (divider << 8), (0x3f << 8)));
	/* set cpupll_clkdiv_reload_ratio */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
		      (1 << 8), (1 << 8)));
	/* unset cpupll_clkdiv_reload_ratio */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      (1 << 8)));
	/* clear cpupll_clkdiv_reload_force */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 8)));
	/* clear cpupll_clkdiv_relax_en */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
		      (0xff << 24)));
	/* clear cpupll_clkdiv_reset_mask */
	CHECK_STATUS(ddr3_tip_a38x_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
		      0xff));
	/* Dunit training clock + 1:1 mode */
	if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      (1 << 16), (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (0 << 15), (1 << 15)));
	} else {
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
			      0, (1 << 16)));
		CHECK_STATUS(ddr3_tip_a38x_if_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
			      (1 << 15), (1 << 15)));
	}
	return MV_OK;
}
/*
* external read from memory
*/
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
		      u32 num_of_bursts, u32 *data)
{
	u32 burst_num;
	/* each burst is eight consecutive 32-bit words */
	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		data[burst_num] = readl(reg_addr + 4 * burst_num);
	return MV_OK;
}
/*
* external write to memory
*/
/*
 * External write to memory: copy num_of_bursts eight-word bursts from
 * data[] to consecutive 32-bit locations starting at reg_addr.
 */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
		       u32 num_of_bursts, u32 *data)
{
	u32 word;

	for (word = 0; word < num_of_bursts * 8; word++)
		writel(data[word], reg_addr + 4 * word);

	return MV_OK;
}
/* Pre-init hook: on A38x this is just the one-time silicon init. */
int ddr3_silicon_pre_init(void)
{
	return ddr3_silicon_init();
}
/* Post-algorithm hook: nothing to do on A38x, kept for the common API. */
int ddr3_post_run_alg(void)
{
	return MV_OK;
}
int ddr3_silicon_post_init(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		/* clear bit 15 of the SDRAM config register (16-bit mode) */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
	}
	return MV_OK;
}
/* Report fixed device id and the current CK delay to the training IP. */
int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
	info_ptr->device_id = 0x6800;	/* hard-coded A38x device id */
	info_ptr->ck_delay = ck_delay;
	return MV_OK;
}

View file

@ -1,92 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_A38X_H
#define _DDR3_A38X_H
#define MAX_INTERFACE_NUM 1
#define MAX_BUS_NUM 5
#include "ddr3_hws_hw_training_def.h"
#define ECC_SUPPORT
/* right now, we're not supporting this in mainline */
#undef SUPPORT_STATIC_DUNIT_CONFIG
/* Controler bus divider 1 for 32 bit, 2 for 64 bit */
#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER 1
/* Tune internal training params values */
#define TUNE_TRAINING_PARAMS_CK_DELAY 160
#define TUNE_TRAINING_PARAMS_CK_DELAY_16 160
#define TUNE_TRAINING_PARAMS_PFINGER 41
#define TUNE_TRAINING_PARAMS_NFINGER 43
#define TUNE_TRAINING_PARAMS_PHYREG3VAL 0xa
#define MARVELL_BOARD MARVELL_BOARD_ID_BASE
#define REG_DEVICE_SAR1_ADDR 0xe4204
#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET 17
#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK 0x1f
/* DRAM Windows */
#define REG_XBAR_WIN_5_CTRL_ADDR 0x20050
#define REG_XBAR_WIN_5_BASE_ADDR 0x20054
/* DRAM Windows */
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0
#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4
#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win))
#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win))
/* SatR defined too change topology busWidth and ECC configuration */
#define DDR_SATR_CONFIG_MASK_WIDTH 0x8
#define DDR_SATR_CONFIG_MASK_ECC 0x10
#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20
#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600
#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ
/* Matrix enables DRAM modes (bus width/ECC) per boardId */
#define TOPOLOGY_UPDATE_32BIT 0
#define TOPOLOGY_UPDATE_32BIT_ECC 1
#define TOPOLOGY_UPDATE_16BIT 2
#define TOPOLOGY_UPDATE_16BIT_ECC 3
#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4
#define TOPOLOGY_UPDATE { \
/* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \
{1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \
{1, 1, 1, 1, 1}, /* DB_68XX_ID */ \
{1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \
{0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \
{1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \
};
enum {
CPU_1066MHZ_DDR_400MHZ,
CPU_RESERVED_DDR_RESERVED0,
CPU_667MHZ_DDR_667MHZ,
CPU_800MHZ_DDR_800MHZ,
CPU_RESERVED_DDR_RESERVED1,
CPU_RESERVED_DDR_RESERVED2,
CPU_RESERVED_DDR_RESERVED3,
LAST_FREQ
};
#define ACTIVE_INTERFACE_MASK 0x1
#endif /* _DDR3_A38X_H */

View file

@ -1,225 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_A38X_MC_STATIC_H
#define _DDR3_A38X_MC_STATIC_H
#include "ddr3_a38x.h"
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
static struct reg_data ddr3_customer_800[] = {
/* parameters for customer board (based on 800MHZ) */
{0x1400, 0x7b00cc30, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x5415baab, 0xffffffff},
{0x140c, 0x38411def, 0xffffffff},
{0x1410, 0x18300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0011a940, 0xffffffff},
{0x142c, 0x28c5134, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000d771, 0xffffffff},
{0x1494, 0x00030000, 0xffffffff},
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000b0b, 0xffffffff},
{0x153c, 0x00000c0c, 0xffffffff},
{0x15d0, 0x00000670, 0xffffffff},
{0x15d4, 0x00000046, 0xffffffff},
{0x15d8, 0x00000010, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000023, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
#else /* CONFIG_CUSTOMER_BOARD_SUPPORT */
struct reg_data ddr3_a38x_933[MV_MAX_DDR3_STATIC_SIZE] = {
/* parameters for 933MHZ */
{0x1400, 0x7b00ce3a, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x7417eccf, 0xffffffff},
{0x140c, 0x3e421f98, 0xffffffff},
{0x1410, 0x1a300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0013ca50, 0xffffffff},
{0x142c, 0x028c5165, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000e871, 0xffffffff},
{0x1494, 0x00010000, 0xffffffff},
{0x149c, 0x00000001, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xffffffe1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000d0d, 0xffffffff},
{0x153c, 0x00000d0d, 0xffffffff},
{0x15d0, 0x00000608, 0xffffffff},
{0x15d4, 0x00000044, 0xffffffff},
{0x15d8, 0x00000020, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000021, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_800[] = {
/* parameters for 800MHZ */
{0x1400, 0x7b00cc30, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x5415baab, 0xffffffff},
{0x140c, 0x38411def, 0xffffffff},
{0x1410, 0x18300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0011a940, 0xffffffff},
{0x142c, 0x28c5134, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000d771, 0xffffffff},
{0x1494, 0x00030000, 0xffffffff},
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000b0b, 0xffffffff},
{0x153c, 0x00000c0c, 0xffffffff},
{0x15d0, 0x00000670, 0xffffffff},
{0x15d4, 0x00000046, 0xffffffff},
{0x15d8, 0x00000010, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000023, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_667[] = {
/* parameters for 667MHZ */
/* DDR SDRAM Configuration Register */
{0x1400, 0x7b00ca28, 0xffffffff},
/* Dunit Control Low Register - kw28 bit12 low (disable CLK1) */
{0x1404, 0x36301820, 0xffffffff},
/* DDR SDRAM Timing (Low) Register */
{0x1408, 0x43149997, 0xffffffff},
/* DDR SDRAM Timing (High) Register */
{0x140c, 0x38411bc7, 0xffffffff},
/* DDR SDRAM Address Control Register */
{0x1410, 0x14330000, 0xffffffff},
/* DDR SDRAM Open Pages Control Register */
{0x1414, 0x00000700, 0xffffffff},
/* Dunit Control High Register (2 :1 - bits 15:12 = 0xd) */
{0x1424, 0x0060f3ff, 0xffffffff},
/* Dunit Control High Register */
{0x1428, 0x000f8830, 0xffffffff},
/* Dunit Control High Register (2:1 - bit 29 = '1') */
{0x142c, 0x28c50f8, 0xffffffff},
{0x147c, 0x0000c671, 0xffffffff},
/* DDR SDRAM ODT Control (Low) Register */
{0x1494, 0x00030000, 0xffffffff},
/* DDR SDRAM ODT Control (High) Register, will be configured at WL */
{0x1498, 0x00000000, 0xffffffff},
/* DDR Dunit ODT Control Register */
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff}, /* */
{0x14cc, 0xbd09000d, 0xffffffff}, /* */
{0x1474, 0x00000000, 0xffffffff},
/* Read Data Sample Delays Register */
{0x1538, 0x00000009, 0xffffffff},
/* Read Data Ready Delay Register */
{0x153c, 0x0000000c, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff}, /* */
{0x150c, 0xffffffe5, 0xffffffff}, /* */
{0x1514, 0x00000000, 0xffffffff}, /* */
{0x151c, 0x0, 0xffffffff}, /* */
{0x15d0, 0x00000650, 0xffffffff}, /* MR0 */
{0x15d4, 0x00000046, 0xffffffff}, /* MR1 */
{0x15d8, 0x00000010, 0xffffffff}, /* MR2 */
{0x15dc, 0x00000000, 0xffffffff}, /* MR3 */
{0x15e0, 0x23, 0xffffffff}, /* */
{0x15e4, 0x00203c18, 0xffffffff}, /* ZQC Configuration Register */
{0x15ec, 0xf8000019, 0xffffffff}, /* DDR PHY */
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_533[] = {
/* parameters for 533MHZ */
/* DDR SDRAM Configuration Register */
{0x1400, 0x7b00d040, 0xffffffff},
/* Dunit Control Low Register - kw28 bit12 low (disable CLK1) */
{0x1404, 0x36301820, 0xffffffff},
/* DDR SDRAM Timing (Low) Register */
{0x1408, 0x33137772, 0xffffffff},
/* DDR SDRAM Timing (High) Register */
{0x140c, 0x3841199f, 0xffffffff},
/* DDR SDRAM Address Control Register */
{0x1410, 0x10330000, 0xffffffff},
/* DDR SDRAM Open Pages Control Register */
{0x1414, 0x00000700, 0xffffffff},
/* Dunit Control High Register (2 :1 - bits 15:12 = 0xd) */
{0x1424, 0x0060f3ff, 0xffffffff},
/* Dunit Control High Register */
{0x1428, 0x000d6720, 0xffffffff},
/* Dunit Control High Register (2:1 - bit 29 = '1') */
{0x142c, 0x028c50c3, 0xffffffff},
{0x147c, 0x0000b571, 0xffffffff},
/* DDR SDRAM ODT Control (Low) Register */
{0x1494, 0x00030000, 0xffffffff},
/* DDR SDRAM ODT Control (High) Register, will be configured at WL */
{0x1498, 0x00000000, 0xffffffff},
/* DDR Dunit ODT Control Register */
{0x149c, 0x00000003, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff}, /* */
{0x14cc, 0xbd09000d, 0xffffffff}, /* */
{0x1474, 0x00000000, 0xffffffff},
/* Read Data Sample Delays Register */
{0x1538, 0x00000707, 0xffffffff},
/* Read Data Ready Delay Register */
{0x153c, 0x00000707, 0xffffffff},
{0x1504, 0xffffffe1, 0xffffffff}, /* */
{0x150c, 0xffffffe5, 0xffffffff}, /* */
{0x1514, 0x00000000, 0xffffffff}, /* */
{0x151c, 0x00000000, 0xffffffff}, /* */
{0x15d0, 0x00000630, 0xffffffff}, /* MR0 */
{0x15d4, 0x00000046, 0xffffffff}, /* MR1 */
{0x15d8, 0x00000008, 0xffffffff}, /* MR2 */
{0x15dc, 0x00000000, 0xffffffff}, /* MR3 */
{0x15e0, 0x00000023, 0xffffffff}, /* */
{0x15e4, 0x00203c18, 0xffffffff}, /* ZQC Configuration Register */
{0x15ec, 0xf8000019, 0xffffffff}, /* DDR PHY */
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
#endif /* CONFIG_CUSTOMER_BOARD_SUPPORT */
#endif /* SUPPORT_STATIC_DUNIT_CONFIG */
#endif /* _DDR3_A38X_MC_STATIC_H */

View file

@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_A38X_TOPOLOGY_H
#define _DDR3_A38X_TOPOLOGY_H
#include "ddr_topology_def.h"
/* Bus mask variants */
#define BUS_MASK_32BIT 0xf
#define BUS_MASK_32BIT_ECC 0x1f
#define BUS_MASK_16BIT 0x3
#define BUS_MASK_16BIT_ECC 0x13
#define BUS_MASK_16BIT_ECC_PUP3 0xb
#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
#endif /* _DDR3_A38X_TOPOLOGY_H */

View file

@ -1,39 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/*
* Name: ddr3_tip_init_silicon
* Desc: initiate silicon parameters
* Args:
* Notes:
* Returns: required value
*/
int ddr3_silicon_init(void)
{
	int status;
	static int init_done;	/* makes the init idempotent across calls */
	if (init_done == 1)
		return MV_OK;
	status = ddr3_tip_init_a38x(0, 0);
	if (MV_OK != status) {
		printf("DDR3 A38x silicon init - FAILED 0x%x\n", status);
		return status;
	}
	init_done = 1;
	return MV_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -1,147 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
/* Return 1 when the active topology enables ECC (PUP4 or PUP3 mode), else 0. */
int ddr3_if_ecc_enabled(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();

	return (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
		DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) ? 1 : 0;
}
/* Configure the ECC muxes before the training algorithm runs. */
int ddr3_pre_algo_config(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();
	/* Set Bus3 ECC training mode */
	if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) {
		/* Set Bus3 ECC MUX */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      REG_SDRAM_PINS_MUX, 0x100, 0x100));
	}
	/* Set regular ECC training mode (bus4 and bus 3) */
	if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
	    (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
		/* Enable ECC Write MUX */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x100, 0x100));
		/* General ECC enable */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      REG_SDRAM_CONFIG_ADDR, 0x40000, 0x40000));
		/* Disable Read Data ECC MUX */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x0, 0x2));
	}
	return MV_OK;
}
/*
 * ddr3_post_algo_config() - undo ECC training muxing after the algorithm
 *
 * Runs the post-training algorithm hook, then, when ECC is enabled on
 * PUP3 or PUP4, disables the ECC write MUX that ddr3_pre_algo_config()
 * turned on; the general ECC enable and Bus3 ECC MUX stay as configured.
 *
 * Return: MV_OK on success, or the failing status of ddr3_post_run_alg().
 */
int ddr3_post_algo_config(void)
{
	struct hws_topology_map *tm = ddr3_get_topology_map();
	int status;

	status = ddr3_post_run_alg();
	if (MV_OK != status) {
		printf("DDR3 Post Run Alg - FAILED 0x%x\n", status);
		return status;
	}

	/* Un_set ECC training mode */
	if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
	    (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
		/* Disable ECC Write MUX */
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      TRAINING_SW_2_REG, 0x0, 0x100));
		/* General ECC and Bus3 ECC MUX remains enabled */
	}

	return MV_OK;
}
/*
 * ddr3_hws_hw_training() - run the complete HWS DDR3 training flow
 *
 * Sequence: silicon pre-init, D-unit controller init (with build-time
 * parameters for 64-bit control and PHY init), silicon post-init, ECC
 * pre-algorithm hook, the dynamic training algorithm that tunes the PHY,
 * and finally the ECC post-algorithm hook.
 *
 * Return: MV_OK on success, otherwise the status of the first failing
 * step (a diagnostic is printed for each failure case).
 */
int ddr3_hws_hw_training(void)
{
	struct init_cntr_param init_param;
	int rc;

	rc = ddr3_silicon_pre_init();
	if (rc != MV_OK) {
		printf("DDR3 Pre silicon Config - FAILED 0x%x\n", rc);
		return rc;
	}

	/* Build-time controller parameters */
	init_param.do_mrs_phy = 1;
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
	init_param.is_ctrl64_bit = 0;
#else
	init_param.is_ctrl64_bit = 1;
#endif
#if defined(CONFIG_ALLEYCAT3) || defined(CONFIG_ARMADA_38X) || \
	defined(CONFIG_ARMADA_39X)
	init_param.init_phy = 1;
#else
	init_param.init_phy = 0;
#endif
	init_param.msys_init = 1;

	rc = hws_ddr3_tip_init_controller(0, &init_param);
	if (rc != MV_OK) {
		printf("DDR3 init controller - FAILED 0x%x\n", rc);
		return rc;
	}

	rc = ddr3_silicon_post_init();
	if (rc != MV_OK) {
		printf("DDR3 Post Init - FAILED 0x%x\n", rc);
		return rc;
	}

	rc = ddr3_pre_algo_config();
	if (rc != MV_OK) {
		printf("DDR3 Pre Algo Config - FAILED 0x%x\n", rc);
		return rc;
	}

	/* run algorithm in order to configure the PHY */
	rc = hws_ddr3_tip_run_alg(0, ALGO_TYPE_DYNAMIC);
	if (rc != MV_OK) {
		printf("DDR3 run algorithm - FAILED 0x%x\n", rc);
		return rc;
	}

	rc = ddr3_post_algo_config();
	if (rc != MV_OK) {
		printf("DDR3 Post Algo Config - FAILED 0x%x\n", rc);
		return rc;
	}

	return MV_OK;
}

View file

@ -1,48 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_HW_TRAINING_H
#define _DDR3_HWS_HW_TRAINING_H

/* struct used for DLB configuration array */
struct dlb_config {
	u32 reg_addr;	/* DLB register address to program */
	u32 reg_data;	/* value to write to reg_addr */
};

/*
 * Topology update structure
 * The update_* flags presumably mark which of the paired value fields
 * are valid — TODO confirm against the consumers of this struct.
 */
struct topology_update_info {
	int update_ecc;
	u8 ecc;
	int update_width;
	u8 width;
	int update_ecc_pup3_mode;
	u8 ecc_pup_mode_offset;
};

/* Topology update defines */
#define TOPOLOGY_UPDATE_WIDTH_16BIT		1
#define TOPOLOGY_UPDATE_WIDTH_32BIT		0
#define TOPOLOGY_UPDATE_WIDTH_32BIT_MASK	0xf
#define TOPOLOGY_UPDATE_WIDTH_16BIT_MASK	0x3

#define TOPOLOGY_UPDATE_ECC_ON			1
#define TOPOLOGY_UPDATE_ECC_OFF			0
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP4		4
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP3		3

/*
 * 1. L2 filter should be set at binary header to 0xd000000,
 *    to avoid conflict with internal register IO.
 * 2. U-Boot modifies internal registers base to 0xf100000,
 *    and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
 */
/* temporary limit l2 filter to 3GiB (LSP issue) */
#define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xc0000000
#define ADDRESS_FILTERING_END_REGISTER	0x8c04

#define SUB_VERSION	0

#endif /* _DDR3_HWS_HW_TRAINING_H */

View file

@ -1,463 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_HW_TRAINING_DEF_H
#define _DDR3_HWS_HW_TRAINING_DEF_H
#define SAR_DDR3_FREQ_MASK 0xfe00000
#define SAR_CPU_FAB_GET(cpu, fab) (((cpu & 0x7) << 21) | \
((fab & 0xf) << 24))
#define MAX_CS 4
#define MIN_DIMM_ADDR 0x50
#define FAR_END_DIMM_ADDR 0x50
#define MAX_DIMM_ADDR 0x60
#define SDRAM_CS_SIZE 0xfffffff
#define SDRAM_CS_BASE 0x0
#define SDRAM_DIMM_SIZE 0x80000000
#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100))
#define CPU_MRVL_ID_OFFSET 0x10
#define SAR1_CPU_CORE_MASK 0x38000000
#define SAR1_CPU_CORE_OFFSET 27
#define NEW_FABRIC_TWSI_ADDR 0x4e
#ifdef DB_784MP_GP
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4e
#else
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4f
#endif
#define MV_MAX_DDR3_STATIC_SIZE 50
#define MV_DDR3_MODES_NUMBER 30
#define RESUME_RL_PATTERNS_ADDR 0xfe0000
#define RESUME_RL_PATTERNS_SIZE 0x100
#define RESUME_TRAINING_VALUES_ADDR (RESUME_RL_PATTERNS_ADDR + \
RESUME_RL_PATTERNS_SIZE)
#define RESUME_TRAINING_VALUES_MAX 0xcd0
#define BOOT_INFO_ADDR (RESUME_RL_PATTERNS_ADDR + 0x1000)
#define CHECKSUM_RESULT_ADDR (BOOT_INFO_ADDR + 0x1000)
#define NUM_OF_REGISTER_ADDR (CHECKSUM_RESULT_ADDR + 4)
#define SUSPEND_MAGIC_WORD 0xdeadb002
#define REGISTER_LIST_END 0xffffffff
/* MISC */
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
/* DDR */
#define REG_SDRAM_CONFIG_ADDR 0x1400
#define REG_SDRAM_CONFIG_MASK 0x9fffffff
#define REG_SDRAM_CONFIG_RFRS_MASK 0x3fff
#define REG_SDRAM_CONFIG_WIDTH_OFFS 15
#define REG_SDRAM_CONFIG_REGDIMM_OFFS 17
#define REG_SDRAM_CONFIG_ECC_OFFS 18
#define REG_SDRAM_CONFIG_IERR_OFFS 19
#define REG_SDRAM_CONFIG_PUPRSTDIV_OFFS 28
#define REG_SDRAM_CONFIG_RSTRD_OFFS 30
#define REG_SDRAM_PINS_MUX 0x19d4
#define REG_DUNIT_CTRL_LOW_ADDR 0x1404
#define REG_DUNIT_CTRL_LOW_2T_OFFS 3
#define REG_DUNIT_CTRL_LOW_2T_MASK 0x3
#define REG_DUNIT_CTRL_LOW_DPDE_OFFS 14
#define REG_SDRAM_TIMING_LOW_ADDR 0x1408
#define REG_SDRAM_TIMING_HIGH_ADDR 0x140c
#define REG_SDRAM_TIMING_H_R2R_OFFS 7
#define REG_SDRAM_TIMING_H_R2R_MASK 0x3
#define REG_SDRAM_TIMING_H_R2W_W2R_OFFS 9
#define REG_SDRAM_TIMING_H_R2W_W2R_MASK 0x3
#define REG_SDRAM_TIMING_H_W2W_OFFS 11
#define REG_SDRAM_TIMING_H_W2W_MASK 0x1f
#define REG_SDRAM_TIMING_H_R2R_H_OFFS 19
#define REG_SDRAM_TIMING_H_R2R_H_MASK 0x7
#define REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS 22
#define REG_SDRAM_TIMING_H_R2W_W2R_H_MASK 0x7
#define REG_SDRAM_ADDRESS_CTRL_ADDR 0x1410
#define REG_SDRAM_ADDRESS_SIZE_OFFS 2
#define REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS 18
#define REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS 4
#define REG_SDRAM_OPEN_PAGES_ADDR 0x1414
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_SDRAM_OPERATION_ADDR 0x1418
#define REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS 24
#define REG_SDRAM_OPERATION_CWA_DATA_OFFS 20
#define REG_SDRAM_OPERATION_CWA_DATA_MASK 0xf
#define REG_SDRAM_OPERATION_CWA_RC_OFFS 16
#define REG_SDRAM_OPERATION_CWA_RC_MASK 0xf
#define REG_SDRAM_OPERATION_CMD_MR0 0xf03
#define REG_SDRAM_OPERATION_CMD_MR1 0xf04
#define REG_SDRAM_OPERATION_CMD_MR2 0xf08
#define REG_SDRAM_OPERATION_CMD_MR3 0xf09
#define REG_SDRAM_OPERATION_CMD_RFRS 0xf02
#define REG_SDRAM_OPERATION_CMD_CWA 0xf0e
#define REG_SDRAM_OPERATION_CMD_RFRS_DONE 0xf
#define REG_SDRAM_OPERATION_CMD_MASK 0xf
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_OUDDR3_TIMING_ADDR 0x142c
#define REG_SDRAM_MODE_ADDR 0x141c
#define REG_SDRAM_EXT_MODE_ADDR 0x1420
#define REG_DDR_CONT_HIGH_ADDR 0x1424
#define REG_ODT_TIME_LOW_ADDR 0x1428
#define REG_ODT_ON_CTL_RD_OFFS 12
#define REG_ODT_OFF_CTL_RD_OFFS 16
#define REG_SDRAM_ERROR_ADDR 0x1454
#define REG_SDRAM_AUTO_PWR_SAVE_ADDR 0x1474
#define REG_ODT_TIME_HIGH_ADDR 0x147c
#define REG_SDRAM_INIT_CTRL_ADDR 0x1480
#define REG_SDRAM_INIT_CTRL_OFFS 0
#define REG_SDRAM_INIT_CKE_ASSERT_OFFS 2
#define REG_SDRAM_INIT_RESET_DEASSERT_OFFS 3
#define REG_SDRAM_INIT_RESET_MASK_OFFS 1
#define REG_SDRAM_ODT_CTRL_LOW_ADDR 0x1494
#define REG_SDRAM_ODT_CTRL_HIGH_ADDR 0x1498
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK 0x0
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_ENA 0x3
#define REG_DUNIT_ODT_CTRL_ADDR 0x149c
#define REG_DUNIT_ODT_CTRL_OVRD_OFFS 8
#define REG_DUNIT_ODT_CTRL_OVRD_VAL_OFFS 9
#define REG_DRAM_FIFO_CTRL_ADDR 0x14a0
#define REG_DRAM_AXI_CTRL_ADDR 0x14a8
#define REG_DRAM_AXI_CTRL_AXIDATABUSWIDTH_OFFS 0
#define REG_METAL_MASK_ADDR 0x14b0
#define REG_METAL_MASK_MASK 0xdfffffff
#define REG_METAL_MASK_RETRY_OFFS 0
#define REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR 0x14c0
#define REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR 0x14c4
#define REG_DRAM_VER_CAL_MACHINE_CTRL_ADDR 0x14c8
#define REG_DRAM_MAIN_PADS_CAL_ADDR 0x14cc
#define REG_DRAM_HOR_CAL_MACHINE_CTRL_ADDR 0x17c8
#define REG_CS_SIZE_SCRATCH_ADDR 0x1504
#define REG_DYNAMIC_POWER_SAVE_ADDR 0x1520
#define REG_DDR_IO_ADDR 0x1524
#define REG_DDR_IO_CLK_RATIO_OFFS 15
#define REG_DFS_ADDR 0x1528
#define REG_DFS_DLLNEXTSTATE_OFFS 0
#define REG_DFS_BLOCK_OFFS 1
#define REG_DFS_SR_OFFS 2
#define REG_DFS_ATSR_OFFS 3
#define REG_DFS_RECONF_OFFS 4
#define REG_DFS_CL_NEXT_STATE_OFFS 8
#define REG_DFS_CL_NEXT_STATE_MASK 0xf
#define REG_DFS_CWL_NEXT_STATE_OFFS 12
#define REG_DFS_CWL_NEXT_STATE_MASK 0x7
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
#define START_BURST_IN_ADDR 1
#define REG_DRAM_TRAINING_SHADOW_ADDR 0x18488
#define REG_DRAM_TRAINING_ADDR 0x15b0
#define REG_DRAM_TRAINING_LOW_FREQ_OFFS 0
#define REG_DRAM_TRAINING_PATTERNS_OFFS 4
#define REG_DRAM_TRAINING_MED_FREQ_OFFS 2
#define REG_DRAM_TRAINING_WL_OFFS 3
#define REG_DRAM_TRAINING_RL_OFFS 6
#define REG_DRAM_TRAINING_DQS_RX_OFFS 15
#define REG_DRAM_TRAINING_DQS_TX_OFFS 16
#define REG_DRAM_TRAINING_CS_OFFS 20
#define REG_DRAM_TRAINING_RETEST_OFFS 24
#define REG_DRAM_TRAINING_DFS_FREQ_OFFS 27
#define REG_DRAM_TRAINING_DFS_REQ_OFFS 29
#define REG_DRAM_TRAINING_ERROR_OFFS 30
#define REG_DRAM_TRAINING_AUTO_OFFS 31
#define REG_DRAM_TRAINING_RETEST_PAR 0x3
#define REG_DRAM_TRAINING_RETEST_MASK 0xf8ffffff
#define REG_DRAM_TRAINING_CS_MASK 0xff0fffff
#define REG_DRAM_TRAINING_PATTERNS_MASK 0xff0f0000
#define REG_DRAM_TRAINING_1_ADDR 0x15b4
#define REG_DRAM_TRAINING_1_TRNBPOINT_OFFS 16
#define REG_DRAM_TRAINING_2_ADDR 0x15b8
#define REG_DRAM_TRAINING_2_OVERRUN_OFFS 17
#define REG_DRAM_TRAINING_2_FIFO_RST_OFFS 4
#define REG_DRAM_TRAINING_2_RL_MODE_OFFS 3
#define REG_DRAM_TRAINING_2_WL_MODE_OFFS 2
#define REG_DRAM_TRAINING_2_ECC_MUX_OFFS 1
#define REG_DRAM_TRAINING_2_SW_OVRD_OFFS 0
#define REG_DRAM_TRAINING_PATTERN_BASE_ADDR 0x15bc
#define REG_DRAM_TRAINING_PATTERN_BASE_OFFS 3
#define REG_TRAINING_DEBUG_2_ADDR 0x15c4
#define REG_TRAINING_DEBUG_2_OFFS 16
#define REG_TRAINING_DEBUG_2_MASK 0x3
#define REG_TRAINING_DEBUG_3_ADDR 0x15c8
#define REG_TRAINING_DEBUG_3_OFFS 3
#define REG_TRAINING_DEBUG_3_MASK 0x7
#define MR_CS_ADDR_OFFS 4
#define REG_DDR3_MR0_ADDR 0x15d0
#define REG_DDR3_MR0_CS_ADDR 0x1870
#define REG_DDR3_MR0_CL_MASK 0x74
#define REG_DDR3_MR0_CL_OFFS 2
#define REG_DDR3_MR0_CL_HIGH_OFFS 3
#define CL_MASK 0xf
#define REG_DDR3_MR1_ADDR 0x15d4
#define REG_DDR3_MR1_CS_ADDR 0x1874
#define REG_DDR3_MR1_RTT_MASK 0xfffffdbb
#define REG_DDR3_MR1_DLL_ENA_OFFS 0
#define REG_DDR3_MR1_RTT_DISABLED 0x0
#define REG_DDR3_MR1_RTT_RZQ2 0x40
#define REG_DDR3_MR1_RTT_RZQ4 0x2
#define REG_DDR3_MR1_RTT_RZQ6 0x42
#define REG_DDR3_MR1_RTT_RZQ8 0x202
#define REG_DDR3_MR1_RTT_RZQ12 0x4
/* WL-disabled, OB-enabled */
#define REG_DDR3_MR1_OUTBUF_WL_MASK 0xffffef7f
/* Output Buffer Disabled */
#define REG_DDR3_MR1_OUTBUF_DIS_OFFS 12
#define REG_DDR3_MR1_WL_ENA_OFFS 7
#define REG_DDR3_MR1_WL_ENA 0x80 /* WL Enabled */
#define REG_DDR3_MR1_ODT_MASK 0xfffffdbb
#define REG_DDR3_MR2_ADDR 0x15d8
#define REG_DDR3_MR2_CS_ADDR 0x1878
#define REG_DDR3_MR2_CWL_OFFS 3
#define REG_DDR3_MR2_CWL_MASK 0x7
#define REG_DDR3_MR2_ODT_MASK 0xfffff9ff
#define REG_DDR3_MR3_ADDR 0x15dc
#define REG_DDR3_MR3_CS_ADDR 0x187c
#define REG_DDR3_RANK_CTRL_ADDR 0x15e0
#define REG_DDR3_RANK_CTRL_CS_ENA_MASK 0xf
#define REG_DDR3_RANK_CTRL_MIRROR_OFFS 4
#define REG_ZQC_CONF_ADDR 0x15e4
#define REG_DRAM_PHY_CONFIG_ADDR 0x15ec
#define REG_DRAM_PHY_CONFIG_MASK 0x3fffffff
#define REG_ODPG_CNTRL_ADDR 0x1600
#define REG_ODPG_CNTRL_OFFS 21
#define REG_PHY_LOCK_MASK_ADDR 0x1670
#define REG_PHY_LOCK_MASK_MASK 0xfffff000
#define REG_PHY_LOCK_STATUS_ADDR 0x1674
#define REG_PHY_LOCK_STATUS_LOCK_OFFS 9
#define REG_PHY_LOCK_STATUS_LOCK_MASK 0xfff
#define REG_PHY_LOCK_APLL_ADLL_STATUS_MASK 0x7ff
#define REG_PHY_REGISTRY_FILE_ACCESS_ADDR 0x16a0
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_WR 0xc0000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_RD 0x80000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE 0x80000000
#define REG_PHY_BC_OFFS 27
#define REG_PHY_CNTRL_OFFS 26
#define REG_PHY_CS_OFFS 16
#define REG_PHY_DQS_REF_DLY_OFFS 10
#define REG_PHY_PHASE_OFFS 8
#define REG_PHY_PUP_OFFS 22
#define REG_TRAINING_WL_ADDR 0x16ac
#define REG_TRAINING_WL_CS_MASK 0xfffffffc
#define REG_TRAINING_WL_UPD_OFFS 2
#define REG_TRAINING_WL_CS_DONE_OFFS 3
#define REG_TRAINING_WL_RATIO_MASK 0xffffff0f
#define REG_TRAINING_WL_1TO1 0x50
#define REG_TRAINING_WL_2TO1 0x10
#define REG_TRAINING_WL_DELAYEXP_MASK 0x20000000
#define REG_TRAINING_WL_RESULTS_MASK 0x000001ff
#define REG_TRAINING_WL_RESULTS_OFFS 20
#define REG_REGISTERED_DRAM_CTRL_ADDR 0x16d0
#define REG_REGISTERED_DRAM_CTRL_SR_FLOAT_OFFS 15
#define REG_REGISTERED_DRAM_CTRL_PARITY_MASK 0x3f
/* DLB */
#define REG_STATIC_DRAM_DLB_CONTROL 0x1700
#define DLB_BUS_OPTIMIZATION_WEIGHTS_REG 0x1704
#define DLB_AGING_REGISTER 0x1708
#define DLB_EVICTION_CONTROL_REG 0x170c
#define DLB_EVICTION_TIMERS_REGISTER_REG 0x1710
#define DLB_USER_COMMAND_REG 0x1714
#define DLB_BUS_WEIGHTS_DIFF_CS 0x1770
#define DLB_BUS_WEIGHTS_DIFF_BG 0x1774
#define DLB_BUS_WEIGHTS_SAME_BG 0x1778
#define DLB_BUS_WEIGHTS_RD_WR 0x177c
#define DLB_BUS_WEIGHTS_ATTR_SYS_PRIO 0x1780
#define DLB_MAIN_QUEUE_MAP 0x1784
#define DLB_LINE_SPLIT 0x1788
#define DLB_ENABLE 0x1
#define DLB_WRITE_COALESING (0x1 << 2)
#define DLB_AXI_PREFETCH_EN (0x1 << 3)
#define DLB_MBUS_PREFETCH_EN (0x1 << 4)
#define PREFETCH_N_LN_SZ_TR (0x1 << 6)
#define DLB_INTERJECTION_ENABLE (0x1 << 3)
/* CPU */
#define REG_BOOTROM_ROUTINE_ADDR 0x182d0
#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12
#define REG_DRAM_INIT_CTRL_STATUS_ADDR 0x18488
#define REG_DRAM_INIT_CTRL_TRN_CLK_OFFS 16
#define REG_CPU_DIV_CLK_CTRL_0_NEW_RATIO 0x000200ff
#define REG_DRAM_INIT_CTRL_STATUS_2_ADDR 0x1488
#define REG_CPU_DIV_CLK_CTRL_0_ADDR 0x18700
#define REG_CPU_DIV_CLK_CTRL_1_ADDR 0x18704
#define REG_CPU_DIV_CLK_CTRL_2_ADDR 0x18708
#define REG_CPU_DIV_CLK_CTRL_3_ADDR 0x1870c
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK 0xffffc0ff
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS 8
#define REG_CPU_DIV_CLK_CTRL_4_ADDR 0x18710
#define REG_CPU_DIV_CLK_STATUS_0_ADDR 0x18718
#define REG_CPU_DIV_CLK_ALL_STABLE_OFFS 8
#define REG_CPU_PLL_CTRL_0_ADDR 0x1871c
#define REG_CPU_PLL_STATUS_0_ADDR 0x18724
#define REG_CORE_DIV_CLK_CTRL_ADDR 0x18740
#define REG_CORE_DIV_CLK_STATUS_ADDR 0x18744
#define REG_DDRPHY_APLL_CTRL_ADDR 0x18780
#define REG_DDRPHY_APLL_CTRL_2_ADDR 0x18784
#define REG_SFABRIC_CLK_CTRL_ADDR 0x20858
#define REG_SFABRIC_CLK_CTRL_SMPL_OFFS 8
/* DRAM Windows */
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_FASTPATH_WIN_0_CTRL_ADDR 0x20184
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
/* SRAM */
#define REG_CDI_CONFIG_ADDR 0x20220
#define REG_SRAM_WINDOW_0_ADDR 0x20240
#define REG_SRAM_WINDOW_0_ENA_OFFS 0
#define REG_SRAM_WINDOW_1_ADDR 0x20244
#define REG_SRAM_L2_ENA_ADDR 0x8500
#define REG_SRAM_CLEAN_BY_WAY_ADDR 0x87bc
/* Timers */
#define REG_TIMERS_CTRL_ADDR 0x20300
#define REG_TIMERS_EVENTS_ADDR 0x20304
#define REG_TIMER0_VALUE_ADDR 0x20314
#define REG_TIMER1_VALUE_ADDR 0x2031c
#define REG_TIMER0_ENABLE_MASK 0x1
#define MV_BOARD_REFCLK_25MHZ 25000000
#define CNTMR_RELOAD_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x10 + (tmr * 8))
#define CNTMR_VAL_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x14 + (tmr * 8))
#define CNTMR_CTRL_REG(tmr) (REG_TIMERS_CTRL_ADDR)
#define CTCR_ARM_TIMER_EN_OFFS(timer) (timer * 2)
#define CTCR_ARM_TIMER_EN_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_EN(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_OFFS(timer) (1 + (timer * 2))
#define CTCR_ARM_TIMER_AUTO_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_EN(timer) (1 << CTCR_ARM_TIMER_AUTO_OFFS(timer))
/* PMU */
#define REG_PMU_I_F_CTRL_ADDR 0x1c090
#define REG_PMU_DUNIT_BLK_OFFS 16
#define REG_PMU_DUNIT_RFRS_OFFS 20
#define REG_PMU_DUNIT_ACK_OFFS 24
/* MBUS */
#define MBUS_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x420)
#define FABRIC_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x424)
#define MBUS_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x428)
#define FABRIC_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x42c)
#define REG_PM_STAT_MASK_ADDR 0x2210c
#define REG_PM_STAT_MASK_CPU0_IDLE_MASK_OFFS 16
#define REG_PM_EVENT_STAT_MASK_ADDR 0x22120
#define REG_PM_EVENT_STAT_MASK_DFS_DONE_OFFS 17
#define REG_PM_CTRL_CONFIG_ADDR 0x22104
#define REG_PM_CTRL_CONFIG_DFS_REQ_OFFS 18
#define REG_FABRIC_LOCAL_IRQ_MASK_ADDR 0x218c4
#define REG_FABRIC_LOCAL_IRQ_PMU_MASK_OFFS 18
/* Controller revision info */
#define PCI_CLASS_CODE_AND_REVISION_ID 0x008
#define PCCRIR_REVID_OFFS 0 /* Revision ID */
#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS)
/* Power Management Clock Gating Control Register */
#define POWER_MNG_CTRL_REG 0x18220
#define PEX_DEVICE_AND_VENDOR_ID 0x000
#define PEX_CFG_DIRECT_ACCESS(if, reg) (PEX_IF_REGS_BASE(if) + (reg))
#define PMC_PEXSTOPCLOCK_OFFS(p) ((p) < 8 ? (5 + (p)) : (18 + (p)))
#define PMC_PEXSTOPCLOCK_MASK(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_EN(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_STOP(p) (0 << PMC_PEXSTOPCLOCK_OFFS(p))
/* TWSI */
#define TWSI_DATA_ADDR_MASK 0x7
#define TWSI_DATA_ADDR_OFFS 1
/* General */
#define MAX_CS 4
/* Frequencies */
#define FAB_OPT 21
#define CLK_CPU 12
#define CLK_VCO (2 * CLK_CPU)
#define CLK_DDR 12
/* CPU Frequencies: */
#define CLK_CPU_1000 0
#define CLK_CPU_1066 1
#define CLK_CPU_1200 2
#define CLK_CPU_1333 3
#define CLK_CPU_1500 4
#define CLK_CPU_1666 5
#define CLK_CPU_1800 6
#define CLK_CPU_2000 7
#define CLK_CPU_600 8
#define CLK_CPU_667 9
#define CLK_CPU_800 0xa
/* Extra Cpu Frequencies: */
#define CLK_CPU_1600 11
#define CLK_CPU_2133 12
#define CLK_CPU_2200 13
#define CLK_CPU_2400 14
#endif /* _DDR3_HWS_HW_TRAINING_DEF_H */

View file

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_SIL_TRAINING_H
#define _DDR3_HWS_SIL_TRAINING_H

#include "ddr3_training_ip.h"
#include "ddr3_training_ip_prv_if.h"

/* silicon-specific configuration hook run before training */
int ddr3_silicon_pre_config(void);
/* one-time silicon init (wraps ddr3_tip_init_a38x() on A38x) */
int ddr3_silicon_init(void);
/* read the target DDR frequency into *ddr_freq */
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);

#endif /* _DDR3_HWS_SIL_TRAINING_H */

View file

@ -3,280 +3,30 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "mv_ddr_common.h"
#include "../../../../arch/arm/mach-mvebu/serdes/a38x/sys_env_lib.h"
/*
 * DLB register/value pairs for the default (non-A0) silicon stepping;
 * selected by sys_env_dlb_config_ptr_get(). Terminated by a {0, 0} entry.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
	{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
	{DLB_AGING_REGISTER, 0x0f7f007f},
	{DLB_EVICTION_CONTROL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
	{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
	{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
	{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
	{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
	{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
	{DLB_MAIN_QUEUE_MAP, 0x00000543},
	{DLB_LINE_SPLIT, 0x00000000},
	{DLB_USER_COMMAND_REG, 0x00000000},
	{0x0, 0x0}	/* end-of-table marker */
};
/*
 * DLB register/value pairs for A0-stepping silicon (and Armada 39x);
 * selected by sys_env_dlb_config_ptr_get(). Currently identical to the
 * default table. Terminated by a {0, 0} entry.
 */
static struct dlb_config ddr3_dlb_config_table_a0[] = {
	{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
	{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
	{DLB_AGING_REGISTER, 0x0f7f007f},
	{DLB_EVICTION_CONTROL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
	{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
	{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
	{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
	{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
	{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
	{DLB_MAIN_QUEUE_MAP, 0x00000543},
	{DLB_LINE_SPLIT, 0x00000000},
	{DLB_USER_COMMAND_REG, 0x00000000},
	{0x0, 0x0}	/* end-of-table marker */
};
#if defined(CONFIG_ARMADA_38X)
/* one selectable static DDR configuration (per frequency/board) */
struct dram_modes {
	char *mode_name;	/* human-readable configuration name */
	u8 cpu_freq;
	u8 fab_freq;
	u8 chip_id;
	u8 chip_board_rev;
	struct reg_data *regs;	/* static MC register table for this mode */
};

/*
 * Static DDR mode table, only populated when static D-unit configuration
 * is compiled in; matched at runtime by CPU freq / fabric freq / board rev.
 */
struct dram_modes ddr_modes[] = {
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
	/* Conf name, CPUFreq, Fab_freq, Chip ID, Chip/Board, MC regs*/
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
	{"a38x_customer_0_800", DDR_FREQ_800, 0, 0x0, A38X_CUSTOMER_BOARD_ID0,
	 ddr3_customer_800},
	{"a38x_customer_1_800", DDR_FREQ_800, 0, 0x0, A38X_CUSTOMER_BOARD_ID1,
	 ddr3_customer_800},
#else
	{"a38x_533", DDR_FREQ_533, 0, 0x0, MARVELL_BOARD, ddr3_a38x_533},
	{"a38x_667", DDR_FREQ_667, 0, 0x0, MARVELL_BOARD, ddr3_a38x_667},
	{"a38x_800", DDR_FREQ_800, 0, 0x0, MARVELL_BOARD, ddr3_a38x_800},
	{"a38x_933", DDR_FREQ_933, 0, 0x0, MARVELL_BOARD, ddr3_a38x_933},
#endif
#endif
};
#endif /* defined(CONFIG_ARMADA_38X) */
/* Translates topology map definitions to real memory size in bits */
/*
* Translates topology map definitions to real memory size in bits
* (per values in ddr3_training_ip_def.h)
*/
u32 mem_size[] = {
ADDR_SIZE_512MB, ADDR_SIZE_1GB, ADDR_SIZE_2GB, ADDR_SIZE_4GB,
ADDR_SIZE_512MB,
ADDR_SIZE_1GB,
ADDR_SIZE_2GB,
ADDR_SIZE_4GB,
ADDR_SIZE_8GB
};
static char *ddr_type = "DDR3";
/*
* Set 1 to use dynamic DUNIT configuration,
* set 0 (supported for A380 and AC3) to configure DUNIT in values set by
* ddr3_tip_init_specific_reg_config
* generic_init_controller controls D-unit configuration:
* '1' - dynamic D-unit configuration,
*/
u8 generic_init_controller = 1;
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
static u32 ddr3_get_static_ddr_mode(void);
#endif
static int ddr3_hws_tune_training_params(u8 dev_num);
/* device revision */
#define DEV_VERSION_ID_REG 0x1823c
#define REVISON_ID_OFFS 8
#define REVISON_ID_MASK 0xf00
/* A38x revisions */
#define MV_88F68XX_Z1_ID 0x0
#define MV_88F68XX_A0_ID 0x4
/* A39x revisions */
#define MV_88F69XX_Z1_ID 0x2
/*
* sys_env_device_rev_get - Get Marvell controller device revision number
*
* DESCRIPTION:
* This function returns 8bit describing the device revision as defined
* Revision ID Register.
*
* INPUT:
* None.
*
* OUTPUT:
* None.
*
* RETURN:
* 8bit desscribing Marvell controller revision number
*/
u8 sys_env_device_rev_get(void)
{
u32 value;
value = reg_read(DEV_VERSION_ID_REG);
return (value & (REVISON_ID_MASK)) >> REVISON_ID_OFFS;
}
/*
 * sys_env_dlb_config_ptr_get() - select the DLB configuration table
 *
 * On Armada 39x the A0 table is always used; on other SoCs the silicon
 * revision decides between the A0 table and the default one.
 *
 * Return: pointer to the first entry of the applicable DLB table.
 */
struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
#ifdef CONFIG_ARMADA_39X
	return ddr3_dlb_config_table_a0;
#else
	if (sys_env_device_rev_get() == MV_88F68XX_A0_ID)
		return ddr3_dlb_config_table_a0;

	return ddr3_dlb_config_table;
#endif
}
/*
 * sys_env_get_cs_ena_from_reg() - read the enabled chip-select mask
 *
 * Return: bit mask of enabled CS taken from the DDR3 rank control
 * register — 1 if only CS0 is enabled, 3 if both CS0 and CS1 are enabled.
 */
u32 sys_env_get_cs_ena_from_reg(void)
{
	u32 rank_ctrl = reg_read(REG_DDR3_RANK_CTRL_ADDR);

	return rank_ctrl & REG_DDR3_RANK_CTRL_CS_ENA_MASK;
}
/*
 * ddr3_restore_and_set_final_windows() - restore XBAR windows after training
 * @win: array of 16 window-control values saved by
 *       ddr3_save_and_set_training_windows()
 *
 * Writes back the saved XBAR window configuration, then switches DRAM
 * access to the fast-path window — either via the dynamic CS-size helper
 * or, when that is not compiled in, by opening a fixed 0.5G fast-path
 * window for the first enabled chip select.
 */
static void ddr3_restore_and_set_final_windows(u32 *win)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	u32 reg, cs;

	/* base control value; the first enabled CS index is encoded at bit 2 */
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_0_CTRL_ADDR, reg);
#endif
}
/*
 * ddr3_save_and_set_training_windows() - open XBAR windows for training
 * @win: output array, receives the 16 current window-control values so
 *       ddr3_restore_and_set_final_windows() can restore them later
 *
 * Optionally disables L2 filtering for the duration of training, closes
 * XBAR window 19, saves the current window 4-19 configuration into @win,
 * and then opens one XBAR window (control, base and — where applicable —
 * remap register) per enabled chip select.
 *
 * Return: MV_OK (no failure paths in this function).
 */
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;	/* register stride between windows */
	num_of_win_regs = 16;
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS; cs++) {
		if (cs_ena & (1 << cs)) {
			/* per-CS attribute value for the control register */
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);	/* window enable bit */
			reg |= (SDRAM_CS_SIZE & 0xffff0000);

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			/* base address: consecutive CS-sized regions */
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			/* only windows 4-7 have remap registers */
			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}
static int mv_ddr_training_params_set(u8 dev_num);
/*
* Name: ddr3_init - Main DDR3 Init function
@ -287,476 +37,182 @@ static int ddr3_save_and_set_training_windows(u32 *win)
*/
int ddr3_init(void)
{
u32 reg = 0;
u32 soc_num;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 octets_per_if_num;
int status;
u32 win[16];
int is_manual_cal_done;
/* SoC/Board special Initializtions */
/* Get version from internal library */
ddr3_print_version();
/* Print mv_ddr version */
mv_ddr_ver_print();
/*Add sub_version string */
DEBUG_INIT_C("", SUB_VERSION, 1);
mv_ddr_pre_training_fixup();
/* Switching CPU to MRVL ID */
soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
SAR1_CPU_CORE_OFFSET;
switch (soc_num) {
case 0x3:
case 0x1:
reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
case 0x0:
reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
default:
break;
/* SoC/Board special initializations */
mv_ddr_pre_training_soc_config(ddr_type);
/* Set log level for training library */
mv_ddr_user_log_level_set(DEBUG_BLOCK_ALL);
mv_ddr_early_init();
if (mv_ddr_topology_map_update() == NULL) {
printf("mv_ddr: failed to update topology\n");
return MV_FAIL;
}
/*
* Set DRAM Reset Mask in case detected GPIO indication of wakeup from
* suspend i.e the DRAM values will not be overwritten / reset when
* waking from suspend
*/
if (sys_env_suspend_wakeup_check() ==
SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
reg_bit_set(REG_SDRAM_INIT_CTRL_ADDR,
1 << REG_SDRAM_INIT_RESET_MASK_OFFS);
}
if (mv_ddr_early_init2() != MV_OK)
return MV_FAIL;
/*
* Stage 0 - Set board configuration
*/
/* Check if DRAM is already initialized */
if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
(1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
return MV_OK;
}
/*
* Stage 1 - Dunit Setup
*/
/* Fix read ready phases for all SOC in reg 0x15c8 */
reg = reg_read(REG_TRAINING_DEBUG_3_ADDR);
reg &= ~(REG_TRAINING_DEBUG_3_MASK);
reg |= 0x4; /* Phase 0 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << REG_TRAINING_DEBUG_3_OFFS);
reg |= (0x4 << (1 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 1 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (3 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (3 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 3 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg_write(REG_TRAINING_DEBUG_3_ADDR, reg);
/*
* Axi_bresp_mode[8] = Compliant,
* Axi_addr_decode_cntrl[11] = Internal,
* Axi_data_bus_width[0] = 128bit
* */
/* 0x14a8 - AXI Control Register */
reg_write(REG_DRAM_AXI_CTRL_ADDR, 0);
/*
* Stage 2 - Training Values Setup
*/
/* Set X-BAR windows for the training sequence */
ddr3_save_and_set_training_windows(win);
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
/*
* Load static controller configuration (in case dynamic/generic init
* is not enabled
*/
if (generic_init_controller == 0) {
ddr3_tip_init_specific_reg_config(0,
ddr_modes
[ddr3_get_static_ddr_mode
()].regs);
}
#endif
/* Tune training algo paramteres */
status = ddr3_hws_tune_training_params(0);
/* Set training algorithm's parameters */
status = mv_ddr_training_params_set(0);
if (MV_OK != status)
return status;
/* Set log level for training lib */
ddr3_hws_set_log_level(DEBUG_BLOCK_ALL, DEBUG_LEVEL_ERROR);
/* Start New Training IP */
status = ddr3_hws_hw_training();
mv_ddr_mc_config();
is_manual_cal_done = mv_ddr_manual_cal_do();
mv_ddr_mc_init();
if (!is_manual_cal_done) {
}
status = ddr3_silicon_post_init();
if (MV_OK != status) {
printf("DDR3 Post Init - FAILED 0x%x\n", status);
return status;
}
/* PHY initialization (Training) */
status = hws_ddr3_tip_run_alg(0, ALGO_TYPE_DYNAMIC);
if (MV_OK != status) {
printf("%s Training Sequence - FAILED\n", ddr_type);
return status;
}
/*
* Stage 3 - Finish
*/
/* Restore and set windows */
ddr3_restore_and_set_final_windows(win);
/* Update DRAM init indication in bootROM register */
reg = reg_read(REG_BOOTROM_ROUTINE_ADDR);
reg_write(REG_BOOTROM_ROUTINE_ADDR,
reg | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
/* DLB config */
ddr3_new_tip_dlb_config();
#if defined(ECC_SUPPORT)
if (ddr3_if_ecc_enabled())
ddr3_new_tip_ecc_scrub();
#if defined(CONFIG_PHY_STATIC_PRINT)
mv_ddr_phy_static_print();
#endif
printf("%s Training Sequence - Ended Successfully\n", ddr_type);
/* Post MC/PHY initializations */
mv_ddr_post_training_soc_config(ddr_type);
mv_ddr_post_training_fixup();
octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
if (ddr3_if_ecc_enabled()) {
if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ||
MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(tm->bus_act_mask, octets_per_if_num))
mv_ddr_mem_scrubbing();
else
ddr3_new_tip_ecc_scrub();
}
printf("mv_ddr: completed successfully\n");
return MV_OK;
}
/*
* Name: ddr3_get_cpu_freq
* Desc: read S@R and return CPU frequency
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_cpu_freq(void)
uint64_t mv_ddr_get_memory_size_per_cs_in_bits(void)
{
return ddr3_tip_get_init_freq();
}
uint64_t memory_size_per_cs;
/*
* Name: ddr3_get_fab_opt
* Desc: read S@R and return CPU frequency
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_fab_opt(void)
{
return 0; /* No fabric */
}
u32 bus_cnt, num_of_active_bus = 0;
u32 num_of_sub_phys_per_ddr_unit = 0;
/*
* Name: ddr3_get_static_m_cValue - Init Memory controller with
* static parameters
* Desc: Use this routine to init the controller without the HW training
* procedure.
* User must provide compatible header file with registers data.
* Args: None.
* Notes:
* Returns: None.
*/
u32 ddr3_get_static_mc_value(u32 reg_addr, u32 offset1, u32 mask1,
u32 offset2, u32 mask2)
{
u32 reg, temp;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
reg = reg_read(reg_addr);
u32 octets_per_if_num = ddr3_tip_dev_attr_get(DEV_NUM_0, MV_ATTR_OCTET_PER_INTERFACE);
temp = (reg >> offset1) & mask1;
if (mask2)
temp |= (reg >> offset2) & mask2;
return temp;
}
/*
* Name: ddr3_get_static_ddr_mode - Init Memory controller with
* static parameters
* Desc: Use this routine to init the controller without the HW training
* procedure.
* User must provide compatible header file with registers data.
* Args: None.
* Notes:
* Returns: None.
*/
/*
 * ddr3_get_static_ddr_mode() - look up the static DDR mode table index.
 *
 * Scans ddr_modes[] for an entry matching the current CPU frequency,
 * fabric option and chip/board revision and returns its index; prints an
 * error and returns 0 when nothing matches.
 *
 * NOTE(review): this span appears to interleave lines from a second
 * function (mv_ddr_get_memory_size_per_cs_in_bits) -- bus_cnt,
 * octets_per_if_num, tm, num_of_sub_phys_per_ddr_unit and
 * memory_size_per_cs are not declared in this scope, and everything after
 * the first "return 0;" is unreachable.  Confirm against the upstream
 * file before relying on this rendering.
 */
u32 ddr3_get_static_ddr_mode(void)
{
u32 chip_board_rev, i;
u32 size;
/* Valid only for A380 only, MSYS using dynamic controller config */
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
/*
* Customer boards select DDR mode according to
* board ID & Sample@Reset
*/
chip_board_rev = mv_board_id_get();
#else
/* Marvell boards select DDR mode according to Sample@Reset only */
chip_board_rev = MARVELL_BOARD;
#endif
size = ARRAY_SIZE(ddr_modes);
for (i = 0; i < size; i++) {
if ((ddr3_get_cpu_freq() == ddr_modes[i].cpu_freq) &&
(ddr3_get_fab_opt() == ddr_modes[i].fab_freq) &&
(chip_board_rev == ddr_modes[i].chip_board_rev))
return i;
/* count the number of active bus */
for (bus_cnt = 0; bus_cnt < octets_per_if_num - 1/* ignore ecc octet */; bus_cnt++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
num_of_active_bus++;
}
DEBUG_INIT_S("\n*** Error: ddr3_get_static_ddr_mode: No match for requested DDR mode. ***\n\n");
/* calculate number of sub-phys per ddr unit */
if (tm->interface_params[0].bus_width/* supports only single interface */ == MV_DDR_DEV_WIDTH_16BIT)
num_of_sub_phys_per_ddr_unit = TWO_SUB_PHYS;
if (tm->interface_params[0].bus_width/* supports only single interface */ == MV_DDR_DEV_WIDTH_8BIT)
num_of_sub_phys_per_ddr_unit = SINGLE_SUB_PHY;
return 0;
/* calculate dram size per cs */
memory_size_per_cs = (uint64_t)mem_size[tm->interface_params[0].memory_size] * (uint64_t)num_of_active_bus
/ (uint64_t)num_of_sub_phys_per_ddr_unit * (uint64_t)MV_DDR_NUM_BITS_IN_BYTE;
return memory_size_per_cs;
}
/******************************************************************************
* Name: ddr3_get_cs_num_from_reg
* Desc:
* Args:
* Notes:
* Returns:
*/
u32 ddr3_get_cs_num_from_reg(void)
uint64_t mv_ddr_get_total_memory_size_in_bits(void)
{
u32 cs_ena = sys_env_get_cs_ena_from_reg();
u32 cs_count = 0;
u32 cs;
uint64_t total_memory_size = 0;
uint64_t memory_size_per_cs = 0;
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs))
cs_count++;
}
/* get the number of cs */
u32 max_cs = ddr3_tip_max_cs_get(DEV_NUM_0);
return cs_count;
memory_size_per_cs = mv_ddr_get_memory_size_per_cs_in_bits();
total_memory_size = (uint64_t)max_cs * memory_size_per_cs;
return total_memory_size;
}
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps)
int ddr3_if_ecc_enabled(void)
{
u32 tmp, hclk = 200;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
switch (freq_mode) {
case 4:
tmp = 1; /* DDR_400; */
hclk = 200;
break;
case 0x8:
tmp = 1; /* DDR_666; */
hclk = 333;
break;
case 0xc:
tmp = 1; /* DDR_800; */
hclk = 400;
break;
default:
*ddr_freq = 0;
*hclk_ps = 0;
break;
}
*ddr_freq = tmp; /* DDR freq define */
*hclk_ps = 1000000 / hclk; /* values are 1/HCLK in ps */
return;
}
void ddr3_new_tip_dlb_config(void)
{
u32 reg, i = 0;
struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
/* Write the configuration */
while (config_table_ptr[i].reg_addr != 0) {
reg_write(config_table_ptr[i].reg_addr,
config_table_ptr[i].reg_data);
i++;
}
/* Enable DLB */
reg = reg_read(REG_STATIC_DRAM_DLB_CONTROL);
reg |= DLB_ENABLE | DLB_WRITE_COALESING | DLB_AXI_PREFETCH_EN |
DLB_MBUS_PREFETCH_EN | PREFETCH_N_LN_SZ_TR;
reg_write(REG_STATIC_DRAM_DLB_CONTROL, reg);
}
/*
 * ddr3_fast_path_dynamic_cs_size_config() - open fast-path windows for
 * each enabled chip select and program L2 address filtering.
 * @cs_ena: bitmask of enabled chip selects (bit n == CS n enabled)
 *
 * For every enabled CS the routine computes the CS memory size, writes
 * the fast-path window control and base-address registers, and
 * accumulates the total size which is finally written to the L2
 * address-filtering end register (capped when the total would exceed
 * the 32-bit representable range).
 *
 * Return: MV_OK on success, MV_FAIL if a CS size cannot be computed.
 */
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
u32 reg, cs;
u32 mem_total_size = 0;
u32 cs_mem_size = 0;
u32 mem_total_size_c, cs_mem_size_c;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
u32 physical_mem_size;
u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
struct hws_topology_map *tm = ddr3_get_topology_map();
#endif
/* Open fast path windows */
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs)) {
/* get CS size */
if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
return MV_FAIL;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
/*
* If the number of address pins doesn't allow use of the
* max mem size defined in the topology, the mem size is
* capped by DEVICE_MAX_DRAM_ADDRESS_SIZE.
*/
physical_mem_size = mem_size
[tm->interface_params[0].memory_size];
if (ddr3_get_device_width(cs) == 16) {
/*
* A 16-bit mem device can hold twice as much:
* the least significant address pin is not needed.
*/
max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
}
if (physical_mem_size > max_mem_size) {
cs_mem_size = max_mem_size *
(ddr3_get_bus_width() /
ddr3_get_device_width(cs));
printf("Updated Physical Mem size is from 0x%x to %x\n",
physical_mem_size,
DEVICE_MAX_DRAM_ADDRESS_SIZE);
}
#endif
/* set fast path window control for the cs */
reg = 0xffffe1;
reg |= (cs << 2);
reg |= (cs_mem_size - 1) & 0xffff0000;
/* Open fast path window */
reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);
/* Set fast path window base address for the cs */
reg = ((cs_mem_size) * cs) & 0xffff0000;
/* Set base address */
reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);
/*
* Since memory size may be bigger than 4G the sum may
* be more than a 32 bit word,
* so to estimate the result divide mem_total_size and
* cs_mem_size by 0x10000 (it is equal to >> 16)
*/
mem_total_size_c = mem_total_size >> 16;
cs_mem_size_c = cs_mem_size >> 16;
/* if the sum fits below 4 GiB (0x10000 << 16) - accumulate it */
if (mem_total_size_c + cs_mem_size_c < 0x10000)
mem_total_size += cs_mem_size;
else /* put max possible size */
mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
}
}
/* Set L2 filtering to Max Memory size */
reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);
return MV_OK;
}
/*
 * ddr3_get_bus_width() - report the DRAM controller bus width.
 *
 * Bit 15 of the SDRAM configuration register selects between half- and
 * full-width operation.
 *
 * Return: 16 when the width bit is clear, 32 when it is set.
 */
u32 ddr3_get_bus_width(void)
{
	u32 width_bit;

	width_bit = (reg_read(REG_SDRAM_CONFIG_ADDR) & 0x8000) >>
		    REG_SDRAM_CONFIG_WIDTH_OFFS;

	return width_bit ? 32 : 16;
}
/*
 * ddr3_get_device_width() - report the DRAM device width for a chip select.
 * @cs: chip-select index
 *
 * Reads the 2-bit device-structure field of the SDRAM address control
 * register that corresponds to @cs.
 *
 * Return: 8 when the field is zero, 16 otherwise.
 */
u32 ddr3_get_device_width(u32 cs)
{
	u32 shift = REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs;
	u32 field;

	/* Extract this CS's 2-bit structure field */
	field = (reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR) >> shift) & 0x3;

	return field ? 16 : 8;
}
/*
 * ddr3_get_device_size() - decode the DRAM device size for a chip select.
 * @cs: chip-select index
 *
 * Combines the 2-bit low field and 1-bit high field of the SDRAM address
 * control register into a 3-bit size code and maps it to a size value
 * (presumably Mbit per device -- TODO confirm units against the register
 * spec).
 *
 * NOTE(review): in the error path 'tm' is not declared in this function's
 * scope (likely a diff-rendering artifact); confirm against the upstream
 * file.  The small 1/0 fallback returns deliberately propagate an error
 * into ddr3_calc_mem_cs_size.
 */
static int ddr3_get_device_size(u32 cs)
{
u32 device_size_low, device_size_high, device_size;
u32 data, cs_low_offset, cs_high_offset;
/* per-CS bit offsets of the low (2-bit) and high (1-bit) size fields */
cs_low_offset = REG_SDRAM_ADDRESS_SIZE_OFFS + cs * 4;
cs_high_offset = REG_SDRAM_ADDRESS_SIZE_OFFS +
REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS + cs;
data = reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR);
device_size_low = (data >> cs_low_offset) & 0x3;
device_size_high = (data >> cs_high_offset) & 0x1;
/* 3-bit size code: high bit in position 2, low field in bits 1:0 */
device_size = device_size_low | (device_size_high << 2);
switch (device_size) {
case 0:
return 2048;
case 2:
return 512;
case 3:
return 1024;
case 4:
return 4096;
case 5:
return 8192;
case 1:
default:
DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
/*
* Small value will give wrong emem size in
* ddr3_calc_mem_cs_size
*/
if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
return 1;
else
return 0;
}
}
/*
 * ddr3_calc_mem_cs_size() - compute the memory size behind one chip select.
 * @cs:      chip-select index to query
 * @cs_size: out parameter receiving the size in bytes
 *
 * Derives the CS size in MiB from the bus width, device width and device
 * size, scales it by the controller bus-width multiplier, and rejects
 * implausible results (0, 64 or 4096 MiB) that indicate a bad register
 * decode.
 *
 * Return: MV_OK on success, MV_BAD_VALUE on an implausible size.
 */
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size)
{
	int size_mib;

	/* Size in MiB: (bus width / device width) devices * device size / 8 */
	size_mib = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		    ddr3_get_device_size(cs)) / 8;

	/*
	 * Multiply by the controller bus width factor (2x for 64 bit).
	 * The SoC controller may be 32 or 64 bit, so bit 15 in 0x1400
	 * (whole bus vs. half bus) has a different meaning.
	 */
	size_mib *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if (size_mib == 0 || size_mib == 64 || size_mib == 4096) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = size_mib << 20;	/* MiB -> bytes */
	return MV_OK;
}
/*
* Name: ddr3_hws_tune_training_params
* Name: mv_ddr_training_params_set
* Desc:
* Args:
* Notes: Tune internal training params
* Notes: sets internal training params
* Returns:
*/
static int ddr3_hws_tune_training_params(u8 dev_num)
static int mv_ddr_training_params_set(u8 dev_num)
{
struct tune_train_params params;
int status;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 if_id;
u32 cs_num;
CHECK_STATUS(ddr3_tip_get_first_active_if
(dev_num, tm->if_act_mask,
&if_id));
CHECK_STATUS(calc_cs_num(dev_num, if_id, &cs_num));
/* NOTE: do not remove any field initilization */
params.ck_delay = TUNE_TRAINING_PARAMS_CK_DELAY;
params.ck_delay_16 = TUNE_TRAINING_PARAMS_CK_DELAY_16;
params.p_finger = TUNE_TRAINING_PARAMS_PFINGER;
params.n_finger = TUNE_TRAINING_PARAMS_NFINGER;
params.phy_reg3_val = TUNE_TRAINING_PARAMS_PHYREG3VAL;
params.g_zpri_data = TUNE_TRAINING_PARAMS_PRI_DATA;
params.g_znri_data = TUNE_TRAINING_PARAMS_NRI_DATA;
params.g_zpri_ctrl = TUNE_TRAINING_PARAMS_PRI_CTRL;
params.g_znri_ctrl = TUNE_TRAINING_PARAMS_NRI_CTRL;
params.g_znodt_data = TUNE_TRAINING_PARAMS_N_ODT_DATA;
params.g_zpodt_ctrl = TUNE_TRAINING_PARAMS_P_ODT_CTRL;
params.g_znodt_ctrl = TUNE_TRAINING_PARAMS_N_ODT_CTRL;
params.g_zpodt_data = TUNE_TRAINING_PARAMS_P_ODT_DATA;
params.g_dic = TUNE_TRAINING_PARAMS_DIC;
params.g_rtt_nom = TUNE_TRAINING_PARAMS_RTT_NOM;
if (cs_num == 1) {
params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_1CS;
params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS;
} else {
params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_2CS;
params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS;
}
status = ddr3_tip_tune_training_params(dev_num, &params);
if (MV_OK != status) {

View file

@ -6,13 +6,12 @@
#ifndef _DDR3_INIT_H
#define _DDR3_INIT_H
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_mc_static.h"
#include "ddr3_a38x_topology.h"
#include "ddr_ml_wrapper.h"
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#include "mv_ddr_plat.h"
#endif
#include "ddr3_hws_hw_training.h"
#include "ddr3_hws_sil_training.h"
#include "seq_exec.h"
#include "ddr3_logging_def.h"
#include "ddr3_training_hw_algo.h"
#include "ddr3_training_ip.h"
@ -21,119 +20,9 @@
#include "ddr3_training_ip_flow.h"
#include "ddr3_training_ip_pbs.h"
#include "ddr3_training_ip_prv_if.h"
#include "ddr3_training_ip_static.h"
#include "ddr3_training_leveling.h"
#include "xor.h"
/*
 * MV_DEBUG_INIT needs to be defined, otherwise the output of the
 * DDR2 training code is incomplete and misleading
 */
#define MV_DEBUG_INIT
#ifdef MV_DEBUG_INIT
#define DEBUG_INIT_S(s) puts(s)
#define DEBUG_INIT_D(d, l) printf("%x", d)
#define DEBUG_INIT_D_10(d, l) printf("%d", d)
#else
#define DEBUG_INIT_S(s)
#define DEBUG_INIT_D(d, l)
#define DEBUG_INIT_D_10(d, l)
#endif
#ifdef MV_DEBUG_INIT_FULL
#define DEBUG_INIT_FULL_S(s) puts(s)
#define DEBUG_INIT_FULL_D(d, l) printf("%x", d)
#define DEBUG_INIT_FULL_D_10(d, l) printf("%d", d)
#define DEBUG_WR_REG(reg, val) \
{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#define DEBUG_RD_REG(reg, val) \
{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#else
#define DEBUG_INIT_FULL_S(s)
#define DEBUG_INIT_FULL_D(d, l)
#define DEBUG_INIT_FULL_D_10(d, l)
#define DEBUG_WR_REG(reg, val)
#define DEBUG_RD_REG(reg, val)
#endif
#define DEBUG_INIT_FULL_C(s, d, l) \
{ DEBUG_INIT_FULL_S(s); \
DEBUG_INIT_FULL_D(d, l); \
DEBUG_INIT_FULL_S("\n"); }
#define DEBUG_INIT_C(s, d, l) \
{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
/*
* Debug (Enable/Disable modules) and Error report
*/
#ifdef BASIC_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS_RESULTS
#endif
#ifdef FULL_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS
#define MV_DEBUG_PBS
#define MV_DEBUG_DFS
#define MV_DEBUG_MAIN_FULL
#define MV_DEBUG_DFS_FULL
#define MV_DEBUG_DQS_FULL
#define MV_DEBUG_RL_FULL
#define MV_DEBUG_WL_FULL
#endif
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_topology.h"
#endif
/* The following is a list of Marvell status */
#define MV_ERROR (-1)
#define MV_OK (0x00) /* Operation succeeded */
#define MV_FAIL (0x01) /* Operation failed */
#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
#define MV_BAD_PTR (0x05) /* Illegal pointer value */
#define MV_BAD_SIZE (0x06) /* Illegal size */
#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
#define MV_SET_ERROR (0x08) /* Set operation failed */
#define MV_GET_ERROR (0x09) /* Get operation failed */
#define MV_CREATE_ERROR (0x0a) /* Fail while creating an item */
#define MV_NOT_FOUND (0x0b) /* Item not found */
#define MV_NO_MORE (0x0c) /* No more items found */
#define MV_NO_SUCH (0x0d) /* No such item */
#define MV_TIMEOUT (0x0e) /* Time Out */
#define MV_NO_CHANGE (0x0f) /* Parameter(s) is already in this value */
#define MV_NOT_SUPPORTED (0x10) /* This request is not support */
#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/
#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
#define MV_INIT_ERROR (0x16) /* Error occurred while INIT process */
#define MV_HW_ERROR (0x17) /* Hardware error */
#define MV_TX_ERROR (0x18) /* Transmit operation not succeeded */
#define MV_RX_ERROR (0x19) /* Recieve operation not succeeded */
#define MV_NOT_READY (0x1a) /* The other side is not ready yet */
#define MV_ALREADY_EXIST (0x1b) /* Tried to create existing item */
#define MV_OUT_OF_CPU_MEM (0x1c) /* Cpu memory allocation failed. */
#define MV_NOT_STARTED (0x1d) /* Not started yet */
#define MV_BUSY (0x1e) /* Item is busy. */
#define MV_TERMINATE (0x1f) /* Item terminates it's work. */
#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
#define MV_WRITE_PROTECT (0x22) /* Write protected */
#define MV_INVALID (int)(-1)
/* For checking function return values */
#define CHECK_STATUS(orig_func) \
{ \
@ -143,6 +32,14 @@
return status; \
}
#define GET_MAX_VALUE(x, y) \
((x) > (y)) ? (x) : (y)
#define SUB_VERSION 0
/* max number of devices supported by driver */
#define MAX_DEVICE_NUM 1
enum log_level {
MV_LOG_LEVEL_0,
MV_LOG_LEVEL_1,
@ -151,28 +48,27 @@ enum log_level {
};
/* Globals */
extern u8 debug_training;
extern u8 debug_training, debug_calibration, debug_ddr4_centralization,
debug_tap_tuning, debug_dm_tuning;
extern u8 is_reg_dump;
extern u8 generic_init_controller;
extern u32 freq_val[];
/* list of allowed frequency listed in order of enum hws_ddr_freq */
extern u32 freq_val[DDR_FREQ_LAST];
extern u32 is_pll_old;
extern struct cl_val_per_freq cas_latency_table[];
extern struct pattern_info pattern_table[];
extern struct cl_val_per_freq cas_write_latency_table[];
extern u8 debug_training;
extern u8 debug_centralization, debug_training_ip, debug_training_bist,
debug_pbs, debug_training_static, debug_leveling;
extern u32 pipe_multicast_mask;
extern struct hws_tip_config_func_db config_func_info[];
extern u8 cs_mask_reg[];
extern u8 twr_mask_table[];
extern u8 cl_mask_table[];
extern u8 cwl_mask_table[];
extern u16 rfc_table[];
extern u32 speed_bin_table_t_rc[];
extern u32 speed_bin_table_t_rcd_t_rp[];
extern u32 ck_delay, ck_delay_16;
extern u32 vref_init_val;
extern u32 g_zpri_data;
extern u32 g_znri_data;
extern u32 g_zpri_ctrl;
@ -182,39 +78,28 @@ extern u32 g_znodt_data;
extern u32 g_zpodt_ctrl;
extern u32 g_znodt_ctrl;
extern u32 g_dic;
extern u32 g_odt_config_2cs;
extern u32 g_odt_config_1cs;
extern u32 g_odt_config;
extern u32 g_rtt_nom;
extern u32 g_rtt_wr;
extern u32 g_rtt_park;
extern u8 debug_training_access;
extern u8 debug_training_a38x;
extern u32 first_active_if;
extern enum hws_ddr_freq init_freq;
extern u32 delay_enable, ck_delay, ck_delay_16, ca_delay;
extern u32 delay_enable, ck_delay, ca_delay;
extern u32 mask_tune_func;
extern u32 rl_version;
extern int rl_mid_freq_wa;
extern u8 calibration_update_control; /* 2 external only, 1 is internal only */
extern enum hws_ddr_freq medium_freq;
extern u32 ck_delay, ck_delay_16;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern u32 first_active_if;
extern u32 mask_tune_func;
extern u32 freq_val[];
extern enum hws_ddr_freq init_freq;
extern enum hws_ddr_freq low_freq;
extern enum hws_ddr_freq medium_freq;
extern u8 generic_init_controller;
extern enum auto_tune_stage training_stage;
extern u32 is_pll_before_init;
extern u32 is_adll_calib_before_init;
extern u32 is_dfs_in_init;
extern int wl_debug_delay;
extern u32 silicon_delay[HWS_MAX_DEVICE_NUM];
extern u32 p_finger;
extern u32 n_finger;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u32 silicon_delay[MAX_DEVICE_NUM];
extern u32 start_pattern, end_pattern;
extern u32 phy_reg0_val;
extern u32 phy_reg1_val;
@ -222,172 +107,93 @@ extern u32 phy_reg2_val;
extern u32 phy_reg3_val;
extern enum hws_pattern sweep_pattern;
extern enum hws_pattern pbs_pattern;
extern u8 is_rzq6;
extern u32 znri_data_phy_val;
extern u32 zpri_data_phy_val;
extern u32 znri_ctrl_phy_val;
extern u32 zpri_ctrl_phy_val;
extern u8 debug_training_access;
extern u32 g_znri_data;
extern u32 g_zpri_data;
extern u32 g_znri_ctrl;
extern u32 g_zpri_ctrl;
extern u32 finger_test, p_finger_start, p_finger_end, n_finger_start,
n_finger_end, p_finger_step, n_finger_step;
extern u32 mode2_t;
extern u32 mode_2t;
extern u32 xsb_validate_type;
extern u32 xsb_validation_base_address;
extern u32 odt_additional;
extern u32 debug_mode;
extern u32 delay_enable;
extern u32 ca_delay;
extern u32 debug_dunit;
extern u32 clamp_tbl[];
extern u32 freq_mask[HWS_MAX_DEVICE_NUM][DDR_FREQ_LIMIT];
extern u32 start_pattern, end_pattern;
extern u32 freq_mask[MAX_DEVICE_NUM][DDR_FREQ_LAST];
extern u32 maxt_poll_tries;
extern u32 is_bist_reset_bit;
extern u8 debug_training_bist;
extern u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
extern u32 debug_mode;
extern u32 effective_cs;
extern int ddr3_tip_centr_skip_min_win_check;
extern u32 *dq_map_table;
extern enum auto_tune_stage training_stage;
extern u8 debug_centralization;
extern u32 delay_enable;
extern u32 start_pattern, end_pattern;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u8 debug_training_hw_alg;
extern enum auto_tune_stage training_stage;
extern u8 debug_training_ip;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 effective_cs;
extern u8 debug_leveling;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 rl_version;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 start_xsb_offset;
extern u32 debug_mode;
extern u32 odt_config;
extern u32 effective_cs;
extern u32 phy_reg1_val;
extern u8 debug_pbs;
extern u32 effective_cs;
extern u16 mask_results_dq_reg_map[];
extern enum hws_ddr_freq medium_freq;
extern u32 freq_val[];
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 debug_mode;
extern u32 *dq_map_table;
extern u32 vref;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 target_freq;
extern struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
extern u32 clamp_tbl[];
extern u32 init_freq;
/* list of allowed frequency listed in order of enum hws_ddr_freq */
extern u32 freq_val[];
extern u8 debug_training_static;
extern u32 first_active_if;
extern u32 dfs_low_freq;
extern u32 mem_size[];
extern u32 nominal_avs;
extern u32 extension_avs;
/* Prototypes */
int ddr3_init(void);
int ddr3_tip_enable_init_sequence(u32 dev_num);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int ddr3_hws_hw_training(void);
int ddr3_silicon_pre_init(void);
int ddr3_hws_hw_training(enum hws_algo_type algo_mode);
int mv_ddr_early_init(void);
int mv_ddr_early_init2(void);
int ddr3_silicon_post_init(void);
int ddr3_post_run_alg(void);
int ddr3_if_ecc_enabled(void);
void ddr3_new_tip_ecc_scrub(void);
void ddr3_print_version(void);
void ddr3_new_tip_dlb_config(void);
struct hws_topology_map *ddr3_get_topology_map(void);
void mv_ddr_ver_print(void);
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void);
int ddr3_if_ecc_enabled(void);
int ddr3_tip_reg_write(u32 dev_num, u32 reg_addr, u32 data);
int ddr3_tip_reg_read(u32 dev_num, u32 reg_addr, u32 *data, u32 reg_mask);
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info
*freq_config_info);
int ddr3_a38x_update_topology_map(u32 dev_num,
struct hws_topology_map *topology_map);
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask);
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data, u32 mask);
int ddr3_tip_a38x_get_device_info(u8 dev_num,
struct ddr3_device_info *info_ptr);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]);
int print_ph(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]);
int read_phase_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int write_leveling_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 pup_ph_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], int reg_addr);
int ddr3_tip_restore_dunit_regs(u32 dev_num);
void print_topology(struct hws_topology_map *topology_db);
void print_topology(struct mv_ddr_topology_map *tm);
u32 mv_board_id_get(void);
int ddr3_load_topology_map(void);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
u32 ddr3_tip_get_init_freq(void);
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level);
void mv_ddr_user_log_level_set(enum ddr_lib_debug_block block);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps);
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena);
void ddr3_fast_path_static_cs_size_config(u32 cs_ena);
u32 ddr3_get_device_width(u32 cs);
u32 mv_board_id_index_get(u32 board_id);
u32 mv_board_id_get(void);
u32 ddr3_get_bus_width(void);
void ddr3_set_log_level(u32 n_log_level);
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size);
int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num);
int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr);
int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode);
int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode);
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
struct trip_delay_element *table_ptr,
int is_wl, u32 *round_trip_delay_arr);
u32 hws_ddr3_tip_max_cs_get(void);
/*
* Accessor functions for the registers
*/
/* Write a 32-bit value to an internal register at offset @addr. */
static inline void reg_write(u32 addr, u32 val)
{
writel(val, INTER_REGS_BASE + addr);
}
/* Read a 32-bit value from an internal register at offset @addr. */
static inline u32 reg_read(u32 addr)
{
return readl(INTER_REGS_BASE + addr);
}
/* Set the bits in @mask in the internal register at offset @addr. */
static inline void reg_bit_set(u32 addr, u32 mask)
{
setbits_le32(INTER_REGS_BASE + addr, mask);
}
/* Clear the bits in @mask in the internal register at offset @addr. */
static inline void reg_bit_clr(u32 addr, u32 mask)
{
clrbits_le32(INTER_REGS_BASE + addr, mask);
}
u32 mv_ddr_init_freq_get(void);
void mv_ddr_mc_config(void);
int mv_ddr_mc_init(void);
void mv_ddr_set_calib_controller(void);
#endif /* _DDR3_INIT_H */

View file

@ -73,10 +73,14 @@
#endif
#endif
/* Logging defines */
#define DEBUG_LEVEL_TRACE 1
#define DEBUG_LEVEL_INFO 2
#define DEBUG_LEVEL_ERROR 3
enum mv_ddr_debug_level {
DEBUG_LEVEL_TRACE = 1,
DEBUG_LEVEL_INFO = 2,
DEBUG_LEVEL_ERROR = 3,
DEBUG_LEVEL_LAST
};
enum ddr_lib_debug_block {
DEBUG_BLOCK_STATIC,

View file

@ -6,6 +6,7 @@
#ifndef __DDR3_PATTERNS_64_H
#define __DDR3_PATTERNS_64_H
#define FAB_OPT 21
/*
* Patterns Declerations
*/

View file

@ -6,8 +6,9 @@
#ifndef _DDR3_TOPOLOGY_DEF_H
#define _DDR3_TOPOLOGY_DEF_H
/* TOPOLOGY */
#define DEV_NUM_0 0
/* TOPOLOGY */
enum hws_speed_bin {
SPEED_BIN_DDR_800D,
SPEED_BIN_DDR_800E,
@ -53,7 +54,8 @@ enum hws_ddr_freq {
DDR_FREQ_900,
DDR_FREQ_360,
DDR_FREQ_1000,
DDR_FREQ_LIMIT
DDR_FREQ_LAST,
DDR_FREQ_SAR
};
enum speed_bin_table_elements {

File diff suppressed because it is too large Load diff

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
static u32 bist_offset = 32;
@ -24,7 +18,7 @@ static int ddr3_tip_bist_operation(u32 dev_num,
*/
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
enum hws_access_type access_type, u32 if_num,
enum hws_dir direction,
enum hws_dir dir,
enum hws_stress_jump addr_stress_jump,
enum hws_pattern_duration duration,
enum hws_bist_operation oper_type,
@ -32,103 +26,43 @@ int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
{
u32 tx_burst_size;
u32 delay_between_burst;
u32 rd_mode, val;
u32 poll_cnt = 0, max_poll = 1000, i, start_if, end_if;
u32 rd_mode;
struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
/* ODPG Write enable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0x1, 0x1));
/* ODPG Read enable/disable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG,
(direction == OPER_READ) ?
0x2 : 0, 0x2));
CHECK_STATUS(ddr3_tip_load_pattern_to_odpg(dev_num, access_type, if_num,
pattern, offset));
/* odpg bist write enable */
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
(ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
(ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_BUF_SIZE_REG,
pattern_addr_length, MASK_ALL_BITS));
tx_burst_size = (direction == OPER_WRITE) ?
/* odpg bist read enable/disable */
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
(dir == OPER_READ) ? (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
(ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
(ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));
ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_length, MASK_ALL_BITS);
tx_burst_size = (dir == OPER_WRITE) ?
pattern_table[pattern].tx_burst_size : 0;
delay_between_burst = (direction == OPER_WRITE) ? 2 : 0;
rd_mode = (direction == OPER_WRITE) ? 1 : 0;
CHECK_STATUS(ddr3_tip_configure_odpg
(dev_num, access_type, if_num, direction,
delay_between_burst = (dir == OPER_WRITE) ? 2 : 0;
rd_mode = (dir == OPER_WRITE) ? 1 : 0;
ddr3_tip_configure_odpg(0, access_type, 0, dir,
pattern_table[pattern].num_of_phases_tx, tx_burst_size,
pattern_table[pattern].num_of_phases_rx,
delay_between_burst,
rd_mode, cs_num, addr_stress_jump, duration));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_PATTERN_ADDR_OFFSET_REG,
offset, MASK_ALL_BITS));
rd_mode, cs_num, addr_stress_jump, duration);
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG, offset, MASK_ALL_BITS);
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_STOP));
ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
} else {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_START));
if (duration != DURATION_CONT) {
/*
 * This delay is a workaround, because polling gives "done"
 * while the ODPG did not yet finish its task
 */
if (access_type == ACCESS_TYPE_MULTICAST) {
start_if = 0;
end_if = MAX_INTERFACE_NUM - 1;
} else {
start_if = if_num;
end_if = if_num;
}
for (i = start_if; i <= end_if; i++) {
VALIDATE_ACTIVE(tm->
if_act_mask, i);
for (poll_cnt = 0; poll_cnt < max_poll;
poll_cnt++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
ACCESS_TYPE_UNICAST,
if_num, ODPG_BIST_DONE,
read_data,
MASK_ALL_BITS));
val = read_data[i];
if ((val & 0x1) == 0x0) {
/*
* In SOC type devices this bit
* is self clear so, if it was
* cleared all good
*/
break;
}
}
if (poll_cnt >= max_poll) {
DEBUG_TRAINING_BIST_ENGINE
(DEBUG_LEVEL_ERROR,
("Bist poll failure 2\n"));
CHECK_STATUS(ddr3_tip_if_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
return MV_FAIL;
}
}
CHECK_STATUS(ddr3_tip_bist_operation
(dev_num, access_type, if_num, BIST_STOP));
}
ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
return MV_FAIL;
ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
}
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);
return MV_OK;
}
@ -141,34 +75,34 @@ int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
{
int ret;
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0)
return MV_NOT_SUPPORTED;
DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
("ddr3_tip_bist_read_result if_id %d\n",
if_id));
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_HI_REG, read_data,
ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_high = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_LOW_REG, read_data,
ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_low = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_LAST_FAIL_ADDR_REG, read_data,
ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_last_fail_addr = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_DATA_ERROR_COUNTER_REG, read_data,
ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
@ -187,10 +121,10 @@ int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 i = 0;
u32 win_base;
struct bist_result st_bist_result;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
VALIDATE_ACTIVE(tm->if_act_mask, i);
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
ret = ddr3_tip_bist_activate(dev_num, pattern,
ACCESS_TYPE_UNICAST,
@ -233,13 +167,10 @@ static int ddr3_tip_bist_operation(u32 dev_num,
enum hws_access_type access_type,
u32 if_id, enum hws_bist_operation oper_type)
{
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1 << 8, 1 << 8));
} else {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1, 1));
}
if (oper_type == BIST_STOP)
mv_ddr_odpg_disable();
else
mv_ddr_odpg_enable();
return MV_OK;
}
@ -253,11 +184,10 @@ void ddr3_tip_print_bist_res(void)
u32 i;
struct bist_result st_bist_result[MAX_INTERFACE_NUM];
int res;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) == 0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
if (res != MV_OK) {
@ -273,9 +203,7 @@ void ddr3_tip_print_bist_res(void)
("interface | error_cnt | fail_low | fail_high | fail_addr\n"));
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) ==
0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
DEBUG_TRAINING_BIST_ENGINE(
DEBUG_LEVEL_INFO,
@ -286,3 +214,389 @@ void ddr3_tip_print_bist_res(void)
st_bist_result[i].bist_last_fail_addr));
}
}
/* Per-subphy verdict: bit n of the result mask set means subphy n failed. */
enum {
PASS,
FAIL
};
/* Number of iterations requested from the training-IP engine. */
#define TIP_ITERATION_NUM 31
/*
 * mv_ddr_tip_bist() - run a training-IP BIST pass and collect a
 * per-subphy pass/fail bitmask.
 * @dir:     read or write direction for the training pass
 * @val:     value forwarded to ddr3_tip_ip_training (init value)
 * @pattern: data pattern to exercise
 * @cs:      chip select under test
 * @result:  out: FAIL bit OR-ed in at position n for each subphy n whose
 *           block-status field reads back as not locked; bits for passing
 *           subphys are left untouched (caller is expected to pre-clear)
 *
 * Return: always MV_OK (per-subphy failures are reported via @result).
 */
static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern, u32 cs, u32 *result)
{
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
enum hws_training_ip_stat training_result;
u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map();
u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
u32 subphy, read_data;
ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
RESULT_PER_BYTE, HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir, tm->if_act_mask, val,
TIP_ITERATION_NUM, pattern, EDGE_FP, CS_SINGLE, cs, &training_result);
/* read back each subphy's result register and record failures */
for (subphy = 0; subphy < max_subphy; subphy++) {
ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[subphy], &read_data, MASK_ALL_BITS);
if (((read_data >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) == BLOCK_STATUS_NOT_LOCKED)
*result |= (FAIL << subphy);
}
return MV_OK;
}
/*
 * Descriptor for a (possibly wrap-around) search over a PASS/FAIL vector,
 * used to locate the pass sub-interval inside one adll-tap period.
 */
struct interval {
	u8 *vector;	/* per-position PASS/FAIL verdicts */
	u8 lendpnt; /* interval's left endpoint */
	u8 rendpnt; /* interval's right endpoint */
	u8 size; /* interval's size */
	u8 lmarker; /* left marker */
	u8 rmarker; /* right marker; may exceed rendpnt by up to 'size' to express wrap-around */
	u8 pass_lendpnt; /* left endpoint of internal pass interval */
	u8 pass_rendpnt; /* right endpoint of internal pass interval */
};
/*
 * Bind a PASS/FAIL vector to an interval descriptor and validate its
 * parameters.
 *
 * lendpnt/rendpnt delimit the vector slice (lendpnt < rendpnt required);
 * lmarker must lie inside [lendpnt, rendpnt], while rmarker may exceed
 * rendpnt by up to one interval size to express a circular (wrap-around)
 * position.
 *
 * Returns MV_OK on success, MV_FAIL (with a diagnostic printed) on any
 * NULL pointer or inconsistent parameter.
 */
static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt,
			 u8 lmarker, u8 rmarker, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	if (vector == NULL) {
		printf("%s: NULL vector pointer found\n", __func__);
		return MV_FAIL;
	}
	intrvl->vector = vector;

	if (lendpnt >= rendpnt) {
		printf("%s: incorrect lendpnt and/or rendpnt parameters found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lendpnt = lendpnt;
	intrvl->rendpnt = rendpnt;
	/* inclusive endpoints: size covers both */
	intrvl->size = rendpnt - lendpnt + 1;

	if ((lmarker < lendpnt) || (lmarker > rendpnt)) {
		printf("%s: incorrect lmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lmarker = lmarker;

	/* rmarker may wrap past rendpnt, but by no more than one full size */
	if ((rmarker < lmarker) || (rmarker > (intrvl->rendpnt + intrvl->size))) {
		printf("%s: incorrect rmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->rmarker = rmarker;

	return MV_OK;
}
/*
 * Record the detected pass sub-interval endpoints in the descriptor.
 * Returns MV_FAIL only when the descriptor pointer is NULL.
 */
static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl)
{
	if (!intrvl) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	intrvl->pass_lendpnt = pass_lendpnt;
	intrvl->pass_rendpnt = pass_rendpnt;

	return MV_OK;
}
/*
 * Locate the contiguous pass sub-interval inside the (circular) interval
 * and store its endpoints via interval_set().
 *
 * The vector is first scanned linearly to reject the all-fail case and to
 * shortcut the all-pass case. Otherwise the search starts at rmarker and
 * walks the vector with modulo-size indexing so positions past rendpnt
 * wrap back to the start: first the right endpoint of the pass run is
 * found (scanning right from a pass, or left from a fail), then the left
 * endpoint is found by scanning left until the preceding fail.
 *
 * NOTE(review): the stored pass endpoints are un-wrapped indices and may
 * exceed rendpnt — callers appear to consume them as offsets relative to
 * the markers (see mv_ddr_dm_to_dq_diff_get).
 *
 * Returns MV_OK on success, MV_FAIL (with a diagnostic) when the expected
 * pass/fail transition cannot be found.
 */
static int interval_proc(struct interval *intrvl)
{
	int curr;
	int pass_lendpnt, pass_rendpnt;
	int lmt;
	int fcnt = 0, pcnt = 0;

	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	/* count fails and passes */
	curr = intrvl->lendpnt;
	while (curr <= intrvl->rendpnt) {
		if (intrvl->vector[curr] == PASS)
			pcnt++;
		else
			fcnt++;
		curr++;
	}

	/* check for all fail */
	if (fcnt == intrvl->size) {
		printf("%s: no pass found\n", __func__);
		return MV_FAIL;
	}

	/* check for all pass */
	if (pcnt == intrvl->size) {
		if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK)
			return MV_FAIL;
		return MV_OK;
	}

	/* proceed with rmarker */
	curr = intrvl->rmarker;
	if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */
		/* search for fail on right */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rendpnt + intrvl->size;
		else
			lmt = intrvl->rmarker + intrvl->size - 1;
		while ((curr <= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == PASS))
			curr++;
		if (curr > lmt) { /* fail not found */
			printf("%s: rmarker: fail following pass not found\n", __func__);
			return MV_FAIL;
		}
		/* fail found */
		pass_rendpnt = curr - 1;
	} else { /* fail at rmarker */
		/* search for pass on left */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rmarker - intrvl->size + 1;
		else
			lmt = intrvl->lendpnt;
		while ((curr >= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == FAIL))
			curr--;
		if (curr < lmt) { /* pass not found */
			printf("%s: rmarker: pass preceding fail not found\n", __func__);
			return MV_FAIL;
		}
		/* pass found */
		pass_rendpnt = curr;
	}

	/* search for fail on left */
	curr = pass_rendpnt;
	if (pass_rendpnt > intrvl->rendpnt)
		lmt = pass_rendpnt - intrvl->size + 1;
	else
		lmt = intrvl->lendpnt;
	while ((curr >= lmt) &&
	       (intrvl->vector[curr % intrvl->size] == PASS))
		curr--;
	if (curr < lmt) { /* fail not found */
		printf("%s: rmarker: fail preceding pass not found\n", __func__);
		return MV_FAIL;
	}
	/* fail found */
	pass_lendpnt = curr + 1;

	if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
#define ADLL_TAPS_PER_PERIOD 64
/*
 * Compute how far the measured pass window of one adll period extends
 * beyond the supplied valid-window limits.
 *
 * vw_vector holds one PASS/FAIL entry per tap of a 64-tap adll period;
 * vw_sphy_lo_lmt / vw_sphy_hi_lmt seed the circular search as its
 * left/right markers. On success, *vw_sphy_hi_diff receives the distance
 * from the high limit to the pass window's right endpoint and
 * *vw_sphy_lo_diff the distance from the pass window's left endpoint to
 * the low limit (either may be negative).
 *
 * Returns MV_FAIL when the interval setup/search fails or when every tap
 * passes (no fail found, so no window edge can be derived).
 */
int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt, u8 *vw_vector,
			     int *vw_sphy_hi_diff, int *vw_sphy_lo_diff)
{
	struct interval intrvl;

	/* init interval structure */
	if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1,
			  vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK)
		return MV_FAIL;

	/* find pass sub-interval */
	if (interval_proc(&intrvl) != MV_OK)
		return MV_FAIL;

	/* check for all pass */
	if ((intrvl.pass_rendpnt == intrvl.rendpnt) &&
	    (intrvl.pass_lendpnt == intrvl.lendpnt)) {
		printf("%s: no fail found\n", __func__);
		return MV_FAIL;
	}

	*vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt;
	*vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt;

	return MV_OK;
}
/*
 * Fire a single odpg bist transmit cycle: clear the done indication,
 * start the bist engine, poll for completion, stop the engine and clear
 * the odpg data control register.
 *
 * Returns MV_FAIL if the done indication does not arrive within
 * MAX_POLLING_ITERATIONS; the engine is left started in that case.
 */
static int mv_ddr_bist_tx(enum hws_access_type access_type)
{
	mv_ddr_odpg_done_clr();

	ddr3_tip_bist_operation(0, access_type, 0, BIST_START);

	if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
		return MV_FAIL;

	ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}
/* prepare odpg for bist operation */
#define WR_OP_ODPG_DATA_CMD_BURST_DLY 2
/*
 * Prepare the odpg engine for a bist run: enable the write buffer, set
 * the read buffer according to the operation direction, load the pattern
 * (plain 00/ff patterns via the generic odpg loader, anything else via
 * the dm pattern loader with dm_dir), program the data buffer size and
 * finally configure the odpg burst parameters.
 *
 * For writes the tx burst size and command burst delay come from the
 * pattern table / WR_OP_ODPG_DATA_CMD_BURST_DLY; for reads both are zero
 * and the engine is put in RX mode.
 *
 * Always returns MV_OK (register writes are not checked).
 */
static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern, enum hws_access_type access_type,
				    enum hws_dir dir, enum hws_stress_jump stress_jump_addr,
				    enum hws_pattern_duration duration, u32 offset, u32 cs,
				    u32 pattern_addr_len, enum dm_direction dm_dir)
{
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 tx_burst_size;
	u32 burst_delay;
	u32 rd_mode;

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ? (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
					       (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	/* only the trivial patterns go through the generic odpg loader */
	if (pattern == PATTERN_00 || pattern == PATTERN_FF)
		ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
	else
		mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_len, MASK_ALL_BITS);
	if (dir == OPER_WRITE) {
		tx_burst_size = pattern_table[pattern].tx_burst_size;
		burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY;
		rd_mode = ODPG_MODE_TX;
	} else {
		tx_burst_size = 0;
		burst_delay = 0;
		rd_mode = ODPG_MODE_RX;
	}
	ddr3_tip_configure_odpg(0, access_type, 0, dir, pattern_table[pattern].num_of_phases_tx,
				tx_burst_size, pattern_table[pattern].num_of_phases_rx, burst_delay,
				rd_mode, cs, stress_jump_addr, duration);

	return MV_OK;
}
#define BYTES_PER_BURST_64BIT 0x20
#define BYTES_PER_BURST_32BIT 0x10
/*
 * Measure the dm valid window: for every adll tap of one period, run a
 * bist read-back against memory written at that tap and record a
 * pass/fail verdict per subphy in vw_vector.
 *
 * vw_vector is indexed as [subphy * ADLL_TAPS_PER_PERIOD + adll_tap] and
 * must be zeroed by the caller (verdict bits are only OR-ed in).
 *
 * Flow:
 *  1. save the current tx (CTX) and rx (CRX) dqs adll values;
 *  2. write the base pattern to a distinct odpg address per tap;
 *  3. rewrite memory with the vref pattern while sweeping the tx adll
 *     through all taps (one odpg address per tap);
 *  4. restore the tx adll values;
 *  5. bist-read each tap's address and fold the per-subphy result bits
 *     into vw_vector;
 *  6. restore the rx adll values.
 *
 * Always returns MV_OK; per-tap results are reported via vw_vector.
 */
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 adll_tap;
	u32 wr_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 rd_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 subphy;
	u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 odpg_addr = 0x0;
	u32 result;
	u32 idx;
	/* burst length in bytes */
	u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ?
			 BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT);

	/* save dqs values to restore after algorithm's run */
	ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS);
	ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS);

	/* fill memory with base pattern */
	ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);
	mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* fill memory with vref pattern to increment addr using odpg bist */
	mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* sweep the tx dqs adll through the whole period */
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_MULTICAST, 0,
				   DDR_PHY_DATA, CTX_PHY_REG(cs), adll_tap);
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* restore subphy's tx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CTX_PHY_REG(cs),
				   wr_ctrl_adll[subphy]);
	}

	/* read and validate bist (comparing with the base pattern) */
	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		result = 0;
		odpg_addr = adll_tap * burst_len;
		/* change addr to fit write */
		mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr);
		mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result);
		for (subphy = 0; subphy < subphy_max; subphy++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
			idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap;
			vw_vector[idx] |= ((result >> subphy) & 0x1);
		}
	}

	/* restore subphy's rx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CRX_PHY_REG(cs),
				   rd_ctrl_adll[subphy]);
	}

	return MV_OK;
}

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VALIDATE_WIN_LENGTH(e1, e2, maxsize) \
@ -22,6 +16,7 @@
#define NUM_OF_CENTRAL_TYPES 2
u32 start_pattern = PATTERN_KILLER_DQ0, end_pattern = PATTERN_KILLER_DQ7;
u32 start_if = 0, end_if = (MAX_INTERFACE_NUM - 1);
u8 bus_end_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 bus_start_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
@ -65,7 +60,8 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
u8 current_window[BUS_WIDTH_IN_BITS];
u8 opt_window, waste_window, start_window_skew, end_window_skew;
u8 final_pup_window[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS];
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
enum hws_training_result result_type = RESULT_PER_BIT;
enum hws_dir direction;
u32 *result[HWS_SEARCH_DIR_LIMIT];
@ -81,33 +77,33 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
u8 cons_tap = (mode == CENTRAL_TX) ? (64) : (0);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3)));
}
if (mode == CENTRAL_TX) {
max_win_size = MAX_WINDOW_SIZE_TX;
reg_phy_off = WRITE_CENTRALIZATION_PHY_REG + (effective_cs * 4);
reg_phy_off = CTX_PHY_REG(effective_cs);
direction = OPER_WRITE;
} else {
max_win_size = MAX_WINDOW_SIZE_RX;
reg_phy_off = READ_CENTRALIZATION_PHY_REG + (effective_cs * 4);
reg_phy_off = CRX_PHY_REG(effective_cs);
direction = OPER_READ;
}
/* DB initialization */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id < tm->num_of_bus_per_interface; bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
bus_id < octets_per_if_num; bus_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
centralization_state[if_id][bus_id] = 0;
bus_end_window[mode][if_id][bus_id] =
(max_win_size - 1) + cons_tap;
@ -133,11 +129,11 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id <= tm->num_of_bus_per_interface - 1;
bus_id <= octets_per_if_num - 1;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
@ -336,8 +332,10 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
[if_id][bus_id]));
centralization_state[if_id]
[bus_id] = 1;
if (debug_mode == 0)
if (debug_mode == 0) {
flow_result[if_id] = TEST_FAILED;
return MV_FAIL;
}
}
} /* ddr3_tip_centr_skip_min_win_check */
} /* pup */
@ -345,15 +343,14 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
} /* pattern */
for (if_id = start_if; if_id <= end_if; if_id++) {
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
is_if_fail = 0;
flow_result[if_id] = TEST_SUCCESS;
for (bus_id = 0;
bus_id <= (tm->num_of_bus_per_interface - 1); bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
bus_id <= (octets_per_if_num - 1); bus_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
/* continue only if lock */
if (centralization_state[if_id][bus_id] != 1) {
@ -440,21 +437,21 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
ddr3_tip_bus_read(dev_num, if_id,
ACCESS_TYPE_UNICAST, bus_id,
DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
RESULT_PHY_REG +
effective_cs, &reg);
reg = (reg & (~0x1f <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET))))
(RESULT_PHY_TX_OFFS) :
(RESULT_PHY_RX_OFFS))))
| pup_win_length <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET));
(RESULT_PHY_TX_OFFS) :
(RESULT_PHY_RX_OFFS));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
bus_id, DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
RESULT_PHY_REG +
effective_cs, reg));
/* offset per CS is calculated earlier */
@ -480,9 +477,9 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
@ -508,29 +505,30 @@ int ddr3_tip_special_rx(u32 dev_num)
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u32 temp = 0;
int pad_num = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (ddr3_tip_special_rx_run_once_flag != 0)
if ((ddr3_tip_special_rx_run_once_flag & (1 << effective_cs)) == (1 << effective_cs))
return MV_OK;
ddr3_tip_special_rx_run_once_flag = 1;
ddr3_tip_special_rx_run_once_flag |= (1 << effective_cs);
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
cs_enable_reg_val,
MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
(1 << 3), (1 << 3)));
}
max_win_size = MAX_WINDOW_SIZE_RX;
direction = OPER_READ;
pattern_id = PATTERN_VREF;
pattern_id = PATTERN_FULL_SSO1;
/* start flow */
ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST,
@ -544,10 +542,10 @@ int ddr3_tip_special_rx(u32 dev_num)
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup_id = 0;
pup_id <= tm->num_of_bus_per_interface; pup_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup_id);
pup_id <= octets_per_if_num; pup_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
@ -620,13 +618,12 @@ int ddr3_tip_special_rx(u32 dev_num)
BUS_WIDTH_IN_BITS +
if_id *
BUS_WIDTH_IN_BITS *
tm->
num_of_bus_per_interface];
MAX_BUS_NUM];
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
PBS_RX_PHY_REG(effective_cs, pad_num),
&temp));
temp = (temp + 0xa > 31) ?
(31) : (temp + 0xa);
@ -636,7 +633,7 @@ int ddr3_tip_special_rx(u32 dev_num)
if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
PBS_RX_PHY_REG(effective_cs, pad_num),
temp));
}
DEBUG_CENTRALIZATION_ENGINE(
@ -649,25 +646,29 @@ int ddr3_tip_special_rx(u32 dev_num)
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 4,
DDR_PHY_DATA,
PBS_RX_PHY_REG(effective_cs, 4),
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 4, temp));
PBS_RX_PHY_REG(effective_cs, 4),
temp));
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 5,
DDR_PHY_DATA,
PBS_RX_PHY_REG(effective_cs, 5),
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 5, temp));
PBS_RX_PHY_REG(effective_cs, 5),
temp));
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("Special: PBS:: I/F# %d , Bus# %d fix align to the right\n",
@ -694,15 +695,16 @@ int ddr3_tip_special_rx(u32 dev_num)
int ddr3_tip_print_centralization_result(u32 dev_num)
{
u32 if_id = 0, bus_id = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
printf("Centralization Results\n");
printf("I/F0 Result[0 - success 1-fail 2 - state_2 3 - state_3] ...\n");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0; bus_id < octets_per_if_num;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
printf("%d ,\n", centralization_state[if_id][bus_id]);
}
}

View file

@ -3,16 +3,25 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* Device attributes structures */
enum mv_ddr_dev_attribute ddr_dev_attributes[MAX_DEVICE_NUM][MV_ATTR_LAST];
int ddr_dev_attr_init_done[MAX_DEVICE_NUM] = { 0 };
static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index);
static inline u32 pattern_table_get_sso_word(u8 sso, u8 index);
static inline u32 pattern_table_get_vref_word(u8 index);
static inline u32 pattern_table_get_vref_word16(u8 index);
static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index);
static inline u32 pattern_table_get_isi_word(u8 index);
static inline u32 pattern_table_get_isi_word16(u8 index);
/* List of allowed frequency listed in order of enum hws_ddr_freq */
u32 freq_val[DDR_FREQ_LIMIT] = {
u32 freq_val[DDR_FREQ_LAST] = {
0, /*DDR_FREQ_LOW_FREQ */
400, /*DDR_FREQ_400, */
533, /*DDR_FREQ_533, */
@ -151,18 +160,18 @@ u8 twr_mask_table[] = {
10,
10,
10,
1, /*5*/
2, /*6*/
3, /*7*/
4, /*8*/
1, /* 5 */
2, /* 6 */
3, /* 7 */
4, /* 8 */
10,
5, /*10*/
5, /* 10 */
10,
6, /*12*/
6, /* 12 */
10,
7, /*14*/
7, /* 14 */
10,
0 /*16*/
0 /* 16 */
};
u8 cl_mask_table[] = {
@ -209,7 +218,11 @@ u16 rfc_table[] = {
110, /* 1G */
160, /* 2G */
260, /* 4G */
350 /* 8G */
350, /* 8G */
0, /* TODO: placeholder for 16-Mbit dev width */
0, /* TODO: placeholder for 32-Mbit dev width */
0, /* TODO: placeholder for 12-Mbit dev width */
0 /* TODO: placeholder for 24-Mbit dev width */
};
u32 speed_bin_table_t_rc[] = {
@ -233,7 +246,7 @@ u32 speed_bin_table_t_rc[] = {
43285,
44220,
45155,
46900
46090
};
u32 speed_bin_table_t_rcd_t_rp[] = {
@ -255,7 +268,7 @@ u32 speed_bin_table_t_rcd_t_rp[] = {
12840,
13910,
10285,
11022,
11220,
12155,
13090,
};
@ -356,13 +369,13 @@ u32 speed_bin_table(u8 index, enum speed_bin_table_elements element)
result = speed_bin_table_t_rcd_t_rp[index];
break;
case SPEED_BIN_TRAS:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 37500;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 36000;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 35000;
else if (index < 18)
else if (index < SPEED_BIN_DDR_1866M)
result = 34000;
else
result = 33000;
@ -371,49 +384,49 @@ u32 speed_bin_table(u8 index, enum speed_bin_table_elements element)
result = speed_bin_table_t_rc[index];
break;
case SPEED_BIN_TRRD1K:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 10000;
else if (index < 6)
result = 7005;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1066G)
result = 7500;
else if (index < SPEED_BIN_DDR_1600K)
result = 6000;
else
result = 5000;
break;
case SPEED_BIN_TRRD2K:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 10000;
else if (index < 14)
result = 7005;
else if (index < SPEED_BIN_DDR_1600K)
result = 7500;
else
result = 6000;
break;
case SPEED_BIN_TPD:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 7500;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 5625;
else
result = 5000;
break;
case SPEED_BIN_TFAW1K:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 40000;
else if (index < 6)
else if (index < SPEED_BIN_DDR_1066G)
result = 37500;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 30000;
else if (index < 18)
else if (index < SPEED_BIN_DDR_1866M)
result = 27000;
else
result = 25000;
break;
case SPEED_BIN_TFAW2K:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 50000;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 45000;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 40000;
else
result = 35000;
@ -465,14 +478,7 @@ static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index)
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte0 |= pattern_killer_pattern_table_map[index * 2][role] << i;
}
for (i = 0; i < 8; i++) {
role = (i == dqs) ?
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte1 |= pattern_killer_pattern_table_map
[index * 2 + 1][role] << i;
byte1 |= pattern_killer_pattern_table_map[index * 2 + 1][role] << i;
}
return byte0 | (byte0 << 8) | (byte1 << 16) | (byte1 << 24);
@ -488,6 +494,79 @@ static inline u32 pattern_table_get_sso_word(u8 sso, u8 index)
return 0xffffffff;
}
/*
 * Build one 32-bit word of the sso full-xtalk pattern for a 32/64-bit
 * bus: one aggressor bit set per byte, with the whole byte inverted on
 * every odd index so the pattern toggles between words.
 */
static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index)
{
	u8 seed = 1 << bit;

	if (index & 1)
		seed = ~seed;

	return seed | (seed << 8) | (seed << 16) | (seed << 24);
}
/*
 * Build one 32-bit word of the sso xtalk-free pattern for a 32/64-bit
 * bus: the aggressor bit replicated into every byte on even indices,
 * all-zero on odd indices.
 */
static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index)
{
	u8 rep = (index & 1) ? 0 : (u8)(1 << bit);

	return rep | (rep << 8) | (rep << 16) | (rep << 24);
}
/*
 * Build one 32-bit word of the isi (inter-symbol interference) pattern
 * for a 32/64-bit bus. The word is all-ones or all-zero depending on the
 * index position within a 32-entry cycle, and the second half of each
 * 16-entry block is the complement of the first.
 */
static inline u32 pattern_table_get_isi_word(u8 index)
{
	u8 phase = index % 32;
	u8 sub = index % 8;
	u32 word = 0x0;

	if (phase > 15) {
		if (sub == 5 || sub == 7)
			word = 0xffffffff;
	} else {
		if (sub == 6)
			word = 0xffffffff;
	}

	/* invert in the upper half of each 16-entry block */
	if ((phase % 16) > 7)
		word = ~word;

	return word;
}
/*
 * Build one 32-bit word of the sso full-xtalk pattern for a 16-bit bus:
 * the pattern byte replicated in the low 16 bits and its bitwise inverse
 * replicated in the high 16 bits; the byte itself inverts on odd indices.
 *
 * Fix: the original returned ((~byte) << 16) | ((~byte) << 24). Because
 * '~byte' is promoted to int before the shift, its sign-extended upper
 * bits leaked into the result and bits 24-31 were always 0xff instead of
 * the inverted pattern byte (left-shifting that negative value is also
 * undefined behavior in C). Truncating the inverse to u8 first and
 * widening it explicitly yields the intended inverted byte in both upper
 * byte lanes.
 */
static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index)
{
	u8 byte = (1 << bit);
	u8 inv;

	if ((index & 1) == 1)
		byte = ~byte;

	inv = ~byte;	/* u8 assignment keeps only the low 8 bits */

	return byte | (byte << 8) | ((u32)inv << 16) | ((u32)inv << 24);
}
/*
 * Build one 32-bit word of the sso xtalk-free pattern for a 16-bit bus:
 * the aggressor byte sits in the high 16 bits on even indices and in the
 * low 16 bits on odd indices; the other half is zero.
 */
static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index)
{
	u8 rep = 1 << bit;

	if (index & 1)
		return rep | (rep << 8);

	return (rep << 16) | (rep << 24);
}
/*
 * Build one 32-bit word of the isi pattern for a 16-bit bus. A half-word
 * (low or high 16 bits) is driven depending on the index position within
 * a 16-entry cycle, and the second half of each 8-entry block is the
 * complement of the first.
 */
static inline u32 pattern_table_get_isi_word16(u8 index)
{
	u8 phase = index % 16;
	u8 sub = index % 4;
	u32 word = 0x0;

	if (phase > 7) {
		if (sub > 1)
			word = 0x0000ffff;
	} else {
		if (sub == 3)
			word = 0xffff0000;
	}

	/* invert in the upper half of each 8-entry block */
	if ((phase % 8) > 3)
		word = ~word;

	return word;
}
static inline u32 pattern_table_get_vref_word(u8 index)
{
if (0 == ((pattern_vref_pattern_table_map[index / 8] >>
@ -527,13 +606,13 @@ static inline u32 pattern_table_get_static_pbs_word(u8 index)
return temp | (temp << 8) | (temp << 16) | (temp << 24);
}
inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
{
u32 pattern;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 0) {
/* 32bit patterns */
/* 32/64-bit patterns */
switch (type) {
case PATTERN_PBS1:
case PATTERN_PBS2:
@ -577,9 +656,9 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
break;
case PATTERN_TEST:
if (index > 1 && index < 6)
pattern = PATTERN_20;
else
pattern = PATTERN_00;
else
pattern = PATTERN_FF;
break;
case PATTERN_FULL_SSO0:
case PATTERN_FULL_SSO1:
@ -591,7 +670,34 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
case PATTERN_VREF:
pattern = pattern_table_get_vref_word(index);
break;
case PATTERN_SSO_FULL_XTALK_DQ0:
case PATTERN_SSO_FULL_XTALK_DQ1:
case PATTERN_SSO_FULL_XTALK_DQ2:
case PATTERN_SSO_FULL_XTALK_DQ3:
case PATTERN_SSO_FULL_XTALK_DQ4:
case PATTERN_SSO_FULL_XTALK_DQ5:
case PATTERN_SSO_FULL_XTALK_DQ6:
case PATTERN_SSO_FULL_XTALK_DQ7:
pattern = pattern_table_get_sso_full_xtalk_word(
(u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index);
break;
case PATTERN_SSO_XTALK_FREE_DQ0:
case PATTERN_SSO_XTALK_FREE_DQ1:
case PATTERN_SSO_XTALK_FREE_DQ2:
case PATTERN_SSO_XTALK_FREE_DQ3:
case PATTERN_SSO_XTALK_FREE_DQ4:
case PATTERN_SSO_XTALK_FREE_DQ5:
case PATTERN_SSO_XTALK_FREE_DQ6:
case PATTERN_SSO_XTALK_FREE_DQ7:
pattern = pattern_table_get_sso_xtalk_free_word(
(u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index);
break;
case PATTERN_ISI_XTALK_FREE:
pattern = pattern_table_get_isi_word(index);
break;
default:
DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("Error: %s: pattern type [%d] not supported\n",
__func__, (int)type));
pattern = 0;
break;
}
@ -630,7 +736,10 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
pattern = PATTERN_01;
break;
case PATTERN_TEST:
pattern = PATTERN_0080;
if ((index == 0) || (index == 3))
pattern = 0x00000000;
else
pattern = 0xFFFFFFFF;
break;
case PATTERN_FULL_SSO0:
pattern = 0x0000ffff;
@ -644,7 +753,34 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
case PATTERN_VREF:
pattern = pattern_table_get_vref_word16(index);
break;
case PATTERN_SSO_FULL_XTALK_DQ0:
case PATTERN_SSO_FULL_XTALK_DQ1:
case PATTERN_SSO_FULL_XTALK_DQ2:
case PATTERN_SSO_FULL_XTALK_DQ3:
case PATTERN_SSO_FULL_XTALK_DQ4:
case PATTERN_SSO_FULL_XTALK_DQ5:
case PATTERN_SSO_FULL_XTALK_DQ6:
case PATTERN_SSO_FULL_XTALK_DQ7:
pattern = pattern_table_get_sso_full_xtalk_word16(
(u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index);
break;
case PATTERN_SSO_XTALK_FREE_DQ0:
case PATTERN_SSO_XTALK_FREE_DQ1:
case PATTERN_SSO_XTALK_FREE_DQ2:
case PATTERN_SSO_XTALK_FREE_DQ3:
case PATTERN_SSO_XTALK_FREE_DQ4:
case PATTERN_SSO_XTALK_FREE_DQ5:
case PATTERN_SSO_XTALK_FREE_DQ6:
case PATTERN_SSO_XTALK_FREE_DQ7:
pattern = pattern_table_get_sso_xtalk_free_word16(
(u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index);
break;
case PATTERN_ISI_XTALK_FREE:
pattern = pattern_table_get_isi_word16(index);
break;
default:
DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("Error: %s: pattern type [%d] not supported\n",
__func__, (int)type));
pattern = 0;
break;
}
@ -652,3 +788,30 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
return pattern;
}
/* Device attribute functions */
/*
 * Initialize the attribute table of the given device: fill every entry
 * with 0xFF and mark the table as initialized.
 */
void ddr3_tip_dev_attr_init(u32 dev_num)
{
	u32 attr_id = 0;

	while (attr_id < MV_ATTR_LAST) {
		ddr_dev_attributes[dev_num][attr_id] = 0xFF;
		attr_id++;
	}

	ddr_dev_attr_init_done[dev_num] = 1;
}
/*
 * Return the stored attribute value for the given device, lazily
 * initializing the device's attribute table (all entries set to 0xFF by
 * ddr3_tip_dev_attr_init) on first access.
 */
u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id)
{
	if (!ddr_dev_attr_init_done[dev_num])
		ddr3_tip_dev_attr_init(dev_num);

	return ddr_dev_attributes[dev_num][attr_id];
}
/*
 * Store an attribute value for the given device, lazily initializing the
 * device's attribute table on first access so unset entries read back as
 * 0xFF rather than garbage.
 */
void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value)
{
	if (!ddr_dev_attr_init_done[dev_num])
		ddr3_tip_dev_attr_init(dev_num);

	ddr_dev_attributes[dev_num][attr_id] = value;
}

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VREF_INITIAL_STEP 3
@ -16,9 +10,8 @@
#define VREF_MAX_INDEX 7
#define MAX_VALUE (1024 - 1)
#define MIN_VALUE (-MAX_VALUE)
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0x1f)
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0xf)
u32 ck_delay = (u32)-1, ck_delay_16 = (u32)-1;
u32 ca_delay;
int ddr3_tip_centr_skip_min_win_check = 0;
u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
@ -48,45 +41,41 @@ static u32 rd_sample_mask[] = {
*/
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
{
u32 cs_num = 0, max_cs = 0, max_read_sample = 0, min_read_sample = 0x1f;
u32 cs_num = 0, max_read_sample = 0, min_read_sample = 0x1f;
u32 data_read[MAX_INTERFACE_NUM] = { 0 };
u32 read_sample[MAX_CS_NUM];
u32 val;
u32 pup_index;
int max_phase = MIN_VALUE, current_phase;
enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
DUNIT_ODT_CONTROL_REG,
DUNIT_ODT_CTRL_REG,
0 << 8, 0x3 << 8));
CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id,
READ_DATA_SAMPLE_DELAY,
RD_DATA_SMPL_DLYS_REG,
data_read, MASK_ALL_BITS));
val = data_read[if_id];
max_cs = hws_ddr3_tip_max_cs_get();
for (cs_num = 0; cs_num < max_cs; cs_num++) {
for (cs_num = 0; cs_num < MAX_CS_NUM; cs_num++) {
read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num);
/* find maximum of read_samples */
if (read_sample[cs_num] >= max_read_sample) {
if (read_sample[cs_num] == max_read_sample) {
/* search for max phase */;
} else {
max_read_sample = read_sample[cs_num];
if (read_sample[cs_num] == max_read_sample)
max_phase = MIN_VALUE;
}
else
max_read_sample = read_sample[cs_num];
for (pup_index = 0;
pup_index < tm->num_of_bus_per_interface;
pup_index < octets_per_if_num;
pup_index++) {
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_index,
DDR_PHY_DATA,
RL_PHY_REG + CS_REG_VALUE(cs_num),
RL_PHY_REG(cs_num),
&val));
current_phase = ((int)val & 0xe0) >> 6;
@ -100,21 +89,19 @@ int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
min_read_sample = read_sample[cs_num];
}
if (min_read_sample <= tm->interface_params[if_id].cas_l) {
min_read_sample = (int)tm->interface_params[if_id].cas_l;
}
min_read_sample = min_read_sample - 1;
max_read_sample = max_read_sample + 4 + (max_phase + 1) / 2 + 1;
if (min_read_sample >= 0xf)
min_read_sample = 0xf;
if (max_read_sample >= 0x1f)
max_read_sample = 0x1f;
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
DDR_ODT_TIMING_LOW_REG,
((min_read_sample - 1) << 12),
0xf << 12));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
DDR_ODT_TIMING_LOW_REG,
(max_read_sample << 16),
0x1f << 16));
@ -123,7 +110,7 @@ int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
{
u32 reg_pup = RESULT_DB_PHY_REG_ADDR;
u32 reg_pup = RESULT_PHY_REG;
u32 reg_data;
u32 cs_num;
int i;
@ -138,7 +125,7 @@ int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
ACCESS_TYPE_UNICAST, i,
DDR_PHY_DATA, reg_pup,
&reg_data));
res[i] = (reg_data >> RESULT_DB_PHY_REG_RX_OFFSET) & 0x1f;
res[i] = (reg_data >> RESULT_PHY_RX_OFFS) & 0x1f;
}
return 0;
@ -176,7 +163,8 @@ int ddr3_tip_vref(u32 dev_num)
u32 copy_start_pattern, copy_end_pattern;
enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
u8 res[4];
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
CHECK_STATUS(ddr3_tip_special_rx(dev_num));
@ -190,9 +178,9 @@ int ddr3_tip_vref(u32 dev_num)
/* init params */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
pup < octets_per_if_num; pup++) {
current_vref[pup][if_id] = 0;
last_vref[pup][if_id] = 0;
lim_vref[pup][if_id] = 0;
@ -228,7 +216,7 @@ int ddr3_tip_vref(u32 dev_num)
}
/* TODO: Set number of active interfaces */
num_pup = tm->num_of_bus_per_interface * MAX_INTERFACE_NUM;
num_pup = octets_per_if_num * MAX_INTERFACE_NUM;
while ((algo_run_flag <= num_pup) & (while_count < 10)) {
while_count++;
@ -239,13 +227,13 @@ int ddr3_tip_vref(u32 dev_num)
/* Read Valid window results only for non converge pups */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (interface_state[if_id] != 4) {
get_valid_win_rx(dev_num, if_id, res);
for (pup = 0;
pup < tm->num_of_bus_per_interface;
pup < octets_per_if_num;
pup++) {
VALIDATE_ACTIVE
VALIDATE_BUS_ACTIVE
(tm->bus_act_mask, pup);
if (pup_st[pup]
[if_id] ==
@ -263,14 +251,14 @@ int ddr3_tip_vref(u32 dev_num)
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_TRACE,
("current_valid_window: IF[ %d ] - ", if_id));
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("%d ",
current_valid_window
@ -281,10 +269,10 @@ int ddr3_tip_vref(u32 dev_num)
/* Compare results and respond as function of state */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n",
if_id, pup,
@ -609,10 +597,10 @@ int ddr3_tip_vref(u32 dev_num)
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
@ -640,7 +628,7 @@ int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
{
u32 if_id = 0;
u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
/*
* ck_delay_table is delaying the of the clock signal only.
@ -653,22 +641,18 @@ int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
*/
/* Calc ADLL Tap */
if ((ck_delay == -1) || (ck_delay_16 == -1)) {
if (ck_delay == PARAM_UNDEFINED)
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_ERROR,
("ERROR: One of ck_delay values not initialized!!!\n"));
}
("ERROR: ck_delay is not initialized!\n"));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* Calc delay ps in ADLL tap */
if (tm->interface_params[if_id].bus_width ==
BUS_WIDTH_16)
ck_num_adll_tap = ck_delay_16 / adll_tap;
else
ck_num_adll_tap = ck_delay / adll_tap;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* Calc delay ps in ADLL tap */
ck_num_adll_tap = ck_delay / adll_tap;
ca_num_adll_tap = ca_delay / adll_tap;
data = (ck_num_adll_tap & 0x3f) +
((ca_num_adll_tap & 0x3f) << 10);

View file

@ -10,11 +10,10 @@
#include "ddr_topology_def.h"
#include "ddr_training_ip_db.h"
#define DDR3_TIP_VERSION_STRING "DDR3 Training Sequence - Ver TIP-1.29."
#define MAX_CS_NUM 4
#define MAX_TOTAL_BUS_NUM (MAX_INTERFACE_NUM * MAX_BUS_NUM)
#define MAX_DQ_NUM 40
#define TIP_ENG_LOCK 0x02000000
#define TIP_TX_DLL_RANGE_MAX 64
#define GET_MIN(arg1, arg2) ((arg1) < (arg2)) ? (arg1) : (arg2)
#define GET_MAX(arg1, arg2) ((arg1) < (arg2)) ? (arg2) : (arg1)
@ -38,11 +37,15 @@
#define READ_LEVELING_TF_MASK_BIT 0x00010000
#define WRITE_LEVELING_SUPP_TF_MASK_BIT 0x00020000
#define DM_PBS_TX_MASK_BIT 0x00040000
#define RL_DQS_BURST_MASK_BIT 0x00080000
#define CENTRALIZATION_RX_MASK_BIT 0x00100000
#define CENTRALIZATION_TX_MASK_BIT 0x00200000
#define TX_EMPHASIS_MASK_BIT 0x00400000
#define PER_BIT_READ_LEVELING_TF_MASK_BIT 0x00800000
#define VREF_CALIBRATION_MASK_BIT 0x01000000
#define WRITE_LEVELING_LF_MASK_BIT 0x02000000
/* DDR4 Specific Training Mask bits */
enum hws_result {
TEST_FAILED = 0,
@ -79,6 +82,7 @@ enum auto_tune_stage {
TX_EMPHASIS,
LOAD_PATTERN_HIGH,
PER_BIT_READ_LEVELING_TF,
WRITE_LEVELING_LF,
MAX_STAGE_LIMIT
};
@ -110,7 +114,7 @@ struct pattern_info {
/* CL value for each frequency */
struct cl_val_per_freq {
u8 cl_val[DDR_FREQ_LIMIT];
u8 cl_val[DDR_FREQ_LAST];
};
struct cs_element {
@ -167,11 +171,14 @@ int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable);
int hws_ddr3_tip_init_controller(u32 dev_num,
struct init_cntr_param *init_cntr_prm);
int hws_ddr3_tip_load_topology_map(u32 dev_num,
struct hws_topology_map *topology);
struct mv_ddr_topology_map *topology);
int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type);
int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info);
int hws_ddr3_tip_read_training_result(u32 dev_num,
enum hws_result result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]);
int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode);
u8 ddr3_tip_get_buf_min(u8 *buf_ptr);
u8 ddr3_tip_get_buf_max(u8 *buf_ptr);
uint64_t mv_ddr_get_memory_size_per_cs_in_bits(void);
uint64_t mv_ddr_get_total_memory_size_in_bits(void);
#endif /* _DDR3_TRAINING_IP_H_ */

View file

@ -45,9 +45,13 @@ int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 cs_num);
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
u32 mode);
int ddr3_tip_run_leveling_sweep_test(int dev_num, u32 repeat_num,
u32 direction, u32 mode);
int ddr3_tip_print_regs(u32 dev_num);
int ddr3_tip_reg_dump(u32 dev_num);
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, u32 read_type,
u32 burst_length);
int mv_ddr_dm_to_dq_diff_get(u8 adll_byte_high, u8 adll_byte_low, u8 *vw_vector,
int *delta_h_adll, int *delta_l_adll);
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector);
#endif /* _DDR3_TRAINING_IP_BIST_H_ */

View file

@ -9,7 +9,10 @@
enum hws_pattern {
PATTERN_PBS1,
PATTERN_PBS2,
PATTERN_PBS3,
PATTERN_TEST,
PATTERN_RL,
PATTERN_RL2,
PATTERN_STATIC_PBS,
PATTERN_KILLER_DQ0,
PATTERN_KILLER_DQ1,
@ -19,15 +22,73 @@ enum hws_pattern {
PATTERN_KILLER_DQ5,
PATTERN_KILLER_DQ6,
PATTERN_KILLER_DQ7,
PATTERN_PBS3,
PATTERN_RL2,
PATTERN_TEST,
PATTERN_VREF,
PATTERN_FULL_SSO0,
PATTERN_FULL_SSO1,
PATTERN_FULL_SSO2,
PATTERN_FULL_SSO3,
PATTERN_VREF,
PATTERN_LIMIT
PATTERN_LAST,
PATTERN_SSO_FULL_XTALK_DQ0,
PATTERN_SSO_FULL_XTALK_DQ1,
PATTERN_SSO_FULL_XTALK_DQ2,
PATTERN_SSO_FULL_XTALK_DQ3,
PATTERN_SSO_FULL_XTALK_DQ4,
PATTERN_SSO_FULL_XTALK_DQ5,
PATTERN_SSO_FULL_XTALK_DQ6,
PATTERN_SSO_FULL_XTALK_DQ7,
PATTERN_SSO_XTALK_FREE_DQ0,
PATTERN_SSO_XTALK_FREE_DQ1,
PATTERN_SSO_XTALK_FREE_DQ2,
PATTERN_SSO_XTALK_FREE_DQ3,
PATTERN_SSO_XTALK_FREE_DQ4,
PATTERN_SSO_XTALK_FREE_DQ5,
PATTERN_SSO_XTALK_FREE_DQ6,
PATTERN_SSO_XTALK_FREE_DQ7,
PATTERN_ISI_XTALK_FREE
};
enum mv_wl_supp_mode {
WRITE_LEVELING_SUPP_REG_MODE,
WRITE_LEVELING_SUPP_ECC_MODE_DATA_PUPS,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP4,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP3,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP8
};
enum mv_ddr_dev_attribute {
MV_ATTR_TIP_REV,
MV_ATTR_PHY_EDGE,
MV_ATTR_OCTET_PER_INTERFACE,
MV_ATTR_PLL_BEFORE_INIT,
MV_ATTR_TUNE_MASK,
MV_ATTR_INIT_FREQ,
MV_ATTR_MID_FREQ,
MV_ATTR_DFS_LOW_FREQ,
MV_ATTR_DFS_LOW_PHY,
MV_ATTR_DELAY_ENABLE,
MV_ATTR_CK_DELAY,
MV_ATTR_CA_DELAY,
MV_ATTR_INTERLEAVE_WA,
MV_ATTR_LAST
};
enum mv_ddr_tip_revison {
MV_TIP_REV_NA,
MV_TIP_REV_1, /* NP5 */
MV_TIP_REV_2, /* BC2 */
MV_TIP_REV_3, /* AC3 */
MV_TIP_REV_4, /* A-380/A-390 */
MV_TIP_REV_LAST
};
enum mv_ddr_phy_edge {
MV_DDR_PHY_EDGE_POSITIVE,
MV_DDR_PHY_EDGE_NEGATIVE
};
/* Device attribute functions */
void ddr3_tip_dev_attr_init(u32 dev_num);
u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id);
void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value);
#endif /* _DDR3_TRAINING_IP_DB_H_ */

View file

@ -6,8 +6,6 @@
#ifndef _DDR3_TRAINING_IP_DEF_H
#define _DDR3_TRAINING_IP_DEF_H
#include "silicon_if.h"
#define PATTERN_55 0x55555555
#define PATTERN_AA 0xaaaaaaaa
#define PATTERN_80 0x80808080
@ -35,6 +33,7 @@
#define ADLL_RX_LENGTH 32
#define PARAM_NOT_CARE 0
#define PARAM_UNDEFINED 0xffffffff
#define READ_LEVELING_PHY_OFFSET 2
#define WRITE_LEVELING_PHY_OFFSET 0
@ -99,6 +98,8 @@
#define _1G 0x40000000
#define _2G 0x80000000
#define _4G 0x100000000
#define _8G 0x200000000
#define ADDR_SIZE_512MB 0x04000000
#define ADDR_SIZE_1GB 0x08000000
@ -163,10 +164,33 @@ enum hws_wl_supp {
ALIGN_SHIFT
};
enum mv_ddr_tip_bit_state {
BIT_LOW_UI,
BIT_HIGH_UI,
BIT_SPLIT_IN,
BIT_SPLIT_OUT,
BIT_STATE_LAST
};
enum mv_ddr_tip_byte_state{
BYTE_NOT_DEFINED,
BYTE_HOMOGENEOUS_LOW = 0x1,
BYTE_HOMOGENEOUS_HIGH = 0x2,
BYTE_HOMOGENEOUS_SPLIT_IN = 0x4,
BYTE_HOMOGENEOUS_SPLIT_OUT = 0x8,
BYTE_SPLIT_OUT_MIX = 0x10,
BYTE_STATE_LAST
};
struct reg_data {
u32 reg_addr;
u32 reg_data;
u32 reg_mask;
unsigned int reg_addr;
unsigned int reg_data;
unsigned int reg_mask;
};
enum dm_direction {
DM_DIR_INVERSE,
DM_DIR_DIRECT
};
#endif /* _DDR3_TRAINING_IP_DEF_H */

File diff suppressed because it is too large Load diff

View file

@ -37,8 +37,6 @@ int ddr3_tip_training_ip_test(u32 dev_num, enum hws_training_result result_type,
u32 num_of_iterations, u32 start_pattern,
u32 end_pattern);
int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern pattern);
int ddr3_tip_load_pattern_to_mem_by_cpu(u32 dev_num, enum hws_pattern pattern,
u32 offset);
int ddr3_tip_load_all_pattern_to_mem(u32 dev_num);
int ddr3_tip_read_training_result(u32 dev_num, u32 if_id,
enum hws_access_type pup_access_type,
@ -75,10 +73,13 @@ int ddr3_tip_ip_training_wrapper(u32 dev_num, enum hws_access_type access_type,
enum hws_edge_compare edge_comp,
enum hws_ddr_cs train_cs_type, u32 cs_num,
enum hws_training_ip_stat *train_status);
int is_odpg_access_done(u32 dev_num, u32 if_id);
u8 mv_ddr_tip_sub_phy_byte_status_get(u32 if_id, u32 subphy_id);
void mv_ddr_tip_sub_phy_byte_status_set(u32 if_id, u32 subphy_id, u8 byte_status_data);
void ddr3_tip_print_bist_res(void);
struct pattern_info *ddr3_tip_get_pattern_table(void);
u16 *ddr3_tip_get_mask_results_dq_reg(void);
u16 *ddr3_tip_get_mask_results_pup_reg_map(void);
int mv_ddr_load_dm_pattern_to_odpg(enum hws_access_type access_type, enum hws_pattern pattern,
enum dm_direction dm_dir);
int mv_ddr_pattern_start_addr_set(struct pattern_info *pattern_tbl, enum hws_pattern pattern, u32 addr);
#endif /* _DDR3_TRAINING_IP_ENGINE_H_ */

View file

@ -8,45 +8,73 @@
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_pbs.h"
#define MRS0_CMD 0x3
#define MRS1_CMD 0x4
#define MRS2_CMD 0x8
#define MRS3_CMD 0x9
/*
* Definitions of INTERFACE registers
*/
#define READ_BUFFER_SELECT 0x14a4
/*
* Definitions of PHY registers
*/
#include "mv_ddr_regs.h"
#define KILLER_PATTERN_LENGTH 32
#define EXT_ACCESS_BURST_LENGTH 8
#define IS_ACTIVE(if_mask , if_id) \
((if_mask) & (1 << (if_id)))
#define IS_ACTIVE(mask, id) \
((mask) & (1 << (id)))
#define VALIDATE_ACTIVE(mask, id) \
{ \
if (IS_ACTIVE(mask, id) == 0) \
continue; \
}
#define GET_TOPOLOGY_NUM_OF_BUSES() \
(ddr3_get_topology_map()->num_of_bus_per_interface)
#define IS_IF_ACTIVE(if_mask, if_id) \
((if_mask) & (1 << (if_id)))
#define VALIDATE_IF_ACTIVE(mask, id) \
{ \
if (IS_IF_ACTIVE(mask, id) == 0) \
continue; \
}
#define IS_BUS_ACTIVE(if_mask , if_id) \
(((if_mask) >> (if_id)) & 1)
#define VALIDATE_BUS_ACTIVE(mask, id) \
{ \
if (IS_BUS_ACTIVE(mask, id) == 0) \
continue; \
}
#define DDR3_IS_ECC_PUP3_MODE(if_mask) \
(((if_mask) == 0xb) ? 1 : 0)
#define DDR3_IS_ECC_PUP4_MODE(if_mask) \
(((((if_mask) & 0x10) == 0)) ? 0 : 1)
#define DDR3_IS_16BIT_DRAM_MODE(mask) \
(((((mask) & 0x4) == 0)) ? 1 : 0)
(((if_mask) == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0)
#define DDR3_IS_ECC_PUP4_MODE(if_mask) \
((if_mask == BUS_MASK_32BIT_ECC || if_mask == BUS_MASK_16BIT_ECC) ? 1 : 0)
#define DDR3_IS_16BIT_DRAM_MODE(mask) \
((mask == BUS_MASK_16BIT || mask == BUS_MASK_16BIT_ECC || mask == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0)
#define DDR3_IS_ECC_PUP8_MODE(if_mask) \
((if_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK || if_mask == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0)
#define MV_DDR_IS_64BIT_DRAM_MODE(mask) \
((((mask) & MV_DDR_64BIT_BUS_MASK) == MV_DDR_64BIT_BUS_MASK) || \
(((mask) & MV_DDR_64BIT_ECC_PUP8_BUS_MASK) == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0)
#define MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, octets_per_if_num/* FIXME: get from ATF */) \
((octets_per_if_num == 9/* FIXME: get from ATF */) && \
((mask == BUS_MASK_32BIT) || \
(mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)) ? 1 : 0)
#define MV_DDR_IS_HALF_BUS_DRAM_MODE(mask, octets_per_if_num/* FIXME: get from ATF */) \
(MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, octets_per_if_num) || DDR3_IS_16BIT_DRAM_MODE(mask))
#define ECC_READ_BUS_0 0
#define ECC_PHY_ACCESS_3 3
#define ECC_PHY_ACCESS_4 4
#define ECC_PHY_ACCESS_8 8
#define MEGA 1000000
#define BUS_WIDTH_IN_BITS 8
#define MAX_POLLING_ITERATIONS 1000000
#define NUM_OF_CS 4
#define ADLL_LENGTH 32
#define GP_RSVD0_REG 0x182e0
/*
* DFX address Space
@ -66,205 +94,20 @@
/* nsec */
#define TREFI_LOW 7800
#define TREFI_HIGH 3900
#define AUTO_ZQC_TIMING 15384
#define TR2R_VALUE_REG 0x180
#define TR2R_MASK_REG 0x180
#define TRFC_MASK_REG 0x7f
#define TR2W_MASK_REG 0x600
#define TW2W_HIGH_VALUE_REG 0x1800
#define TW2W_HIGH_MASK_REG 0xf800
#define TRFC_HIGH_VALUE_REG 0x20000
#define TRFC_HIGH_MASK_REG 0x70000
#define TR2R_HIGH_VALUE_REG 0x0
#define TR2R_HIGH_MASK_REG 0x380000
#define TMOD_VALUE_REG 0x16000000
#define TMOD_MASK_REG 0x1e000000
#define T_VALUE_REG 0x40000000
#define T_MASK_REG 0xc0000000
#define AUTO_ZQC_TIMING 15384
#define WRITE_XBAR_PORT1 0xc03f8077
#define READ_XBAR_PORT1 0xc03f8073
#define DISABLE_DDR_TUNING_DATA 0x02294285
#define ENABLE_DDR_TUNING_DATA 0x12294285
enum mr_number {
MR_CMD0,
MR_CMD1,
MR_CMD2,
MR_CMD3,
MR_LAST
};
#define ODPG_TRAINING_STATUS_REG 0x18488
#define ODPG_TRAINING_TRIGGER_REG 0x1030
#define ODPG_STATUS_DONE_REG 0x16fc
#define ODPG_ENABLE_REG 0x186d4
#define ODPG_ENABLE_OFFS 0
#define ODPG_DISABLE_OFFS 8
#define ODPG_TRAINING_CONTROL_REG 0x1034
#define ODPG_OBJ1_OPCODE_REG 0x103c
#define ODPG_OBJ1_ITER_CNT_REG 0x10b4
#define CALIB_OBJ_PRFA_REG 0x10c4
#define ODPG_WRITE_LEVELING_DONE_CNTR_REG 0x10f8
#define ODPG_WRITE_READ_MODE_ENABLE_REG 0x10fc
#define TRAINING_OPCODE_1_REG 0x10b4
#define SDRAM_CONFIGURATION_REG 0x1400
#define DDR_CONTROL_LOW_REG 0x1404
#define SDRAM_TIMING_LOW_REG 0x1408
#define SDRAM_TIMING_HIGH_REG 0x140c
#define SDRAM_ACCESS_CONTROL_REG 0x1410
#define SDRAM_OPEN_PAGE_CONTROL_REG 0x1414
#define SDRAM_OPERATION_REG 0x1418
#define DUNIT_CONTROL_HIGH_REG 0x1424
#define ODT_TIMING_LOW 0x1428
#define DDR_TIMING_REG 0x142c
#define ODT_TIMING_HI_REG 0x147c
#define SDRAM_INIT_CONTROL_REG 0x1480
#define SDRAM_ODT_CONTROL_HIGH_REG 0x1498
#define DUNIT_ODT_CONTROL_REG 0x149c
#define READ_BUFFER_SELECT_REG 0x14a4
#define DUNIT_MMASK_REG 0x14b0
#define CALIB_MACHINE_CTRL_REG 0x14cc
#define DRAM_DLL_TIMING_REG 0x14e0
#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4
#define DRAM_ZQ_TIMING_REG 0x14e8
#define DFS_REG 0x1528
#define READ_DATA_SAMPLE_DELAY 0x1538
#define READ_DATA_READY_DELAY 0x153c
#define TRAINING_REG 0x15b0
#define TRAINING_SW_1_REG 0x15b4
#define TRAINING_SW_2_REG 0x15b8
#define TRAINING_PATTERN_BASE_ADDRESS_REG 0x15bc
#define TRAINING_DBG_1_REG 0x15c0
#define TRAINING_DBG_2_REG 0x15c4
#define TRAINING_DBG_3_REG 0x15c8
#define RANK_CTRL_REG 0x15e0
#define TIMING_REG 0x15e4
#define DRAM_PHY_CONFIGURATION 0x15ec
#define MR0_REG 0x15d0
#define MR1_REG 0x15d4
#define MR2_REG 0x15d8
#define MR3_REG 0x15dc
#define TIMING_REG 0x15e4
#define ODPG_CTRL_CONTROL_REG 0x1600
#define ODPG_DATA_CONTROL_REG 0x1630
#define ODPG_PATTERN_ADDR_OFFSET_REG 0x1638
#define ODPG_DATA_BUF_SIZE_REG 0x163c
#define PHY_LOCK_STATUS_REG 0x1674
#define PHY_REG_FILE_ACCESS 0x16a0
#define TRAINING_WRITE_LEVELING_REG 0x16ac
#define ODPG_PATTERN_ADDR_REG 0x16b0
#define ODPG_PATTERN_DATA_HI_REG 0x16b4
#define ODPG_PATTERN_DATA_LOW_REG 0x16b8
#define ODPG_BIST_LAST_FAIL_ADDR_REG 0x16bc
#define ODPG_BIST_DATA_ERROR_COUNTER_REG 0x16c0
#define ODPG_BIST_FAILED_DATA_HI_REG 0x16c4
#define ODPG_BIST_FAILED_DATA_LOW_REG 0x16c8
#define ODPG_WRITE_DATA_ERROR_REG 0x16cc
#define CS_ENABLE_REG 0x16d8
#define WR_LEVELING_DQS_PATTERN_REG 0x16dc
#define ODPG_BIST_DONE 0x186d4
#define ODPG_BIST_DONE_BIT_OFFS 0
#define ODPG_BIST_DONE_BIT_VALUE 0
#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830
#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834
#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838
#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c
#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0
#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4
#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8
#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc
#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0
#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4
#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8
#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc
#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0
#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4
#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8
#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc
#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930
#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934
#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938
#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c
#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0
#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4
#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8
#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc
#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0
#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4
#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8
#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc
#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0
#define RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4
#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8
#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc
#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30
#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34
#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38
#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c
#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0
#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4
#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8
#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc
#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0
#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4
#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8
#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc
#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0
#define WL_PHY_REG 0x0
#define WRITE_CENTRALIZATION_PHY_REG 0x1
#define RL_PHY_REG 0x2
#define READ_CENTRALIZATION_PHY_REG 0x3
#define PBS_RX_PHY_REG 0x50
#define PBS_TX_PHY_REG 0x10
#define PHY_CONTROL_PHY_REG 0x90
#define BW_PHY_REG 0x92
#define RATE_PHY_REG 0x94
#define CMOS_CONFIG_PHY_REG 0xa2
#define PAD_ZRI_CALIB_PHY_REG 0xa4
#define PAD_ODT_CALIB_PHY_REG 0xa6
#define PAD_CONFIG_PHY_REG 0xa8
#define PAD_PRE_DISABLE_PHY_REG 0xa9
#define TEST_ADLL_REG 0xbf
#define CSN_IOB_VREF_REG(cs) (0xdb + (cs * 12))
#define CSN_IO_BASE_VREF_REG(cs) (0xd0 + (cs * 12))
#define RESULT_DB_PHY_REG_ADDR 0xc0
#define RESULT_DB_PHY_REG_RX_OFFSET 5
#define RESULT_DB_PHY_REG_TX_OFFSET 0
/* TBD - for NP5 use only CS 0 */
#define PHY_WRITE_DELAY(cs) WL_PHY_REG
/*( ( _cs_ == 0 ) ? 0x0 : 0x4 )*/
/* TBD - for NP5 use only CS 0 */
#define PHY_READ_DELAY(cs) RL_PHY_REG
#define DDR0_ADDR_1 0xf8258
#define DDR0_ADDR_2 0xf8254
#define DDR1_ADDR_1 0xf8270
#define DDR1_ADDR_2 0xf8270
#define DDR2_ADDR_1 0xf825c
#define DDR2_ADDR_2 0xf825c
#define DDR3_ADDR_1 0xf8264
#define DDR3_ADDR_2 0xf8260
#define DDR4_ADDR_1 0xf8274
#define DDR4_ADDR_2 0xf8274
#define GENERAL_PURPOSE_RESERVED0_REG 0x182e0
#define GET_BLOCK_ID_MAX_FREQ(dev_num, block_id) 800000
#define CS0_RD_LVL_REF_DLY_OFFS 0
#define CS0_RD_LVL_REF_DLY_LEN 0
#define CS0_RD_LVL_PH_SEL_OFFS 0
#define CS0_RD_LVL_PH_SEL_LEN 0
#define CS_REGISTER_ADDR_OFFSET 4
#define CALIBRATED_OBJECTS_REG_ADDR_OFFSET 0x10
#define MAX_POLLING_ITERATIONS 100000
#define PHASE_REG_OFFSET 32
#define NUM_BYTES_IN_BURST 31
#define NUM_OF_CS 4
#define CS_REG_VALUE(cs_num) (cs_mask_reg[cs_num])
#define ADLL_LENGTH 32
struct mv_ddr_mr_data {
u32 cmd;
u32 reg_addr;
};
struct write_supp_result {
enum hws_wl_supp stage;
@ -314,10 +157,11 @@ int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *addr);
int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 ui_freq);
int mv_ddr_rl_dqs_burst(u32 dev_num, u32 if_id, u32 freq);
int ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num);
int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 ui_freq);
int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling(u32 dev_num, int phase_remove);
int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num);
int ddr3_tip_static_init_controller(u32 dev_num);
int ddr3_tip_configure_phy(u32 dev_num);
@ -331,18 +175,21 @@ int ddr3_tip_configure_odpg(u32 dev_num, enum hws_access_type access_type,
u32 delay_between_burst, u32 rd_mode, u32 cs_num,
u32 addr_stress_jump, u32 single_pattern);
int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value);
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, u32 data,
u32 mask);
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, enum mr_number mr_num, u32 data, u32 mask);
int ddr3_tip_write_cs_result(u32 dev_num, u32 offset);
int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, u32 *if_id);
int ddr3_tip_reset_fifo_ptr(u32 dev_num);
int read_pup_value(int pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr);
int ddr3_tip_read_pup_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int ddr3_tip_read_adll_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 reg_addr, u32 mask);
int ddr3_tip_write_adll_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 reg_addr);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
struct page_element *mv_ddr_page_tbl_get(void);
#endif /* _DDR3_TRAINING_IP_FLOW_H_ */

View file

@ -62,7 +62,7 @@ typedef int (*HWS_TRAINING_IP_PBS_TX_FUNC_PTR)(u32 dev_num);
typedef int (*HWS_TRAINING_IP_SELECT_CONTROLLER_FUNC_PTR)(
u32 dev_num, int enable);
typedef int (*HWS_TRAINING_IP_TOPOLOGY_MAP_LOAD_FUNC_PTR)(
u32 dev_num, struct hws_topology_map *topology_map);
u32 dev_num, struct mv_ddr_topology_map *tm);
typedef int (*HWS_TRAINING_IP_STATIC_CONFIG_FUNC_PTR)(
u32 dev_num, enum hws_ddr_freq frequency,
enum hws_static_config_type static_config_type, u32 if_id);
@ -83,16 +83,27 @@ typedef int (*HWS_TRAINING_IP_LOAD_TOPOLOGY)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_READ_LEVELING)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_WRITE_LEVELING)(u32 dev_num, u32 config_num);
typedef u32 (*HWS_TRAINING_IP_GET_TEMP)(u8 dev_num);
typedef u8 (*HWS_TRAINING_IP_GET_RATIO)(u32 freq);
struct hws_tip_config_func_db {
HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR tip_dunit_mux_select_func;
HWS_TIP_DUNIT_REG_READ_FUNC_PTR tip_dunit_read_func;
HWS_TIP_DUNIT_REG_WRITE_FUNC_PTR tip_dunit_write_func;
void (*mv_ddr_dunit_read)(u32 addr, u32 mask, u32 *data);
void (*mv_ddr_dunit_write)(u32 addr, u32 mask, u32 data);
HWS_TIP_GET_FREQ_CONFIG_INFO tip_get_freq_config_info_func;
HWS_TIP_GET_DEVICE_INFO tip_get_device_info_func;
HWS_SET_FREQ_DIVIDER_FUNC_PTR tip_set_freq_divider_func;
HWS_GET_CS_CONFIG_FUNC_PTR tip_get_cs_config_info;
HWS_TRAINING_IP_GET_TEMP tip_get_temperature;
HWS_TRAINING_IP_GET_RATIO tip_get_clock_ratio;
HWS_TRAINING_IP_EXTERNAL_READ_PTR tip_external_read;
HWS_TRAINING_IP_EXTERNAL_WRITE_PTR tip_external_write;
int (*mv_ddr_phy_read)(enum hws_access_type phy_access,
u32 phy, enum hws_ddr_phy phy_type,
u32 reg_addr, u32 *data);
int (*mv_ddr_phy_write)(enum hws_access_type phy_access,
u32 phy, enum hws_ddr_phy phy_type,
u32 reg_addr, u32 data,
enum hws_operation op_type);
};
int ddr3_tip_init_config_func(u32 dev_num,

View file

@ -1,30 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_TRAINING_IP_STATIC_H_
#define _DDR3_TRAINING_IP_STATIC_H_
#include "ddr3_training_ip_def.h"
#include "ddr3_training_ip.h"
struct trip_delay_element {
u32 dqs_delay; /* DQS delay (m_sec) */
u32 ck_delay; /* CK Delay (m_sec) */
};
struct hws_tip_static_config_info {
u32 silicon_delay;
struct trip_delay_element *package_trace_arr;
struct trip_delay_element *board_trace_arr;
};
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq);
int ddr3_tip_init_static_config_db(
u32 dev_num, struct hws_tip_static_config_info *static_config_info);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
int ddr3_tip_static_phy_init_controller(u32 dev_num);
#endif /* _DDR3_TRAINING_IP_STATIC_H_ */

File diff suppressed because it is too large Load diff

View file

@ -11,6 +11,6 @@
int ddr3_tip_print_wl_supp_result(u32 dev_num);
int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs,
u32 *cs_mask);
u32 hws_ddr3_tip_max_cs_get(void);
u32 ddr3_tip_max_cs_get(u32 dev_num);
#endif /* _DDR3_TRAINING_LEVELING_H_ */

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define TYPICAL_PBS_VALUE 12
@ -23,7 +17,7 @@ u8 max_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 max_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM][MAX_CS_NUM];
u8 adll_shift_lock[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 adll_shift_val[MAX_INTERFACE_NUM][MAX_BUS_NUM];
enum hws_pattern pbs_pattern = PATTERN_VREF;
@ -49,34 +43,33 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
int init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
enum hws_edge_compare search_edge = EDGE_FP;
u32 pup = 0, bit = 0, if_id = 0, all_lock = 0, cs_num = 0;
int reg_addr = 0;
u32 reg_addr = 0;
u32 validation_val = 0;
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
u8 temp = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
/* save current cs enable reg val */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3)));
}
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET)) :
(WRITE_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET));
read_adll_value(nominal_adll, reg_addr, MASK_ALL_BITS);
CRX_PHY_REG(effective_cs) :
CTX_PHY_REG(effective_cs);
ddr3_tip_read_adll_value(dev_num, nominal_adll, reg_addr, MASK_ALL_BITS);
/* stage 1 shift ADLL */
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
@ -87,10 +80,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
pbs_pattern, search_edge, CS_SINGLE, cs_num,
train_status);
validation_val = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
min_adll_per_pup[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
pup_state[if_id][pup] = 0x3;
@ -100,8 +93,8 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* EBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_MULTICAST,
@ -111,7 +104,7 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
res0, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup,
@ -176,10 +169,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* EEBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (pup_state[if_id][pup] != 4)
continue;
@ -335,10 +328,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* Print Stage result */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, ADLL Shift for EBA: pup[%d] Lock status = %d Lock Val = %d,%d\n",
if_id, pup,
@ -350,10 +343,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("Update ADLL Shift of all pups:\n"));
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1)
continue;
/* if pup not locked continue to next pup */
@ -373,10 +366,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* PBS EEBA&EBA */
/* Start the Per Bit Skew search */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
max_pbs_per_pup[if_id][pup] = 0x0;
min_pbs_per_pup[if_id][pup] = 0x1f;
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
@ -400,10 +393,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
iterations, pbs_pattern, search_edge,
CS_SINGLE, cs_num, train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1) {
/* if pup not lock continue to next pup */
continue;
@ -461,10 +454,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* Check all Pup lock */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
all_lock = all_lock * adll_shift_lock[if_id][pup];
}
}
@ -478,11 +471,11 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
search_dir = (pbs_mode == PBS_RX_MODE) ? HWS_LOW2HIGH :
HWS_HIGH2LOW;
init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] == 1) {
/*if pup lock continue to next pup */
continue;
@ -627,11 +620,11 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
search_edge, CS_SINGLE, cs_num,
train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
@ -696,7 +689,7 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* Check all Pup state */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
for (pup = 0; pup < octets_per_if_num; pup++) {
/*
* DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
* ("pup_state[%d][%d] = %d\n",if_id,pup,pup_state
@ -707,12 +700,12 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* END OF SBA */
/* Norm */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* if pup not lock continue to next pup */
if (adll_shift_lock[if_id][pup] != 1) {
DEBUG_PBS_ENGINE(
@ -753,9 +746,9 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* DQ PBS register update with the final result */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
@ -771,28 +764,32 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
pad_num = dq_map_table[
bit + pup * BUS_WIDTH_IN_BITS +
if_id * BUS_WIDTH_IN_BITS *
tm->num_of_bus_per_interface];
MAX_BUS_NUM];
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("result_mat: %d ",
result_mat[if_id][pup]
[bit]));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
PBS_RX_PHY_REG(effective_cs, 0) :
PBS_TX_PHY_REG(effective_cs, 0);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr + pad_num,
result_mat[if_id][pup][bit]));
}
pbsdelay_per_pup[pbs_mode][if_id][pup] =
(max_pbs_per_pup[if_id][pup] ==
min_pbs_per_pup[if_id][pup]) ?
TYPICAL_PBS_VALUE :
((max_adll_per_pup[if_id][pup] -
min_adll_per_pup[if_id][pup]) * adll_tap /
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]));
if (max_pbs_per_pup[if_id][pup] == min_pbs_per_pup[if_id][pup]) {
temp = TYPICAL_PBS_VALUE;
} else {
temp = ((max_adll_per_pup[if_id][pup] -
min_adll_per_pup[if_id][pup]) *
adll_tap /
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]));
}
pbsdelay_per_pup[pbs_mode]
[if_id][pup][effective_cs] = temp;
/* RX results ready, write RX also */
if (pbs_mode == PBS_TX_MODE) {
@ -842,18 +839,18 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
(", PBS tap=%d [psec] ==> skew observed = %d\n",
pbsdelay_per_pup[pbs_mode][if_id][pup],
temp,
((max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) *
pbsdelay_per_pup[pbs_mode][if_id][pup])));
temp)));
}
}
/* Write back to the phy the default values */
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG + effective_cs * 4) :
(WRITE_CENTRALIZATION_PHY_REG + effective_cs * 4);
write_adll_value(nominal_adll, reg_addr);
CRX_PHY_REG(effective_cs) :
CTX_PHY_REG(effective_cs);
ddr3_tip_write_adll_value(dev_num, nominal_adll, reg_addr);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
reg_addr = (pbs_mode == PBS_RX_MODE) ?
@ -865,24 +862,29 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
0));
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val[if_id],
DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
/* exit test mode */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ODPG_WRITE_READ_MODE_ENABLE_REG, 0xffff, MASK_ALL_BITS));
ODPG_WR_RD_MODE_ENA_REG, 0xffff, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/*
* meaning that there is no VW exist at all (No lock at
* the EBA ADLL shift at EBS)
*/
if (pup_state[if_id][pup] == 1)
return MV_FAIL;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
/*
* no valid window found
* (no lock at EBA ADLL shift at EBS)
*/
if (pup_state[if_id][pup] == 1)
return MV_FAIL;
}
}
return MV_OK;
@ -912,14 +914,14 @@ int ddr3_tip_pbs_tx(u32 uidev_num)
return ddr3_tip_pbs(uidev_num, PBS_TX_MODE);
}
#ifndef EXCLUDE_SWITCH_DEBUG
#ifdef DDR_VIEWER_TOOL
/*
* Print PBS Result
*/
int ddr3_tip_print_all_pbs_result(u32 dev_num)
{
u32 curr_cs;
u32 max_cs = hws_ddr3_tip_max_cs_get();
u32 max_cs = ddr3_tip_max_cs_get(dev_num);
for (curr_cs = 0; curr_cs < max_cs; curr_cs++) {
ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_RX_MODE);
@ -936,21 +938,33 @@ int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
{
u32 data_value = 0, bit = 0, if_id = 0, pup = 0;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + cs_num * 0x10) :
(PBS_TX_PHY_REG + cs_num * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
PBS_RX_PHY_REG(cs_num, 0) :
PBS_TX_PHY_REG(cs_num , 0);
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
printf("%s,CS%d,PBS,ADLLRATIO,,,",
(pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx", cs_num);
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
printf("%d,",
pbsdelay_per_pup[pbs_mode][if_id][pup][cs_num]);
}
}
printf("CS%d, %s ,PBS\n", cs_num,
(pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
printf("%s, DQ", (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
printf("%d ,PBS,,, ", bit);
for (pup = 0; pup <= tm->num_of_bus_per_interface;
for (pup = 0; pup <= octets_per_if_num;
pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
@ -965,7 +979,7 @@ int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
return MV_OK;
}
#endif
#endif /* DDR_VIEWER_TOOL */
/*
* Fixup PBS Result
@ -974,13 +988,14 @@ int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode)
{
u32 if_id, pup, bit;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
PBS_RX_PHY_REG(effective_cs, 0) :
PBS_TX_PHY_REG(effective_cs, 0);
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup <= tm->num_of_bus_per_interface; pup++) {
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup <= octets_per_if_num; pup++) {
for (bit = 0; bit <= BUS_WIDTH_IN_BITS + 3; bit++) {
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,

View file

@ -1,538 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* Design Guidelines parameters */

/*
 * Default PHY pad calibration values (drive strength and on-die
 * termination) plus ODT/MR defaults. Presumably tuned per the Marvell
 * DDR3 design guidelines and overridable per board -- TODO confirm
 * against board-specific setup code.
 */
u32 g_zpri_data = 123; /* controller data - P drive strength */
u32 g_znri_data = 123; /* controller data - N drive strength */
u32 g_zpri_ctrl = 74; /* controller C/A - P drive strength */
u32 g_znri_ctrl = 74; /* controller C/A - N drive strength */
u32 g_zpodt_data = 45; /* controller data - P ODT */
u32 g_znodt_data = 45; /* controller data - N ODT */
u32 g_zpodt_ctrl = 45; /* controller C/A - P ODT */
u32 g_znodt_ctrl = 45; /* controller C/A - N ODT */
u32 g_odt_config_2cs = 0x120012; /* ODT config used when two chip-selects are populated */
u32 g_odt_config_1cs = 0x10000; /* ODT config used when one chip-select is populated */
u32 g_rtt_nom = 0x44; /* DRAM MR1 RTT_NOM field value -- TODO confirm encoding */
u32 g_dic = 0x2; /* DRAM MR1 output driver impedance control -- TODO confirm encoding */
#ifdef STATIC_ALGO_SUPPORT
#define PARAM_NOT_CARE 0
#define MAX_STATIC_SEQ 48

/* per-device extra silicon delay, registered via ddr3_tip_init_static_config_db() */
u32 silicon_delay[HWS_MAX_DEVICE_NUM];
/* per-device board/package trace tables, registered via ddr3_tip_init_static_config_db() */
struct hws_tip_static_config_info static_config[HWS_MAX_DEVICE_NUM];
/* per-device zero-terminated register list, registered via
 * ddr3_tip_init_specific_reg_config()
 */
static reg_data *static_init_controller_config[HWS_MAX_DEVICE_NUM];

/* debug delay in write leveling */
int wl_debug_delay = 0;
/* pup register #3 for functional board */
int function_reg_value = 8;
u32 silicon;

/* read-ready delay to add per max phase value (indexed by phase 0..7) */
u32 read_ready_delay_phase_offset[] = { 4, 4, 4, 4, 6, 6, 6, 6 };

/*
 * Maps a 4-bit chip-select bitmask (array index) to the first CS number
 * and the number of chip-selects set; e.g. mask 0b0011 -> first CS 0,
 * two chip-selects. Only the single-CS entries carry a meaningful first
 * CS value (see column header below).
 */
static struct cs_element chip_select_map[] = {
	/* CS Value (single only) Num_CS */
	{0, 0},
	{0, 1},
	{1, 1},
	{0, 2},
	{2, 1},
	{0, 2},
	{0, 2},
	{0, 3},
	{3, 1},
	{0, 2},
	{0, 2},
	{0, 3},
	{0, 2},
	{0, 3},
	{0, 3},
	{0, 4}
};
/*
 * Register static init controller DB
 *
 * Stores the caller-supplied, zero-terminated list of register writes
 * (reg_config_arr) for device dev_num; it is later replayed by
 * ddr3_tip_static_init_controller(). The array is referenced, not
 * copied, so it must stay valid. Always returns MV_OK.
 */
int ddr3_tip_init_specific_reg_config(u32 dev_num, reg_data *reg_config_arr)
{
	static_init_controller_config[dev_num] = reg_config_arr;
	return MV_OK;
}
/*
 * Register static info DB
 *
 * Records the per-device static configuration supplied by the platform:
 * board trace table, package trace table and extra silicon delay. Only
 * pointers are stored, so the caller's tables must remain valid.
 * Always returns MV_OK.
 */
int ddr3_tip_init_static_config_db(
	u32 dev_num, struct hws_tip_static_config_info *static_config_info)
{
	struct hws_tip_static_config_info *db = &static_config[dev_num];

	db->board_trace_arr = static_config_info->board_trace_arr;
	db->package_trace_arr = static_config_info->package_trace_arr;
	silicon_delay[dev_num] = static_config_info->silicon_delay;

	return MV_OK;
}
/*
 * Static round trip flow - Calculates the total round trip delay.
 *
 * For every active bus of every active interface, sums the board trace
 * delay, the package trace delay and a fixed delay term into
 * round_trip_delay_arr[global_bus] (global_bus = if_id * buses-per-if +
 * bus index).
 *
 * dev_num              - device number (selects the registered package table)
 * table_ptr            - board trace table, indexed by global bus
 * is_wl                - 1: write leveling (DQS delay subtracted from CK,
 *                        fixed term = wl_debug_delay);
 *                        0: read leveling (DQS + CK summed,
 *                        fixed term = silicon_delay[dev_num])
 * round_trip_delay_arr - output array, one entry per global bus
 *
 * Returns MV_OK.
 */
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
					 struct trip_delay_element *table_ptr,
					 int is_wl, u32 *round_trip_delay_arr)
{
	u32 bus_index, global_bus;
	u32 if_id;
	u32 bus_per_interface;
	int sign;
	u32 temp;
	u32 board_trace;
	struct trip_delay_element *pkg_delay_ptr;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/*
	 * In WL we calc the diff between Clock to DQs in RL we sum the round
	 * trip of Clock and DQs
	 */
	sign = (is_wl) ? -1 : 1;
	bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		for (bus_index = 0; bus_index < bus_per_interface;
		     bus_index++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
			global_bus = (if_id * bus_per_interface) + bus_index;

			/* calculate total trip delay (package and board) */
			board_trace = (table_ptr[global_bus].dqs_delay * sign) +
				table_ptr[global_bus].ck_delay;
			/*
			 * Convert the length to delay in psec units
			 * (x163/1000 scale factor -- presumably ps per trace
			 * length unit; TODO confirm units)
			 */
			temp = (board_trace * 163) / 1000;

			pkg_delay_ptr =
				static_config[dev_num].package_trace_arr;
			/* board + package + WL-debug or silicon delay term */
			round_trip_delay_arr[global_bus] = temp +
				(int)(pkg_delay_ptr[global_bus].dqs_delay *
				      sign) +
				(int)pkg_delay_ptr[global_bus].ck_delay +
				(int)((is_wl == 1) ? wl_debug_delay :
				      (int)silicon_delay[dev_num]);
			DEBUG_TRAINING_STATIC_IP(
				DEBUG_LEVEL_TRACE,
				("Round Trip Build round_trip_delay_arr[0x%x]: 0x%x temp 0x%x\n",
				 global_bus, round_trip_delay_arr[global_bus],
				 temp));
		}
	}

	return MV_OK;
}
/*
 * Write leveling for static flow - calculating the round trip delay of the
 * DQS signal.
 *
 * Converts each bus's pre-computed round-trip delay into a 3-bit phase
 * and 5-bit ADLL value and programs the write-delay and write
 * centralization PHY registers of interface if_id.
 *
 * dev_num              - device number
 * if_id                - interface to configure
 * frequency            - DDR frequency (selects the ADLL tap period)
 * round_trip_delay_arr - per-global-bus delay from
 *                        ddr3_tip_static_round_trip_arr_build()
 *
 * Returns MV_OK (CHECK_STATUS returns early on a PHY access error).
 *
 * NOTE(review): this function references 'tm' (in VALIDATE_ACTIVE) and
 * 'cs' (in PHY_WRITE_DELAY) without declaring either; it only compiles
 * because the whole file section is guarded by STATIC_ALGO_SUPPORT and
 * is normally excluded from the build -- confirm before enabling.
 */
int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id,
					  enum hws_ddr_freq frequency,
					  u32 *round_trip_delay_arr)
{
	u32 bus_index;		/* index to the bus loop */
	u32 bus_start_index;
	u32 bus_per_interface;
	u32 phase = 0;
	u32 adll = 0, adll_cen, adll_inv, adll_final;
	/* one ADLL tap = 1/64 of the SDR clock period, in psec */
	u32 adll_period = MEGA / freq_val[frequency] / 64;

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_write_leveling_static_config\n"));
	DEBUG_TRAINING_STATIC_IP(
		DEBUG_LEVEL_TRACE,
		("dev_num 0x%x IF 0x%x freq %d (adll_period 0x%x)\n",
		 dev_num, if_id, frequency, adll_period));

	bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
	bus_start_index = if_id * bus_per_interface;
	for (bus_index = bus_start_index;
	     bus_index < (bus_start_index + bus_per_interface); bus_index++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
		/* split the delay into 32-tap phases plus residual taps */
		phase = round_trip_delay_arr[bus_index] / (32 * adll_period);
		adll = (round_trip_delay_arr[bus_index] -
			(phase * 32 * adll_period)) / adll_period;
		adll = (adll > 31) ? 31 : adll;	/* clamp to 5-bit field */
		/* center the ADLL value, folding any overflow into adll_inv */
		adll_cen = 16 + adll;
		adll_inv = adll_cen / 32;
		adll_final = adll_cen - (adll_inv * 32);
		adll_final = (adll_final > 31) ? 31 : adll_final;

		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("\t%d - phase 0x%x adll 0x%x\n",
					  bus_index, phase, adll));
		/*
		 * Writing to all 4 phy of Interface number,
		 * bit 0 - 4 - ADLL, bit 6-8 phase
		 */
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      (bus_index % 4), DDR_PHY_DATA,
			      PHY_WRITE_DELAY(cs),
			      ((phase << 6) + (adll & 0x1f)), 0x1df));
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      ACCESS_TYPE_UNICAST, (bus_index % 4),
			      DDR_PHY_DATA, WRITE_CENTRALIZATION_PHY_REG,
			      ((adll_inv & 0x1) << 5) + adll_final));
	}

	return MV_OK;
}
/*
 * Read leveling for static flow
 *
 * Converts the pre-computed round-trip delays of each DQS pair into
 * per-chip-select read-sample and read-ready delays plus per-bus
 * phase/ADLL settings, and programs them into the data PHYs and the
 * controller's read-delay registers for interface if_id.
 *
 * dev_num                    - device number
 * if_id                      - interface to configure
 * frequency                  - DDR frequency the delays were computed for
 * total_round_trip_delay_arr - per-global-bus delay from
 *                              ddr3_tip_static_round_trip_arr_build()
 *
 * Returns MV_OK (CHECK_STATUS returns early on a register access error).
 */
int ddr3_tip_read_leveling_static_config(u32 dev_num,
					 u32 if_id,
					 enum hws_ddr_freq frequency,
					 u32 *total_round_trip_delay_arr)
{
	u32 cs, data0, data1, data3 = 0;
	u32 bus_index;		/* index to the bus loop */
	u32 bus_start_index;
	u32 phase0, phase1, max_phase;
	u32 adll0, adll1;
	u32 cl_value;
	u32 min_delay;
	/* clock periods in psec: SDR, DDR (half SDR) and ADLL tap (1/64) */
	u32 sdr_period = MEGA / freq_val[frequency];
	u32 ddr_period = MEGA / freq_val[frequency] / 2;
	u32 adll_period = MEGA / freq_val[frequency] / 64;
	enum hws_speed_bin speed_bin_index;
	u32 rd_sample_dly[MAX_CS_NUM] = { 0 };
	u32 rd_ready_del[MAX_CS_NUM] = { 0 };
	u32 bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_read_leveling_static_config\n"));
	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("dev_num 0x%x ifc 0x%x freq %d\n", dev_num,
				  if_id, frequency));
	DEBUG_TRAINING_STATIC_IP(
		DEBUG_LEVEL_TRACE,
		("Sdr_period 0x%x Ddr_period 0x%x adll_period 0x%x\n",
		 sdr_period, ddr_period, adll_period));

	/*
	 * CAS latency: taken from the topology when running at the
	 * configured memory frequency, otherwise looked up by speed bin.
	 */
	if (tm->interface_params[first_active_if].memory_freq ==
	    frequency) {
		cl_value = tm->interface_params[first_active_if].cas_l;
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x\n", cl_value));
	} else {
		speed_bin_index = tm->interface_params[if_id].speed_bin_index;
		cl_value = cas_latency_table[speed_bin_index].cl_val[frequency];
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("cl_value 0x%x speed_bin_index %d\n",
					  cl_value, speed_bin_index));
	}

	/* walk the interface's buses in pairs (bus_index, bus_index + 1) */
	bus_start_index = if_id * bus_per_interface;
	for (bus_index = bus_start_index;
	     bus_index < (bus_start_index + bus_per_interface);
	     bus_index += 2) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);

		/* chip select served by this bus, from its CS bitmask */
		cs = chip_select_map[
			tm->interface_params[if_id].as_bus_params[
				(bus_index % 4)].cs_bitmask].cs_num;

		/* read sample delay calculation */
		min_delay = (total_round_trip_delay_arr[bus_index] <
			     total_round_trip_delay_arr[bus_index + 1]) ?
			total_round_trip_delay_arr[bus_index] :
			total_round_trip_delay_arr[bus_index + 1];
		/* round down to an even number of SDR cycles */
		rd_sample_dly[cs] = 2 * (min_delay / (sdr_period * 2));
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\t%d - min_delay 0x%x cs 0x%x rd_sample_dly[cs] 0x%x\n",
			 bus_index, min_delay, cs, rd_sample_dly[cs]));

		/* phase calculation: remaining delay in DDR half-cycles */
		phase0 = (total_round_trip_delay_arr[bus_index] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		phase1 = (total_round_trip_delay_arr[bus_index + 1] -
			  (sdr_period * rd_sample_dly[cs])) / (ddr_period);
		max_phase = (phase0 > phase1) ? phase0 : phase1;
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tphase0 0x%x phase1 0x%x max_phase 0x%x\n",
			 phase0, phase1, max_phase));

		/* ADLL calculation: residual delay in taps, clamped to 31 */
		adll0 = (u32)((total_round_trip_delay_arr[bus_index] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase0)) / adll_period);
		adll0 = (adll0 > 31) ? 31 : adll0;
		adll1 = (u32)((total_round_trip_delay_arr[bus_index + 1] -
			       (sdr_period * rd_sample_dly[cs]) -
			       (ddr_period * phase1)) / adll_period);
		adll1 = (adll1 > 31) ? 31 : adll1;

		/* the read-ready delay closes the read FIFO */
		rd_ready_del[cs] = rd_sample_dly[cs] +
			read_ready_delay_phase_offset[max_phase];
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_TRACE,
			("\tadll0 0x%x adll1 0x%x rd_ready_del[cs] 0x%x\n",
			 adll0, adll1, rd_ready_del[cs]));

		/*
		 * Write to the phy of Interface (bit 0 - 4 - ADLL,
		 * bit 6-8 phase)
		 */
		data0 = ((phase0 << 6) + (adll0 & 0x1f));
		data1 = ((phase1 << 6) + (adll1 & 0x1f));

		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      (bus_index % 4), DDR_PHY_DATA, PHY_READ_DELAY(cs),
			      data0, 0x1df));
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      ((bus_index + 1) % 4), DDR_PHY_DATA,
			      PHY_READ_DELAY(cs), data1, 0x1df));
	}

	/* clear PHY register 0x3 low bits on every bus (data3 is always 0) */
	for (bus_index = 0; bus_index < bus_per_interface; bus_index++) {
		VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
		CHECK_STATUS(ddr3_tip_bus_read_modify_write
			     (dev_num, ACCESS_TYPE_UNICAST, if_id,
			      bus_index, DDR_PHY_DATA, 0x3, data3, 0x1f));
	}
	/* CS0 sample delay (+CL) in bits 0-7, CS1 sample delay in bits 8-15 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_SAMPLE_DELAY,
		      (rd_sample_dly[0] + cl_value) + (rd_sample_dly[1] << 8),
		      MASK_ALL_BITS));

	/* Read_ready_del0 bit 0-4 , CS bits 8-12 */
	CHECK_STATUS(ddr3_tip_if_write
		     (dev_num, ACCESS_TYPE_UNICAST, if_id,
		      READ_DATA_READY_DELAY,
		      rd_ready_del[0] + (rd_ready_del[1] << 8) + cl_value,
		      MASK_ALL_BITS));

	return MV_OK;
}
/*
 * DDR3 Static flow
 *
 * Top-level static training: initializes the controller, builds the
 * write-leveling and read-leveling round-trip delay tables from the
 * registered board/package trace data, then programs both into every
 * active interface at frequency freq.
 *
 * dev_num - device number
 * freq    - init frequency the static delays are computed for
 *           (not the target frequency, see comment in the loop)
 *
 * Returns MV_OK (CHECK_STATUS returns early on error; a controller
 * init failure is only logged, not propagated).
 */
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq)
{
	u32 if_id = 0;
	struct trip_delay_element *table_ptr;
	u32 wl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
	u32 rl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
	struct init_cntr_param init_cntr_prm;
	int ret;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_run_static_alg"));

	init_cntr_prm.do_mrs_phy = 1;
	init_cntr_prm.is_ctrl64_bit = 0;
	init_cntr_prm.init_phy = 1;
	ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
	if (ret != MV_OK) {
		/* NOTE(review): failure is logged but not returned */
		DEBUG_TRAINING_STATIC_IP(
			DEBUG_LEVEL_ERROR,
			("hws_ddr3_tip_init_controller failure\n"));
	}

	/* calculate the round trip delay for Write Leveling */
	table_ptr = static_config[dev_num].board_trace_arr;
	CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
		     (dev_num, table_ptr, 1,
		      wl_total_round_trip_delay_arr));
	/* calculate the round trip delay for Read Leveling */
	CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
		     (dev_num, table_ptr, 0,
		      rl_total_round_trip_delay_arr));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);
		/*
		 * Static frequency is defined according to init-frequency
		 * (not target)
		 */
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("Static IF %d freq %d\n",
					  if_id, freq));
		CHECK_STATUS(ddr3_tip_write_leveling_static_config
			     (dev_num, if_id, freq,
			      wl_total_round_trip_delay_arr));
		CHECK_STATUS(ddr3_tip_read_leveling_static_config
			     (dev_num, if_id, freq,
			      rl_total_round_trip_delay_arr));
	}

	return MV_OK;
}
/*
 * Init controller for static flow
 *
 * Replays the zero-terminated register list previously registered for
 * dev_num via ddr3_tip_init_specific_reg_config(), writing each entry
 * (address/data/mask) to all interfaces. Returns MV_OK; CHECK_STATUS
 * returns early on a write error.
 */
int ddr3_tip_static_init_controller(u32 dev_num)
{
	reg_data *cfg = static_init_controller_config[dev_num];
	u32 i;

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("ddr3_tip_static_init_controller\n"));

	/* the list is terminated by an entry whose reg_addr is 0 */
	for (i = 0; cfg[i].reg_addr != 0; i++) {
		CHECK_STATUS(ddr3_tip_if_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      cfg[i].reg_addr, cfg[i].reg_data,
			      cfg[i].reg_mask));
		DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
					 ("Init_controller index_cnt %d\n",
					  i));
	}

	return MV_OK;
}
/*
 * Static PHY init: writes a fixed sequence of configuration values to
 * data-PHY registers 0xa4, 0xa6, 0xa9, 0xd0 and 0xa8 on all interfaces
 * and pups. The magic addresses/values are vendor-provided -- register
 * meanings are not documented here; TODO confirm against the Marvell
 * PHY register specification.
 *
 * Returns MV_OK (CHECK_STATUS returns early on a write error).
 */
int ddr3_tip_static_phy_init_controller(u32 dev_num)
{
	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("Phy Init Controller 2\n"));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa4,
		      0x3dfe));

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("Phy Init Controller 3\n"));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa6,
		      0xcb2));

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("Phy Init Controller 4\n"));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa9,
		      0));

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("Static Receiver Calibration\n"));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xd0,
		      0x1f));

	DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
				 ("Static V-REF Calibration\n"));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa8,
		      0x434));

	return MV_OK;
}
#endif
/*
 * Configure phy (called by static init controller) for static flow
 *
 * Programs the PHY pad calibration registers (drive strength and ODT,
 * from the g_z* globals above) for both the data and control PHYs,
 * disables the pad pre-emphasis and CMOS config, then sets the per-pup
 * Vref/clamp pad configuration from the external clamp_tbl[] and vref
 * globals. Finishes with a write of 0x6002 to data-PHY register 0x90
 * (vendor magic -- TODO confirm meaning).
 *
 * Returns MV_OK (CHECK_STATUS returns early on a write error).
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* data pads: P/N drive strength, 7 bits each */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CALIB_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	/* control (C/A) pads: P/N drive strength */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CALIB_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/* data pads: P/N ODT, 6 bits each */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CALIB_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	/* control pads: P/N ODT */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CALIB_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	/* disable pad pre-emphasis and CMOS configuration */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < tm->num_of_bus_per_interface;
		     phy_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
			/* Vref & clamp */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CONFIG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CONFIG_PHY_REG, 0x4, 0x7));
		}
	}

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0x90,
		      0x6002));

	return MV_OK;
}

View file

@ -0,0 +1,148 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR_ML_WRAPPER_H
#define _DDR_ML_WRAPPER_H
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
#endif
/*
* MV_DEBUG_INIT need to be defines, otherwise the output of the
* DDR2 training code is not complete and misleading
*/
#define MV_DEBUG_INIT
#ifdef MV_DEBUG_INIT
#define DEBUG_INIT_S(s) puts(s)
#define DEBUG_INIT_D(d, l) printf("%x", d)
#define DEBUG_INIT_D_10(d, l) printf("%d", d)
#else
#define DEBUG_INIT_S(s)
#define DEBUG_INIT_D(d, l)
#define DEBUG_INIT_D_10(d, l)
#endif
#ifdef MV_DEBUG_INIT_FULL
#define DEBUG_INIT_FULL_S(s) puts(s)
#define DEBUG_INIT_FULL_D(d, l) printf("%x", d)
#define DEBUG_INIT_FULL_D_10(d, l) printf("%d", d)
#define DEBUG_WR_REG(reg, val) \
{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#define DEBUG_RD_REG(reg, val) \
{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#else
#define DEBUG_INIT_FULL_S(s)
#define DEBUG_INIT_FULL_D(d, l)
#define DEBUG_INIT_FULL_D_10(d, l)
#define DEBUG_WR_REG(reg, val)
#define DEBUG_RD_REG(reg, val)
#endif
#define DEBUG_INIT_FULL_C(s, d, l) \
{ DEBUG_INIT_FULL_S(s); \
DEBUG_INIT_FULL_D(d, l); \
DEBUG_INIT_FULL_S("\n"); }
#define DEBUG_INIT_C(s, d, l) \
{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
/*
* Debug (Enable/Disable modules) and Error report
*/
#ifdef BASIC_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS_RESULTS
#endif
#ifdef FULL_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS
#define MV_DEBUG_PBS
#define MV_DEBUG_DFS
#define MV_DEBUG_MAIN_FULL
#define MV_DEBUG_DFS_FULL
#define MV_DEBUG_DQS_FULL
#define MV_DEBUG_RL_FULL
#define MV_DEBUG_WL_FULL
#endif
/* The following is a list of Marvell status */
#define MV_ERROR (-1)
#define MV_OK (0x00) /* Operation succeeded */
#define MV_FAIL (0x01) /* Operation failed */
#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
#define MV_BAD_PTR (0x05) /* Illegal pointer value */
#define MV_BAD_SIZE (0x06) /* Illegal size */
#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
#define MV_SET_ERROR (0x08) /* Set operation failed */
#define MV_GET_ERROR (0x09) /* Get operation failed */
#define MV_CREATE_ERROR (0x0a) /* Fail while creating an item */
#define MV_NOT_FOUND (0x0b) /* Item not found */
#define MV_NO_MORE (0x0c) /* No more items found */
#define MV_NO_SUCH (0x0d) /* No such item */
#define MV_TIMEOUT (0x0e) /* Time Out */
#define MV_NO_CHANGE (0x0f) /* Parameter(s) is already in this value */
#define MV_NOT_SUPPORTED (0x10) /* This request is not supported */
#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/
#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
#define MV_INIT_ERROR (0x16) /* Error occured while INIT process */
#define MV_HW_ERROR (0x17) /* Hardware error */
#define MV_TX_ERROR (0x18) /* Transmit operation not succeeded */
#define MV_RX_ERROR (0x19) /* Recieve operation not succeeded */
#define MV_NOT_READY (0x1a) /* The other side is not ready yet */
#define MV_ALREADY_EXIST (0x1b) /* Tried to create existing item */
#define MV_OUT_OF_CPU_MEM (0x1c) /* Cpu memory allocation failed. */
#define MV_NOT_STARTED (0x1d) /* Not started yet */
#define MV_BUSY (0x1e) /* Item is busy. */
#define MV_TERMINATE (0x1f) /* Item terminates it's work. */
#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
#define MV_WRITE_PROTECT (0x22) /* Write protected */
#define MV_INVALID (int)(-1)
/*
* Accessor functions for the registers
*/
/* Write @val to the DDR/SoC register at offset @addr from the internal regs base. */
static inline void reg_write(u32 addr, u32 val)
{
	u32 abs_addr = INTER_REGS_BASE + addr;

	writel(val, abs_addr);
}
/* Read and return the register at offset @addr from the internal regs base. */
static inline u32 reg_read(u32 addr)
{
	u32 abs_addr = INTER_REGS_BASE + addr;

	return readl(abs_addr);
}
/* Set the bits of @mask in the register at offset @addr (read-modify-write). */
static inline void reg_bit_set(u32 addr, u32 mask)
{
	u32 abs_addr = INTER_REGS_BASE + addr;

	setbits_le32(abs_addr, mask);
}
/* Clear the bits of @mask in the register at offset @addr (read-modify-write). */
static inline void reg_bit_clr(u32 addr, u32 mask)
{
	u32 abs_addr = INTER_REGS_BASE + addr;

	clrbits_le32(abs_addr, mask);
}
#endif /* _DDR_ML_WRAPPER_H */

View file

@ -9,38 +9,13 @@
#include "ddr3_training_ip_def.h"
#include "ddr3_topology_def.h"
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#include "mv_ddr_plat.h"
#endif
/* bus width in bits */
enum hws_bus_width {
BUS_WIDTH_4,
BUS_WIDTH_8,
BUS_WIDTH_16,
BUS_WIDTH_32
};
enum hws_temperature {
HWS_TEMP_LOW,
HWS_TEMP_NORMAL,
HWS_TEMP_HIGH
};
enum hws_mem_size {
MEM_512M,
MEM_1G,
MEM_2G,
MEM_4G,
MEM_8G,
MEM_SIZE_LAST
};
enum hws_timing {
HWS_TIM_DEFAULT,
HWS_TIM_1T,
HWS_TIM_2T
};
#include "mv_ddr_topology.h"
#include "mv_ddr_spd.h"
#include "ddr3_logging_def.h"
struct bus_params {
/* Chip Select (CS) bitmask (bits 0-CS0, bit 1- CS1 ...) */
@ -66,11 +41,11 @@ struct if_params {
/* Speed Bin Table */
enum hws_speed_bin speed_bin_index;
/* bus width of memory */
enum hws_bus_width bus_width;
/* sdram device width */
enum mv_ddr_dev_width bus_width;
/* Bus memory size (MBit) */
enum hws_mem_size memory_size;
/* total sdram capacity per die, megabits */
enum mv_ddr_die_capacity memory_size;
/* The DDR frequency for each interfaces */
enum hws_ddr_freq memory_freq;
@ -88,33 +63,52 @@ struct if_params {
u8 cas_l;
/* operation temperature */
enum hws_temperature interface_temp;
enum mv_ddr_temperature interface_temp;
/* 2T vs 1T mode (by default computed from number of CSs) */
enum hws_timing timing;
enum mv_ddr_timing timing;
};
struct hws_topology_map {
struct mv_ddr_topology_map {
/* debug level configuration */
enum mv_ddr_debug_level debug_level;
/* Number of interfaces (default is 12) */
u8 if_act_mask;
/* Controller configuration per interface */
struct if_params interface_params[MAX_INTERFACE_NUM];
/* BUS per interface (default is 4) */
u8 num_of_bus_per_interface;
/* Bit mask for active buses */
u8 bus_act_mask;
u16 bus_act_mask;
/* source of ddr configuration data */
enum mv_ddr_cfg_src cfg_src;
/* raw spd data */
union mv_ddr_spd_data spd_data;
/* timing parameters */
unsigned int timing_data[MV_DDR_TDATA_LAST];
};
/*
 * DDR3 training global configuration parameters.
 * Values correspond to the per-platform TUNE_TRAINING_PARAMS_* defaults —
 * NOTE(review): exact field semantics are not visible in this file;
 * confirm against the platform header and training code.
 */
struct tune_train_params {
	/* clock-delay tuning values */
	u32 ck_delay;
	u32 ck_delay_16;	/* presumably the 16-bit-bus variant — confirm */
	u32 p_finger;
	u32 n_finger;
	u32 phy_reg3_val;
	/* driver-strength (zpri/znri) calibration, data and control buses */
	u32 g_zpri_data;
	u32 g_znri_data;
	u32 g_zpri_ctrl;
	u32 g_znri_ctrl;
	/* on-die-termination (zpodt/znodt) calibration, data and control buses */
	u32 g_zpodt_data;
	u32 g_znodt_data;
	u32 g_zpodt_ctrl;
	u32 g_znodt_ctrl;
	/* output driver impedance control and ODT configuration */
	u32 g_dic;
	u32 g_odt_config;
	u32 g_rtt_nom;
	u32 g_rtt_wr;
	u32 g_rtt_park;
};
#endif /* _DDR_TOPOLOGY_DEF_H */

View file

@ -0,0 +1 @@
const char mv_ddr_build_message[] = ""; const char mv_ddr_version_string[] = "mv_ddr: mv_ddr-armada-17.10.4";

View file

@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_common.h"
#include "ddr_ml_wrapper.h"
/* Print the mv_ddr version string and build message to the console. */
void mv_ddr_ver_print(void)
{
	const char *version = mv_ddr_version_string;
	const char *build = mv_ddr_build_message;

	printf("%s %s\n", version, build);
}
/*
 * ceil_div() - integer division rounded up
 * @x: dividend
 * @y: divisor (must be non-zero)
 *
 * Return: smallest integer greater than or equal to x / y.
 * Avoids the (x + y - 1) form so large @x cannot overflow.
 */
unsigned int ceil_div(unsigned int x, unsigned int y)
{
	unsigned int q = x / y;

	if (x % y)
		q++;

	return q;
}
/*
 * time_to_nclk() - convert a time value to a number of clock cycles
 * @t:    time, ps
 * @tclk: clock period, ps (must be non-zero)
 *
 * Rounds using the 97.4% inverse factor per JEDEC Standard No. 21-C,
 * 4.1.2.L-4: Serial Presence Detect (SPD) for DDR4 SDRAM Modules
 * (i.e. adds 0.974 of a cycle before truncating, instead of plain
 * round-half-up, to absorb SPD rounding error).
 *
 * Return: number of clock cycles covering @t.
 */
unsigned int time_to_nclk(unsigned int t, unsigned int tclk)
{
	/* scale by 1000 to keep three fractional digits of the quotient */
	unsigned long scaled = (unsigned long)t * 1000 / tclk;

	return (scaled + 974) / 1000;
}
/*
 * round_div() - divide two positive integers, rounding to nearest
 * @dividend: numerator
 * @divisor:  denominator
 * @quotient: out: rounded result (untouched on failure)
 *
 * Return: MV_OK on success; MV_FAIL if @quotient is NULL or @divisor is 0
 * (an error message is printed in both failure cases).
 */
int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient)
{
	/* guard clauses: reject bad output pointer and division by zero */
	if (quotient == NULL) {
		printf("%s: error: NULL quotient pointer found\n", __func__);
		return MV_FAIL;
	}

	if (divisor == 0) {
		printf("%s: error: division by zero\n", __func__);
		return MV_FAIL;
	}

	/* add half the divisor before truncating to round to nearest */
	*quotient = (dividend + divisor / 2) / divisor;

	return MV_OK;
}

View file

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_COMMON_H
#define _MV_DDR_COMMON_H
/* version/build strings, defined in the generated mv_ddr build-message file */
extern const char mv_ddr_build_message[];
extern const char mv_ddr_version_string[];
/* common unit-conversion constants */
#define MV_DDR_NUM_BITS_IN_BYTE 8
#define MV_DDR_MEGA_BITS (1024 * 1024)
#define MV_DDR_32_BITS_MASK 0xffffffff
/* ceiling division for positive integers */
unsigned int ceil_div(unsigned int x, unsigned int y);
/* convert time (ps) to clock cycles given period tclk (ps), JEDEC rounding */
unsigned int time_to_nclk(unsigned int t, unsigned int tclk);
/* round-to-nearest division; returns MV_OK/MV_FAIL */
int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient);
#endif /* _MV_DDR_COMMON_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,236 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_PLAT_H
#define _MV_DDR_PLAT_H
#define MAX_INTERFACE_NUM 1
#define MAX_BUS_NUM 5
#define DDR_IF_CTRL_SUBPHYS_NUM 3
#define DFS_LOW_FREQ_VALUE 120
#define SDRAM_CS_SIZE 0xfffffff /* FIXME: implement a function for cs size for each platform */
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
#define AP_INT_REG_START_ADDR 0xd0000000
#define AP_INT_REG_END_ADDR 0xd0100000
/* Controller bus divider: 1 for 32 bit, 2 for 64 bit */
#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER 1
/* Tune internal training params values */
#define TUNE_TRAINING_PARAMS_CK_DELAY 160
#define TUNE_TRAINING_PARAMS_PHYREG3VAL 0xA
#define TUNE_TRAINING_PARAMS_PRI_DATA 123
#define TUNE_TRAINING_PARAMS_NRI_DATA 123
#define TUNE_TRAINING_PARAMS_PRI_CTRL 74
#define TUNE_TRAINING_PARAMS_NRI_CTRL 74
#define TUNE_TRAINING_PARAMS_P_ODT_DATA 45
#define TUNE_TRAINING_PARAMS_N_ODT_DATA 45
#define TUNE_TRAINING_PARAMS_P_ODT_CTRL 45
#define TUNE_TRAINING_PARAMS_N_ODT_CTRL 45
#define TUNE_TRAINING_PARAMS_DIC 0x2
#define TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS 0x120012
#define TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS 0x10000
#define TUNE_TRAINING_PARAMS_RTT_NOM 0x44
#define TUNE_TRAINING_PARAMS_RTT_WR_1CS 0x0 /*off*/
#define TUNE_TRAINING_PARAMS_RTT_WR_2CS 0x0 /*off*/
#define MARVELL_BOARD MARVELL_BOARD_ID_BASE
#define REG_DEVICE_SAR1_ADDR 0xe4204
#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET 17
#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK 0x1f
#define DEVICE_SAMPLE_AT_RESET2_REG 0x18604
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET 0
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ 0
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_40MHZ 1
/* DRAM Windows */
#define REG_XBAR_WIN_5_CTRL_ADDR 0x20050
#define REG_XBAR_WIN_5_BASE_ADDR 0x20054
/* DRAM Windows */
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0
#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4
#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win))
#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win))
#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100))
#define CPU_MRVL_ID_OFFSET 0x10
#define SAR1_CPU_CORE_MASK 0x00000018
#define SAR1_CPU_CORE_OFFSET 3
/* SatR defined to change topology busWidth and ECC configuration */
#define DDR_SATR_CONFIG_MASK_WIDTH 0x8
#define DDR_SATR_CONFIG_MASK_ECC 0x10
#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20
#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600
#define MV_BOARD_REFCLK_25MHZ 25000000
#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ
#define MAX_DQ_NUM 40
/* dram line buffer registers */
#define DLB_CTRL_REG 0x1700
#define DLB_EN_OFFS 0
#define DLB_EN_MASK 0x1
#define DLB_EN_ENA 1
#define DLB_EN_DIS 0
#define WR_COALESCE_EN_OFFS 2
#define WR_COALESCE_EN_MASK 0x1
#define WR_COALESCE_EN_ENA 1
#define WR_COALESCE_EN_DIS 0
#define AXI_PREFETCH_EN_OFFS 3
#define AXI_PREFETCH_EN_MASK 0x1
#define AXI_PREFETCH_EN_ENA 1
#define AXI_PREFETCH_EN_DIS 0
#define MBUS_PREFETCH_EN_OFFS 4
#define MBUS_PREFETCH_EN_MASK 0x1
#define MBUS_PREFETCH_EN_ENA 1
#define MBUS_PREFETCH_EN_DIS 0
#define PREFETCH_NXT_LN_SZ_TRIG_OFFS 6
#define PREFETCH_NXT_LN_SZ_TRIG_MASK 0x1
#define PREFETCH_NXT_LN_SZ_TRIG_ENA 1
#define PREFETCH_NXT_LN_SZ_TRIG_DIS 0
#define DLB_BUS_OPT_WT_REG 0x1704
#define DLB_AGING_REG 0x1708
#define DLB_EVICTION_CTRL_REG 0x170c
#define DLB_EVICTION_TIMERS_REG 0x1710
#define DLB_USER_CMD_REG 0x1714
#define DLB_WTS_DIFF_CS_REG 0x1770
#define DLB_WTS_DIFF_BG_REG 0x1774
#define DLB_WTS_SAME_BG_REG 0x1778
#define DLB_WTS_CMDS_REG 0x177c
#define DLB_WTS_ATTR_PRIO_REG 0x1780
#define DLB_QUEUE_MAP_REG 0x1784
#define DLB_SPLIT_REG 0x1788
/* Subphy result control per byte registers */
#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830
#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834
#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838
#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c
#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0
/* Subphy result control per bit registers */
#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4
#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8
#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc
#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0
#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4
#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8
#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc
#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0
#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4
#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8
#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc
#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930
#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934
#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938
#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c
#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0
#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4
#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8
#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc
#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0
#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4
#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8
#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc
#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0
#define RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4
#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8
#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc
#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30
#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34
#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38
#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c
#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0
#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4
#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8
#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc
#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0
#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4
#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8
#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc
#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0
/* CPU */
#define REG_BOOTROM_ROUTINE_ADDR 0x182d0
#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12
/* Matrix enables DRAM modes (bus width/ECC) per boardId */
#define TOPOLOGY_UPDATE_32BIT 0
#define TOPOLOGY_UPDATE_32BIT_ECC 1
#define TOPOLOGY_UPDATE_16BIT 2
#define TOPOLOGY_UPDATE_16BIT_ECC 3
#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4
#define TOPOLOGY_UPDATE { \
/* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \
{1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \
{1, 1, 1, 1, 1}, /* DB_68XX_ID */ \
{1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \
{0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \
{1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \
};
enum {
CPU_1066MHZ_DDR_400MHZ,
CPU_RESERVED_DDR_RESERVED0,
CPU_667MHZ_DDR_667MHZ,
CPU_800MHZ_DDR_800MHZ,
CPU_RESERVED_DDR_RESERVED1,
CPU_RESERVED_DDR_RESERVED2,
CPU_RESERVED_DDR_RESERVED3,
LAST_FREQ
};
/* struct used for DLB configuration array */
struct dlb_config {
u32 reg_addr;
u32 reg_data;
};
#define ACTIVE_INTERFACE_MASK 0x1
extern u32 dmin_phy_reg_table[][2];
extern u16 odt_slope[];
extern u16 odt_intercept[];
int mv_ddr_pre_training_soc_config(const char *ddr_type);
int mv_ddr_post_training_soc_config(const char *ddr_type);
void mv_ddr_mem_scrubbing(void);
void mv_ddr_odpg_enable(void);
void mv_ddr_odpg_disable(void);
void mv_ddr_odpg_done_clr(void);
int mv_ddr_is_odpg_done(u32 count);
void mv_ddr_training_enable(void);
int mv_ddr_is_training_done(u32 count, u32 *result);
u32 mv_ddr_dm_pad_get(void);
int mv_ddr_pre_training_fixup(void);
int mv_ddr_post_training_fixup(void);
int mv_ddr_manual_cal_do(void);
int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size);
#endif /* _MV_DDR_PLAT_H */

View file

@ -0,0 +1,446 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_REGS_H
#define _MV_DDR_REGS_H
#define GLOB_CTRL_STATUS_REG 0x1030
#define TRAINING_TRIGGER_OFFS 0
#define TRAINING_TRIGGER_MASK 0x1
#define TRAINING_TRIGGER_ENA 1
#define TRAINING_DONE_OFFS 1
#define TRAINING_DONE_MASK 0x1
#define TRAINING_DONE_DONE 1
#define TRAINING_DONE_NOT_DONE 0
#define TRAINING_RESULT_OFFS 2
#define TRAINING_RESULT_MASK 0x1
#define TRAINING_RESULT_PASS 0
#define TRAINING_RESULT_FAIL 1
#define GENERAL_TRAINING_OPCODE_REG 0x1034
#define OPCODE_REG0_BASE 0x1038
#define OPCODE_REG0_REG(obj) (OPCODE_REG0_BASE + (obj) * 0x4)
#define OPCODE_REG1_BASE 0x10b0
#define OPCODE_REG1_REG(obj) (OPCODE_REG1_BASE + (obj) * 0x4)
#define CAL_PHY_BASE 0x10c0
#define CAL_PHY_REG(obj) (CAL_PHY_BASE + (obj) * 0x4)
#define WL_DONE_CNTR_REF_REG 0x10f8
#define ODPG_WR_RD_MODE_ENA_REG 0x10fc
#define SDRAM_CFG_REG 0x1400
#define REFRESH_OFFS 0
#define REFRESH_MASK 0x3fff
#define DRAM_TYPE_OFFS 14
#define DRAM_TYPE_MASK 0x1
#define BUS_IN_USE_OFFS 15
#define BUS_IN_USE_MASK 0x1
#define CPU_2DRAM_WR_BUFF_CUT_TH_OFFS 16
#define CPU_2DRAM_WR_BUFF_CUT_TH_MASK 0x1
#define REG_DIMM_OFFS 17
#define REG_DIMM_MASK 0x1
#define ECC_OFFS 18
#define ECC_MASK 0x1
#define IGNORE_ERRORS_OFFS 19
#define IGNORE_ERRORS_MASK 0x1
#define DRAM_TYPE_HIGH_OFFS 20
#define DRAM_TYPE_HIGH_MASK 0x1
#define SELF_REFRESH_MODE_OFFS 24
#define SELF_REFRESH_MODE_MASK 0x1
#define CPU_RD_PER_PROP_OFFS 25
#define CPU_RD_PER_PROP_MASK 0x1
#define DDR4_EMULATION_OFFS 26
#define DDR4_EMULATION_MASK 0x1
#define PHY_RF_RST_OFFS 27
#define PHY_RF_RST_MASK 0x1
#define PUP_RST_DIVIDER_OFFS 28
#define PUP_RST_DIVIDER_MASK 0x1
#define DATA_PUP_WR_RESET_OFFS 29
#define DATA_PUP_WR_RESET_MASK 0x1
#define DATA_PUP_RD_RESET_OFFS 30
#define DATA_PUP_RD_RESET_MASK 0x1
#define DATA_PUP_RD_RESET_ENA 0x0
#define DATA_PUP_RD_RESET_DIS 0x1
#define IO_BIST_OFFS				31
/*
 * NOTE(review): this line originally redefined DATA_PUP_RD_RESET_MASK,
 * byte-identical to its definition a few lines above — evidently a
 * copy-paste slip for the IO_BIST field mask that pairs with IO_BIST_OFFS.
 */
#define IO_BIST_MASK				0x1
#define DUNIT_CTRL_LOW_REG 0x1404
#define SDRAM_TIMING_LOW_REG 0x1408
#define SDRAM_TIMING_LOW_TRAS_OFFS 0
#define SDRAM_TIMING_LOW_TRAS_MASK 0xf
#define SDRAM_TIMING_LOW_TRCD_OFFS 4
#define SDRAM_TIMING_LOW_TRCD_MASK 0xf
#define SDRAM_TIMING_HIGH_TRCD_OFFS 22
#define SDRAM_TIMING_HIGH_TRCD_MASK 0x1
#define SDRAM_TIMING_LOW_TRP_OFFS 8
#define SDRAM_TIMING_LOW_TRP_MASK 0xf
#define SDRAM_TIMING_HIGH_TRP_OFFS 23
#define SDRAM_TIMING_HIGH_TRP_MASK 0x1
#define SDRAM_TIMING_LOW_TWR_OFFS 12
#define SDRAM_TIMING_LOW_TWR_MASK 0xf
#define SDRAM_TIMING_LOW_TWTR_OFFS 16
#define SDRAM_TIMING_LOW_TWTR_MASK 0xf
#define SDRAM_TIMING_LOW_TRAS_HIGH_OFFS 20
#define SDRAM_TIMING_LOW_TRAS_HIGH_MASK 0x3
#define SDRAM_TIMING_LOW_TRRD_OFFS 24
#define SDRAM_TIMING_LOW_TRRD_MASK 0xf
#define SDRAM_TIMING_LOW_TRTP_OFFS 28
#define SDRAM_TIMING_LOW_TRTP_MASK 0xf
#define SDRAM_TIMING_HIGH_REG 0x140c
#define SDRAM_TIMING_HIGH_TRFC_OFFS 0
#define SDRAM_TIMING_HIGH_TRFC_MASK 0x7f
#define SDRAM_TIMING_HIGH_TR2R_OFFS 7
#define SDRAM_TIMING_HIGH_TR2R_MASK 0x3
#define SDRAM_TIMING_HIGH_TR2W_W2R_OFFS 9
#define SDRAM_TIMING_HIGH_TR2W_W2R_MASK 0x3
#define SDRAM_TIMING_HIGH_TW2W_OFFS 11
#define SDRAM_TIMING_HIGH_TW2W_MASK 0x1f
#define SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS 16
#define SDRAM_TIMING_HIGH_TRFC_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS 19
#define SDRAM_TIMING_HIGH_TR2R_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS 22
#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TMOD_OFFS 25
#define SDRAM_TIMING_HIGH_TMOD_MASK 0xf
#define SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS 30
#define SDRAM_TIMING_HIGH_TMOD_HIGH_MASK 0x3
#define SDRAM_ADDR_CTRL_REG 0x1410
#define CS_STRUCT_BASE 0
#define CS_STRUCT_OFFS(cs) (CS_STRUCT_BASE + (cs) * 4)
#define CS_STRUCT_MASK 0x3
#define CS_SIZE_BASE 2
#define CS_SIZE_OFFS(cs) (CS_SIZE_BASE + (cs) * 4)
#define CS_SIZE_MASK 0x3
#define CS_SIZE_HIGH_BASE 20
#define CS_SIZE_HIGH_OFFS(cs) (CS_SIZE_HIGH_BASE + (cs))
#define CS_SIZE_HIGH_MASK 0x1
#define T_FAW_OFFS 24
#define T_FAW_MASK 0x7f
#define SDRAM_OPEN_PAGES_CTRL_REG 0x1414
#define SDRAM_OP_REG 0x1418
#define SDRAM_OP_CMD_OFFS 0
#define SDRAM_OP_CMD_MASK 0x1f
#define SDRAM_OP_CMD_CS_BASE 8
#define SDRAM_OP_CMD_CS_OFFS(cs) (SDRAM_OP_CMD_CS_BASE + (cs))
#define SDRAM_OP_CMD_CS_MASK 0x1
enum {
CMD_NORMAL,
CMD_PRECHARGE,
CMD_REFRESH,
CMD_DDR3_DDR4_MR0,
CMD_DDR3_DDR4_MR1,
CMD_NOP,
CMD_RES_0X6,
CMD_SELFREFRESH,
CMD_DDR3_DDR4_MR2,
CMD_DDR3_DDR4_MR3,
CMD_ACT_PDE,
CMD_PRE_PDE,
CMD_ZQCL,
CMD_ZQCS,
CMD_CWA,
CMD_RES_0XF,
CMD_DDR4_MR4,
CMD_DDR4_MR5,
CMD_DDR4_MR6,
DDR4_MPR_WR
};
#define DUNIT_CTRL_HIGH_REG 0x1424
#define CPU_INTERJECTION_ENA_OFFS 3
#define CPU_INTERJECTION_ENA_MASK 0x1
#define CPU_INTERJECTION_ENA_SPLIT_ENA 0
#define CPU_INTERJECTION_ENA_SPLIT_DIS 1
#define DDR_ODT_TIMING_LOW_REG 0x1428
#define DDR_TIMING_REG 0x142c
#define DDR_TIMING_TCCD_OFFS 18
#define DDR_TIMING_TCCD_MASK 0x7
#define DDR_TIMING_TPD_OFFS 0
#define DDR_TIMING_TPD_MASK 0xf
#define DDR_TIMING_TXPDLL_OFFS 4
#define DDR_TIMING_TXPDLL_MASK 0x1f
#define DDR_ODT_TIMING_HIGH_REG 0x147c
#define SDRAM_INIT_CTRL_REG 0x1480
#define DRAM_RESET_MASK_OFFS 1
#define DRAM_RESET_MASK_MASK 0x1
#define DRAM_RESET_MASK_NORMAL 0
#define DRAM_RESET_MASK_MASKED 1
#define SDRAM_ODT_CTRL_HIGH_REG 0x1498
#define DUNIT_ODT_CTRL_REG 0x149c
#define RD_BUFFER_SEL_REG 0x14a4
#define AXI_CTRL_REG 0x14a8
#define DUNIT_MMASK_REG 0x14b0
#define HORZ_SSTL_CAL_MACH_CTRL_REG 0x14c8
#define HORZ_POD_CAL_MACH_CTRL_REG 0x17c8
#define VERT_SSTL_CAL_MACH_CTRL_REG 0x1dc8
#define VERT_POD_CAL_MACH_CTRL_REG 0x1ec8
#define MAIN_PADS_CAL_MACH_CTRL_REG 0x14cc
#define DYN_PADS_CAL_ENABLE_OFFS 0
#define DYN_PADS_CAL_ENABLE_MASK 0x1
#define DYN_PADS_CAL_ENABLE_DIS 0
#define DYN_PADS_CAL_ENABLE_ENA 1
#define PADS_RECAL_OFFS 1
#define PADS_RECAL_MASK 0x1
#define DYN_PADS_CAL_BLOCK_OFFS 2
#define DYN_PADS_CAL_BLOCK_MASK 0x1
#define CAL_UPDATE_CTRL_OFFS 3
#define CAL_UPDATE_CTRL_MASK 0x3
#define CAL_UPDATE_CTRL_INT 1
#define CAL_UPDATE_CTRL_EXT 2
#define DYN_PADS_CAL_CNTR_OFFS 13
#define DYN_PADS_CAL_CNTR_MASK 0x3ffff
#define CAL_MACH_STATUS_OFFS 31
#define CAL_MACH_STATUS_MASK 0x1
#define CAL_MACH_BUSY 0
#define CAL_MACH_RDY 1
#define DRAM_DLL_TIMING_REG 0x14e0
#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4
#define DRAM_ZQ_TIMING_REG 0x14e8
#define DRAM_LONG_TIMING_REG 0x14ec
#define DDR4_TRRD_L_OFFS 0
#define DDR4_TRRD_L_MASK 0xf
#define DDR4_TWTR_L_OFFS 4
#define DDR4_TWTR_L_MASK 0xf
#define DDR_IO_REG 0x1524
#define DFS_REG 0x1528
#define RD_DATA_SMPL_DLYS_REG 0x1538
#define RD_SMPL_DLY_CS_BASE 0
#define RD_SMPL_DLY_CS_OFFS(cs) (RD_SMPL_DLY_CS_BASE + (cs) * 8)
#define RD_SMPL_DLY_CS_MASK 0x1f
#define RD_DATA_RDY_DLYS_REG 0x153c
#define RD_RDY_DLY_CS_BASE 0
#define RD_RDY_DLY_CS_OFFS(cs) (RD_RDY_DLY_CS_BASE + (cs) * 8)
#define RD_RDY_DLY_CS_MASK 0x1f
#define TRAINING_REG 0x15b0
#define TRN_START_OFFS 31
#define TRN_START_MASK 0x1
#define TRN_START_ENA 1
#define TRN_START_DIS 0
#define TRAINING_SW_1_REG 0x15b4
#define TRAINING_SW_2_REG 0x15b8
#define TRAINING_ECC_MUX_OFFS 1
#define TRAINING_ECC_MUX_MASK 0x1
#define TRAINING_ECC_MUX_DIS 0
#define TRAINING_ECC_MUX_ENA 1
#define TRAINING_SW_OVRD_OFFS 0
#define TRAINING_SW_OVRD_MASK 0x1
#define TRAINING_SW_OVRD_DIS 0
#define TRAINING_SW_OVRD_ENA 1
#define TRAINING_PATTERN_BASE_ADDR_REG 0x15bc
#define TRAINING_DBG_1_REG 0x15c0
#define TRAINING_DBG_2_REG 0x15c4
#define TRAINING_DBG_3_REG 0x15c8
#define TRN_DBG_RDY_INC_PH_2TO1_BASE 0
#define TRN_DBG_RDY_INC_PH_2TO1_OFFS(phase) (TRN_DBG_RDY_INC_PH_2TO1_BASE + (phase) * 3)
#define TRN_DBG_RDY_INC_PH_2TO1_MASK 0x7
#define DDR3_RANK_CTRL_REG 0x15e0
#define CS_EXIST_BASE 0
#define CS_EXIST_OFFS(cs) (CS_EXIST_BASE + (cs))
#define CS_EXIST_MASK 0x1
#define ZQC_CFG_REG 0x15e4
#define DRAM_PHY_CFG_REG 0x15ec
#define ODPG_CTRL_CTRL_REG 0x1600
#define ODPG_DATA_CTRL_REG 0x1630
#define ODPG_WRBUF_WR_CTRL_OFFS 0
#define ODPG_WRBUF_WR_CTRL_MASK 0x1
#define ODPG_WRBUF_WR_CTRL_DIS 0
#define ODPG_WRBUF_WR_CTRL_ENA 1
#define ODPG_WRBUF_RD_CTRL_OFFS 1
#define ODPG_WRBUF_RD_CTRL_MASK 0x1
#define ODPG_WRBUF_RD_CTRL_DIS 0
#define ODPG_WRBUF_RD_CTRL_ENA 1
#define ODPG_DATA_CBDEL_OFFS 15
#define ODPG_DATA_CBDEL_MASK 0x3f
#define ODPG_MODE_OFFS 25
#define ODPG_MODE_MASK 0x1
#define ODPG_MODE_RX 0
#define ODPG_MODE_TX 1
#define ODPG_DATA_CS_OFFS 26
#define ODPG_DATA_CS_MASK 0x3
#define ODPG_DISABLE_OFFS 30
#define ODPG_DISABLE_MASK 0x1
#define ODPG_DISABLE_DIS 1
#define ODPG_ENABLE_OFFS 31
#define ODPG_ENABLE_MASK 0x1
#define ODPG_ENABLE_ENA 1
#define ODPG_DATA_BUFFER_OFFS_REG 0x1638
#define ODPG_DATA_BUFFER_SIZE_REG 0x163c
#define PHY_LOCK_STATUS_REG 0x1674
#define PHY_REG_FILE_ACCESS_REG 0x16a0
#define PRFA_DATA_OFFS 0
#define PRFA_DATA_MASK 0xffff
#define PRFA_REG_NUM_OFFS 16
#define PRFA_REG_NUM_MASK 0x3f
#define PRFA_PUP_NUM_OFFS 22
#define PRFA_PUP_NUM_MASK 0xf
#define PRFA_PUP_CTRL_DATA_OFFS 26
#define PRFA_PUP_CTRL_DATA_MASK 0x1
#define PRFA_PUP_BCAST_WR_ENA_OFFS 27
#define PRFA_PUP_BCAST_WR_ENA_MASK 0x1
#define PRFA_REG_NUM_HI_OFFS 28
#define PRFA_REG_NUM_HI_MASK 0x3
#define PRFA_TYPE_OFFS 30
#define PRFA_TYPE_MASK 0x1
#define PRFA_REQ_OFFS 31
#define PRFA_REQ_MASK 0x1
#define PRFA_REQ_DIS 0x0
#define PRFA_REQ_ENA 0x1
#define TRAINING_WL_REG 0x16ac
#define ODPG_DATA_WR_ADDR_REG 0x16b0
#define ODPG_DATA_WR_ACK_OFFS 0
#define ODPG_DATA_WR_ACK_MASK 0x7f
#define ODPG_DATA_WR_DATA_OFFS 8
#define ODPG_DATA_WR_DATA_MASK 0xff
#define ODPG_DATA_WR_DATA_HIGH_REG 0x16b4
#define ODPG_DATA_WR_DATA_LOW_REG 0x16b8
#define ODPG_DATA_RX_WORD_ERR_ADDR_REG 0x16bc
#define ODPG_DATA_RX_WORD_ERR_CNTR_REG 0x16c0
#define ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG 0x16c4
#define ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG 0x16c8
#define ODPG_DATA_WR_DATA_ERR_REG 0x16cc
#define DUAL_DUNIT_CFG_REG 0x16d8
#define FC_SAMPLE_STAGES_OFFS 0
#define FC_SAMPLE_STAGES_MASK 0x7
#define SINGLE_CS_PIN_OFFS 3
#define SINGLE_CS_PIN_MASK 0x1
#define SINGLE_CS_ENA 1
#define TUNING_ACTIVE_SEL_OFFS 6
#define TUNING_ACTIVE_SEL_MASK 0x1
#define TUNING_ACTIVE_SEL_MC 0
#define TUNING_ACTIVE_SEL_TIP 1
#define WL_DQS_PATTERN_REG 0x16dc
#define ODPG_DONE_STATUS_REG 0x16fc
#define ODPG_DONE_STATUS_BIT_OFFS 0
#define ODPG_DONE_STATUS_BIT_MASK 0x1
#define ODPG_DONE_STATUS_BIT_CLR 0
#define ODPG_DONE_STATUS_BIT_SET 1
#define RESULT_CTRL_BASE 0x1830
#define BLOCK_STATUS_OFFS 25
#define BLOCK_STATUS_MASK 0x1
#define BLOCK_STATUS_LOCK 1
#define BLOCK_STATUS_NOT_LOCKED 0
#define MR0_REG 0x15d0
#define MR1_REG 0x15d4
#define MR2_REG 0x15d8
#define MR3_REG 0x15dc
#define MRS0_CMD 0x3
#define MRS1_CMD 0x4
#define MRS2_CMD 0x8
#define MRS3_CMD 0x9
#define DRAM_PINS_MUX_REG 0x19d4
#define CTRL_PINS_MUX_OFFS 0
#define CTRL_PINS_MUX_MASK 0x3
enum {
DUNIT_DDR3_ON_BOARD,
DUNIT_DDR3_DIMM,
DUNIT_DDR4_ON_BOARD,
DUNIT_DDR4_DIMM
};
/* ddr phy registers */
#define WL_PHY_BASE 0x0
#define WL_PHY_REG(cs) (WL_PHY_BASE + (cs) * 0x4)
#define WR_LVL_PH_SEL_OFFS 6
#define WR_LVL_PH_SEL_MASK 0x7
#define WR_LVL_PH_SEL_PHASE1 1
#define WR_LVL_REF_DLY_OFFS 0
#define WR_LVL_REF_DLY_MASK 0x1f
#define CTRL_CENTER_DLY_OFFS 10
#define CTRL_CENTER_DLY_MASK 0x1f
#define CTRL_CENTER_DLY_INV_OFFS 15
#define CTRL_CENTER_DLY_INV_MASK 0x1
#define CTX_PHY_BASE 0x1
#define CTX_PHY_REG(cs) (CTX_PHY_BASE + (cs) * 0x4)
#define RL_PHY_BASE 0x2
#define RL_PHY_REG(cs) (RL_PHY_BASE + (cs) * 0x4)
#define RL_REF_DLY_OFFS 0
#define RL_REF_DLY_MASK 0x1f
#define RL_PH_SEL_OFFS 6
#define RL_PH_SEL_MASK 0x7
#define CRX_PHY_BASE 0x3
#define CRX_PHY_REG(cs) (CRX_PHY_BASE + (cs) * 0x4)
#define PHY_CTRL_PHY_REG 0x90
#define ADLL_CFG0_PHY_REG 0x92
#define ADLL_CFG1_PHY_REG 0x93
#define ADLL_CFG2_PHY_REG 0x94
#define CMOS_CONFIG_PHY_REG 0xa2
#define PAD_ZRI_CAL_PHY_REG 0xa4
#define PAD_ODT_CAL_PHY_REG 0xa6
#define PAD_CFG_PHY_REG 0xa8
#define PAD_PRE_DISABLE_PHY_REG 0xa9
#define TEST_ADLL_PHY_REG 0xbf
#define VREF_PHY_BASE 0xd0
#define VREF_PHY_REG(cs, bit) (VREF_PHY_BASE + (cs) * 12 + bit)
enum {
DQSP_PAD = 4,
DQSN_PAD
};
#define VREF_BCAST_PHY_BASE 0xdb
#define VREF_BCAST_PHY_REG(cs) (VREF_BCAST_PHY_BASE + (cs) * 12)
#define PBS_TX_PHY_BASE 0x10
#define PBS_TX_PHY_REG(cs, bit) (PBS_TX_PHY_BASE + (cs) * 0x10 + (bit))
#define PBS_TX_BCAST_PHY_BASE 0x1f
#define PBS_TX_BCAST_PHY_REG(cs) (PBS_TX_BCAST_PHY_BASE + (cs) * 0x10)
#define PBS_RX_PHY_BASE 0x50
#define PBS_RX_PHY_REG(cs, bit) (PBS_RX_PHY_BASE + (cs) * 0x10 + (bit))
#define PBS_RX_BCAST_PHY_BASE 0x5f
#define PBS_RX_BCAST_PHY_REG(cs) (PBS_RX_BCAST_PHY_BASE + (cs) * 0x10)
#define RESULT_PHY_REG 0xc0
#define RESULT_PHY_RX_OFFS 5
#define RESULT_PHY_TX_OFFS 0
#endif /* _MV_DDR_REGS_H */

View file

@ -0,0 +1,377 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_spd.h"
#define MV_DDR_SPD_DATA_MTB 125 /* medium timebase, ps */
#define MV_DDR_SPD_DATA_FTB 1 /* fine timebase, ps */
#define MV_DDR_SPD_MSB_OFFS 8 /* most significant byte offset, bits */
#define MV_DDR_SPD_SUPPORTED_CLS_NUM 30
static unsigned int mv_ddr_spd_supported_cls[MV_DDR_SPD_SUPPORTED_CLS_NUM];
/*
 * mv_ddr_spd_supported_cls_calc() - build the supported-CAS-latency table
 * @spd_data: raw spd data read from the dimm
 *
 * Decodes the CAS-latencies-supported bitmask in spd bytes 20..23 into
 * mv_ddr_spd_supported_cls[]: a set bit i of byte b maps to latency
 * start_cl + (b - 20) * 8 + i, a clear bit stores 0 (slot unused), so
 * the table stays in ascending order of latency.
 * Bit 3 of byte 23 selects the latency range base (23 vs 7) —
 * NOTE(review): confirm the range-select bit against the JEDEC DDR4 SPD
 * annex; only the low 6 bits of byte 23 are decoded (table holds 30 slots).
 *
 * Return: 0 (always succeeds)
 */
int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data)
{
	unsigned int byte, bit, start_cl;

	/* high or low CL range base, selected by byte 23 bit 3 */
	start_cl = (spd_data->all_bytes[23] & 0x8) ? 23 : 7;

	/* bytes 20..22 contribute 8 latencies each */
	for (byte = 20; byte < 23; byte++) {
		for (bit = 0; bit < 8; bit++) {
			if (spd_data->all_bytes[byte] & (1 << bit))
				mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit;
			else
				mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0;
		}
	}

	/* byte 23: only bits 0..5 are latency flags */
	for (byte = 23, bit = 0; bit < 6; bit++) {
		if (spd_data->all_bytes[byte] & (1 << bit))
			mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit;
		else
			mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0;
	}

	return 0;
}
/*
 * mv_ddr_spd_supported_cl_get() - find the lowest supported CAS latency >= @cl
 * @cl: requested CAS latency
 *
 * Scans the ascending mv_ddr_spd_supported_cls[] table (0 = unused slot).
 *
 * Return: first supported latency >= @cl, or 0 if none exists.
 */
unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl)
{
	int idx;

	for (idx = 0; idx < MV_DDR_SPD_SUPPORTED_CLS_NUM; idx++) {
		if (mv_ddr_spd_supported_cls[idx] >= cl)
			return mv_ddr_spd_supported_cls[idx];
	}

	return 0;
}
/* combine a medium-timebase count with its signed fine-timebase correction;
 * stores the result (ps) in *result, or returns 1 if it decodes negative
 */
static int mv_ddr_spd_timing_decode(int mtb_cnt, signed char ftb_cnt,
				    unsigned int *result)
{
	int calc_val = mtb_cnt * MV_DDR_SPD_DATA_MTB + ftb_cnt * MV_DDR_SPD_DATA_FTB;

	if (calc_val < 0)
		return 1;

	*result = calc_val;

	return 0;
}

/*
 * mv_ddr_spd_timing_calc() - decode spd bytes into timing parameters
 * @spd_data:    raw spd data read from the dimm
 * @timing_data: output array indexed by MV_DDR_T* ids, values in ps
 *
 * Each parameter is a medium-timebase (125 ps) count — some extended with a
 * most-significant nibble/byte — optionally corrected by a signed
 * fine-timebase (1 ps) byte, per the JEDEC DDR4 SPD layout.
 *
 * Return: 0 on success, 1 if any fine-corrected value decodes negative.
 */
int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[])
{
	/* t ck avg min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_18,
				     (signed char)spd_data->byte_fields.byte_125,
				     &timing_data[MV_DDR_TCK_AVG_MIN]))
		return 1;

	/* t aa min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_24,
				     (signed char)spd_data->byte_fields.byte_123,
				     &timing_data[MV_DDR_TAA_MIN]))
		return 1;

	/* t rfc1 min, ps */
	timing_data[MV_DDR_TRFC1_MIN] = (spd_data->byte_fields.byte_30 +
		(spd_data->byte_fields.byte_31 << MV_DDR_SPD_MSB_OFFS)) * MV_DDR_SPD_DATA_MTB;

	/* t wr min, ps */
	timing_data[MV_DDR_TWR_MIN] = (spd_data->byte_fields.byte_42 +
		(spd_data->byte_fields.byte_41.bit_fields.t_wr_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twr to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWR_MIN] == 0)
		timing_data[MV_DDR_TWR_MIN] = 15000;

	/* t rcd min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_25,
				     (signed char)spd_data->byte_fields.byte_122,
				     &timing_data[MV_DDR_TRCD_MIN]))
		return 1;

	/* t rp min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_26,
				     (signed char)spd_data->byte_fields.byte_121,
				     &timing_data[MV_DDR_TRP_MIN]))
		return 1;

	/* t rc min, ps (most-significant nibble from byte 27 extends byte 29) */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_29 +
				     (spd_data->byte_fields.byte_27.bit_fields.t_rc_min_msn << MV_DDR_SPD_MSB_OFFS),
				     (signed char)spd_data->byte_fields.byte_120,
				     &timing_data[MV_DDR_TRC_MIN]))
		return 1;

	/* t ras min, ps */
	timing_data[MV_DDR_TRAS_MIN] = (spd_data->byte_fields.byte_28 +
		(spd_data->byte_fields.byte_27.bit_fields.t_ras_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;

	/* t rrd s min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_38,
				     (signed char)spd_data->byte_fields.byte_119,
				     &timing_data[MV_DDR_TRRD_S_MIN]))
		return 1;

	/* t rrd l min, ps */
	if (mv_ddr_spd_timing_decode(spd_data->byte_fields.byte_39,
				     (signed char)spd_data->byte_fields.byte_118,
				     &timing_data[MV_DDR_TRRD_L_MIN]))
		return 1;

	/* t faw min, ps */
	timing_data[MV_DDR_TFAW_MIN] = (spd_data->byte_fields.byte_37 +
		(spd_data->byte_fields.byte_36.bit_fields.t_faw_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;

	/* t wtr s min, ps */
	timing_data[MV_DDR_TWTR_S_MIN] = (spd_data->byte_fields.byte_44 +
		(spd_data->byte_fields.byte_43.bit_fields.t_wtr_s_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twtr_s to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWTR_S_MIN] == 0)
		timing_data[MV_DDR_TWTR_S_MIN] = 2500;

	/* t wtr l min, ps */
	timing_data[MV_DDR_TWTR_L_MIN] = (spd_data->byte_fields.byte_45 +
		(spd_data->byte_fields.byte_43.bit_fields.t_wtr_l_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twtr_l to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWTR_L_MIN] == 0)
		timing_data[MV_DDR_TWTR_L_MIN] = 7500;

	return 0;
}
enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data)
{
unsigned char dev_width = spd_data->byte_fields.byte_12.bit_fields.device_width;
enum mv_ddr_dev_width ret_val;
switch (dev_width) {
case 0x00:
ret_val = MV_DDR_DEV_WIDTH_4BIT;
break;
case 0x01:
ret_val = MV_DDR_DEV_WIDTH_8BIT;
break;
case 0x02:
ret_val = MV_DDR_DEV_WIDTH_16BIT;
break;
case 0x03:
ret_val = MV_DDR_DEV_WIDTH_32BIT;
break;
default:
ret_val = MV_DDR_DEV_WIDTH_LAST;
}
return ret_val;
}
enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data)
{
unsigned char die_cap = spd_data->byte_fields.byte_4.bit_fields.die_capacity;
enum mv_ddr_die_capacity ret_val;
switch (die_cap) {
case 0x00:
ret_val = MV_DDR_DIE_CAP_256MBIT;
break;
case 0x01:
ret_val = MV_DDR_DIE_CAP_512MBIT;
break;
case 0x02:
ret_val = MV_DDR_DIE_CAP_1GBIT;
break;
case 0x03:
ret_val = MV_DDR_DIE_CAP_2GBIT;
break;
case 0x04:
ret_val = MV_DDR_DIE_CAP_4GBIT;
break;
case 0x05:
ret_val = MV_DDR_DIE_CAP_8GBIT;
break;
case 0x06:
ret_val = MV_DDR_DIE_CAP_16GBIT;
break;
case 0x07:
ret_val = MV_DDR_DIE_CAP_32GBIT;
break;
case 0x08:
ret_val = MV_DDR_DIE_CAP_12GBIT;
break;
case 0x09:
ret_val = MV_DDR_DIE_CAP_24GBIT;
break;
default:
ret_val = MV_DDR_DIE_CAP_LAST;
}
return ret_val;
}
unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data)
{
	/*
	 * SPD byte 131, bit 0 ("rank 1 mapping"): edge-connector-to-dram
	 * address mapping flag, used by the caller as the mirror indication.
	 */
	return spd_data->byte_fields.byte_131.bit_fields.rank_1_mapping;
}
/*
 * NOTE(review): declared to return enum mv_ddr_pkg_rank (matching the header
 * prototype) but produces enum mv_ddr_pri_bus_width values — looks like a
 * copy/paste slip; fixing it requires changing prototype and definition
 * together, so only flagging it here.
 */
enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data)
{
	/* SPD byte 13 primary bus width code: 0 -> 8 bits ... 3 -> 64 bits */
	switch (spd_data->byte_fields.byte_13.bit_fields.primary_bus_width) {
	case 0x00:
		return MV_DDR_PRI_BUS_WIDTH_8;
	case 0x01:
		return MV_DDR_PRI_BUS_WIDTH_16;
	case 0x02:
		return MV_DDR_PRI_BUS_WIDTH_32;
	case 0x03:
		return MV_DDR_PRI_BUS_WIDTH_64;
	default:
		return MV_DDR_PRI_BUS_WIDTH_LAST;
	}
}
/*
 * NOTE(review): return type is declared enum mv_ddr_pkg_rank (as in the
 * header prototype) but the values produced are enum mv_ddr_bus_width_ext —
 * same copy/paste slip as mv_ddr_spd_pri_bus_width_get; confirm and fix
 * declaration and definition together.
 */
enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data)
{
	/* SPD byte 13 bus width extension code: 0 -> none, 1 -> 8 bits */
	switch (spd_data->byte_fields.byte_13.bit_fields.bus_width_ext) {
	case 0x00:
		return MV_DDR_BUS_WIDTH_EXT_0;
	case 0x01:
		return MV_DDR_BUS_WIDTH_EXT_8;
	default:
		return MV_DDR_BUS_WIDTH_EXT_LAST;
	}
}
static enum mv_ddr_pkg_rank mv_ddr_spd_pkg_rank_get(union mv_ddr_spd_data *spd_data)
{
	/*
	 * SPD byte 12 encodes package ranks per dimm as (ranks - 1);
	 * unknown codes map to MV_DDR_PKG_RANK_LAST.
	 */
	switch (spd_data->byte_fields.byte_12.bit_fields.dimm_pkg_ranks_num) {
	case 0x00:
		return MV_DDR_PKG_RANK_1;
	case 0x01:
		return MV_DDR_PKG_RANK_2;
	case 0x02:
		return MV_DDR_PKG_RANK_3;
	case 0x03:
		return MV_DDR_PKG_RANK_4;
	case 0x04:
		return MV_DDR_PKG_RANK_5;
	case 0x05:
		return MV_DDR_PKG_RANK_6;
	case 0x06:
		return MV_DDR_PKG_RANK_7;
	case 0x07:
		return MV_DDR_PKG_RANK_8;
	default:
		return MV_DDR_PKG_RANK_LAST;
	}
}
static enum mv_ddr_die_count mv_ddr_spd_die_count_get(union mv_ddr_spd_data *spd_data)
{
	/*
	 * SPD byte 6 encodes die count per package as (dies - 1);
	 * unknown codes map to MV_DDR_DIE_CNT_LAST.
	 */
	switch (spd_data->byte_fields.byte_6.bit_fields.die_count) {
	case 0x00:
		return MV_DDR_DIE_CNT_1;
	case 0x01:
		return MV_DDR_DIE_CNT_2;
	case 0x02:
		return MV_DDR_DIE_CNT_3;
	case 0x03:
		return MV_DDR_DIE_CNT_4;
	case 0x04:
		return MV_DDR_DIE_CNT_5;
	case 0x05:
		return MV_DDR_DIE_CNT_6;
	case 0x06:
		return MV_DDR_DIE_CNT_7;
	case 0x07:
		return MV_DDR_DIE_CNT_8;
	default:
		return MV_DDR_DIE_CNT_LAST;
	}
}
unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data)
{
unsigned char cs_bit_mask = 0x0;
enum mv_ddr_pkg_rank pkg_rank = mv_ddr_spd_pkg_rank_get(spd_data);
enum mv_ddr_die_count die_cnt = mv_ddr_spd_die_count_get(spd_data);
if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_1)
cs_bit_mask = 0x1;
else if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_2)
cs_bit_mask = 0x3;
else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_1)
cs_bit_mask = 0x3;
else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_2)
cs_bit_mask = 0xf;
return cs_bit_mask;
}
unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data)
{
	/* SPD byte 2 is the key byte / dram device type (0xc = DDR4) */
	return spd_data->byte_fields.byte_2;
}
unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data)
{
	/* low nibble of SPD byte 3 encodes the base module type */
	return spd_data->byte_fields.byte_3.bit_fields.module_type;
}

View file

@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_SPD_H
#define _MV_DDR_SPD_H
#include "mv_ddr_topology.h"
/*
* Based on JEDEC Standard No. 21-C, 4.1.2.L-4:
* Serial Presence Detect (SPD) for DDR4 SDRAM Modules
*/
/* block 0: base configuration and dram parameters */
#define MV_DDR_SPD_DATA_BLOCK0_SIZE 128
/* block 1: module specific parameters sub-block */
#define MV_DDR_SPD_DATA_BLOCK1M_SIZE 64
/* block 1: hybrid memory parameters sub-block */
#define MV_DDR_SPD_DATA_BLOCK1H_SIZE 64
/* block 2: extended function parameter block */
#define MV_DDR_SPD_DATA_BLOCK2E_SIZE 64
/* block 2: manufacturing information */
#define MV_DDR_SPD_DATA_BLOCK2M_SIZE 64
/* block 3: end user programmable */
#define MV_DDR_SPD_DATA_BLOCK3_SIZE 128
#define MV_DDR_SPD_DEV_TYPE_DDR4 0xc
#define MV_DDR_SPD_MODULE_TYPE_UDIMM 0x2
#define MV_DDR_SPD_MODULE_TYPE_SO_DIMM 0x3
#define MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM 0x6
#define MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM 0x9
#define MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM 0xc
#define MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM 0xd
/*
* TODO: For now, the struct contains block 0 & block 1 with module specific
* parameters for unbuffered memory module types only.
*/
union mv_ddr_spd_data {
unsigned char all_bytes[MV_DDR_SPD_DATA_BLOCK0_SIZE +
MV_DDR_SPD_DATA_BLOCK1M_SIZE];
struct {
/* block 0 */
union { /* num of bytes used/num of bytes in spd device/crc coverage */
unsigned char all_bits;
struct {
unsigned char spd_bytes_used:4,
spd_bytes_total:3,
reserved:1;
} bit_fields;
} byte_0;
union { /* spd revision */
unsigned char all_bits;
struct {
unsigned char addtions_level:4,
encoding_level:4;
} bit_fields;
} byte_1;
unsigned char byte_2; /* key_byte/dram device type */
union { /* key byte/module type */
unsigned char all_bits;
struct {
unsigned char module_type:4,
hybrid_media:3,
hybrid:1;
} bit_fields;
} byte_3;
union { /* sdram density & banks */
unsigned char all_bits;
struct {
unsigned char die_capacity:4,
bank_address:2,
bank_group:2;
} bit_fields;
} byte_4;
union { /* sdram addressing */
unsigned char all_bits;
struct {
unsigned char col_address:3,
row_address:3,
reserved:2;
} bit_fields;
} byte_5;
union { /* sdram package type */
unsigned char all_bits;
struct {
unsigned char signal_loading:2,
reserved:2,
die_count:3,
sdram_package_type:1;
} bit_fields;
} byte_6;
union { /* sdram optional features */
unsigned char all_bits;
struct {
unsigned char mac:4, /* max activate count */
t_maw:2, /* max activate window */
reserved:2; /* all 0s */
} bit_fields;
} byte_7;
unsigned char byte_8; /* sdram thermal & refresh options; reserved; 0x00 */
union { /* other sdram optional features */
unsigned char all_bits;
struct {
unsigned char reserved:5, /* all 0s */
soft_ppr:1,
ppr:2; /* post package repair */
} bit_fields;
} byte_9;
union { /* secondary sdram package type */
unsigned char all_bits;
struct {
unsigned char signal_loading:2,
density_ratio:2, /* dram density ratio */
die_count:3,
sdram_package_type:1;
} bit_fields;
} byte_10;
union { /* module nominal voltage, vdd */
unsigned char all_bits;
struct {
unsigned char operable:1,
endurant:1,
reserved:5; /* all 0s */
} bit_fields;
} byte_11;
union { /* module organization*/
unsigned char all_bits;
struct {
unsigned char device_width:3,
dimm_pkg_ranks_num:3, /* package ranks per dimm number */
rank_mix:1,
reserved:1; /* 0 */
} bit_fields;
} byte_12;
union { /* module memory bus width */
unsigned char all_bits;
struct {
unsigned char primary_bus_width:3, /* in bits */
bus_width_ext:2, /* in bits */
reserved:3; /* all 0s */
} bit_fields;
} byte_13;
union { /* module thernal sensor */
unsigned char all_bits;
struct {
unsigned char reserved:7,
thermal_sensor:1;
} bit_fields;
} byte_14;
union { /* extended module type */
unsigned char all_bits;
struct {
unsigned char ext_base_module_type:4,
reserved:4; /* all 0s */
} bit_fields;
} byte_15;
unsigned char byte_16; /* reserved; 0x00 */
union { /* timebases */
unsigned char all_bits;
struct {
unsigned char ftb:2, /* fine timebase */
mtb:2, /* medium timebase */
reserved:4; /* all 0s */
} bit_fields;
} byte_17;
unsigned char byte_18; /* sdram min cycle time (t ck avg min), mtb */
unsigned char byte_19; /* sdram max cycle time (t ck avg max), mtb */
unsigned char byte_20; /* cas latencies supported, first byte */
unsigned char byte_21; /* cas latencies supported, second byte */
unsigned char byte_22; /* cas latencies supported, third byte */
unsigned char byte_23; /* cas latencies supported, fourth byte */
unsigned char byte_24; /* min cas latency time (t aa min), mtb */
unsigned char byte_25; /* min ras to cas delay time (t rcd min), mtb */
unsigned char byte_26; /* min row precharge delay time (t rp min), mtb */
union { /* upper nibbles for t ras min & t rc min */
unsigned char all_bits;
struct {
unsigned char t_ras_min_msn:4, /* t ras min most significant nibble */
t_rc_min_msn:4; /* t rc min most significant nibble */
} bit_fields;
} byte_27;
unsigned char byte_28; /* min active to precharge delay time (t ras min), l-s-byte, mtb */
unsigned char byte_29; /* min active to active/refresh delay time (t rc min), l-s-byte, mtb */
unsigned char byte_30; /* min refresh recovery delay time (t rfc1 min), l-s-byte, mtb */
unsigned char byte_31; /* min refresh recovery delay time (t rfc1 min), m-s-byte, mtb */
unsigned char byte_32; /* min refresh recovery delay time (t rfc2 min), l-s-byte, mtb */
unsigned char byte_33; /* min refresh recovery delay time (t rfc2 min), m-s-byte, mtb */
unsigned char byte_34; /* min refresh recovery delay time (t rfc4 min), l-s-byte, mtb */
unsigned char byte_35; /* min refresh recovery delay time (t rfc4 min), m-s-byte, mtb */
union { /* upper nibble for t faw */
unsigned char all_bits;
struct {
unsigned char t_faw_min_msn:4, /* t faw min most significant nibble */
reserved:4;
} bit_fields;
} byte_36;
unsigned char byte_37; /* min four activate window delay time (t faw min), l-s-byte, mtb */
/* byte 38: min activate to activate delay time (t rrd_s min), diff bank group, mtb */
unsigned char byte_38;
/* byte 39: min activate to activate delay time (t rrd_l min), same bank group, mtb */
unsigned char byte_39;
unsigned char byte_40; /* min cas to cas delay time (t ccd_l min), same bank group, mtb */
union { /* upper nibble for t wr min */
unsigned char all_bits;
struct {
unsigned char t_wr_min_msn:4, /* t wr min most significant nibble */
reserved:4;
} bit_fields;
} byte_41;
unsigned char byte_42; /* min write recovery time (t wr min) */
union { /* upper nibbles for t wtr min */
unsigned char all_bits;
struct {
unsigned char t_wtr_s_min_msn:4, /* t wtr s min most significant nibble */
t_wtr_l_min_msn:4; /* t wtr l min most significant nibble */
} bit_fields;
} byte_43;
unsigned char byte_44; /* min write to read time (t wtr s min), diff bank group, mtb */
unsigned char byte_45; /* min write to read time (t wtr l min), same bank group, mtb */
unsigned char bytes_46_59[14]; /* reserved; all 0s */
unsigned char bytes_60_77[18]; /* TODO: connector to sdram bit mapping */
unsigned char bytes_78_116[39]; /* reserved; all 0s */
/* fine offset for min cas to cas delay time (t ccd_l min), same bank group, ftb */
unsigned char byte_117;
/* fine offset for min activate to activate delay time (t rrd_l min), same bank group, ftb */
unsigned char byte_118;
/* fine offset for min activate to activate delay time (t rrd_s min), diff bank group, ftb */
unsigned char byte_119;
/* fine offset for min active to active/refresh delay time (t rc min), ftb */
unsigned char byte_120;
unsigned char byte_121; /* fine offset for min row precharge delay time (t rp min), ftb */
unsigned char byte_122; /* fine offset for min ras to cas delay time (t rcd min), ftb */
unsigned char byte_123; /* fine offset for min cas latency time (t aa min), ftb */
unsigned char byte_124; /* fine offset for sdram max cycle time (t ck avg max), ftb */
unsigned char byte_125; /* fine offset for sdram min cycle time (t ck avg min), ftb */
unsigned char byte_126; /* crc for base configuration section, l-s-byte */
unsigned char byte_127; /* crc for base configuration section, m-s-byte */
/*
* block 1: module specific parameters for unbuffered memory module types only
*/
union { /* (unbuffered) raw card extension, module nominal height */
unsigned char all_bits;
struct {
unsigned char nom_height_max:5, /* in mm */
raw_cad_ext:3;
} bit_fields;
} byte_128;
union { /* (unbuffered) module maximum thickness */
unsigned char all_bits;
struct {
unsigned char front_thickness_max:4, /* in mm */
back_thickness_max:4; /* in mm */
} bit_fields;
} byte_129;
union { /* (unbuffered) reference raw card used */
unsigned char all_bits;
struct {
unsigned char ref_raw_card:5,
ref_raw_card_rev:2,
ref_raw_card_ext:1;
} bit_fields;
} byte_130;
union { /* (unbuffered) address mapping from edge connector to dram */
unsigned char all_bits;
struct {
unsigned char rank_1_mapping:1,
reserved:7;
} bit_fields;
} byte_131;
unsigned char bytes_132_191[60]; /* reserved; all 0s */
} byte_fields;
};
int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[]);
enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data);
enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data);
int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data);
unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl);
enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data);
enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data);
#endif /* _MV_DDR_SPD_H */

View file

@ -0,0 +1,102 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_regs.h"
#include "mv_ddr_sys_env_lib.h"
/*
 * Return the board ID used to index the wakeup-gpio table.
 * Resolved at compile time: only the DB-88F6820-GP target has a
 * known Marvell board ID here; all other builds get 0.
 */
static u32 mv_ddr_board_id_get(void)
{
#if defined(CONFIG_TARGET_DB_88F6820_GP)
	return DB_GP_68XX_ID;
#else
	/*
	 * Return 0 here for custom board as this should not be used
	 * for custom boards.
	 */
	return 0;
#endif
}
static u32 mv_ddr_board_id_index_get(u32 board_id)
{
	/*
	 * Marvell board IDs start at the 0x10 base (MARVELL_BOARD_ID_MASK);
	 * clearing that base bit leaves a zero-based table index.
	 */
	u32 index_mask = MARVELL_BOARD_ID_MASK - 1;

	return board_id & index_mask;
}
/*
 * read gpio input for suspend-wakeup indication
 * return indicating suspend wakeup status:
 * 0 - not supported,
 * 1 - supported: read magic word detect wakeup,
 * 2 - detected wakeup from gpio
 */
enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void)
{
	u32 reg, board_id_index, gpio;
	/* per-board table: board id -> wakeup gpio number (or -1/-2 sentinel) */
	struct board_wakeup_gpio board_gpio[] = MV_BOARD_WAKEUP_GPIO_INFO;
	board_id_index = mv_ddr_board_id_index_get(mv_ddr_board_id_get());
	/* bounds-check the board index against the table size */
	if (!(sizeof(board_gpio) / sizeof(struct board_wakeup_gpio) >
	      board_id_index)) {
		printf("\n_failed loading Suspend-Wakeup information (invalid board ID)\n");
		return SUSPEND_WAKEUP_DISABLED;
	}
	/*
	 * - Detect if Suspend-Wakeup is supported on current board
	 * - Fetch the GPIO number for wakeup status input indication
	 */
	if (board_gpio[board_id_index].gpio_num == -1) {
		/* Suspend to RAM is not supported */
		return SUSPEND_WAKEUP_DISABLED;
	} else if (board_gpio[board_id_index].gpio_num == -2) {
		/*
		 * Suspend to RAM is supported but GPIO indication is
		 * not implemented - Skip
		 */
		return SUSPEND_WAKEUP_ENABLED;
	} else {
		gpio = board_gpio[board_id_index].gpio_num;
	}
	/* Initialize MPP for GPIO (set MPP = 0x0) */
	reg = reg_read(MPP_CONTROL_REG(MPP_REG_NUM(gpio)));
	/* reset MPP21 to 0x0, keep rest of MPP settings*/
	reg &= ~MPP_MASK(gpio);
	reg_write(MPP_CONTROL_REG(MPP_REG_NUM(gpio)), reg);
	/* Initialize GPIO as input: set the gpio's bit in the out-enable reg */
	reg = reg_read(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)));
	reg |= GPP_MASK(gpio);
	reg_write(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)), reg);
	/*
	 * Check GPP for input status from PIC: 0 - regular init,
	 * 1 - suspend wakeup
	 */
	reg = reg_read(GPP_DATA_IN_REG(GPP_REG_NUM(gpio)));
	/* if GPIO is ON: wakeup from S2RAM indication detected */
	return (reg & GPP_MASK(gpio)) ? SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED :
		SUSPEND_WAKEUP_DISABLED;
}
/*
 * get bit mask of enabled cs
 * return bit mask of enabled cs:
 * 1 - only cs0 enabled,
 * 3 - both cs0 and cs1 enabled
 */
u32 mv_ddr_sys_env_get_cs_ena_from_reg(void)
{
	u32 cs_exist_mask = 0;
	unsigned int cs;

	/* collect the cs-exist bit positions for all four chip selects */
	for (cs = 0; cs < 4; cs++)
		cs_exist_mask |= CS_EXIST_MASK << CS_EXIST_OFFS(cs);

	return reg_read(DDR3_RANK_CTRL_REG) & cs_exist_mask;
}

View file

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_SYS_ENV_LIB_H
#define _MV_DDR_SYS_ENV_LIB_H
#include "ddr_ml_wrapper.h"
/* device revision */
#define DEV_ID_REG			0x18238
#define DEV_VERSION_ID_REG		0x1823c
#define REVISON_ID_OFFS			8
#define REVISON_ID_MASK			0xf00

/* mpp control register holding a given 4-bit mpp field */
#define MPP_CONTROL_REG(id)		(0x18000 + ((id) * 4))
#define GPP_DATA_OUT_REG(grp)		(MV_GPP_REGS_BASE(grp) + 0x00)
#define GPP_DATA_OUT_EN_REG(grp)	(MV_GPP_REGS_BASE(grp) + 0x04)
#define GPP_DATA_IN_REG(grp)		(MV_GPP_REGS_BASE(grp) + 0x10)
#define MV_GPP_REGS_BASE(unit)		(0x18100 + ((unit) * 0x40))

/* 8 mpp fields per control register, 4 bits per field */
#define MPP_REG_NUM(GPIO_NUM)		((GPIO_NUM) / 8)
/*
 * fix: removed a stray trailing ';' from the MPP_MASK body, which injected
 * empty statements at call sites and made the macro unusable inside
 * expressions; macro arguments are now parenthesized against
 * operator-precedence surprises.
 */
#define MPP_MASK(GPIO_NUM)		(0xf << 4 * ((GPIO_NUM) - \
					(MPP_REG_NUM(GPIO_NUM) * 8)))
/* 32 gpios per gpp bank */
#define GPP_REG_NUM(GPIO_NUM)		((GPIO_NUM) / 32)
#define GPP_MASK(GPIO_NUM)		(1 << ((GPIO_NUM) % 32))
/* device ID */
/* Board ID numbers */
#define MARVELL_BOARD_ID_MASK 0x10
/* Customer boards for A38x */
#define A38X_CUSTOMER_BOARD_ID_BASE 0x0
#define A38X_CUSTOMER_BOARD_ID0 (A38X_CUSTOMER_BOARD_ID_BASE + 0)
#define A38X_CUSTOMER_BOARD_ID1 (A38X_CUSTOMER_BOARD_ID_BASE + 1)
#define A38X_MV_MAX_CUSTOMER_BOARD_ID (A38X_CUSTOMER_BOARD_ID_BASE + 2)
#define A38X_MV_CUSTOMER_BOARD_NUM (A38X_MV_MAX_CUSTOMER_BOARD_ID - \
A38X_CUSTOMER_BOARD_ID_BASE)
/* Marvell boards for A38x */
#define A38X_MARVELL_BOARD_ID_BASE 0x10
#define RD_NAS_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 0)
#define DB_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 1)
#define RD_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 2)
#define DB_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 3)
#define DB_GP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 4)
#define DB_BP_6821_ID (A38X_MARVELL_BOARD_ID_BASE + 5)
#define DB_AMC_6820_ID (A38X_MARVELL_BOARD_ID_BASE + 6)
#define A38X_MV_MAX_MARVELL_BOARD_ID (A38X_MARVELL_BOARD_ID_BASE + 7)
#define A38X_MV_MARVELL_BOARD_NUM (A38X_MV_MAX_MARVELL_BOARD_ID - \
A38X_MARVELL_BOARD_ID_BASE)
/* Marvell boards for A39x */
#define A39X_MARVELL_BOARD_ID_BASE 0x30
#define A39X_DB_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 0)
#define A39X_RD_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 1)
#define A39X_MV_MAX_MARVELL_BOARD_ID (A39X_MARVELL_BOARD_ID_BASE + 2)
#define A39X_MV_MARVELL_BOARD_NUM (A39X_MV_MAX_MARVELL_BOARD_ID - \
A39X_MARVELL_BOARD_ID_BASE)
struct board_wakeup_gpio {
u32 board_id;
int gpio_num;
};
enum suspend_wakeup_status {
SUSPEND_WAKEUP_DISABLED,
SUSPEND_WAKEUP_ENABLED,
SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED,
};
/*
* GPIO status indication for Suspend Wakeup:
* If suspend to RAM is supported and GPIO inidcation is implemented,
* set the gpio number
* If suspend to RAM is supported but GPIO indication is not implemented
* set '-2'
* If suspend to RAM is not supported set '-1'
*/
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
#ifdef CONFIG_ARMADA_38X
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A38X_CUSTOMER_BOARD_ID0, -1 }, \
{A38X_CUSTOMER_BOARD_ID0, -1 }, \
};
#else
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A39X_CUSTOMER_BOARD_ID0, -1 }, \
{A39X_CUSTOMER_BOARD_ID0, -1 }, \
};
#endif /* CONFIG_ARMADA_38X */
#else
#ifdef CONFIG_ARMADA_38X
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{RD_NAS_68XX_ID, -2 }, \
{DB_68XX_ID, -1 }, \
{RD_AP_68XX_ID, -2 }, \
{DB_AP_68XX_ID, -2 }, \
{DB_GP_68XX_ID, -2 }, \
{DB_BP_6821_ID, -2 }, \
{DB_AMC_6820_ID, -2 }, \
};
#else
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A39X_RD_69XX_ID, -1 }, \
{A39X_DB_69XX_ID, -1 }, \
};
#endif /* CONFIG_ARMADA_38X */
#endif /* CONFIG_CUSTOMER_BOARD_SUPPORT */
enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void);
u32 mv_ddr_sys_env_get_cs_ena_from_reg(void);
#endif /* _MV_DDR_SYS_ENV_LIB_H */

View file

@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_topology.h"
#include "mv_ddr_common.h"
#include "mv_ddr_spd.h"
#include "ddr3_init.h"
#include "ddr_topology_def.h"
#include "ddr3_training_ip_db.h"
#include "ddr3_training_ip.h"
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	/*
	 * Round the min cas latency time (ps) up to whole clocks, then snap
	 * the result to a cas latency the dimm reports as supported.
	 */
	return mv_ddr_spd_supported_cl_get(ceil_div(taa_min, tclk));
}
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	/*
	 * Map the clock period (ps) to a cas write latency via a descending
	 * threshold table; periods below the last threshold are unsupported
	 * and yield 0.
	 */
	static const struct {
		unsigned int tclk_min;	/* lowest period (ps) for this cwl */
		unsigned int cwl;
	} cwl_map[] = {
		{ 1250, 9 },
		{ 1071, 10 },
		{ 938, 11 },
		{ 833, 12 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cwl_map) / sizeof(cwl_map[0]); i++) {
		if (tclk >= cwl_map[i].tclk_min)
			return cwl_map[i].cwl;
	}

	return 0;	/* clock period too short: unsupported */
}
/*
 * Fill in the topology map from its configured source.
 * For MV_DDR_CFG_SPD: validates the dram device/module type, derives
 * timing data, bus/cs masks and cas latencies from the spd contents.
 * For MV_DDR_CFG_DEFAULT: fills unset cas latencies from the speed-bin
 * tables. Returns the updated map, or NULL on any unsupported spd value.
 */
struct mv_ddr_topology_map *mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum hws_speed_bin speed_bin_index;
	enum hws_ddr_freq freq = DDR_FREQ_LAST;
	unsigned int tclk;	/* clock period, ps */
	unsigned char val = 0;	/* scratch for successive spd fields */
	int i;
	/* resolve sample-at-reset frequency before any tclk-derived work */
	if (tm->interface_params[0].memory_freq == DDR_FREQ_SAR)
		tm->interface_params[0].memory_freq = mv_ddr_init_freq_get();
	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/* check dram device type */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return NULL;
		}
		/* update topology map with timing data */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return NULL;
		}
		/* update device width in topology map */
		tm->interface_params[0].bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);
		/* update die capacity in topology map */
		tm->interface_params[0].memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);
		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();
		/* update cs bit mask in topology map (same mask for every octet) */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].cs_bitmask = val;
		}
		/* check dram module type: only unbuffered variants supported */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return NULL;
		}
		/*
		 * update mirror bit mask in topology map; the spd mirror flag
		 * is shifted to bit 1 — presumably the cs1 position, since
		 * only odd ranks are mirrored — TODO confirm against consumer
		 */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].mirror_enable_bitmask = val << 1;
		}
		/* clock period in ps from the frequency table */
		tclk = 1000000 / freq_val[tm->interface_params[0].memory_freq];
		/* update cas write latency (cwl) */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_wl = val;
		/* update cas latency (cl) from taa-min and supported-cl list */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they unset */
		speed_bin_index = tm->interface_params[0].speed_bin_index;
		freq = tm->interface_params[0].memory_freq;
		if (tm->interface_params[0].cas_l == 0)
			tm->interface_params[0].cas_l =
				cas_latency_table[speed_bin_index].cl_val[freq];
		if (tm->interface_params[0].cas_wl == 0)
			tm->interface_params[0].cas_wl =
				cas_write_latency_table[speed_bin_index].cl_val[freq];
	}
	return tm;
}
unsigned short mv_ddr_bus_bit_mask_get(void)
{
unsigned short pri_and_ext_bus_width = 0x0;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
if (tm->cfg_src == MV_DDR_CFG_SPD) {
enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);
switch (pri_bus_width) {
case MV_DDR_PRI_BUS_WIDTH_16:
pri_and_ext_bus_width = BUS_MASK_16BIT;
break;
case MV_DDR_PRI_BUS_WIDTH_32:
pri_and_ext_bus_width = BUS_MASK_32BIT;
break;
case MV_DDR_PRI_BUS_WIDTH_64:
pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK;
break;
default:
pri_and_ext_bus_width = 0x0;
}
if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1);
}
return pri_and_ext_bus_width;
}
unsigned int mv_ddr_if_bus_width_get(void)
{
	/*
	 * Translate the topology's bus-activation mask into an interface
	 * width in bits (ecc variants count as their data width); unknown
	 * masks report 0 and print a diagnostic.
	 */
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	switch (tm->bus_act_mask) {
	case BUS_MASK_16BIT:
	case BUS_MASK_16BIT_ECC:
	case BUS_MASK_16BIT_ECC_PUP3:
		return 16;
	case BUS_MASK_32BIT:
	case BUS_MASK_32BIT_ECC:
	case MV_DDR_32BIT_ECC_PUP8_BUS_MASK:
		return 32;
	case MV_DDR_64BIT_BUS_MASK:
	case MV_DDR_64BIT_ECC_PUP8_BUS_MASK:
		return 64;
	default:
		printf("mv_ddr: unsupported bus active mask parameter found\n");
		return 0;
	}
}

View file

@ -0,0 +1,129 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_TOPOLOGY_H
#define _MV_DDR_TOPOLOGY_H
/* ddr bus masks */
#define BUS_MASK_32BIT 0xf
#define BUS_MASK_32BIT_ECC 0x1f
#define BUS_MASK_16BIT 0x3
#define BUS_MASK_16BIT_ECC 0x13
#define BUS_MASK_16BIT_ECC_PUP3 0xb
#define MV_DDR_64BIT_BUS_MASK 0xff
#define MV_DDR_64BIT_ECC_PUP8_BUS_MASK 0x1ff
#define MV_DDR_32BIT_ECC_PUP8_BUS_MASK 0x10f
/* source of ddr configuration data */
enum mv_ddr_cfg_src {
MV_DDR_CFG_DEFAULT, /* based on data in mv_ddr_topology_map structure */
MV_DDR_CFG_SPD, /* based on data in spd */
MV_DDR_CFG_USER, /* based on data from user */
MV_DDR_CFG_STATIC, /* based on data from user in register-value format */
MV_DDR_CFG_LAST
};
enum mv_ddr_num_of_sub_phys_per_ddr_unit {
SINGLE_SUB_PHY = 1,
TWO_SUB_PHYS = 2
};
enum mv_ddr_temperature {
MV_DDR_TEMP_LOW,
MV_DDR_TEMP_NORMAL,
MV_DDR_TEMP_HIGH
};
enum mv_ddr_timing {
MV_DDR_TIM_DEFAULT,
MV_DDR_TIM_1T,
MV_DDR_TIM_2T
};
enum mv_ddr_timing_data {
MV_DDR_TCK_AVG_MIN, /* sdram min cycle time (t ck avg min) */
MV_DDR_TAA_MIN, /* min cas latency time (t aa min) */
MV_DDR_TRFC1_MIN, /* min refresh recovery delay time (t rfc1 min) */
MV_DDR_TWR_MIN, /* min write recovery time (t wr min) */
MV_DDR_TRCD_MIN, /* min ras to cas delay time (t rcd min) */
MV_DDR_TRP_MIN, /* min row precharge delay time (t rp min) */
MV_DDR_TRC_MIN, /* min active to active/refresh delay time (t rc min) */
MV_DDR_TRAS_MIN, /* min active to precharge delay time (t ras min) */
MV_DDR_TRRD_S_MIN, /* min activate to activate delay time (t rrd_s min), diff bank group */
MV_DDR_TRRD_L_MIN, /* min activate to activate delay time (t rrd_l min), same bank group */
MV_DDR_TFAW_MIN, /* min four activate window delay time (t faw min) */
MV_DDR_TWTR_S_MIN, /* min write to read time (t wtr s min), diff bank group */
MV_DDR_TWTR_L_MIN, /* min write to read time (t wtr l min), same bank group */
MV_DDR_TDATA_LAST
};
enum mv_ddr_dev_width { /* sdram device width */
MV_DDR_DEV_WIDTH_4BIT,
MV_DDR_DEV_WIDTH_8BIT,
MV_DDR_DEV_WIDTH_16BIT,
MV_DDR_DEV_WIDTH_32BIT,
MV_DDR_DEV_WIDTH_LAST
};
enum mv_ddr_die_capacity { /* total sdram capacity per die, megabits */
	MV_DDR_DIE_CAP_256MBIT,
	/*
	 * fix: this enumerator previously carried "= 0", which aliased
	 * 512MBIT with 256MBIT and shifted every later value down by one,
	 * breaking the 1:1 correspondence with the spd byte-4 density codes
	 * decoded by mv_ddr_spd_die_capacity_get() (0x00 -> 256MBIT,
	 * 0x01 -> 512MBIT, ...).
	 */
	MV_DDR_DIE_CAP_512MBIT,
	MV_DDR_DIE_CAP_1GBIT,
	MV_DDR_DIE_CAP_2GBIT,
	MV_DDR_DIE_CAP_4GBIT,
	MV_DDR_DIE_CAP_8GBIT,
	MV_DDR_DIE_CAP_16GBIT,
	MV_DDR_DIE_CAP_32GBIT,
	MV_DDR_DIE_CAP_12GBIT,
	MV_DDR_DIE_CAP_24GBIT,
	MV_DDR_DIE_CAP_LAST
};
enum mv_ddr_pkg_rank { /* number of package ranks per dimm */
MV_DDR_PKG_RANK_1,
MV_DDR_PKG_RANK_2,
MV_DDR_PKG_RANK_3,
MV_DDR_PKG_RANK_4,
MV_DDR_PKG_RANK_5,
MV_DDR_PKG_RANK_6,
MV_DDR_PKG_RANK_7,
MV_DDR_PKG_RANK_8,
MV_DDR_PKG_RANK_LAST
};
enum mv_ddr_pri_bus_width { /* number of primary bus width bits */
MV_DDR_PRI_BUS_WIDTH_8,
MV_DDR_PRI_BUS_WIDTH_16,
MV_DDR_PRI_BUS_WIDTH_32,
MV_DDR_PRI_BUS_WIDTH_64,
MV_DDR_PRI_BUS_WIDTH_LAST
};
enum mv_ddr_bus_width_ext { /* number of extension bus width bits */
MV_DDR_BUS_WIDTH_EXT_0,
MV_DDR_BUS_WIDTH_EXT_8,
MV_DDR_BUS_WIDTH_EXT_LAST
};
enum mv_ddr_die_count {
MV_DDR_DIE_CNT_1,
MV_DDR_DIE_CNT_2,
MV_DDR_DIE_CNT_3,
MV_DDR_DIE_CNT_4,
MV_DDR_DIE_CNT_5,
MV_DDR_DIE_CNT_6,
MV_DDR_DIE_CNT_7,
MV_DDR_DIE_CNT_8,
MV_DDR_DIE_CNT_LAST
};
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk);
unsigned int mv_ddr_cwl_calc(unsigned int tclk);
struct mv_ddr_topology_map *mv_ddr_topology_map_update(void);
struct dram_config *mv_ddr_dram_config_update(void);
unsigned short mv_ddr_bus_bit_mask_get(void);
unsigned int mv_ddr_if_bus_width_get(void);
#endif /* _MV_DDR_TOPOLOGY_H */

View file

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _SEQ_EXEC_H
#define _SEQ_EXEC_H
#define NA 0xff
#define DEFAULT_PARAM 0
#define MV_BOARD_TCLK_ERROR 0xffffffff
#define NO_DATA 0xffffffff
#define MAX_DATA_ARRAY 5
#define FIRST_CELL 0
/* Operation types */
enum mv_op {
WRITE_OP,
DELAY_OP,
POLL_OP,
};
/* Operation parameters */
struct op_params {
u32 unit_base_reg;
u32 unit_offset;
u32 mask;
u32 data[MAX_DATA_ARRAY]; /* data array */
u8 wait_time; /* msec */
u16 num_of_loops; /* for polling only */
};
/*
* Sequence parameters. Each sequence contains:
* 1. Sequence id.
* 2. Sequence size (total amount of operations during the sequence)
* 3. a series of operations. operations can be write, poll or delay
* 4. index in the data array (the entry where the relevant data sits)
*/
struct cfg_seq {
struct op_params *op_params_ptr;
u8 cfg_seq_size;
u8 data_arr_idx;
};
extern struct cfg_seq serdes_seq_db[];
/*
* A generic function type for executing an operation (write, poll or delay)
*/
typedef int (*op_execute_func_ptr)(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
/* Specific functions for executing each operation */
int write_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int delay_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int poll_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx);
enum mv_op get_cfg_seq_op(struct op_params *params);
int mv_seq_exec(u32 serdes_num, u32 seq_id);
#endif /*_SEQ_EXEC_H*/

View file

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef __silicon_if_H
#define __silicon_if_H
/* max number of devices supported by driver */
#ifdef CO_CPU_RUN
#define HWS_MAX_DEVICE_NUM (1)
#else
#define HWS_MAX_DEVICE_NUM (16)
#endif
#endif /* __silicon_if_H */

View file

@ -3,13 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "xor_regs.h"
@ -21,39 +14,48 @@
#endif
static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS];
static u32 ui_xor_regs_mask_backup[MAX_CS];
static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, u32 cs_size, u32 base_delta)
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
u32 reg, ui, base, cs_count;
u32 reg, ui, cs_count;
uint64_t base, size_mask;
ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
ui_xor_regs_base_backup[ui] =
reg_read(XOR_BASE_ADDR_REG(0, ui));
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
ui_xor_regs_mask_backup[ui] =
reg_read(XOR_SIZE_MASK_REG(0, ui));
reg = 0;
for (ui = 0; ui < (num_of_cs); ui++) {
/* Enable Window x for each CS */
reg |= (0x1 << (ui));
/* Enable Window x for each CS */
reg |= (0x3 << ((ui * 2) + 16));
for (ui = 0, cs_count = 0;
(cs_count < num_of_cs) && (ui < 8);
ui++, cs_count++) {
if (cs_ena & (1 << ui)) {
/* Enable Window x for each CS */
reg |= (0x1 << (ui));
/* Enable Window x for each CS */
reg |= (0x3 << ((ui * 2) + 16));
}
}
reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);
cs_count = 0;
for (ui = 0; ui < num_of_cs; ui++) {
for (ui = 0, cs_count = 0;
(cs_count < num_of_cs) && (ui < 8);
ui++, cs_count++) {
if (cs_ena & (1 << ui)) {
/*
* window x - Base - 0x00000000,
* Attribute 0x0e - DRAM
*/
base = cs_size * ui + base_delta;
/* fixed size 2GB for each CS */
size_mask = 0x7FFF0000;
switch (ui) {
case 0:
base |= 0xe00;
@ -67,13 +69,19 @@ void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, u32 cs_size, u32 base_delta)
case 3:
base |= 0x700;
break;
case 4: /* SRAM */
base = 0x40000000;
/* configure as shared transaction */
base |= 0x1F00;
size_mask = 0xF0000;
break;
}
reg_write(XOR_BASE_ADDR_REG(0, cs_count), base);
reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
size_mask = (cs_size / _64K) - 1;
size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) & XESMRX_SIZE_MASK_MASK;
/* window x - Size */
reg_write(XOR_SIZE_MASK_REG(0, cs_count), 0x7fff0000);
cs_count++;
reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
}
}
@ -87,10 +95,10 @@ void mv_sys_xor_finish(void)
u32 ui;
reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
reg_write(XOR_BASE_ADDR_REG(0, ui),
ui_xor_regs_base_backup[ui]);
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
reg_write(XOR_SIZE_MASK_REG(0, ui),
ui_xor_regs_mask_backup[ui]);
@ -153,11 +161,14 @@ int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
return MV_OK;
}
int mv_xor_mem_init(u32 chan, u32 start_ptr, u32 block_size,
int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
u32 init_val_high, u32 init_val_low)
{
u32 temp;
if (block_size == _4G)
block_size -= 1;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN)
return MV_BAD_PARAM;
@ -328,28 +339,126 @@ void ddr3_new_tip_ecc_scrub(void)
{
u32 cs_c, max_cs;
u32 cs_ena = 0;
u32 dev_num = 0;
uint64_t total_mem_size, cs_mem_size = 0;
printf("DDR3 Training Sequence - Start scrubbing\n");
max_cs = hws_ddr3_tip_max_cs_get();
printf("DDR Training Sequence - Start scrubbing\n");
max_cs = ddr3_tip_max_cs_get(dev_num);
for (cs_c = 0; cs_c < max_cs; cs_c++)
cs_ena |= 1 << cs_c;
mv_sys_xor_init(max_cs, cs_ena, 0x80000000, 0);
/* assume that all CS have same size */
ddr3_calc_mem_cs_size(0, &cs_mem_size);
mv_xor_mem_init(0, 0x00000000, 0x80000000, 0xdeadbeef, 0xdeadbeef);
mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
total_mem_size = max_cs * cs_mem_size;
mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
mv_xor_mem_init(0, 0x80000000, 0x40000000, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
/* Return XOR State */
mv_sys_xor_finish();
printf("DDR3 Training Sequence - End scrubbing\n");
}
/*
* mv_xor_transfer - Transfer data from source to destination in one of
* three modes: XOR, CRC32 or DMA
*
* DESCRIPTION:
* This function initiates XOR channel, according to function parameters,
* in order to perform XOR, CRC32 or DMA transaction.
* To gain maximum performance the user is asked to keep the following
* restrictions:
* 1) Selected engine is available (not busy).
* 2) This module does not take into consideration CPU MMU issues.
* In order for the XOR engine to access the appropriate source
* and destination, address parameters must be given in system
* physical mode.
* 3) This API does not take care of cache coherency issues. The source,
* destination and, in case of chain, the descriptor list are assumed
* to be cache coherent.
* 4) Parameters validity.
*
* INPUT:
* chan - XOR channel number.
* type - One of three: XOR, CRC32 and DMA operations.
* xor_chain_ptr - address of chain pointer
*
* OUTPUT:
* None.
*
* RETURN:
* MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
*
*******************************************************************************/
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
{
u32 temp;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN) {
DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
return MV_BAD_PARAM;
}
if (mv_xor_state_get(chan) == MV_ACTIVE) {
DB(printf("%s: ERR. Channel is already active\n", __func__));
return MV_BUSY;
}
if (xor_chain_ptr == 0x0) {
DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__));
return MV_BAD_PARAM;
}
/* read configuration register and mask the operation mode field */
temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
temp &= ~XEXCR_OPERATION_MODE_MASK;
switch (type) {
case MV_XOR:
if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
__func__));
return MV_BAD_PARAM;
}
/* set the operation mode to XOR */
temp |= XEXCR_OPERATION_MODE_XOR;
break;
case MV_DMA:
if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
__func__));
return MV_BAD_PARAM;
}
/* set the operation mode to DMA */
temp |= XEXCR_OPERATION_MODE_DMA;
break;
case MV_CRC32:
if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
__func__));
return MV_BAD_PARAM;
}
/* set the operation mode to CRC32 */
temp |= XEXCR_OPERATION_MODE_CRC;
break;
default:
return MV_BAD_PARAM;
}
/* write the operation mode to the register */
reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
/*
* update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
* Pointer Register (XExNDPR)
*/
reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
xor_chain_ptr);
/* start transfer */
reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XESTART_MASK);
return MV_OK;
}

View file

@ -8,8 +8,6 @@
#define SRAM_BASE 0x40000000
#include "ddr3_hws_hw_training_def.h"
#define MV_XOR_MAX_UNIT 2 /* XOR unit == XOR engine */
#define MV_XOR_MAX_CHAN 4 /* total channels for all units */
#define MV_XOR_MAX_CHAN_PER_UNIT 2 /* channels for units */
@ -87,5 +85,6 @@ int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl);
int mv_xor_command_set(u32 chan, enum mv_command command);
int mv_xor_override_set(u32 chan, enum xor_override_target target, u32 win_num,
int enable);
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr);
#endif

View file

@ -1562,6 +1562,10 @@ static int mvneta_start(struct udevice *dev)
phydev = phy_connect(pp->bus, pp->phyaddr, dev,
pp->phy_interface);
if (!phydev) {
printf("phy_connect failed\n");
return -ENODEV;
}
pp->phydev = phydev;
phy_config(phydev);

View file

@ -96,14 +96,48 @@ struct chip_serdes_phy_config {
void __iomem *hpipe3_base_addr;
u32 comphy_lanes_count;
u32 comphy_mux_bitcount;
const fdt32_t *comphy_mux_lane_order;
u32 cp_index;
};
/* Register helper functions */
void reg_set(void __iomem *addr, u32 data, u32 mask);
void reg_set_silent(void __iomem *addr, u32 data, u32 mask);
void reg_set16(void __iomem *addr, u16 data, u16 mask);
void reg_set_silent16(void __iomem *addr, u16 data, u16 mask);
static inline void reg_set_silent(void __iomem *addr, u32 data, u32 mask)
{
u32 reg_data;
reg_data = readl(addr);
reg_data &= ~mask;
reg_data |= data;
writel(reg_data, addr);
}
static inline void reg_set(void __iomem *addr, u32 data, u32 mask)
{
debug("Write to address = %#010lx, data = %#010x (mask = %#010x) - ",
(unsigned long)addr, data, mask);
debug("old value = %#010x ==> ", readl(addr));
reg_set_silent(addr, data, mask);
debug("new value %#010x\n", readl(addr));
}
static inline void reg_set_silent16(void __iomem *addr, u16 data, u16 mask)
{
u16 reg_data;
reg_data = readw(addr);
reg_data &= ~mask;
reg_data |= data;
writew(reg_data, addr);
}
static inline void reg_set16(void __iomem *addr, u16 data, u16 mask)
{
debug("Write to address = %#010lx, data = %#06x (mask = %#06x) - ",
(unsigned long)addr, data, mask);
debug("old value = %#06x ==> ", readw(addr));
reg_set_silent16(addr, data, mask);
debug("new value %#06x\n", readw(addr));
}
/* SoC specific init functions */
#ifdef CONFIG_ARMADA_3700

View file

@ -13,6 +13,38 @@
DECLARE_GLOBAL_DATA_PTR;
struct comphy_mux_data a3700_comphy_mux_data[] = {
/* Lane 0 */
{
4,
{
{ PHY_TYPE_UNCONNECTED, 0x0 },
{ PHY_TYPE_SGMII1, 0x0 },
{ PHY_TYPE_USB3_HOST0, 0x1 },
{ PHY_TYPE_USB3_DEVICE, 0x1 }
}
},
/* Lane 1 */
{
3,
{
{ PHY_TYPE_UNCONNECTED, 0x0},
{ PHY_TYPE_SGMII0, 0x0},
{ PHY_TYPE_PEX0, 0x1}
}
},
/* Lane 2 */
{
4,
{
{ PHY_TYPE_UNCONNECTED, 0x0},
{ PHY_TYPE_SATA0, 0x0},
{ PHY_TYPE_USB3_HOST0, 0x1},
{ PHY_TYPE_USB3_DEVICE, 0x1}
}
},
};
struct sgmii_phy_init_data_fix {
u16 addr;
u16 value;
@ -105,12 +137,11 @@ static u16 sgmii_phy_init[512] = {
*
* return: 1 on success, 0 on timeout
*/
static u32 comphy_poll_reg(void *addr, u32 val, u32 mask, u32 timeout,
u8 op_type)
static u32 comphy_poll_reg(void *addr, u32 val, u32 mask, u8 op_type)
{
u32 rval = 0xDEAD;
u32 rval = 0xDEAD, timeout;
for (; timeout > 0; timeout--) {
for (timeout = PLL_LOCK_TIMEOUT; timeout > 0; timeout--) {
if (op_type == POLL_16B_REG)
rval = readw(addr); /* 16 bit */
else
@ -133,85 +164,77 @@ static u32 comphy_poll_reg(void *addr, u32 val, u32 mask, u32 timeout,
*/
static int comphy_pcie_power_up(u32 speed, u32 invert)
{
int ret;
int ret;
debug_enter();
/*
* 1. Enable max PLL.
*/
reg_set16((void __iomem *)LANE_CFG1_ADDR(PCIE),
bf_use_max_pll_rate, 0);
reg_set16(phy_addr(PCIE, LANE_CFG1), bf_use_max_pll_rate, 0);
/*
* 2. Select 20 bit SERDES interface.
*/
reg_set16((void __iomem *)GLOB_CLK_SRC_LO_ADDR(PCIE),
bf_cfg_sel_20b, 0);
reg_set16(phy_addr(PCIE, GLOB_CLK_SRC_LO), bf_cfg_sel_20b, 0);
/*
* 3. Force to use reg setting for PCIe mode
*/
reg_set16((void __iomem *)MISC_REG1_ADDR(PCIE),
bf_sel_bits_pcie_force, 0);
reg_set16(phy_addr(PCIE, MISC_REG1), bf_sel_bits_pcie_force, 0);
/*
* 4. Change RX wait
*/
reg_set16((void __iomem *)PWR_MGM_TIM1_ADDR(PCIE), 0x10C, 0xFFFF);
reg_set16(phy_addr(PCIE, PWR_MGM_TIM1), 0x10C, 0xFFFF);
/*
* 5. Enable idle sync
*/
reg_set16((void __iomem *)UNIT_CTRL_ADDR(PCIE),
0x60 | rb_idle_sync_en, 0xFFFF);
reg_set16(phy_addr(PCIE, UNIT_CTRL), 0x60 | rb_idle_sync_en, 0xFFFF);
/*
* 6. Enable the output of 100M/125M/500M clock
*/
reg_set16((void __iomem *)MISC_REG0_ADDR(PCIE),
reg_set16(phy_addr(PCIE, MISC_REG0),
0xA00D | rb_clk500m_en | rb_clk100m_125m_en, 0xFFFF);
/*
* 7. Enable TX
*/
reg_set((void __iomem *)PHY_REF_CLK_ADDR, 0x1342, 0xFFFFFFFF);
reg_set(PCIE_REF_CLK_ADDR, 0x1342, 0xFFFFFFFF);
/*
* 8. Check crystal jumper setting and program the Power and PLL
* Control accordingly
*/
if (get_ref_clk() == 40) {
reg_set16((void __iomem *)PWR_PLL_CTRL_ADDR(PCIE),
0xFC63, 0xFFFF); /* 40 MHz */
/* 40 MHz */
reg_set16(phy_addr(PCIE, PWR_PLL_CTRL), 0xFC63, 0xFFFF);
} else {
reg_set16((void __iomem *)PWR_PLL_CTRL_ADDR(PCIE),
0xFC62, 0xFFFF); /* 25 MHz */
/* 25 MHz */
reg_set16(phy_addr(PCIE, PWR_PLL_CTRL), 0xFC62, 0xFFFF);
}
/*
* 9. Override Speed_PLL value and use MAC PLL
*/
reg_set16((void __iomem *)KVCO_CAL_CTRL_ADDR(PCIE),
0x0040 | rb_use_max_pll_rate, 0xFFFF);
reg_set16(phy_addr(PCIE, KVCO_CAL_CTRL), 0x0040 | rb_use_max_pll_rate,
0xFFFF);
/*
* 10. Check the Polarity invert bit
*/
if (invert & PHY_POLARITY_TXD_INVERT) {
reg_set16((void __iomem *)SYNC_PATTERN_ADDR(PCIE),
phy_txd_inv, 0);
}
if (invert & PHY_POLARITY_TXD_INVERT)
reg_set16(phy_addr(PCIE, SYNC_PATTERN), phy_txd_inv, 0);
if (invert & PHY_POLARITY_RXD_INVERT) {
reg_set16((void __iomem *)SYNC_PATTERN_ADDR(PCIE),
phy_rxd_inv, 0);
}
if (invert & PHY_POLARITY_RXD_INVERT)
reg_set16(phy_addr(PCIE, SYNC_PATTERN), phy_rxd_inv, 0);
/*
* 11. Release SW reset
*/
reg_set16((void __iomem *)GLOB_PHY_CTRL0_ADDR(PCIE),
reg_set16(phy_addr(PCIE, GLOB_PHY_CTRL0),
rb_mode_core_clk_freq_sel | rb_mode_pipe_width_32,
bf_soft_rst | bf_mode_refdiv);
@ -219,12 +242,11 @@ static int comphy_pcie_power_up(u32 speed, u32 invert)
udelay(PLL_SET_DELAY_US);
/* Assert PCLK enabled */
ret = comphy_poll_reg((void *)LANE_STAT1_ADDR(PCIE), /* address */
ret = comphy_poll_reg(phy_addr(PCIE, LANE_STAT1), /* address */
rb_txdclk_pclk_en, /* value */
rb_txdclk_pclk_en, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_16B_REG); /* 16bit */
if (ret == 0)
if (!ret)
printf("Failed to lock PCIe PLL\n");
debug_exit();
@ -233,6 +255,17 @@ static int comphy_pcie_power_up(u32 speed, u32 invert)
return ret;
}
/*
* reg_set_indirect
*
* return: void
*/
static void reg_set_indirect(u32 reg, u16 data, u16 mask)
{
reg_set(rh_vsreg_addr, reg, 0xFFFFFFFF);
reg_set(rh_vsreg_data, data, mask);
}
/*
* comphy_sata_power_up
*
@ -240,65 +273,57 @@ static int comphy_pcie_power_up(u32 speed, u32 invert)
*/
static int comphy_sata_power_up(void)
{
int ret;
int ret;
debug_enter();
/*
* 0. Swap SATA TX lines
*/
reg_set((void __iomem *)rh_vsreg_addr,
vphy_sync_pattern_reg, 0xFFFFFFFF);
reg_set((void __iomem *)rh_vsreg_data, bs_txd_inv, bs_txd_inv);
reg_set_indirect(vphy_sync_pattern_reg, bs_txd_inv, bs_txd_inv);
/*
* 1. Select 40-bit data width width
*/
reg_set((void __iomem *)rh_vsreg_addr, vphy_loopback_reg0, 0xFFFFFFFF);
reg_set((void __iomem *)rh_vsreg_data, 0x800, bs_phyintf_40bit);
reg_set_indirect(vphy_loopback_reg0, 0x800, bs_phyintf_40bit);
/*
* 2. Select reference clock and PHY mode (SATA)
*/
reg_set((void __iomem *)rh_vsreg_addr, vphy_power_reg0, 0xFFFFFFFF);
if (get_ref_clk() == 40) {
reg_set((void __iomem *)rh_vsreg_data,
0x3, 0x00FF); /* 40 MHz */
/* 40 MHz */
reg_set_indirect(vphy_power_reg0, 0x3, 0x00FF);
} else {
reg_set((void __iomem *)rh_vsreg_data,
0x1, 0x00FF); /* 25 MHz */
/* 20 MHz */
reg_set_indirect(vphy_power_reg0, 0x1, 0x00FF);
}
/*
* 3. Use maximum PLL rate (no power save)
*/
reg_set((void __iomem *)rh_vsreg_addr, vphy_calctl_reg, 0xFFFFFFFF);
reg_set((void __iomem *)rh_vsreg_data,
bs_max_pll_rate, bs_max_pll_rate);
reg_set_indirect(vphy_calctl_reg, bs_max_pll_rate, bs_max_pll_rate);
/*
* 4. Reset reserved bit (??)
*/
reg_set((void __iomem *)rh_vsreg_addr, vphy_reserve_reg, 0xFFFFFFFF);
reg_set((void __iomem *)rh_vsreg_data, 0, bs_phyctrl_frm_pin);
reg_set_indirect(vphy_reserve_reg, 0, bs_phyctrl_frm_pin);
/*
* 5. Set vendor-specific configuration (??)
*/
reg_set((void __iomem *)rh_vs0_a, vsata_ctrl_reg, 0xFFFFFFFF);
reg_set((void __iomem *)rh_vs0_d, bs_phy_pu_pll, bs_phy_pu_pll);
reg_set(rh_vs0_a, vsata_ctrl_reg, 0xFFFFFFFF);
reg_set(rh_vs0_d, bs_phy_pu_pll, bs_phy_pu_pll);
/* Wait for > 55 us to allow PLL be enabled */
udelay(PLL_SET_DELAY_US);
/* Assert SATA PLL enabled */
reg_set((void __iomem *)rh_vsreg_addr, vphy_loopback_reg0, 0xFFFFFFFF);
ret = comphy_poll_reg((void *)rh_vsreg_data, /* address */
bs_pll_ready_tx, /* value */
bs_pll_ready_tx, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
reg_set(rh_vsreg_addr, vphy_loopback_reg0, 0xFFFFFFFF);
ret = comphy_poll_reg(rh_vsreg_data, /* address */
bs_pll_ready_tx, /* value */
bs_pll_ready_tx, /* mask */
POLL_32B_REG); /* 32bit */
if (!ret)
printf("Failed to lock SATA PLL\n");
debug_exit();
@ -306,138 +331,172 @@ static int comphy_sata_power_up(void)
return ret;
}
/*
* usb3_reg_set16
*
* return: void
*/
static void usb3_reg_set16(u32 reg, u16 data, u16 mask, u32 lane)
{
/*
* When Lane 2 PHY is for USB3, access the PHY registers
* through indirect Address and Data registers INDIR_ACC_PHY_ADDR
* (RD00E0178h [31:0]) and INDIR_ACC_PHY_DATA (RD00E017Ch [31:0])
* within the SATA Host Controller registers, Lane 2 base register
* offset is 0x200
*/
if (lane == 2)
reg_set_indirect(USB3PHY_LANE2_REG_BASE_OFFSET + reg, data,
mask);
else
reg_set16(phy_addr(USB3, reg), data, mask);
}
/*
* comphy_usb3_power_up
*
* return: 1 if PLL locked (OK), 0 otherwise (FAIL)
*/
static int comphy_usb3_power_up(u32 type, u32 speed, u32 invert)
static int comphy_usb3_power_up(u32 lane, u32 type, u32 speed, u32 invert)
{
int ret;
int ret;
debug_enter();
/*
* 1. Power up OTG module
*/
reg_set((void __iomem *)USB2_PHY_OTG_CTRL_ADDR, rb_pu_otg, 0);
reg_set(USB2_PHY_OTG_CTRL_ADDR, rb_pu_otg, 0);
/*
* 2. Set counter for 100us pulse in USB3 Host and Device
* restore default burst size limit (Reference Clock 31:24)
*/
reg_set((void __iomem *)USB3_CTRPUL_VAL_REG,
0x8 << 24, rb_usb3_ctr_100ns);
reg_set(USB3_CTRPUL_VAL_REG, 0x8 << 24, rb_usb3_ctr_100ns);
/* 0xd005c300 = 0x1001 */
/* set PRD_TXDEEMPH (3.5db de-emph) */
reg_set16((void __iomem *)LANE_CFG0_ADDR(USB3), 0x1, 0xFF);
usb3_reg_set16(LANE_CFG0, 0x1, 0xFF, lane);
/*
* unset BIT0: set Tx Electrical Idle Mode: Transmitter is in
* low impedance mode during electrical idle
* Set BIT0: enable transmitter in high impedance mode
* Set BIT[3:4]: delay 2 clock cycles for HiZ off latency
* Set BIT6: Tx detect Rx at HiZ mode
* Unset BIT15: set to 0 to set USB3 De-emphasize level to -3.5db
* together with bit 0 of COMPHY_REG_LANE_CFG0_ADDR
* register
*/
/* unset BIT4: set G2 Tx Datapath with no Delayed Latency */
/* unset BIT6: set Tx Detect Rx Mode at LoZ mode */
reg_set16((void __iomem *)LANE_CFG1_ADDR(USB3), 0x0, 0xFFFF);
usb3_reg_set16(LANE_CFG1,
tx_det_rx_mode | gen2_tx_data_dly_deft
| tx_elec_idle_mode_en,
prd_txdeemph1_mask | tx_det_rx_mode
| gen2_tx_data_dly_mask | tx_elec_idle_mode_en, lane);
/* 0xd005c310 = 0x93: set Spread Spectrum Clock Enabled */
reg_set16((void __iomem *)LANE_CFG4_ADDR(USB3),
bf_spread_spectrum_clock_en, 0x80);
/* 0xd005c310 = 0x93: set Spread Spectrum Clock Enabled */
usb3_reg_set16(LANE_CFG4, bf_spread_spectrum_clock_en, 0x80, lane);
/*
* set Override Margining Controls From the MAC: Use margining signals
* from lane configuration
*/
reg_set16((void __iomem *)TEST_MODE_CTRL_ADDR(USB3),
rb_mode_margin_override, 0xFFFF);
usb3_reg_set16(TEST_MODE_CTRL, rb_mode_margin_override, 0xFFFF, lane);
/* set Lane-to-Lane Bundle Clock Sampling Period = per PCLK cycles */
/* set Mode Clock Source = PCLK is generated from REFCLK */
reg_set16((void __iomem *)GLOB_CLK_SRC_LO_ADDR(USB3), 0x0, 0xFF);
usb3_reg_set16(GLOB_CLK_SRC_LO, 0x0, 0xFF, lane);
/* set G2 Spread Spectrum Clock Amplitude at 4K */
reg_set16((void __iomem *)GEN2_SETTING_2_ADDR(USB3), g2_tx_ssc_amp,
0xF000);
usb3_reg_set16(GEN2_SETTINGS_2, g2_tx_ssc_amp, 0xF000, lane);
/*
* unset G3 Spread Spectrum Clock Amplitude & set G3 TX and RX Register
* Master Current Select
*/
reg_set16((void __iomem *)GEN2_SETTING_3_ADDR(USB3), 0x0, 0xFFFF);
usb3_reg_set16(GEN2_SETTINGS_3, 0x0, 0xFFFF, lane);
/*
* 3. Check crystal jumper setting and program the Power and PLL
* Control accordingly
*/
if (get_ref_clk() == 40) {
reg_set16((void __iomem *)PWR_PLL_CTRL_ADDR(USB3), 0xFCA3,
0xFFFF); /* 40 MHz */
} else {
reg_set16((void __iomem *)PWR_PLL_CTRL_ADDR(USB3), 0xFCA2,
0xFFFF); /* 25 MHz */
}
/*
* 4. Change RX wait
*/
reg_set16((void __iomem *)PWR_MGM_TIM1_ADDR(USB3), 0x10C, 0xFFFF);
if (get_ref_clk() == 40) {
/* 40 MHz */
usb3_reg_set16(PWR_PLL_CTRL, 0xFCA3, 0xFFFF, lane);
usb3_reg_set16(PWR_MGM_TIM1, 0x10C, 0xFFFF, lane);
} else {
/* 25 MHz */
usb3_reg_set16(PWR_PLL_CTRL, 0xFCA2, 0xFFFF, lane);
usb3_reg_set16(PWR_MGM_TIM1, 0x107, 0xFFFF, lane);
}
/*
* 5. Enable idle sync
*/
reg_set16((void __iomem *)UNIT_CTRL_ADDR(USB3), 0x60 | rb_idle_sync_en,
0xFFFF);
usb3_reg_set16(UNIT_CTRL, 0x60 | rb_idle_sync_en, 0xFFFF, lane);
/*
* 6. Enable the output of 500M clock
*/
reg_set16((void __iomem *)MISC_REG0_ADDR(USB3), 0xA00D | rb_clk500m_en,
0xFFFF);
usb3_reg_set16(MISC_REG0, 0xA00D | rb_clk500m_en, 0xFFFF, lane);
/*
* 7. Set 20-bit data width
*/
reg_set16((void __iomem *)DIG_LB_EN_ADDR(USB3), 0x0400, 0xFFFF);
usb3_reg_set16(DIG_LB_EN, 0x0400, 0xFFFF, lane);
/*
* 8. Override Speed_PLL value and use MAC PLL
*/
reg_set16((void __iomem *)KVCO_CAL_CTRL_ADDR(USB3),
0x0040 | rb_use_max_pll_rate, 0xFFFF);
usb3_reg_set16(KVCO_CAL_CTRL, 0x0040 | rb_use_max_pll_rate, 0xFFFF,
lane);
/*
* 9. Check the Polarity invert bit
*/
if (invert & PHY_POLARITY_TXD_INVERT) {
reg_set16((void __iomem *)SYNC_PATTERN_ADDR(USB3),
phy_txd_inv, 0);
}
if (invert & PHY_POLARITY_TXD_INVERT)
usb3_reg_set16(SYNC_PATTERN, phy_txd_inv, 0, lane);
if (invert & PHY_POLARITY_RXD_INVERT) {
reg_set16((void __iomem *)SYNC_PATTERN_ADDR(USB3),
phy_rxd_inv, 0);
}
if (invert & PHY_POLARITY_RXD_INVERT)
usb3_reg_set16(SYNC_PATTERN, phy_rxd_inv, 0, lane);
/*
* 10. Release SW reset
* 10. Set max speed generation to USB3.0 5Gbps
*/
reg_set16((void __iomem *)GLOB_PHY_CTRL0_ADDR(USB3),
rb_mode_core_clk_freq_sel | rb_mode_pipe_width_32 | 0x20,
0xFFFF);
usb3_reg_set16(SYNC_MASK_GEN, 0x0400, 0x0C00, lane);
/*
* 11. Set capacitor value for FFE gain peaking to 0xF
*/
usb3_reg_set16(GEN3_SETTINGS_3, 0xF, 0xF, lane);
/*
* 12. Release SW reset
*/
usb3_reg_set16(GLOB_PHY_CTRL0,
rb_mode_core_clk_freq_sel | rb_mode_pipe_width_32
| 0x20, 0xFFFF, lane);
/* Wait for > 55 us to allow PCLK be enabled */
udelay(PLL_SET_DELAY_US);
/* Assert PCLK enabled */
ret = comphy_poll_reg((void *)LANE_STAT1_ADDR(USB3), /* address */
rb_txdclk_pclk_en, /* value */
rb_txdclk_pclk_en, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_16B_REG); /* 16bit */
if (ret == 0)
if (lane == 2) {
reg_set(rh_vsreg_addr,
LANE_STAT1 + USB3PHY_LANE2_REG_BASE_OFFSET,
0xFFFFFFFF);
ret = comphy_poll_reg(rh_vsreg_data, /* address */
rb_txdclk_pclk_en, /* value */
rb_txdclk_pclk_en, /* mask */
POLL_32B_REG); /* 32bit */
} else {
ret = comphy_poll_reg(phy_addr(USB3, LANE_STAT1), /* address */
rb_txdclk_pclk_en, /* value */
rb_txdclk_pclk_en, /* mask */
POLL_16B_REG); /* 16bit */
}
if (!ret)
printf("Failed to lock USB3 PLL\n");
/*
@ -454,7 +513,7 @@ static int comphy_usb3_power_up(u32 type, u32 speed, u32 invert)
* INT_MODE=ID in order to avoid unexpected
* behaviour or both interrupts together
*/
reg_set((void __iomem *)USB32_CTRL_BASE,
reg_set(USB32_CTRL_BASE,
usb32_ctrl_id_mode | usb32_ctrl_int_mode,
usb32_ctrl_id_mode | usb32_ctrl_soft_id |
usb32_ctrl_int_mode);
@ -472,7 +531,7 @@ static int comphy_usb3_power_up(u32 type, u32 speed, u32 invert)
*/
static int comphy_usb2_power_up(u8 usb32)
{
int ret;
int ret;
debug_enter();
@ -488,65 +547,61 @@ static int comphy_usb2_power_up(u8 usb32)
* See "PLL Settings for Typical REFCLK" table
*/
if (get_ref_clk() == 25) {
reg_set((void __iomem *)USB2_PHY_BASE(usb32),
5 | (96 << 16), 0x3F | (0xFF << 16) | (0x3 << 28));
reg_set(USB2_PHY_BASE(usb32), 5 | (96 << 16),
0x3F | (0xFF << 16) | (0x3 << 28));
}
/*
* 1. PHY pull up and disable USB2 suspend
*/
reg_set((void __iomem *)USB2_PHY_CTRL_ADDR(usb32),
reg_set(USB2_PHY_CTRL_ADDR(usb32),
RB_USB2PHY_SUSPM(usb32) | RB_USB2PHY_PU(usb32), 0);
if (usb32 != 0) {
/*
* 2. Power up OTG module
*/
reg_set((void __iomem *)USB2_PHY_OTG_CTRL_ADDR, rb_pu_otg, 0);
reg_set(USB2_PHY_OTG_CTRL_ADDR, rb_pu_otg, 0);
/*
* 3. Configure PHY charger detection
*/
reg_set((void __iomem *)USB2_PHY_CHRGR_DET_ADDR, 0,
reg_set(USB2_PHY_CHRGR_DET_ADDR, 0,
rb_cdp_en | rb_dcp_en | rb_pd_en | rb_cdp_dm_auto |
rb_enswitch_dp | rb_enswitch_dm | rb_pu_chrg_dtc);
}
/* Assert PLL calibration done */
ret = comphy_poll_reg((void *)USB2_PHY_CAL_CTRL_ADDR(usb32),
ret = comphy_poll_reg(USB2_PHY_CAL_CTRL_ADDR(usb32),
rb_usb2phy_pllcal_done, /* value */
rb_usb2phy_pllcal_done, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to end USB2 PLL calibration\n");
/* Assert impedance calibration done */
ret = comphy_poll_reg((void *)USB2_PHY_CAL_CTRL_ADDR(usb32),
ret = comphy_poll_reg(USB2_PHY_CAL_CTRL_ADDR(usb32),
rb_usb2phy_impcal_done, /* value */
rb_usb2phy_impcal_done, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to end USB2 impedance calibration\n");
/* Assert squetch calibration done */
ret = comphy_poll_reg((void *)USB2_PHY_RX_CHAN_CTRL1_ADDR(usb32),
ret = comphy_poll_reg(USB2_PHY_RX_CHAN_CTRL1_ADDR(usb32),
rb_usb2phy_sqcal_done, /* value */
rb_usb2phy_sqcal_done, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to end USB2 unknown calibration\n");
/* Assert PLL is ready */
ret = comphy_poll_reg((void *)USB2_PHY_PLL_CTRL0_ADDR(usb32),
ret = comphy_poll_reg(USB2_PHY_PLL_CTRL0_ADDR(usb32),
rb_usb2phy_pll_ready, /* value */
rb_usb2phy_pll_ready, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to lock USB2 PLL\n");
debug_exit();
@ -566,35 +621,34 @@ static int comphy_emmc_power_up(void)
/*
* 1. Bus power ON, Bus voltage 1.8V
*/
reg_set((void __iomem *)SDIO_HOST_CTRL1_ADDR, 0xB00, 0xF00);
reg_set(SDIO_HOST_CTRL1_ADDR, 0xB00, 0xF00);
/*
* 2. Set FIFO parameters
*/
reg_set((void __iomem *)SDIO_SDHC_FIFO_ADDR, 0x315, 0xFFFFFFFF);
reg_set(SDIO_SDHC_FIFO_ADDR, 0x315, 0xFFFFFFFF);
/*
* 3. Set Capabilities 1_2
*/
reg_set((void __iomem *)SDIO_CAP_12_ADDR, 0x25FAC8B2, 0xFFFFFFFF);
reg_set(SDIO_CAP_12_ADDR, 0x25FAC8B2, 0xFFFFFFFF);
/*
* 4. Set Endian
*/
reg_set((void __iomem *)SDIO_ENDIAN_ADDR, 0x00c00000, 0);
reg_set(SDIO_ENDIAN_ADDR, 0x00c00000, 0);
/*
* 4. Init PHY
*/
reg_set((void __iomem *)SDIO_PHY_TIMING_ADDR, 0x80000000, 0x80000000);
reg_set((void __iomem *)SDIO_PHY_PAD_CTRL0_ADDR, 0x50000000,
0xF0000000);
reg_set(SDIO_PHY_TIMING_ADDR, 0x80000000, 0x80000000);
reg_set(SDIO_PHY_PAD_CTRL0_ADDR, 0x50000000, 0xF0000000);
/*
* 5. DLL reset
*/
reg_set((void __iomem *)SDIO_DLL_RST_ADDR, 0xFFFEFFFF, 0);
reg_set((void __iomem *)SDIO_DLL_RST_ADDR, 0x00010000, 0);
reg_set(SDIO_DLL_RST_ADDR, 0xFFFEFFFF, 0);
reg_set(SDIO_DLL_RST_ADDR, 0x00010000, 0);
debug_exit();
@ -631,7 +685,7 @@ static void comphy_sgmii_phy_init(u32 lane, u32 speed)
val = sgmii_phy_init[addr];
}
phy_write16(lane, addr, val, 0xFFFF);
reg_set16(sgmiiphy_addr(lane, addr), val, 0xFFFF);
}
}
@ -642,14 +696,16 @@ static void comphy_sgmii_phy_init(u32 lane, u32 speed)
*/
static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
{
int ret;
int ret;
u32 saved_selector;
debug_enter();
/*
* 1. Configure PHY to SATA/SAS mode by setting pin PIN_PIPE_SEL=0
*/
reg_set((void __iomem *)COMPHY_SEL_ADDR, 0, rf_compy_select(lane));
saved_selector = readl(COMPHY_SEL_ADDR);
reg_set(COMPHY_SEL_ADDR, 0, 0xFFFFFFFF);
/*
* 2. Reset PHY by setting PHY input port PIN_RESET=1.
@ -657,7 +713,7 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* PHY TXP/TXN output to idle state during PHY initialization
* 4. Set PHY input port PIN_PU_PLL=0, PIN_PU_RX=0, PIN_PU_TX=0.
*/
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
reg_set(COMPHY_PHY_CFG1_ADDR(lane),
rb_pin_reset_comphy | rb_pin_tx_idle | rb_pin_pu_iveref,
rb_pin_reset_core | rb_pin_pu_pll |
rb_pin_pu_rx | rb_pin_pu_tx);
@ -665,21 +721,20 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
/*
* 5. Release reset to the PHY by setting PIN_RESET=0.
*/
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
0, rb_pin_reset_comphy);
reg_set(COMPHY_PHY_CFG1_ADDR(lane), 0, rb_pin_reset_comphy);
/*
* 7. Set PIN_PHY_GEN_TX[3:0] and PIN_PHY_GEN_RX[3:0] to decide
* COMPHY bit rate
*/
if (speed == PHY_SPEED_3_125G) { /* 3.125 GHz */
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
reg_set(COMPHY_PHY_CFG1_ADDR(lane),
(0x8 << rf_gen_rx_sel_shift) |
(0x8 << rf_gen_tx_sel_shift),
rf_gen_rx_select | rf_gen_tx_select);
} else if (speed == PHY_SPEED_1_25G) { /* 1.25 GHz */
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
reg_set(COMPHY_PHY_CFG1_ADDR(lane),
(0x6 << rf_gen_rx_sel_shift) |
(0x6 << rf_gen_tx_sel_shift),
rf_gen_rx_select | rf_gen_tx_select);
@ -695,26 +750,26 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
mdelay(10);
/* 9. Program COMPHY register PHY_MODE */
phy_write16(lane, PHY_PWR_PLL_CTRL_ADDR,
PHY_MODE_SGMII << rf_phy_mode_shift, rf_phy_mode_mask);
reg_set16(sgmiiphy_addr(lane, PWR_PLL_CTRL),
PHY_MODE_SGMII << rf_phy_mode_shift, rf_phy_mode_mask);
/*
* 10. Set COMPHY register REFCLK_SEL to select the correct REFCLK
* source
*/
phy_write16(lane, PHY_MISC_REG0_ADDR, 0, rb_ref_clk_sel);
reg_set16(sgmiiphy_addr(lane, MISC_REG0), 0, rb_ref_clk_sel);
/*
* 11. Set correct reference clock frequency in COMPHY register
* REF_FREF_SEL.
*/
if (get_ref_clk() == 40) {
phy_write16(lane, PHY_PWR_PLL_CTRL_ADDR,
0x4 << rf_ref_freq_sel_shift, rf_ref_freq_sel_mask);
reg_set16(sgmiiphy_addr(lane, PWR_PLL_CTRL),
0x4 << rf_ref_freq_sel_shift, rf_ref_freq_sel_mask);
} else {
/* 25MHz */
phy_write16(lane, PHY_PWR_PLL_CTRL_ADDR,
0x1 << rf_ref_freq_sel_shift, rf_ref_freq_sel_mask);
reg_set16(sgmiiphy_addr(lane, PWR_PLL_CTRL),
0x1 << rf_ref_freq_sel_shift, rf_ref_freq_sel_mask);
}
/* 12. Program COMPHY register PHY_GEN_MAX[1:0] */
@ -730,7 +785,7 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* bus width
*/
/* 10bit */
phy_write16(lane, PHY_DIG_LB_EN_ADDR, 0, rf_data_width_mask);
reg_set16(sgmiiphy_addr(lane, DIG_LB_EN), 0, rf_data_width_mask);
/*
* 14. As long as DFE function needs to be enabled in any mode,
@ -773,10 +828,10 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* 18. Check the PHY Polarity invert bit
*/
if (invert & PHY_POLARITY_TXD_INVERT)
phy_write16(lane, PHY_SYNC_PATTERN_ADDR, phy_txd_inv, 0);
reg_set16(sgmiiphy_addr(lane, SYNC_PATTERN), phy_txd_inv, 0);
if (invert & PHY_POLARITY_RXD_INVERT)
phy_write16(lane, PHY_SYNC_PATTERN_ADDR, phy_rxd_inv, 0);
reg_set16(sgmiiphy_addr(lane, SYNC_PATTERN), phy_rxd_inv, 0);
/*
* 19. Set PHY input ports PIN_PU_PLL, PIN_PU_TX and PIN_PU_RX to 1
@ -784,7 +839,7 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* programming should be done before PIN_PU_PLL=1. There should be
* no register programming for normal PHY operation from this point.
*/
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
reg_set(COMPHY_PHY_CFG1_ADDR(lane),
rb_pin_pu_pll | rb_pin_pu_rx | rb_pin_pu_tx,
rb_pin_pu_pll | rb_pin_pu_rx | rb_pin_pu_tx);
@ -792,19 +847,17 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* 20. Wait for PHY power up sequence to finish by checking output ports
* PIN_PLL_READY_TX=1 and PIN_PLL_READY_RX=1.
*/
ret = comphy_poll_reg((void *)COMPHY_PHY_STAT1_ADDR(lane), /* address */
ret = comphy_poll_reg(COMPHY_PHY_STAT1_ADDR(lane), /* address */
rb_pll_ready_tx | rb_pll_ready_rx, /* value */
rb_pll_ready_tx | rb_pll_ready_rx, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to lock PLL for SGMII PHY %d\n", lane);
/*
* 21. Set COMPHY input port PIN_TX_IDLE=0
*/
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane),
0x0, rb_pin_tx_idle);
reg_set(COMPHY_PHY_CFG1_ADDR(lane), 0x0, rb_pin_tx_idle);
/*
* 22. After valid data appear on PIN_RXDATA bus, set PIN_RX_INIT=1.
@ -814,17 +867,20 @@ static int comphy_sgmii_power_up(u32 lane, u32 speed, u32 invert)
* PIN_RX_INIT_DONE= 1.
* Please refer to RX initialization part for details.
*/
reg_set((void __iomem *)COMPHY_PHY_CFG1_ADDR(lane), rb_phy_rx_init,
0x0);
reg_set(COMPHY_PHY_CFG1_ADDR(lane), rb_phy_rx_init, 0x0);
ret = comphy_poll_reg((void *)COMPHY_PHY_STAT1_ADDR(lane), /* address */
ret = comphy_poll_reg(COMPHY_PHY_STAT1_ADDR(lane), /* address */
rb_rx_init_done, /* value */
rb_rx_init_done, /* mask */
PLL_LOCK_TIMEOUT, /* timeout */
POLL_32B_REG); /* 32bit */
if (ret == 0)
if (!ret)
printf("Failed to init RX of SGMII PHY %d\n", lane);
/*
* Restore saved selector.
*/
reg_set(COMPHY_SEL_ADDR, saved_selector, 0xFFFFFFFF);
debug_exit();
return ret;
@ -844,7 +900,7 @@ void comphy_dedicated_phys_init(void)
*/
if (usb32 == 0) {
node = fdt_node_offset_by_compatible(
blob, -1, "marvell,armada-3700-ehci");
blob, -1, "marvell,armada3700-ehci");
} else {
node = fdt_node_offset_by_compatible(
blob, -1, "marvell,armada3700-xhci");
@ -853,7 +909,7 @@ void comphy_dedicated_phys_init(void)
if (node > 0) {
if (fdtdec_get_is_enabled(blob, node)) {
ret = comphy_usb2_power_up(usb32);
if (ret == 0)
if (!ret)
printf("Failed to initialize UTMI PHY\n");
else
debug("UTMI PHY init succeed\n");
@ -871,7 +927,7 @@ void comphy_dedicated_phys_init(void)
if (node > 0) {
if (fdtdec_get_is_enabled(blob, node)) {
ret = comphy_sata_power_up();
if (ret == 0)
if (!ret)
printf("Failed to initialize SATA PHY\n");
else
debug("SATA PHY init succeed\n");
@ -892,7 +948,7 @@ void comphy_dedicated_phys_init(void)
if (node > 0) {
if (fdtdec_get_is_enabled(blob, node)) {
ret = comphy_emmc_power_up();
if (ret == 0)
if (!ret)
printf("Failed to initialize SDIO/eMMC PHY\n");
else
debug("SDIO/eMMC PHY init succeed\n");
@ -915,6 +971,10 @@ int comphy_a3700_init(struct chip_serdes_phy_config *chip_cfg,
debug_enter();
/* Initialize PHY mux */
chip_cfg->mux_data = a3700_comphy_mux_data;
comphy_mux_init(chip_cfg, serdes_map, COMPHY_SEL_ADDR);
for (lane = 0, comphy_map = serdes_map; lane < comphy_max_count;
lane++, comphy_map++) {
debug("Initialize serdes number %d\n", lane);
@ -933,7 +993,8 @@ int comphy_a3700_init(struct chip_serdes_phy_config *chip_cfg,
case PHY_TYPE_USB3_HOST0:
case PHY_TYPE_USB3_DEVICE:
ret = comphy_usb3_power_up(comphy_map->type,
ret = comphy_usb3_power_up(lane,
comphy_map->type,
comphy_map->speed,
comphy_map->invert);
break;
@ -950,7 +1011,7 @@ int comphy_a3700_init(struct chip_serdes_phy_config *chip_cfg,
ret = 1;
break;
}
if (ret == 0)
if (!ret)
printf("PLL is not locked - Failed to initialize lane %d\n",
lane);
}

View file

@ -9,7 +9,8 @@
#include "comphy.h"
#include "comphy_hpipe.h"
#define MVEBU_REG(offs) ((uintptr_t)MVEBU_REGISTER(offs))
#define MVEBU_REG(offs) \
((void __iomem *)(ulong)MVEBU_REGISTER(offs))
#define DEFAULT_REFCLK_MHZ 25
#define PLL_SET_DELAY_US 600
@ -21,9 +22,8 @@
* COMPHY SB definitions
*/
#define COMPHY_SEL_ADDR MVEBU_REG(0x0183FC)
#define rf_compy_select(lane) (0x1 << (((lane) == 1) ? 4 : 0))
#define COMPHY_PHY_CFG1_ADDR(lane) MVEBU_REG(0x018300 + (lane) * 0x28)
#define COMPHY_PHY_CFG1_ADDR(lane) MVEBU_REG(0x018300 + (1 - lane) * 0x28)
#define rb_pin_pu_iveref BIT(1)
#define rb_pin_reset_core BIT(11)
#define rb_pin_reset_comphy BIT(12)
@ -37,7 +37,7 @@
#define rf_gen_tx_select (0x0F << rf_gen_tx_sel_shift)
#define rb_phy_rx_init BIT(30)
#define COMPHY_PHY_STAT1_ADDR(lane) MVEBU_REG(0x018318 + (lane) * 0x28)
#define COMPHY_PHY_STAT1_ADDR(lane) MVEBU_REG(0x018318 + (1 - lane) * 0x28)
#define rb_rx_init_done BIT(0)
#define rb_pll_ready_rx BIT(2)
#define rb_pll_ready_tx BIT(3)
@ -58,125 +58,115 @@
#define USB2PHY2_BASE MVEBU_REG(0x05F000)
#define USB32_CTRL_BASE MVEBU_REG(0x05D800)
#define USB3PHY_SHFT 2
#define USB3PHY_LANE2_REG_BASE_OFFSET 0x200
#define SGMIIPHY_BASE(l) (l == 1 ? USB3PHY_BASE : PCIEPHY_BASE)
#define SGMIIPHY_ADDR(l, a) (((a & 0x00007FF) * 2) | SGMIIPHY_BASE(l))
#define phy_read16(l, a) read16((void __iomem *)SGMIIPHY_ADDR(l, a))
#define phy_write16(l, a, data, mask) \
reg_set16((void __iomem *)SGMIIPHY_ADDR(l, a), data, mask)
/*
 * Translate a 16-bit SGMII PHY register index into its memory-mapped address.
 * Register indices are doubled because the 16-bit registers are spaced on
 * 32-bit boundaries. Lane 1 registers live in the PCIe PHY block, lane 0 in
 * the USB3 PHY block.
 */
static inline void __iomem *sgmiiphy_addr(u32 lane, u32 addr)
{
	void __iomem *base = (lane == 1) ? PCIEPHY_BASE : USB3PHY_BASE;

	return base + (addr & 0x00007FF) * 2;
}
/* units */
#define PCIE 1
#define USB3 2
/* PHY units that share the indexed 16-bit register layout */
enum phy_unit {
	PCIE = 1,
	USB3 = 2,
};
#define PHY_BASE(unit) ((unit == PCIE) ? PCIEPHY_BASE : USB3PHY_BASE)
#define PHY_SHFT(unit) ((unit == PCIE) ? PCIEPHY_SHFT : USB3PHY_SHFT)
/*
 * Translate a PHY register index to its memory-mapped address within the
 * given unit, scaling by that unit's register stride.
 */
static inline void __iomem *phy_addr(enum phy_unit unit, u32 addr)
{
	return (unit == PCIE) ? PCIEPHY_BASE + addr * PCIEPHY_SHFT
			      : USB3PHY_BASE + addr * USB3PHY_SHFT;
}
/* bit definition for USB32_CTRL_BASE (USB32 Control Mode) */
#define usb32_ctrl_id_mode BIT(0)
#define usb32_ctrl_soft_id BIT(1)
#define usb32_ctrl_int_mode BIT(4)
#define PHY_PWR_PLL_CTRL_ADDR 0x01 /* for phy_read16 and phy_write16 */
#define PWR_PLL_CTRL_ADDR(unit) \
(PHY_PWR_PLL_CTRL_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define PWR_PLL_CTRL 0x01
#define rf_phy_mode_shift 5
#define rf_phy_mode_mask (0x7 << rf_phy_mode_shift)
#define rf_ref_freq_sel_shift 0
#define rf_ref_freq_sel_mask (0x1F << rf_ref_freq_sel_shift)
#define PHY_MODE_SGMII 0x4
/* for phy_read16 and phy_write16 */
#define PHY_REG_KVCO_CAL_CTRL_ADDR 0x02
#define KVCO_CAL_CTRL_ADDR(unit) \
(PHY_REG_KVCO_CAL_CTRL_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define KVCO_CAL_CTRL 0x02
#define rb_use_max_pll_rate BIT(12)
#define rb_force_calibration_done BIT(9)
/* for phy_read16 and phy_write16 */
#define PHY_DIG_LB_EN_ADDR 0x23
#define DIG_LB_EN_ADDR(unit) \
(PHY_DIG_LB_EN_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define DIG_LB_EN 0x23
#define rf_data_width_shift 10
#define rf_data_width_mask (0x3 << rf_data_width_shift)
/* for phy_read16 and phy_write16 */
#define PHY_SYNC_PATTERN_ADDR 0x24
#define SYNC_PATTERN_ADDR(unit) \
(PHY_SYNC_PATTERN_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define SYNC_PATTERN 0x24
#define phy_txd_inv BIT(10)
#define phy_rxd_inv BIT(11)
/* for phy_read16 and phy_write16 */
#define PHY_REG_UNIT_CTRL_ADDR 0x48
#define UNIT_CTRL_ADDR(unit) \
(PHY_REG_UNIT_CTRL_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define SYNC_MASK_GEN 0x25
#define rb_idle_sync_en BIT(12)
/* for phy_read16 and phy_write16 */
#define PHY_REG_GEN2_SETTINGS_2 0x3e
#define GEN2_SETTING_2_ADDR(unit) \
(PHY_REG_GEN2_SETTINGS_2 * PHY_SHFT(unit) + PHY_BASE(unit))
#define UNIT_CTRL 0x48
#define GEN2_SETTINGS_2 0x3e
#define g2_tx_ssc_amp BIT(14)
/* for phy_read16 and phy_write16 */
#define PHY_REG_GEN2_SETTINGS_3 0x3f
#define GEN2_SETTING_3_ADDR(unit) \
(PHY_REG_GEN2_SETTINGS_3 * PHY_SHFT(unit) + PHY_BASE(unit))
#define GEN2_SETTINGS_3 0x3f
/* for phy_read16 and phy_write16 */
#define PHY_MISC_REG0_ADDR 0x4f
#define MISC_REG0_ADDR(unit) \
(PHY_MISC_REG0_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define GEN3_SETTINGS_3 0x112
#define MISC_REG0 0x4f
#define rb_clk100m_125m_en BIT(4)
#define rb_clk500m_en BIT(7)
#define rb_ref_clk_sel BIT(10)
/* for phy_read16 and phy_write16 */
#define PHY_REG_IFACE_REF_CLK_CTRL_ADDR 0x51
#define UNIT_IFACE_REF_CLK_CTRL_ADDR(unit) \
(PHY_REG_IFACE_REF_CLK_CTRL_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define UNIT_IFACE_REF_CLK_CTRL 0x51
#define rb_ref1m_gen_div_force BIT(8)
#define rf_ref1m_gen_div_value_shift 0
#define rf_ref1m_gen_div_value_mask (0xFF << rf_ref1m_gen_div_value_shift)
/* for phy_read16 and phy_write16 */
#define PHY_REG_ERR_CNT_CONST_CTRL_ADDR 0x6A
#define UNIT_ERR_CNT_CONST_CTRL_ADDR(unit) \
(PHY_REG_ERR_CNT_CONST_CTRL_ADDR * PHY_SHFT(unit) + PHY_BASE(unit))
#define UNIT_ERR_CNT_CONST_CTRL 0x6a
#define rb_fast_dfe_enable BIT(13)
#define MISC_REG1_ADDR(u) (0x73 * PHY_SHFT(u) + PHY_BASE(u))
#define MISC_REG1 0x73
#define bf_sel_bits_pcie_force BIT(15)
#define LANE_CFG0_ADDR(u) (0x180 * PHY_SHFT(u) + PHY_BASE(u))
#define LANE_CFG0 0x180
#define bf_use_max_pll_rate BIT(9)
#define LANE_CFG1_ADDR(u) (0x181 * PHY_SHFT(u) + PHY_BASE(u))
#define LANE_CFG1 0x181
#define bf_use_max_pll_rate BIT(9)
/* 0x5c310 = 0x93 (set BIT7) */
#define LANE_CFG4_ADDR(u) (0x188 * PHY_SHFT(u) + PHY_BASE(u))
#define prd_txdeemph1_mask BIT(15)
#define tx_det_rx_mode BIT(6)
#define gen2_tx_data_dly_deft (2 << 3)
#define gen2_tx_data_dly_mask (BIT(3) | BIT(4))
#define tx_elec_idle_mode_en BIT(0)
#define LANE_CFG4 0x188
#define bf_spread_spectrum_clock_en BIT(7)
#define LANE_STAT1_ADDR(u) (0x183 * PHY_SHFT(u) + PHY_BASE(u))
#define LANE_STAT1 0x183
#define rb_txdclk_pclk_en BIT(0)
#define GLOB_PHY_CTRL0_ADDR(u) (0x1c1 * PHY_SHFT(u) + PHY_BASE(u))
#define GLOB_PHY_CTRL0 0x1c1
#define bf_soft_rst BIT(0)
#define bf_mode_refdiv 0x30
#define rb_mode_core_clk_freq_sel BIT(9)
#define rb_mode_pipe_width_32 BIT(3)
#define TEST_MODE_CTRL_ADDR(u) (0x1c2 * PHY_SHFT(u) + PHY_BASE(u))
#define TEST_MODE_CTRL 0x1c2
#define rb_mode_margin_override BIT(2)
#define GLOB_CLK_SRC_LO_ADDR(u) (0x1c3 * PHY_SHFT(u) + PHY_BASE(u))
#define GLOB_CLK_SRC_LO 0x1c3
#define bf_cfg_sel_20b BIT(15)
#define PWR_MGM_TIM1_ADDR(u) (0x1d0 * PHY_SHFT(u) + PHY_BASE(u))
#define PWR_MGM_TIM1 0x1d0
#define PHY_REF_CLK_ADDR (0x4814 + PCIE_BASE)
#define PCIE_REF_CLK_ADDR (PCIE_BASE + 0x4814)
#define USB3_CTRPUL_VAL_REG (0x20 + USB32_BASE)
#define USB3H_CTRPUL_VAL_REG (0x3454 + USB32H_BASE)

View file

@ -17,11 +17,13 @@
DECLARE_GLOBAL_DATA_PTR;
static char *get_speed_string(u32 speed)
static const char *get_speed_string(u32 speed)
{
char *speed_strings[] = {"1.25 Gbps", "1.5 Gbps", "2.5 Gbps",
"3.0 Gbps", "3.125 Gbps", "5 Gbps", "6 Gbps",
"6.25 Gbps", "10.31 Gbps" };
static const char * const speed_strings[] = {
"1.25 Gbps", "1.5 Gbps", "2.5 Gbps",
"3.0 Gbps", "3.125 Gbps", "5 Gbps", "6 Gbps",
"6.25 Gbps", "10.31 Gbps"
};
if (speed < 0 || speed > PHY_SPEED_MAX)
return "invalid";
@ -29,14 +31,16 @@ static char *get_speed_string(u32 speed)
return speed_strings[speed];
}
static char *get_type_string(u32 type)
static const char *get_type_string(u32 type)
{
char *type_strings[] = {"UNCONNECTED", "PEX0", "PEX1", "PEX2", "PEX3",
"SATA0", "SATA1", "SATA2", "SATA3", "SGMII0",
"SGMII1", "SGMII2", "SGMII3", "QSGMII",
"USB3_HOST0", "USB3_HOST1", "USB3_DEVICE",
"XAUI0", "XAUI1", "XAUI2", "XAUI3",
"RXAUI0", "RXAUI1", "SFI", "IGNORE"};
static const char * const type_strings[] = {
"UNCONNECTED", "PEX0", "PEX1", "PEX2", "PEX3",
"SATA0", "SATA1", "SATA2", "SATA3", "SGMII0",
"SGMII1", "SGMII2", "SGMII3", "QSGMII",
"USB3_HOST0", "USB3_HOST1", "USB3_DEVICE",
"XAUI0", "XAUI1", "XAUI2", "XAUI3",
"RXAUI0", "RXAUI1", "SFI", "IGNORE"
};
if (type < 0 || type > PHY_TYPE_MAX)
return "invalid";
@ -44,44 +48,6 @@ static char *get_type_string(u32 type)
return type_strings[type];
}
/*
 * Read-modify-write a 32-bit register with debug tracing: clear the bits in
 * @mask, then OR in @data. Old and new register values are printed via
 * debug(); the actual update is done by reg_set_silent().
 */
void reg_set(void __iomem *addr, u32 data, u32 mask)
{
	debug("Write to address = %#010lx, data = %#010x (mask = %#010x) - ",
	      (unsigned long)addr, data, mask);
	debug("old value = %#010x ==> ", readl(addr));
	reg_set_silent(addr, data, mask);
	debug("new value %#010x\n", readl(addr));
}
/*
 * Read-modify-write a 32-bit register without any debug output: bits set in
 * @mask are cleared, then @data is ORed in.
 */
void reg_set_silent(void __iomem *addr, u32 data, u32 mask)
{
	writel((readl(addr) & ~mask) | data, addr);
}
/*
 * 16-bit variant of reg_set(): read-modify-write a 16-bit register with
 * debug tracing. The update itself is done by reg_set_silent16().
 */
void reg_set16(void __iomem *addr, u16 data, u16 mask)
{
	debug("Write to address = %#010lx, data = %#06x (mask = %#06x) - ",
	      (unsigned long)addr, data, mask);
	debug("old value = %#06x ==> ", readw(addr));
	reg_set_silent16(addr, data, mask);
	debug("new value %#06x\n", readw(addr));
}
/*
 * Read-modify-write a 16-bit register without any debug output: bits set in
 * @mask are cleared, then @data is ORed in.
 */
void reg_set_silent16(void __iomem *addr, u16 data, u16 mask)
{
	writew((readw(addr) & ~mask) | data, addr);
}
void comphy_print(struct chip_serdes_phy_config *chip_cfg,
struct comphy_map *comphy_map_data)
{
@ -134,6 +100,10 @@ static int comphy_probe(struct udevice *dev)
return -EINVAL;
}
chip_cfg->comphy_mux_lane_order =
fdtdec_locate_array(blob, node, "mux-lane-order",
chip_cfg->comphy_lanes_count);
if (device_is_compatible(dev, "marvell,comphy-armada-3700"))
chip_cfg->ptr_comphy_chip_init = comphy_a3700_init;

View file

@ -78,7 +78,8 @@ static u32 comphy_mux_get_mux_value(struct comphy_mux_data *mux_data,
static void comphy_mux_reg_write(struct comphy_mux_data *mux_data,
struct comphy_map *comphy_map_data,
int comphy_max_lanes,
void __iomem *selector_base, u32 bitcount)
void __iomem *selector_base,
const fdt32_t *mux_lane_order, u32 bitcount)
{
u32 lane, value, offset, mask;
@ -89,7 +90,15 @@ static void comphy_mux_reg_write(struct comphy_mux_data *mux_data,
if (comphy_map_data->type == PHY_TYPE_IGNORE)
continue;
offset = lane * bitcount;
/*
* if the order of nodes in selector base register is
* nontrivial, use mapping from mux_lane_order
*/
if (mux_lane_order)
offset = fdt32_to_cpu(mux_lane_order[lane]) * bitcount;
else
offset = lane * bitcount;
mask = (((1 << bitcount) - 1) << offset);
value = (comphy_mux_get_mux_value(mux_data,
comphy_map_data->type,
@ -105,6 +114,7 @@ void comphy_mux_init(struct chip_serdes_phy_config *chip_cfg,
void __iomem *selector_base)
{
struct comphy_mux_data *mux_data;
const fdt32_t *mux_lane_order;
u32 mux_bitcount;
u32 comphy_max_lanes;
@ -112,13 +122,14 @@ void comphy_mux_init(struct chip_serdes_phy_config *chip_cfg,
comphy_max_lanes = chip_cfg->comphy_lanes_count;
mux_data = chip_cfg->mux_data;
mux_lane_order = chip_cfg->comphy_mux_lane_order;
mux_bitcount = chip_cfg->comphy_mux_bitcount;
/* check if the configuration is valid */
comphy_mux_check_config(mux_data, comphy_map_data, comphy_max_lanes);
/* Init COMPHY selectors */
comphy_mux_reg_write(mux_data, comphy_map_data, comphy_max_lanes,
selector_base, mux_bitcount);
selector_base, mux_lane_order, mux_bitcount);
debug_exit();
}

View file

@ -104,6 +104,7 @@ config ICH_SPI
config MVEBU_A3700_SPI
bool "Marvell Armada 3700 SPI driver"
select CLK_ARMADA_3720
help
Enable the Marvell Armada 3700 SPI driver. This driver can be
used to access the SPI NOR flash on platforms embedding this

View file

@ -9,6 +9,7 @@
#include <dm.h>
#include <malloc.h>
#include <spi.h>
#include <clk.h>
#include <wait_bit.h>
#include <asm/io.h>
@ -21,9 +22,8 @@ DECLARE_GLOBAL_DATA_PTR;
#define MVEBU_SPI_A3700_CLK_POL BIT(7)
#define MVEBU_SPI_A3700_FIFO_EN BIT(17)
#define MVEBU_SPI_A3700_SPI_EN_0 BIT(16)
#define MVEBU_SPI_A3700_CLK_PRESCALE_BIT 0
#define MVEBU_SPI_A3700_CLK_PRESCALE_MASK \
(0x1f << MVEBU_SPI_A3700_CLK_PRESCALE_BIT)
#define MVEBU_SPI_A3700_CLK_PRESCALE_MASK 0x1f
/* SPI registers */
struct spi_reg {
@ -35,8 +35,7 @@ struct spi_reg {
struct mvebu_spi_platdata {
struct spi_reg *spireg;
unsigned int frequency;
unsigned int clock;
struct clk clk;
};
static void spi_cs_activate(struct spi_reg *reg, int cs)
@ -177,17 +176,18 @@ static int mvebu_spi_set_speed(struct udevice *bus, uint hz)
{
struct mvebu_spi_platdata *plat = dev_get_platdata(bus);
struct spi_reg *reg = plat->spireg;
u32 data;
u32 data, prescale;
data = readl(&reg->cfg);
/* Set Prescaler */
data &= ~MVEBU_SPI_A3700_CLK_PRESCALE_MASK;
prescale = DIV_ROUND_UP(clk_get_rate(&plat->clk), hz);
if (prescale > 0x1f)
prescale = 0x1f;
else if (prescale > 0xf)
prescale = 0x10 + (prescale + 1) / 2;
/* Calculate Prescaler = (spi_input_freq / spi_max_freq) */
if (hz > plat->frequency)
hz = plat->frequency;
data |= plat->clock / hz;
data &= ~MVEBU_SPI_A3700_CLK_PRESCALE_MASK;
data |= prescale & MVEBU_SPI_A3700_CLK_PRESCALE_MASK;
writel(data, &reg->cfg);
@ -251,21 +251,24 @@ static int mvebu_spi_probe(struct udevice *bus)
static int mvebu_spi_ofdata_to_platdata(struct udevice *bus)
{
struct mvebu_spi_platdata *plat = dev_get_platdata(bus);
int ret;
plat->spireg = (struct spi_reg *)devfdt_get_addr(bus);
/*
* FIXME
* Right now, mvebu does not have a clock infrastructure in U-Boot
* which should be used to query the input clock to the SPI
* controller. Once this clock driver is integrated into U-Boot
* it should be used to read the input clock and the DT property
* can be removed.
*/
plat->clock = fdtdec_get_int(gd->fdt_blob, dev_of_offset(bus),
"clock-frequency", 160000);
plat->frequency = fdtdec_get_int(gd->fdt_blob, dev_of_offset(bus),
"spi-max-frequency", 40000);
ret = clk_get_by_index(bus, 0, &plat->clk);
if (ret) {
dev_err(bus, "cannot get clock\n");
return ret;
}
return 0;
}
/*
 * Driver remove hook: release the clock reference obtained with
 * clk_get_by_index() in ofdata_to_platdata().
 */
static int mvebu_spi_remove(struct udevice *bus)
{
	struct mvebu_spi_platdata *plat = dev_get_platdata(bus);

	clk_free(&plat->clk);

	return 0;
}
@ -293,4 +296,5 @@ U_BOOT_DRIVER(mvebu_spi) = {
.ofdata_to_platdata = mvebu_spi_ofdata_to_platdata,
.platdata_auto_alloc_size = sizeof(struct mvebu_spi_platdata),
.probe = mvebu_spi_probe,
.remove = mvebu_spi_remove,
};

View file

@ -60,6 +60,15 @@ config WDT_SANDBOX
can be probed and supports all of the methods of WDT, but does not
really do anything.
config WDT_ARMADA_37XX
bool "Marvell Armada 37xx watchdog timer support"
depends on WDT && ARMADA_3700
help
Enable this to support Watchdog Timer on Marvell Armada 37xx SoC.
	  There are 4 possible clocks which can be used on these SoCs. This
	  driver uses the second clock (ID 1), on the assumption that Linux's
	  driver does the same.
config WDT_ASPEED
bool "Aspeed ast2400/ast2500 watchdog timer support"
depends on WDT

View file

@ -16,6 +16,7 @@ obj-$(CONFIG_TANGIER_WATCHDOG) += tangier_wdt.o
obj-$(CONFIG_ULP_WATCHDOG) += ulp_wdog.o
obj-$(CONFIG_WDT) += wdt-uclass.o
obj-$(CONFIG_WDT_SANDBOX) += sandbox_wdt.o
obj-$(CONFIG_WDT_ARMADA_37XX) += armada-37xx-wdt.o
obj-$(CONFIG_WDT_ASPEED) += ast_wdt.o
obj-$(CONFIG_WDT_BCM6345) += bcm6345_wdt.o
obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o

View file

@ -0,0 +1,175 @@
// SPDX-License-Identifier: GPL-2.0+
/*
* Marvell Armada 37xx SoC Watchdog Driver
*
* Marek Behun <marek.behun@nic.cz>
*/
#include <common.h>
#include <dm.h>
#include <wdt.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
DECLARE_GLOBAL_DATA_PTR;
/* Private driver data for the Armada 37xx watchdog */
struct a37xx_wdt {
	void __iomem *sel_reg;	/* counter/watchdog selection register */
	void __iomem *reg;	/* counter register block base */
	ulong clk_rate;		/* counter input clock rate, in Hz */
	u64 timeout;		/* reload value in counter ticks; 0 = not armed */
};
/*
 * We use Counter 1 for the watchdog timer because Marvell's Linux driver
 * uses it by default.
 */
#define CNTR_CTRL 0x10
#define CNTR_CTRL_ENABLE 0x0001
#define CNTR_CTRL_ACTIVE 0x0002
#define CNTR_CTRL_MODE_MASK 0x000c
#define CNTR_CTRL_MODE_ONESHOT 0x0000
#define CNTR_CTRL_PRESCALE_MASK 0xff00
#define CNTR_CTRL_PRESCALE_MIN 2
#define CNTR_CTRL_PRESCALE_SHIFT 8
#define CNTR_COUNT_LOW 0x14
#define CNTR_COUNT_HIGH 0x18
static void set_counter_value(struct a37xx_wdt *priv)
{
writel(priv->timeout & 0xffffffff, priv->reg + CNTR_COUNT_LOW);
writel(priv->timeout >> 32, priv->reg + CNTR_COUNT_HIGH);
}
/* Start the counter by setting the ENABLE bit in the control register. */
static void a37xx_wdt_enable(struct a37xx_wdt *priv)
{
	writel(readl(priv->reg + CNTR_CTRL) | CNTR_CTRL_ENABLE,
	       priv->reg + CNTR_CTRL);
}
/* Stop the counter by clearing the ENABLE bit in the control register. */
static void a37xx_wdt_disable(struct a37xx_wdt *priv)
{
	writel(readl(priv->reg + CNTR_CTRL) & ~CNTR_CTRL_ENABLE,
	       priv->reg + CNTR_CTRL);
}
/*
 * Ping the watchdog: reload the counter with the timeout configured by
 * a37xx_wdt_start(). Returns -EINVAL if no timeout is set (start() not
 * called yet, or cleared by expire_now()), because reloading with zero
 * would fire the watchdog immediately.
 */
static int a37xx_wdt_reset(struct udevice *dev)
{
	struct a37xx_wdt *priv = dev_get_priv(dev);

	if (!priv->timeout)
		return -EINVAL;

	/* counter must be stopped while the reload value is written */
	a37xx_wdt_disable(priv);
	set_counter_value(priv);
	a37xx_wdt_enable(priv);

	return 0;
}
/*
 * Force an immediate expiry by loading a zero count into the counter.
 * Note: this clears priv->timeout, so a subsequent reset() will return
 * -EINVAL until start() is called again.
 */
static int a37xx_wdt_expire_now(struct udevice *dev, ulong flags)
{
	struct a37xx_wdt *priv = dev_get_priv(dev);

	a37xx_wdt_disable(priv);
	priv->timeout = 0;
	set_counter_value(priv);
	a37xx_wdt_enable(priv);

	return 0;
}
/*
 * Arm the watchdog with a timeout of @ms milliseconds.
 *
 * The counter is configured for one-shot mode with the minimum prescaler,
 * and the reload value is computed from the input clock rate divided by
 * that prescaler. Returns -EBUSY if the counter is already active.
 */
static int a37xx_wdt_start(struct udevice *dev, u64 ms, ulong flags)
{
	struct a37xx_wdt *priv = dev_get_priv(dev);
	u32 reg;

	reg = readl(priv->reg + CNTR_CTRL);

	if (reg & CNTR_CTRL_ACTIVE)
		return -EBUSY;

	/* set mode */
	reg = (reg & ~CNTR_CTRL_MODE_MASK) | CNTR_CTRL_MODE_ONESHOT;

	/* set prescaler to the min value */
	reg &= ~CNTR_CTRL_PRESCALE_MASK;
	reg |= CNTR_CTRL_PRESCALE_MIN << CNTR_CTRL_PRESCALE_SHIFT;

	/* ticks = ms * (clk_rate / prescaler) / 1000 */
	priv->timeout = ms * priv->clk_rate / 1000 / CNTR_CTRL_PRESCALE_MIN;

	writel(reg, priv->reg + CNTR_CTRL);

	set_counter_value(priv);
	a37xx_wdt_enable(priv);

	return 0;
}
/* Stop the watchdog counter; the configured timeout is kept for reset(). */
static int a37xx_wdt_stop(struct udevice *dev)
{
	struct a37xx_wdt *priv = dev_get_priv(dev);

	a37xx_wdt_disable(priv);

	return 0;
}
/*
 * Probe: map the selection register (first "reg" resource) and the counter
 * register block (second "reg" resource), derive the input clock rate from
 * the SoC reference clock, and route the watchdog to Counter 1.
 */
static int a37xx_wdt_probe(struct udevice *dev)
{
	struct a37xx_wdt *priv = dev_get_priv(dev);
	fdt_addr_t addr;

	addr = dev_read_addr_index(dev, 0);
	if (addr == FDT_ADDR_T_NONE)
		goto err;
	priv->sel_reg = (void __iomem *)addr;

	addr = dev_read_addr_index(dev, 1);
	if (addr == FDT_ADDR_T_NONE)
		goto err;
	priv->reg = (void __iomem *)addr;

	/* get_ref_clk() reports MHz; convert to Hz — TODO confirm units */
	priv->clk_rate = (ulong)get_ref_clk() * 1000000;

	/* make sure the counter is not running with stale settings */
	a37xx_wdt_disable(priv);

	/*
	 * We use timer 1 as watchdog timer (because Marvell's Linux uses that
	 * timer as default), therefore we only set bit TIMER1_IS_WCHDOG_TIMER.
	 */
	writel(1 << 1, priv->sel_reg);

	return 0;
err:
	dev_err(dev, "no io address\n");
	return -ENODEV;
}
/* Watchdog uclass operations implemented by this driver */
static const struct wdt_ops a37xx_wdt_ops = {
	.start = a37xx_wdt_start,
	.reset = a37xx_wdt_reset,
	.stop = a37xx_wdt_stop,
	.expire_now = a37xx_wdt_expire_now,
};

/* Device tree match table */
static const struct udevice_id a37xx_wdt_ids[] = {
	{ .compatible = "marvell,armada-3700-wdt" },
	{}
};

U_BOOT_DRIVER(a37xx_wdt) = {
	.name = "armada_37xx_wdt",
	.id = UCLASS_WDT,
	.of_match = a37xx_wdt_ids,
	.probe = a37xx_wdt_probe,
	.priv_auto_alloc_size = sizeof(struct a37xx_wdt),
	.ops = &a37xx_wdt_ops,
};

View file

@ -0,0 +1,108 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* Copyright (C) 2018 Marek Behun <marek.behun@nic.cz>
*
* Based on mvebu_armada-37xx.h by Stefan Roese <sr@denx.de>
*/
#ifndef _CONFIG_TURRIS_MOX_H
#define _CONFIG_TURRIS_MOX_H
#define CONFIG_LAST_STAGE_INIT
/*
* High Level Configuration Options (easy to change)
*/
#define CONFIG_DISPLAY_BOARDINFO_LATE
/* additions for new ARM relocation support */
#define CONFIG_SYS_SDRAM_BASE 0x00000000
#define CONFIG_NR_DRAM_BANKS 1
/* auto boot */
#define CONFIG_PREBOOT
#define CONFIG_SYS_BAUDRATE_TABLE { 9600, 19200, 38400, 57600, \
115200, 230400, 460800, 921600 }
/*
* For booting Linux, the board info and command line data
* have to be in the first 8 MB of memory, since this is
* the maximum mapped by the Linux kernel during initialization.
*/
#define CONFIG_CMDLINE_TAG /* enable passing of ATAGs */
#define CONFIG_INITRD_TAG /* enable INITRD tag */
#define CONFIG_SETUP_MEMORY_TAGS /* enable memory tag */
#define CONFIG_SYS_CBSIZE 1024 /* Console I/O Buff Size */
/*
* Size of malloc() pool
*/
#define CONFIG_SYS_MALLOC_LEN (4 << 20) /* 4MiB for malloc() */
/*
* Other required minimal configurations
*/
#define CONFIG_ARCH_CPU_INIT /* call arch_cpu_init() */
#define CONFIG_SYS_LOAD_ADDR 0x00800000 /* default load adr- 8M */
#define CONFIG_SYS_MEMTEST_START 0x00800000 /* 8M */
#define CONFIG_SYS_MEMTEST_END 0x00ffffff /*(_16M -1) */
#define CONFIG_SYS_RESET_ADDRESS 0xffff0000 /* Rst Vector Adr */
#define CONFIG_SYS_MAXARGS 32 /* max number of command args */
#define CONFIG_SYS_ALT_MEMTEST
/* End of 16M scrubbed by training in bootrom */
#define CONFIG_SYS_INIT_SP_ADDR (CONFIG_SYS_TEXT_BASE + 0xFF0000)
/*
* I2C
*/
#define CONFIG_I2C_MV
#define CONFIG_SYS_I2C_SLAVE 0x0
/*
* SPI Flash configuration
*/
#define CONFIG_ENV_SPI_BUS 0
#define CONFIG_ENV_SPI_CS 0
/* SPI NOR flash default params, used by sf commands */
#define CONFIG_SF_DEFAULT_SPEED 20000000
#define CONFIG_SF_DEFAULT_MODE SPI_MODE_0
#define CONFIG_ENV_SPI_MODE CONFIG_SF_DEFAULT_MODE
/* Environment in SPI NOR flash */
#define CONFIG_ENV_OFFSET 0x180000 /* as Marvell U-Boot version */
#define CONFIG_ENV_SIZE (64 << 10) /* 64KiB */
#define CONFIG_ENV_SECT_SIZE (64 << 10) /* 64KiB sectors */
/*
* Ethernet Driver configuration
*/
#define CONFIG_ENV_OVERWRITE /* ethaddr can be reprogrammed */
#define CONFIG_ARP_TIMEOUT 200
#define CONFIG_NET_RETRY_COUNT 50
#define CONFIG_PHY_MARVELL
#define CONFIG_USB_MAX_CONTROLLER_COUNT (3 + 3)
#define BOOT_TARGET_DEVICES(func) \
func(MMC, mmc, 0) \
func(USB, usb, 0) \
func(PXE, pxe, na) \
func(DHCP, dhcp, na)
#include <config_distro_bootcmd.h>
#define CONFIG_EXTRA_ENV_SETTINGS \
"scriptaddr=0x4d00000\0" \
"pxefile_addr_r=0x4e00000\0" \
"fdt_addr_r=0x4f00000\0" \
"kernel_addr_r=0x5000000\0" \
"ramdisk_addr_r=0x8000000\0" \
BOOTENV
#endif /* _CONFIG_TURRIS_MOX_H */