ARM: mvebu: a38x: sync ddr training code with upstream

This syncs drivers/ddr/marvell/a38x/ with the mv_ddr-armada-17.10 branch
of https://github.com/MarvellEmbeddedProcessors/mv-ddr-marvell.git.

The upstream code is incorporated omitting the ddr4 and apn806 and
folding the nested a38x directory up one level. After that a
semi-automated step is used to drop unused features with unifdef

  find drivers/ddr/marvell/a38x/ -name '*.[ch]' | \
    xargs unifdef -m -UMV_DDR -UMV_DDR_ATF -UCONFIG_DDR4 \
		-UCONFIG_APN806 -UCONFIG_MC_STATIC \
		-UCONFIG_MC_STATIC_PRINT -UCONFIG_PHY_STATIC \
		-UCONFIG_64BIT

INTER_REGS_BASE is updated to be defined as SOC_REGS_PHY_BASE.

Some now empty files are removed and the ternary license is replaced
with a SPDX GPL-2.0+ identifier.

Signed-off-by: Chris Packham <judge.packham@gmail.com>
Signed-off-by: Stefan Roese <sr@denx.de>
This commit is contained in:
Chris Packham 2018-05-10 13:28:29 +12:00 committed by Stefan Roese
parent 00a7767766
commit 2b4ffbf6b4
56 changed files with 7952 additions and 5147 deletions

View file

@ -27,7 +27,7 @@
# include <wdt.h>
#endif
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -200,7 +200,8 @@ static bool omnia_read_eeprom(struct omnia_eeprom *oep)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map_1g = {
static struct mv_ddr_topology_map board_topology_map_1g = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -209,17 +210,19 @@ static struct hws_topology_map board_topology_map_1g = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_NORMAL, /* temperature */
HWS_TIM_2T} }, /* timing (force 2t) */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_NORMAL} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
static struct hws_topology_map board_topology_map_2g = {
static struct mv_ddr_topology_map board_topology_map_2g = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -228,17 +231,18 @@ static struct hws_topology_map board_topology_map_2g = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_8G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_8GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_NORMAL, /* temperature */
HWS_TIM_2T} }, /* timing (force 2t) */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_NORMAL} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
static int mem = 0;
struct omnia_eeprom oep;

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -55,7 +55,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -64,17 +65,18 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1866L, /* speed_bin */
BUS_WIDTH_8, /* memory_width */
MEM_2G, /* mem_size */
MV_DDR_DEV_WIDTH_8BIT, /* memory_width */
MV_DDR_DIE_CAP_2GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -76,7 +76,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -85,17 +86,18 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1866L, /* speed_bin */
BUS_WIDTH_8, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_8BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -12,7 +12,7 @@
#include <asm/arch/cpu.h>
#include <asm-generic/gpio.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include "../arch/arm/mach-mvebu/serdes/a38x/high_speed_env_spec.h"
#include "keyprogram.h"
@ -39,7 +39,8 @@ DECLARE_GLOBAL_DATA_PTR;
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map ddr_topology_map = {
static struct mv_ddr_topology_map ddr_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -48,14 +49,16 @@ static struct hws_topology_map ddr_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_533, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
static struct serdes_map serdes_topology_map[] = {
@ -121,7 +124,7 @@ void board_pex_config(void)
#endif
}
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
return &ddr_topology_map;
}

View file

@ -11,7 +11,7 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
#include "../drivers/ddr/marvell/a38x/ddr3_init.h"
#include <../serdes/a38x/high_speed_env_spec.h>
DECLARE_GLOBAL_DATA_PTR;
@ -69,7 +69,8 @@ int hws_board_topology_load(struct serdes_map **serdes_map_array, u8 *count)
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
static struct mv_ddr_topology_map board_topology_map = {
DEBUG_LEVEL_ERROR,
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
@ -78,17 +79,18 @@ static struct hws_topology_map board_topology_map = {
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1600K, /* speed_bin */
BUS_WIDTH_16, /* memory_width */
MEM_4G, /* mem_size */
MV_DDR_DEV_WIDTH_16BIT, /* memory_width */
MV_DDR_DIE_CAP_4GBIT, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_wl cas_l */
HWS_TEMP_LOW, /* temperature */
HWS_TIM_DEFAULT} }, /* timing */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
MV_DDR_TEMP_LOW} }, /* temperature */
BUS_MASK_32BIT, /* Busses mask */
MV_DDR_CFG_DEFAULT, /* ddr configuration data source */
{ {0} }, /* raw spd data */
{0} /* timing parameters */
};
struct hws_topology_map *ddr3_get_topology_map(void)
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;

View file

@ -1,9 +1,8 @@
# SPDX-License-Identifier: GPL-2.0+
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x.o
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x_training.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_plat.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_sys_env_lib.o
obj-$(CONFIG_SPL_BUILD) += ddr3_debug.o
obj-$(CONFIG_SPL_BUILD) += ddr3_hws_hw_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_init.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_bist.o
@ -13,5 +12,8 @@ obj-$(CONFIG_SPL_BUILD) += ddr3_training_hw_algo.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_ip_engine.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_leveling.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_pbs.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_static.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_build_message.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_common.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_spd.o
obj-$(CONFIG_SPL_BUILD) += mv_ddr_topology.o
obj-$(CONFIG_SPL_BUILD) += xor.o

View file

@ -1,685 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define A38X_NUMBER_OF_INTERFACES 5
#define SAR_DEV_ID_OFFS 27
#define SAR_DEV_ID_MASK 0x7
/* Thermal Sensor Registers */
#define TSEN_STATE_REG 0xe4070
#define TSEN_STATE_OFFSET 31
#define TSEN_STATE_MASK (0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG 0xe4074
#define TSEN_CONF_RST_OFFSET 8
#define TSEN_CONF_RST_MASK (0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG 0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET 10
#define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET 0
#define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
static struct dfx_access interface_map[] = {
/* Pipe Client */
{ 0, 17 },
{ 1, 7 },
{ 1, 11 },
{ 0, 3 },
{ 1, 25 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 }
};
/* This array hold the board round trip delay (DQ and CK) per <interface,bus> */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
/* 1st board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 }, /* ECC PUP */
/* 2nd board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 } /* ECC PUP */
};
static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
0x3, /* DDR_FREQ_100 */
0x4, /* DDR_FREQ_400 */
0x4, /* DDR_FREQ_533 */
0x5, /* DDR_FREQ_667 */
0x5, /* DDR_FREQ_800 */
0x5, /* DDR_FREQ_933 */
0x5, /* DDR_FREQ_1066 */
0x3, /* DDR_FREQ_311 */
0x3, /* DDR_FREQ_333 */
0x4, /* DDR_FREQ_467 */
0x5, /* DDR_FREQ_850 */
0x5, /* DDR_FREQ_600 */
0x3, /* DDR_FREQ_300 */
0x5, /* DDR_FREQ_900 */
0x3, /* DDR_FREQ_360 */
0x5 /* DDR_FREQ_1000 */
};
static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
/*TBD*/ 0x1, /* DDR_FREQ_100 */
0x2, /* DDR_FREQ_400 */
0x2, /* DDR_FREQ_533 */
0x2, /* DDR_FREQ_667 */
0x2, /* DDR_FREQ_800 */
0x3, /* DDR_FREQ_933 */
0x3, /* DDR_FREQ_1066 */
0x1, /* DDR_FREQ_311 */
0x1, /* DDR_FREQ_333 */
0x2, /* DDR_FREQ_467 */
0x2, /* DDR_FREQ_850 */
0x2, /* DDR_FREQ_600 */
0x1, /* DDR_FREQ_300 */
0x2, /* DDR_FREQ_900 */
0x1, /* DDR_FREQ_360 */
0x2 /* DDR_FREQ_1000 */
};
static u16 a38x_vco_freq_per_sar[] = {
666, /* 0 */
1332,
800,
1600,
1066,
2132,
1200,
2400,
1332,
1332,
1500,
1500,
1600, /* 12 */
1600,
1700,
1700,
1866,
1866,
1800, /* 18 */
2000,
2000,
4000,
2132,
2132,
2300,
2300,
2400,
2400,
2500,
2500,
800
};
u32 pipe_multicast_mask;
u32 dq_bit_map_2_phy_pin[] = {
1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
};
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
enum hws_ddr_freq freq);
/*
* Read temperature TJ value
*/
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
int reg = 0;
/* Initiates TSEN hardware reset once */
if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
mdelay(10);
/* Check if the readout field is valid */
if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
printf("%s: TSEN not ready\n", __func__);
return 0;
}
reg = reg_read(TSEN_STATUS_REG);
reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
/*
* Name: ddr3_tip_a38x_get_freq_config.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info
*freq_config_info)
{
if (a38x_bw_per_freq[freq] == 0xff)
return MV_NOT_SUPPORTED;
if (freq_config_info == NULL)
return MV_BAD_PARAM;
freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
freq_config_info->is_supported = 1;
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_pipe_enable.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, int enable)
{
u32 data_value, pipe_enable_mask = 0;
if (enable == 0) {
pipe_enable_mask = 0;
} else {
if (interface_access == ACCESS_TYPE_MULTICAST)
pipe_enable_mask = pipe_multicast_mask;
else
pipe_enable_mask = (1 << interface_map[if_id].pipe);
}
CHECK_STATUS(ddr3_tip_reg_read
(dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
data_value = (data_value & (~0xff)) | pipe_enable_mask;
CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_write.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data_value,
u32 mask)
{
u32 ui_data_read;
if (mask != MASK_ALL_BITS) {
CHECK_STATUS(ddr3_tip_a38x_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
&ui_data_read, MASK_ALL_BITS));
data_value = (ui_data_read & (~mask)) | (data_value & mask);
}
reg_write(reg_addr, data_value);
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_read.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
*data = reg_read(reg_addr) & mask;
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_select_ddr_controller.
* Desc: Enable/Disable access to Marvell's server.
* Args: dev_num - device number
* enable - whether to enable or disable the server
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
u32 reg;
reg = reg_read(CS_ENABLE_REG);
if (enable)
reg |= (1 << 6);
else
reg &= ~(1 << 6);
reg_write(CS_ENABLE_REG, reg);
return MV_OK;
}
/*
* Name: ddr3_tip_init_a38x_silicon.
* Desc: init Training SW DB.
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
struct hws_tip_config_func_db config_func;
enum hws_ddr_freq ddr_freq;
int status;
struct hws_topology_map *tm = ddr3_get_topology_map();
/* new read leveling version */
config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
config_func.tip_dunit_mux_select_func =
ddr3_tip_a38x_select_ddr_controller;
config_func.tip_get_freq_config_info_func =
ddr3_tip_a38x_get_freq_config;
config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
ddr3_tip_init_config_func(dev_num, &config_func);
ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
if (MV_OK != status) {
DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
("DDR3 silicon get target frequency - FAILED 0x%x\n",
status));
return status;
}
rl_version = 1;
mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
LOAD_PATTERN_MASK_BIT |
SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
/* LOAD_PATTERN_2_MASK_BIT | */
WRITE_LEVELING_SUPP_MASK_BIT |
READ_LEVELING_MASK_BIT |
PBS_RX_MASK_BIT |
PBS_TX_MASK_BIT |
SET_TARGET_FREQ_MASK_BIT |
WRITE_LEVELING_TF_MASK_BIT |
WRITE_LEVELING_SUPP_TF_MASK_BIT |
READ_LEVELING_TF_MASK_BIT |
CENTRALIZATION_RX_MASK_BIT |
CENTRALIZATION_TX_MASK_BIT);
rl_mid_freq_wa = 1;
if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
mask_tune_func = (WRITE_LEVELING_MASK_BIT |
LOAD_PATTERN_2_MASK_BIT |
WRITE_LEVELING_SUPP_MASK_BIT |
READ_LEVELING_MASK_BIT |
PBS_RX_MASK_BIT |
PBS_TX_MASK_BIT |
CENTRALIZATION_RX_MASK_BIT |
CENTRALIZATION_TX_MASK_BIT);
rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
}
/* Supplementary not supported for ECC modes */
if (1 == ddr3_if_ecc_enabled()) {
mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
mask_tune_func &= ~PBS_TX_MASK_BIT;
mask_tune_func &= ~PBS_RX_MASK_BIT;
}
if (ck_delay == -1)
ck_delay = 160;
if (ck_delay_16 == -1)
ck_delay_16 = 160;
ca_delay = 0;
delay_enable = 1;
calibration_update_control = 1;
init_freq = tm->interface_params[first_active_if].memory_freq;
ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
return MV_OK;
}
int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
u32 if_id = 0;
enum hws_ddr_freq freq;
ddr3_tip_a38x_get_init_freq(dev_num, &freq);
tm->interface_params[if_id].memory_freq = freq;
/*
* re-calc topology parameters according to topology updates
* (if needed)
*/
CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));
return MV_OK;
}
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
if (NULL == tm)
return MV_FAIL;
ddr3_a38x_update_topology_map(dev_num, tm);
ddr3_tip_init_a38x_silicon(dev_num, board_id);
return MV_OK;
}
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
{
u32 reg;
/* Read sample at reset setting */
reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
switch (reg) {
case 0x0:
case 0x1:
*freq = DDR_FREQ_333;
break;
case 0x2:
case 0x3:
*freq = DDR_FREQ_400;
break;
case 0x4:
case 0xd:
*freq = DDR_FREQ_533;
break;
case 0x6:
*freq = DDR_FREQ_600;
break;
case 0x8:
case 0x11:
case 0x14:
*freq = DDR_FREQ_667;
break;
case 0xc:
case 0x15:
case 0x1b:
*freq = DDR_FREQ_800;
break;
case 0x10:
*freq = DDR_FREQ_933;
break;
case 0x12:
*freq = DDR_FREQ_900;
break;
case 0x13:
*freq = DDR_FREQ_900;
break;
default:
*freq = 0;
return MV_NOT_SUPPORTED;
}
return MV_OK;
}
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
u32 reg;
/* Read sample at reset setting */
reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
switch (reg) {
case 0x0:
case 0x1:
/* Medium is same as TF to run PBS in this freq */
*freq = DDR_FREQ_333;
break;
case 0x2:
case 0x3:
/* Medium is same as TF to run PBS in this freq */
*freq = DDR_FREQ_400;
break;
case 0x4:
case 0xd:
*freq = DDR_FREQ_533;
break;
case 0x8:
case 0x11:
case 0x14:
*freq = DDR_FREQ_333;
break;
case 0xc:
case 0x15:
case 0x1b:
*freq = DDR_FREQ_400;
break;
case 0x6:
*freq = DDR_FREQ_300;
break;
case 0x12:
*freq = DDR_FREQ_360;
break;
case 0x13:
*freq = DDR_FREQ_400;
break;
default:
*freq = 0;
return MV_NOT_SUPPORTED;
}
return MV_OK;
}
u32 ddr3_tip_get_init_freq(void)
{
enum hws_ddr_freq freq;
ddr3_tip_a38x_get_init_freq(0, &freq);
return freq;
}
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
enum hws_ddr_freq frequency)
{
u32 divider = 0;
u32 sar_val;
if (if_id != 0) {
DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
("A38x does not support interface 0x%x\n",
if_id));
return MV_BAD_PARAM;
}
/* get VCO freq index */
sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];
/* Set Sync mode */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
0x1000));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
0x200));
/* cpupll_clkdiv_reset_mask */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
0xff));
/* cpupll_clkdiv_reload_smooth */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
(0x2 << 8), (0xff << 8)));
/* cpupll_clkdiv_relax_en */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
(0x2 << 24), (0xff << 24)));
/* write the divider */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
(divider << 8), (0x3f << 8)));
/* set cpupll_clkdiv_reload_ratio */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
(1 << 8), (1 << 8)));
/* undet cpupll_clkdiv_reload_ratio */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
(1 << 8)));
/* clear cpupll_clkdiv_reload_force */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
(0xff << 8)));
/* clear cpupll_clkdiv_relax_en */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
(0xff << 24)));
/* clear cpupll_clkdiv_reset_mask */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
0xff));
/* Dunit training clock + 1:1 mode */
if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
(1 << 16), (1 << 16)));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
(0 << 15), (1 << 15)));
} else {
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
0, (1 << 16)));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
(1 << 15), (1 << 15)));
}
return MV_OK;
}
/*
* external read from memory
*/
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *data)
{
u32 burst_num;
for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
data[burst_num] = readl(reg_addr + 4 * burst_num);
return MV_OK;
}
/*
* external write to memory
*/
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *data) {
u32 burst_num;
for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
writel(data[burst_num], reg_addr + 4 * burst_num);
return MV_OK;
}
int ddr3_silicon_pre_init(void)
{
return ddr3_silicon_init();
}
int ddr3_post_run_alg(void)
{
return MV_OK;
}
int ddr3_silicon_post_init(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
/* Set half bus width */
if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
}
return MV_OK;
}
int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
info_ptr->device_id = 0x6800;
info_ptr->ck_delay = ck_delay;
return MV_OK;
}

View file

@ -1,92 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_A38X_H
#define _DDR3_A38X_H
#define MAX_INTERFACE_NUM 1
#define MAX_BUS_NUM 5
#include "ddr3_hws_hw_training_def.h"
#define ECC_SUPPORT
/* right now, we're not supporting this in mainline */
#undef SUPPORT_STATIC_DUNIT_CONFIG
/* Controller bus divider: 1 for 32 bit, 2 for 64 bit */
#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER 1
/* Tune internal training params values */
#define TUNE_TRAINING_PARAMS_CK_DELAY 160
#define TUNE_TRAINING_PARAMS_CK_DELAY_16 160
#define TUNE_TRAINING_PARAMS_PFINGER 41
#define TUNE_TRAINING_PARAMS_NFINGER 43
#define TUNE_TRAINING_PARAMS_PHYREG3VAL 0xa
#define MARVELL_BOARD MARVELL_BOARD_ID_BASE
#define REG_DEVICE_SAR1_ADDR 0xe4204
#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET 17
#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK 0x1f
/* DRAM Windows */
#define REG_XBAR_WIN_5_CTRL_ADDR 0x20050
#define REG_XBAR_WIN_5_BASE_ADDR 0x20054
/* DRAM Windows */
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0
#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4
#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win))
#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win))
/* SatR defines to change topology bus width and ECC configuration */
#define DDR_SATR_CONFIG_MASK_WIDTH 0x8
#define DDR_SATR_CONFIG_MASK_ECC 0x10
#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20
#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600
#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ
/* Matrix enables DRAM modes (bus width/ECC) per boardId */
#define TOPOLOGY_UPDATE_32BIT 0
#define TOPOLOGY_UPDATE_32BIT_ECC 1
#define TOPOLOGY_UPDATE_16BIT 2
#define TOPOLOGY_UPDATE_16BIT_ECC 3
#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4
#define TOPOLOGY_UPDATE { \
/* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \
{1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \
{1, 1, 1, 1, 1}, /* DB_68XX_ID */ \
{1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \
{0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \
{1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \
};
enum {
CPU_1066MHZ_DDR_400MHZ,
CPU_RESERVED_DDR_RESERVED0,
CPU_667MHZ_DDR_667MHZ,
CPU_800MHZ_DDR_800MHZ,
CPU_RESERVED_DDR_RESERVED1,
CPU_RESERVED_DDR_RESERVED2,
CPU_RESERVED_DDR_RESERVED3,
LAST_FREQ
};
#define ACTIVE_INTERFACE_MASK 0x1
#endif /* _DDR3_A38X_H */

View file

@ -1,21 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_A38X_TOPOLOGY_H
#define _DDR3_A38X_TOPOLOGY_H
#include "ddr_topology_def.h"
/* Bus mask variants */
#define BUS_MASK_32BIT 0xf
#define BUS_MASK_32BIT_ECC 0x1f
#define BUS_MASK_16BIT 0x3
#define BUS_MASK_16BIT_ECC 0x13
#define BUS_MASK_16BIT_ECC_PUP3 0xb
#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
#endif /* _DDR3_A38X_TOPOLOGY_H */

View file

@ -1,39 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/*
* Name: ddr3_tip_init_silicon
* Desc: initiate silicon parameters
* Args:
* Notes:
* Returns: required value
*/
int ddr3_silicon_init(void)
{
int status;
static int init_done;
if (init_done == 1)
return MV_OK;
status = ddr3_tip_init_a38x(0, 0);
if (MV_OK != status) {
printf("DDR3 A38x silicon init - FAILED 0x%x\n", status);
return status;
}
init_done = 1;
return MV_OK;
}

File diff suppressed because it is too large Load diff

View file

@ -1,147 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
int ddr3_if_ecc_enabled(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))
return 1;
else
return 0;
}
int ddr3_pre_algo_config(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
/* Set Bus3 ECC training mode */
if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) {
/* Set Bus3 ECC MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_PINS_MUX, 0x100, 0x100));
}
/* Set regular ECC training mode (bus4 and bus 3) */
if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
(DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
/* Enable ECC Write MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x100, 0x100));
/* General ECC enable */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_CONFIG_ADDR, 0x40000, 0x40000));
/* Disable Read Data ECC MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x0, 0x2));
}
return MV_OK;
}
int ddr3_post_algo_config(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
int status;
status = ddr3_post_run_alg();
if (MV_OK != status) {
printf("DDR3 Post Run Alg - FAILED 0x%x\n", status);
return status;
}
/* Un_set ECC training mode */
if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
(DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
/* Disable ECC Write MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x0, 0x100));
/* General ECC and Bus3 ECC MUX remains enabled */
}
return MV_OK;
}
/*
 * ddr3_hws_hw_training() - run the full HWS DDR3 hardware training flow
 *
 * Sequence: silicon pre-init, D-unit controller init, silicon post-init,
 * pre-algorithm ECC setup, the dynamic training algorithm itself, then the
 * post-algorithm ECC restore.  Each step aborts the flow, prints its status
 * and returns it on failure.  Returns MV_OK when all steps succeed.
 */
int ddr3_hws_hw_training(void)
{
	enum hws_algo_type algo_mode = ALGO_TYPE_DYNAMIC;
	int status;
	struct init_cntr_param init_param;

	status = ddr3_silicon_pre_init();
	if (MV_OK != status) {
		printf("DDR3 Pre silicon Config - FAILED 0x%x\n", status);
		return status;
	}

	init_param.do_mrs_phy = 1;
	/* A38x/A39x use a 32-bit D-unit; other SoCs a 64-bit one */
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
	init_param.is_ctrl64_bit = 0;
#else
	init_param.is_ctrl64_bit = 1;
#endif
	/* On these SoCs the PHY is initialized as part of controller init */
#if defined(CONFIG_ALLEYCAT3) || defined(CONFIG_ARMADA_38X) || \
	defined(CONFIG_ARMADA_39X)
	init_param.init_phy = 1;
#else
	init_param.init_phy = 0;
#endif
	init_param.msys_init = 1;
	status = hws_ddr3_tip_init_controller(0, &init_param);
	if (MV_OK != status) {
		printf("DDR3 init controller - FAILED 0x%x\n", status);
		return status;
	}

	status = ddr3_silicon_post_init();
	if (MV_OK != status) {
		printf("DDR3 Post Init - FAILED 0x%x\n", status);
		return status;
	}

	status = ddr3_pre_algo_config();
	if (MV_OK != status) {
		printf("DDR3 Pre Algo Config - FAILED 0x%x\n", status);
		return status;
	}

	/* run algorithm in order to configure the PHY */
	status = hws_ddr3_tip_run_alg(0, algo_mode);
	if (MV_OK != status) {
		printf("DDR3 run algorithm - FAILED 0x%x\n", status);
		return status;
	}

	status = ddr3_post_algo_config();
	if (MV_OK != status) {
		printf("DDR3 Post Algo Config - FAILED 0x%x\n", status);
		return status;
	}

	return MV_OK;
}

View file

@ -1,48 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_HW_TRAINING_H
#define _DDR3_HWS_HW_TRAINING_H
/*
 * struct used for DLB configuration array;
 * each entry is one register write, arrays end with a zero reg_addr entry
 */
struct dlb_config {
	u32 reg_addr;	/* DLB register address to program */
	u32 reg_data;	/* value written to reg_addr */
};
/*
 * Topology update structure - a set of optional overrides for the DDR
 * topology.  The update_* flags presumably mark which of the paired value
 * fields are valid (values come from the TOPOLOGY_UPDATE_* defines below)
 * - confirm against callers.
 */
struct topology_update_info {
	int update_ecc;			/* flag: apply 'ecc' */
	u8 ecc;				/* TOPOLOGY_UPDATE_ECC_ON/OFF */
	int update_width;		/* flag: apply 'width' */
	u8 width;			/* TOPOLOGY_UPDATE_WIDTH_16/32BIT */
	int update_ecc_pup3_mode;	/* flag: apply ECC PUP offset */
	u8 ecc_pup_mode_offset;		/* TOPOLOGY_UPDATE_ECC_OFFSET_PUP3/4 */
};
/* Topology update defines */
#define TOPOLOGY_UPDATE_WIDTH_16BIT 1
#define TOPOLOGY_UPDATE_WIDTH_32BIT 0
#define TOPOLOGY_UPDATE_WIDTH_32BIT_MASK 0xf
#define TOPOLOGY_UPDATE_WIDTH_16BIT_MASK 0x3
#define TOPOLOGY_UPDATE_ECC_ON 1
#define TOPOLOGY_UPDATE_ECC_OFF 0
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP4 4
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP3 3
/*
* 1. L2 filter should be set at binary header to 0xd000000,
* to avoid conflict with internal register IO.
* 2. U-Boot modifies internal registers base to 0xf100000,
* and than should update L2 filter accordingly to 0xf000000 (3.75 GB)
*/
/* temporary limit l2 filter to 3GiB (LSP issue) */
#define L2_FILTER_FOR_MAX_MEMORY_SIZE 0xc0000000
#define ADDRESS_FILTERING_END_REGISTER 0x8c04
#define SUB_VERSION 0
#endif /* _DDR3_HWS_HW_TRAINING_H */

View file

@ -1,461 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_HW_TRAINING_DEF_H
#define _DDR3_HWS_HW_TRAINING_DEF_H
#define SAR_DDR3_FREQ_MASK 0xfe00000
#define SAR_CPU_FAB_GET(cpu, fab) (((cpu & 0x7) << 21) | \
((fab & 0xf) << 24))
#define MAX_CS 4
#define MIN_DIMM_ADDR 0x50
#define FAR_END_DIMM_ADDR 0x50
#define MAX_DIMM_ADDR 0x60
#define SDRAM_CS_SIZE 0xfffffff
#define SDRAM_CS_BASE 0x0
#define SDRAM_DIMM_SIZE 0x80000000
#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100))
#define CPU_MRVL_ID_OFFSET 0x10
#define SAR1_CPU_CORE_MASK 0x38000000
#define SAR1_CPU_CORE_OFFSET 27
#define NEW_FABRIC_TWSI_ADDR 0x4e
#ifdef DB_784MP_GP
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4e
#else
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4f
#endif
#define MV_MAX_DDR3_STATIC_SIZE 50
#define MV_DDR3_MODES_NUMBER 30
#define RESUME_RL_PATTERNS_ADDR 0xfe0000
#define RESUME_RL_PATTERNS_SIZE 0x100
#define RESUME_TRAINING_VALUES_ADDR (RESUME_RL_PATTERNS_ADDR + \
RESUME_RL_PATTERNS_SIZE)
#define RESUME_TRAINING_VALUES_MAX 0xcd0
#define BOOT_INFO_ADDR (RESUME_RL_PATTERNS_ADDR + 0x1000)
#define CHECKSUM_RESULT_ADDR (BOOT_INFO_ADDR + 0x1000)
#define NUM_OF_REGISTER_ADDR (CHECKSUM_RESULT_ADDR + 4)
#define SUSPEND_MAGIC_WORD 0xdeadb002
#define REGISTER_LIST_END 0xffffffff
/* MISC */
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
/* DDR */
#define REG_SDRAM_CONFIG_ADDR 0x1400
#define REG_SDRAM_CONFIG_MASK 0x9fffffff
#define REG_SDRAM_CONFIG_RFRS_MASK 0x3fff
#define REG_SDRAM_CONFIG_WIDTH_OFFS 15
#define REG_SDRAM_CONFIG_REGDIMM_OFFS 17
#define REG_SDRAM_CONFIG_ECC_OFFS 18
#define REG_SDRAM_CONFIG_IERR_OFFS 19
#define REG_SDRAM_CONFIG_PUPRSTDIV_OFFS 28
#define REG_SDRAM_CONFIG_RSTRD_OFFS 30
#define REG_SDRAM_PINS_MUX 0x19d4
#define REG_DUNIT_CTRL_LOW_ADDR 0x1404
#define REG_DUNIT_CTRL_LOW_2T_OFFS 3
#define REG_DUNIT_CTRL_LOW_2T_MASK 0x3
#define REG_DUNIT_CTRL_LOW_DPDE_OFFS 14
#define REG_SDRAM_TIMING_LOW_ADDR 0x1408
#define REG_SDRAM_TIMING_HIGH_ADDR 0x140c
#define REG_SDRAM_TIMING_H_R2R_OFFS 7
#define REG_SDRAM_TIMING_H_R2R_MASK 0x3
#define REG_SDRAM_TIMING_H_R2W_W2R_OFFS 9
#define REG_SDRAM_TIMING_H_R2W_W2R_MASK 0x3
#define REG_SDRAM_TIMING_H_W2W_OFFS 11
#define REG_SDRAM_TIMING_H_W2W_MASK 0x1f
#define REG_SDRAM_TIMING_H_R2R_H_OFFS 19
#define REG_SDRAM_TIMING_H_R2R_H_MASK 0x7
#define REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS 22
#define REG_SDRAM_TIMING_H_R2W_W2R_H_MASK 0x7
#define REG_SDRAM_ADDRESS_CTRL_ADDR 0x1410
#define REG_SDRAM_ADDRESS_SIZE_OFFS 2
#define REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS 18
#define REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS 4
#define REG_SDRAM_OPEN_PAGES_ADDR 0x1414
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_SDRAM_OPERATION_ADDR 0x1418
#define REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS 24
#define REG_SDRAM_OPERATION_CWA_DATA_OFFS 20
#define REG_SDRAM_OPERATION_CWA_DATA_MASK 0xf
#define REG_SDRAM_OPERATION_CWA_RC_OFFS 16
#define REG_SDRAM_OPERATION_CWA_RC_MASK 0xf
#define REG_SDRAM_OPERATION_CMD_MR0 0xf03
#define REG_SDRAM_OPERATION_CMD_MR1 0xf04
#define REG_SDRAM_OPERATION_CMD_MR2 0xf08
#define REG_SDRAM_OPERATION_CMD_MR3 0xf09
#define REG_SDRAM_OPERATION_CMD_RFRS 0xf02
#define REG_SDRAM_OPERATION_CMD_CWA 0xf0e
#define REG_SDRAM_OPERATION_CMD_RFRS_DONE 0xf
#define REG_SDRAM_OPERATION_CMD_MASK 0xf
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_OUDDR3_TIMING_ADDR 0x142c
#define REG_SDRAM_MODE_ADDR 0x141c
#define REG_SDRAM_EXT_MODE_ADDR 0x1420
#define REG_DDR_CONT_HIGH_ADDR 0x1424
#define REG_ODT_TIME_LOW_ADDR 0x1428
#define REG_ODT_ON_CTL_RD_OFFS 12
#define REG_ODT_OFF_CTL_RD_OFFS 16
#define REG_SDRAM_ERROR_ADDR 0x1454
#define REG_SDRAM_AUTO_PWR_SAVE_ADDR 0x1474
#define REG_ODT_TIME_HIGH_ADDR 0x147c
#define REG_SDRAM_INIT_CTRL_ADDR 0x1480
#define REG_SDRAM_INIT_CTRL_OFFS 0
#define REG_SDRAM_INIT_CKE_ASSERT_OFFS 2
#define REG_SDRAM_INIT_RESET_DEASSERT_OFFS 3
#define REG_SDRAM_INIT_RESET_MASK_OFFS 1
#define REG_SDRAM_ODT_CTRL_LOW_ADDR 0x1494
#define REG_SDRAM_ODT_CTRL_HIGH_ADDR 0x1498
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK 0x0
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_ENA 0x3
#define REG_DUNIT_ODT_CTRL_ADDR 0x149c
#define REG_DUNIT_ODT_CTRL_OVRD_OFFS 8
#define REG_DUNIT_ODT_CTRL_OVRD_VAL_OFFS 9
#define REG_DRAM_FIFO_CTRL_ADDR 0x14a0
#define REG_DRAM_AXI_CTRL_ADDR 0x14a8
#define REG_DRAM_AXI_CTRL_AXIDATABUSWIDTH_OFFS 0
#define REG_METAL_MASK_ADDR 0x14b0
#define REG_METAL_MASK_MASK 0xdfffffff
#define REG_METAL_MASK_RETRY_OFFS 0
#define REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR 0x14c0
#define REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR 0x14c4
#define REG_DRAM_VER_CAL_MACHINE_CTRL_ADDR 0x14c8
#define REG_DRAM_MAIN_PADS_CAL_ADDR 0x14cc
#define REG_DRAM_HOR_CAL_MACHINE_CTRL_ADDR 0x17c8
#define REG_CS_SIZE_SCRATCH_ADDR 0x1504
#define REG_DYNAMIC_POWER_SAVE_ADDR 0x1520
#define REG_DDR_IO_ADDR 0x1524
#define REG_DDR_IO_CLK_RATIO_OFFS 15
#define REG_DFS_ADDR 0x1528
#define REG_DFS_DLLNEXTSTATE_OFFS 0
#define REG_DFS_BLOCK_OFFS 1
#define REG_DFS_SR_OFFS 2
#define REG_DFS_ATSR_OFFS 3
#define REG_DFS_RECONF_OFFS 4
#define REG_DFS_CL_NEXT_STATE_OFFS 8
#define REG_DFS_CL_NEXT_STATE_MASK 0xf
#define REG_DFS_CWL_NEXT_STATE_OFFS 12
#define REG_DFS_CWL_NEXT_STATE_MASK 0x7
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
#define START_BURST_IN_ADDR 1
#define REG_DRAM_TRAINING_SHADOW_ADDR 0x18488
#define REG_DRAM_TRAINING_ADDR 0x15b0
#define REG_DRAM_TRAINING_LOW_FREQ_OFFS 0
#define REG_DRAM_TRAINING_PATTERNS_OFFS 4
#define REG_DRAM_TRAINING_MED_FREQ_OFFS 2
#define REG_DRAM_TRAINING_WL_OFFS 3
#define REG_DRAM_TRAINING_RL_OFFS 6
#define REG_DRAM_TRAINING_DQS_RX_OFFS 15
#define REG_DRAM_TRAINING_DQS_TX_OFFS 16
#define REG_DRAM_TRAINING_CS_OFFS 20
#define REG_DRAM_TRAINING_RETEST_OFFS 24
#define REG_DRAM_TRAINING_DFS_FREQ_OFFS 27
#define REG_DRAM_TRAINING_DFS_REQ_OFFS 29
#define REG_DRAM_TRAINING_ERROR_OFFS 30
#define REG_DRAM_TRAINING_AUTO_OFFS 31
#define REG_DRAM_TRAINING_RETEST_PAR 0x3
#define REG_DRAM_TRAINING_RETEST_MASK 0xf8ffffff
#define REG_DRAM_TRAINING_CS_MASK 0xff0fffff
#define REG_DRAM_TRAINING_PATTERNS_MASK 0xff0f0000
#define REG_DRAM_TRAINING_1_ADDR 0x15b4
#define REG_DRAM_TRAINING_1_TRNBPOINT_OFFS 16
#define REG_DRAM_TRAINING_2_ADDR 0x15b8
#define REG_DRAM_TRAINING_2_OVERRUN_OFFS 17
#define REG_DRAM_TRAINING_2_FIFO_RST_OFFS 4
#define REG_DRAM_TRAINING_2_RL_MODE_OFFS 3
#define REG_DRAM_TRAINING_2_WL_MODE_OFFS 2
#define REG_DRAM_TRAINING_2_ECC_MUX_OFFS 1
#define REG_DRAM_TRAINING_2_SW_OVRD_OFFS 0
#define REG_DRAM_TRAINING_PATTERN_BASE_ADDR 0x15bc
#define REG_DRAM_TRAINING_PATTERN_BASE_OFFS 3
#define REG_TRAINING_DEBUG_2_ADDR 0x15c4
#define REG_TRAINING_DEBUG_2_OFFS 16
#define REG_TRAINING_DEBUG_2_MASK 0x3
#define REG_TRAINING_DEBUG_3_ADDR 0x15c8
#define REG_TRAINING_DEBUG_3_OFFS 3
#define REG_TRAINING_DEBUG_3_MASK 0x7
#define MR_CS_ADDR_OFFS 4
#define REG_DDR3_MR0_ADDR 0x15d0
#define REG_DDR3_MR0_CS_ADDR 0x1870
#define REG_DDR3_MR0_CL_MASK 0x74
#define REG_DDR3_MR0_CL_OFFS 2
#define REG_DDR3_MR0_CL_HIGH_OFFS 3
#define CL_MASK 0xf
#define REG_DDR3_MR1_ADDR 0x15d4
#define REG_DDR3_MR1_CS_ADDR 0x1874
#define REG_DDR3_MR1_RTT_MASK 0xfffffdbb
#define REG_DDR3_MR1_DLL_ENA_OFFS 0
#define REG_DDR3_MR1_RTT_DISABLED 0x0
#define REG_DDR3_MR1_RTT_RZQ2 0x40
#define REG_DDR3_MR1_RTT_RZQ4 0x2
#define REG_DDR3_MR1_RTT_RZQ6 0x42
#define REG_DDR3_MR1_RTT_RZQ8 0x202
#define REG_DDR3_MR1_RTT_RZQ12 0x4
/* WL-disabled, OB-enabled */
#define REG_DDR3_MR1_OUTBUF_WL_MASK 0xffffef7f
/* Output Buffer Disabled */
#define REG_DDR3_MR1_OUTBUF_DIS_OFFS 12
#define REG_DDR3_MR1_WL_ENA_OFFS 7
#define REG_DDR3_MR1_WL_ENA 0x80 /* WL Enabled */
#define REG_DDR3_MR1_ODT_MASK 0xfffffdbb
#define REG_DDR3_MR2_ADDR 0x15d8
#define REG_DDR3_MR2_CS_ADDR 0x1878
#define REG_DDR3_MR2_CWL_OFFS 3
#define REG_DDR3_MR2_CWL_MASK 0x7
#define REG_DDR3_MR2_ODT_MASK 0xfffff9ff
#define REG_DDR3_MR3_ADDR 0x15dc
#define REG_DDR3_MR3_CS_ADDR 0x187c
#define REG_DDR3_RANK_CTRL_ADDR 0x15e0
#define REG_DDR3_RANK_CTRL_CS_ENA_MASK 0xf
#define REG_DDR3_RANK_CTRL_MIRROR_OFFS 4
#define REG_ZQC_CONF_ADDR 0x15e4
#define REG_DRAM_PHY_CONFIG_ADDR 0x15ec
#define REG_DRAM_PHY_CONFIG_MASK 0x3fffffff
#define REG_ODPG_CNTRL_ADDR 0x1600
#define REG_ODPG_CNTRL_OFFS 21
#define REG_PHY_LOCK_MASK_ADDR 0x1670
#define REG_PHY_LOCK_MASK_MASK 0xfffff000
#define REG_PHY_LOCK_STATUS_ADDR 0x1674
#define REG_PHY_LOCK_STATUS_LOCK_OFFS 9
#define REG_PHY_LOCK_STATUS_LOCK_MASK 0xfff
#define REG_PHY_LOCK_APLL_ADLL_STATUS_MASK 0x7ff
#define REG_PHY_REGISTRY_FILE_ACCESS_ADDR 0x16a0
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_WR 0xc0000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_RD 0x80000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE 0x80000000
#define REG_PHY_BC_OFFS 27
#define REG_PHY_CNTRL_OFFS 26
#define REG_PHY_CS_OFFS 16
#define REG_PHY_DQS_REF_DLY_OFFS 10
#define REG_PHY_PHASE_OFFS 8
#define REG_PHY_PUP_OFFS 22
#define REG_TRAINING_WL_ADDR 0x16ac
#define REG_TRAINING_WL_CS_MASK 0xfffffffc
#define REG_TRAINING_WL_UPD_OFFS 2
#define REG_TRAINING_WL_CS_DONE_OFFS 3
#define REG_TRAINING_WL_RATIO_MASK 0xffffff0f
#define REG_TRAINING_WL_1TO1 0x50
#define REG_TRAINING_WL_2TO1 0x10
#define REG_TRAINING_WL_DELAYEXP_MASK 0x20000000
#define REG_TRAINING_WL_RESULTS_MASK 0x000001ff
#define REG_TRAINING_WL_RESULTS_OFFS 20
#define REG_REGISTERED_DRAM_CTRL_ADDR 0x16d0
#define REG_REGISTERED_DRAM_CTRL_SR_FLOAT_OFFS 15
#define REG_REGISTERED_DRAM_CTRL_PARITY_MASK 0x3f
/* DLB */
#define REG_STATIC_DRAM_DLB_CONTROL 0x1700
#define DLB_BUS_OPTIMIZATION_WEIGHTS_REG 0x1704
#define DLB_AGING_REGISTER 0x1708
#define DLB_EVICTION_CONTROL_REG 0x170c
#define DLB_EVICTION_TIMERS_REGISTER_REG 0x1710
#define DLB_USER_COMMAND_REG 0x1714
#define DLB_BUS_WEIGHTS_DIFF_CS 0x1770
#define DLB_BUS_WEIGHTS_DIFF_BG 0x1774
#define DLB_BUS_WEIGHTS_SAME_BG 0x1778
#define DLB_BUS_WEIGHTS_RD_WR 0x177c
#define DLB_BUS_WEIGHTS_ATTR_SYS_PRIO 0x1780
#define DLB_MAIN_QUEUE_MAP 0x1784
#define DLB_LINE_SPLIT 0x1788
#define DLB_ENABLE 0x1
#define DLB_WRITE_COALESING (0x1 << 2)
#define DLB_AXI_PREFETCH_EN (0x1 << 3)
#define DLB_MBUS_PREFETCH_EN (0x1 << 4)
#define PREFETCH_N_LN_SZ_TR (0x1 << 6)
#define DLB_INTERJECTION_ENABLE (0x1 << 3)
/* CPU */
#define REG_BOOTROM_ROUTINE_ADDR 0x182d0
#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12
#define REG_DRAM_INIT_CTRL_STATUS_ADDR 0x18488
#define REG_DRAM_INIT_CTRL_TRN_CLK_OFFS 16
#define REG_CPU_DIV_CLK_CTRL_0_NEW_RATIO 0x000200ff
#define REG_DRAM_INIT_CTRL_STATUS_2_ADDR 0x1488
#define REG_CPU_DIV_CLK_CTRL_0_ADDR 0x18700
#define REG_CPU_DIV_CLK_CTRL_1_ADDR 0x18704
#define REG_CPU_DIV_CLK_CTRL_2_ADDR 0x18708
#define REG_CPU_DIV_CLK_CTRL_3_ADDR 0x1870c
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK 0xffffc0ff
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS 8
#define REG_CPU_DIV_CLK_CTRL_4_ADDR 0x18710
#define REG_CPU_DIV_CLK_STATUS_0_ADDR 0x18718
#define REG_CPU_DIV_CLK_ALL_STABLE_OFFS 8
#define REG_CPU_PLL_CTRL_0_ADDR 0x1871c
#define REG_CPU_PLL_STATUS_0_ADDR 0x18724
#define REG_CORE_DIV_CLK_CTRL_ADDR 0x18740
#define REG_CORE_DIV_CLK_STATUS_ADDR 0x18744
#define REG_DDRPHY_APLL_CTRL_ADDR 0x18780
#define REG_DDRPHY_APLL_CTRL_2_ADDR 0x18784
#define REG_SFABRIC_CLK_CTRL_ADDR 0x20858
#define REG_SFABRIC_CLK_CTRL_SMPL_OFFS 8
/* DRAM Windows */
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_FASTPATH_WIN_0_CTRL_ADDR 0x20184
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
/* SRAM */
#define REG_CDI_CONFIG_ADDR 0x20220
#define REG_SRAM_WINDOW_0_ADDR 0x20240
#define REG_SRAM_WINDOW_0_ENA_OFFS 0
#define REG_SRAM_WINDOW_1_ADDR 0x20244
#define REG_SRAM_L2_ENA_ADDR 0x8500
#define REG_SRAM_CLEAN_BY_WAY_ADDR 0x87bc
/* Timers */
#define REG_TIMERS_CTRL_ADDR 0x20300
#define REG_TIMERS_EVENTS_ADDR 0x20304
#define REG_TIMER0_VALUE_ADDR 0x20314
#define REG_TIMER1_VALUE_ADDR 0x2031c
#define REG_TIMER0_ENABLE_MASK 0x1
#define MV_BOARD_REFCLK_25MHZ 25000000
#define CNTMR_RELOAD_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x10 + (tmr * 8))
#define CNTMR_VAL_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x14 + (tmr * 8))
#define CNTMR_CTRL_REG(tmr) (REG_TIMERS_CTRL_ADDR)
#define CTCR_ARM_TIMER_EN_OFFS(timer) (timer * 2)
#define CTCR_ARM_TIMER_EN_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_EN(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_OFFS(timer) (1 + (timer * 2))
#define CTCR_ARM_TIMER_AUTO_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_EN(timer) (1 << CTCR_ARM_TIMER_AUTO_OFFS(timer))
/* PMU */
#define REG_PMU_I_F_CTRL_ADDR 0x1c090
#define REG_PMU_DUNIT_BLK_OFFS 16
#define REG_PMU_DUNIT_RFRS_OFFS 20
#define REG_PMU_DUNIT_ACK_OFFS 24
/* MBUS */
#define MBUS_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x420)
#define FABRIC_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x424)
#define MBUS_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x428)
#define FABRIC_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x42c)
#define REG_PM_STAT_MASK_ADDR 0x2210c
#define REG_PM_STAT_MASK_CPU0_IDLE_MASK_OFFS 16
#define REG_PM_EVENT_STAT_MASK_ADDR 0x22120
#define REG_PM_EVENT_STAT_MASK_DFS_DONE_OFFS 17
#define REG_PM_CTRL_CONFIG_ADDR 0x22104
#define REG_PM_CTRL_CONFIG_DFS_REQ_OFFS 18
#define REG_FABRIC_LOCAL_IRQ_MASK_ADDR 0x218c4
#define REG_FABRIC_LOCAL_IRQ_PMU_MASK_OFFS 18
/* Controller revision info */
#define PCI_CLASS_CODE_AND_REVISION_ID 0x008
#define PCCRIR_REVID_OFFS 0 /* Revision ID */
#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS)
/* Power Management Clock Gating Control Register */
#define POWER_MNG_CTRL_REG 0x18220
#define PMC_PEXSTOPCLOCK_OFFS(p) ((p) < 8 ? (5 + (p)) : (18 + (p)))
#define PMC_PEXSTOPCLOCK_MASK(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_EN(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_STOP(p) (0 << PMC_PEXSTOPCLOCK_OFFS(p))
/* TWSI */
#define TWSI_DATA_ADDR_MASK 0x7
#define TWSI_DATA_ADDR_OFFS 1
/* General */
#define MAX_CS 4
/* Frequencies */
#define FAB_OPT 21
#define CLK_CPU 12
#define CLK_VCO (2 * CLK_CPU)
#define CLK_DDR 12
/* CPU Frequencies: */
#define CLK_CPU_1000 0
#define CLK_CPU_1066 1
#define CLK_CPU_1200 2
#define CLK_CPU_1333 3
#define CLK_CPU_1500 4
#define CLK_CPU_1666 5
#define CLK_CPU_1800 6
#define CLK_CPU_2000 7
#define CLK_CPU_600 8
#define CLK_CPU_667 9
#define CLK_CPU_800 0xa
/* Extra Cpu Frequencies: */
#define CLK_CPU_1600 11
#define CLK_CPU_2133 12
#define CLK_CPU_2200 13
#define CLK_CPU_2400 14
#endif /* _DDR3_HWS_HW_TRAINING_DEF_H */

View file

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_HWS_SIL_TRAINING_H
#define _DDR3_HWS_SIL_TRAINING_H
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_prv_if.h"
int ddr3_silicon_pre_config(void);
int ddr3_silicon_init(void);
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);
#endif /* _DDR3_HWS_SIL_TRAINING_H */

View file

@ -3,239 +3,30 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "mv_ddr_common.h"
#include "../../../../arch/arm/mach-mvebu/serdes/a38x/sys_env_lib.h"
/*
 * DLB (Dynamic Load Balancer) register configuration table; consumed by
 * ddr3_new_tip_dlb_config(), which stops at the terminating {0x0, 0x0} entry.
 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
	{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
	{DLB_AGING_REGISTER, 0x0f7f007f},
	{DLB_EVICTION_CONTROL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
	{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
	{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
	{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
	{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
	{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
	{DLB_MAIN_QUEUE_MAP, 0x00000543},
	{DLB_LINE_SPLIT, 0x00000000},
	{DLB_USER_COMMAND_REG, 0x00000000},
	{0x0, 0x0}	/* terminator */
};
/*
 * DLB configuration used for A0 steppings (and A39x); NOTE(review): the
 * entries are currently byte-identical to ddr3_dlb_config_table - kept as a
 * separate table so the two revisions can diverge.
 */
static struct dlb_config ddr3_dlb_config_table_a0[] = {
	{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
	{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
	{DLB_AGING_REGISTER, 0x0f7f007f},
	{DLB_EVICTION_CONTROL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
	{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
	{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
	{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
	{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
	{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
	{DLB_MAIN_QUEUE_MAP, 0x00000543},
	{DLB_LINE_SPLIT, 0x00000000},
	{DLB_USER_COMMAND_REG, 0x00000000},
	{0x0, 0x0}	/* terminator */
};
#if defined(CONFIG_ARMADA_38X)
/* Descriptor for a static DDR register set keyed by SoC/board clocking */
struct dram_modes {
	char *mode_name;
	u8 cpu_freq;
	u8 fab_freq;
	u8 chip_id;
	u8 chip_board_rev;
	struct reg_data *regs;	/* static register values for this mode */
};

/* empty: A38x uses dynamic D-unit configuration, no static modes */
struct dram_modes ddr_modes[] = {
};
#endif /* defined(CONFIG_ARMADA_38X) */
/* Translates topology map definitions to real memory size in bits */
/*
* Translates topology map definitions to real memory size in bits
* (per values in ddr3_training_ip_def.h)
*/
u32 mem_size[] = {
ADDR_SIZE_512MB, ADDR_SIZE_1GB, ADDR_SIZE_2GB, ADDR_SIZE_4GB,
ADDR_SIZE_512MB,
ADDR_SIZE_1GB,
ADDR_SIZE_2GB,
ADDR_SIZE_4GB,
ADDR_SIZE_8GB
};
static char *ddr_type = "DDR3";
/*
* Set 1 to use dynamic DUNIT configuration,
* set 0 (supported for A380 and AC3) to configure DUNIT in values set by
* ddr3_tip_init_specific_reg_config
* generic_init_controller controls D-unit configuration:
* '1' - dynamic D-unit configuration,
*/
u8 generic_init_controller = 1;
static int ddr3_hws_tune_training_params(u8 dev_num);
/* device revision */
#define DEV_VERSION_ID_REG 0x1823c
#define REVISON_ID_OFFS 8
#define REVISON_ID_MASK 0xf00
/* A38x revisions */
#define MV_88F68XX_Z1_ID 0x0
#define MV_88F68XX_A0_ID 0x4
/* A39x revisions */
#define MV_88F69XX_Z1_ID 0x2
/*
 * sys_env_dlb_config_ptr_get() - select the DLB configuration table
 *
 * On A39x the A0 table is always used; on A38x the device revision decides
 * between the A0 table and the original one.
 *
 * Return: pointer to the first entry of the matching DLB table.
 */
struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
#ifdef CONFIG_ARMADA_39X
	return ddr3_dlb_config_table_a0;
#else
	if (sys_env_device_rev_get() != MV_88F68XX_A0_ID)
		return ddr3_dlb_config_table;

	return ddr3_dlb_config_table_a0;
#endif
}
/*
 * sys_env_get_cs_ena_from_reg() - read the enabled chip-select bitmask
 *
 * Reads the DDR3 rank control register and masks out the CS-enable bits.
 *
 * Return: bitmask of enabled chip-selects (e.g. 1 = CS0 only,
 *         3 = CS0 and CS1).
 */
u32 sys_env_get_cs_ena_from_reg(void)
{
	u32 rank_ctrl = reg_read(REG_DDR3_RANK_CTRL_ADDR);

	return rank_ctrl & REG_DDR3_RANK_CTRL_CS_ENA_MASK;
}
/*
 * ddr3_restore_and_set_final_windows() - restore XBAR windows after training
 * @win: the 16 window control values saved by
 *       ddr3_save_and_set_training_windows()
 *
 * Writes back the saved XBAR window configuration, then opens the fast-path
 * window onto the DRAM (dynamically sized per CS when
 * DYNAMIC_CS_SIZE_CONFIG is defined, otherwise a fixed 0.5G window for the
 * first enabled CS).
 */
static void ddr3_restore_and_set_final_windows(u32 *win)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	u32 reg, cs;
	reg = 0x1fffffe1;
	/* encode the first enabled CS into the window control value */
	for (cs = 0; cs < MAX_CS; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path Window to - 0.5G */
	reg_write(REG_FASTPATH_WIN_0_CTRL_ADDR, reg);
#endif
}
/*
 * ddr3_save_and_set_training_windows() - open XBAR windows for training
 * @win: out: storage for the 16 current window control register values
 *
 * Saves the current XBAR window configuration so it can be restored after
 * training, closes window 19, and opens one window per enabled chip-select
 * (CS bitmask taken from the topology map) so the training patterns can
 * reach every CS.  Returns MV_OK.
 */
static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;	/* stride between per-window register banks */
	num_of_win_regs = 16;
	struct hws_topology_map *tm = ddr3_get_topology_map();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS; cs++) {
		if (cs_ena & (1 << cs)) {
			/* per-CS attribute field of the control value */
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			/* enable bit + window size field */
			reg |= (1 << 0);
			reg |= (SDRAM_CS_SIZE & 0xffff0000);

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			/* base address: consecutive CS-sized regions */
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			/* only windows 4-7 have remap registers */
			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}
static int mv_ddr_training_params_set(u8 dev_num);
/*
* Name: ddr3_init - Main DDR3 Init function
@ -246,464 +37,182 @@ static int ddr3_save_and_set_training_windows(u32 *win)
*/
int ddr3_init(void)
{
u32 reg = 0;
u32 soc_num;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 octets_per_if_num;
int status;
u32 win[16];
int is_manual_cal_done;
/* SoC/Board special Initializtions */
/* Get version from internal library */
ddr3_print_version();
/* Print mv_ddr version */
mv_ddr_ver_print();
/*Add sub_version string */
DEBUG_INIT_C("", SUB_VERSION, 1);
mv_ddr_pre_training_fixup();
/* Switching CPU to MRVL ID */
soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
SAR1_CPU_CORE_OFFSET;
switch (soc_num) {
case 0x3:
case 0x1:
reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
case 0x0:
reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
default:
break;
/* SoC/Board special initializations */
mv_ddr_pre_training_soc_config(ddr_type);
/* Set log level for training library */
mv_ddr_user_log_level_set(DEBUG_BLOCK_ALL);
mv_ddr_early_init();
if (mv_ddr_topology_map_update() == NULL) {
printf("mv_ddr: failed to update topology\n");
return MV_FAIL;
}
/*
* Set DRAM Reset Mask in case detected GPIO indication of wakeup from
* suspend i.e the DRAM values will not be overwritten / reset when
* waking from suspend
*/
if (sys_env_suspend_wakeup_check() ==
SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
reg_bit_set(REG_SDRAM_INIT_CTRL_ADDR,
1 << REG_SDRAM_INIT_RESET_MASK_OFFS);
}
if (mv_ddr_early_init2() != MV_OK)
return MV_FAIL;
/*
* Stage 0 - Set board configuration
*/
/* Check if DRAM is already initialized */
if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
(1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
return MV_OK;
}
/*
* Stage 1 - Dunit Setup
*/
/* Fix read ready phases for all SOC in reg 0x15c8 */
reg = reg_read(REG_TRAINING_DEBUG_3_ADDR);
reg &= ~(REG_TRAINING_DEBUG_3_MASK);
reg |= 0x4; /* Phase 0 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << REG_TRAINING_DEBUG_3_OFFS);
reg |= (0x4 << (1 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 1 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (3 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (3 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 3 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg_write(REG_TRAINING_DEBUG_3_ADDR, reg);
/*
* Axi_bresp_mode[8] = Compliant,
* Axi_addr_decode_cntrl[11] = Internal,
* Axi_data_bus_width[0] = 128bit
* */
/* 0x14a8 - AXI Control Register */
reg_write(REG_DRAM_AXI_CTRL_ADDR, 0);
/*
* Stage 2 - Training Values Setup
*/
/* Set X-BAR windows for the training sequence */
ddr3_save_and_set_training_windows(win);
/* Tune training algo paramteres */
status = ddr3_hws_tune_training_params(0);
/* Set training algorithm's parameters */
status = mv_ddr_training_params_set(0);
if (MV_OK != status)
return status;
/* Set log level for training lib */
ddr3_hws_set_log_level(DEBUG_BLOCK_ALL, DEBUG_LEVEL_ERROR);
/* Start New Training IP */
status = ddr3_hws_hw_training();
mv_ddr_mc_config();
is_manual_cal_done = mv_ddr_manual_cal_do();
mv_ddr_mc_init();
if (!is_manual_cal_done) {
}
status = ddr3_silicon_post_init();
if (MV_OK != status) {
printf("DDR3 Post Init - FAILED 0x%x\n", status);
return status;
}
/* PHY initialization (Training) */
status = hws_ddr3_tip_run_alg(0, ALGO_TYPE_DYNAMIC);
if (MV_OK != status) {
printf("%s Training Sequence - FAILED\n", ddr_type);
return status;
}
/*
* Stage 3 - Finish
*/
/* Restore and set windows */
ddr3_restore_and_set_final_windows(win);
#if defined(CONFIG_PHY_STATIC_PRINT)
mv_ddr_phy_static_print();
#endif
/* Update DRAM init indication in bootROM register */
reg = reg_read(REG_BOOTROM_ROUTINE_ADDR);
reg_write(REG_BOOTROM_ROUTINE_ADDR,
reg | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
/* Post MC/PHY initializations */
mv_ddr_post_training_soc_config(ddr_type);
/* DLB config */
ddr3_new_tip_dlb_config();
mv_ddr_post_training_fixup();
#if defined(ECC_SUPPORT)
if (ddr3_if_ecc_enabled())
octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
if (ddr3_if_ecc_enabled()) {
if (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ||
MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(tm->bus_act_mask, octets_per_if_num))
mv_ddr_mem_scrubbing();
else
ddr3_new_tip_ecc_scrub();
#endif
}
printf("%s Training Sequence - Ended Successfully\n", ddr_type);
printf("mv_ddr: completed successfully\n");
return MV_OK;
}
/*
* Name: ddr3_get_cpu_freq
* Desc: read S@R and return CPU frequency
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_cpu_freq(void)
uint64_t mv_ddr_get_memory_size_per_cs_in_bits(void)
{
return ddr3_tip_get_init_freq();
uint64_t memory_size_per_cs;
u32 bus_cnt, num_of_active_bus = 0;
u32 num_of_sub_phys_per_ddr_unit = 0;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(DEV_NUM_0, MV_ATTR_OCTET_PER_INTERFACE);
/* count the number of active bus */
for (bus_cnt = 0; bus_cnt < octets_per_if_num - 1/* ignore ecc octet */; bus_cnt++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_cnt);
num_of_active_bus++;
}
/*
* Name: ddr3_get_fab_opt
* Desc: read S@R and return CPU frequency
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_fab_opt(void)
{
return 0; /* No fabric */
/* calculate number of sub-phys per ddr unit */
if (tm->interface_params[0].bus_width/* supports only single interface */ == MV_DDR_DEV_WIDTH_16BIT)
num_of_sub_phys_per_ddr_unit = TWO_SUB_PHYS;
if (tm->interface_params[0].bus_width/* supports only single interface */ == MV_DDR_DEV_WIDTH_8BIT)
num_of_sub_phys_per_ddr_unit = SINGLE_SUB_PHY;
/* calculate dram size per cs */
memory_size_per_cs = (uint64_t)mem_size[tm->interface_params[0].memory_size] * (uint64_t)num_of_active_bus
/ (uint64_t)num_of_sub_phys_per_ddr_unit * (uint64_t)MV_DDR_NUM_BITS_IN_BYTE;
return memory_size_per_cs;
}
/*
* Name: ddr3_get_static_m_cValue - Init Memory controller with
* static parameters
* Desc: Use this routine to init the controller without the HW training
* procedure.
* User must provide compatible header file with registers data.
* Args: None.
* Notes:
* Returns: None.
*/
u32 ddr3_get_static_mc_value(u32 reg_addr, u32 offset1, u32 mask1,
u32 offset2, u32 mask2)
uint64_t mv_ddr_get_total_memory_size_in_bits(void)
{
u32 reg, temp;
uint64_t total_memory_size = 0;
uint64_t memory_size_per_cs = 0;
reg = reg_read(reg_addr);
/* get the number of cs */
u32 max_cs = ddr3_tip_max_cs_get(DEV_NUM_0);
temp = (reg >> offset1) & mask1;
if (mask2)
temp |= (reg >> offset2) & mask2;
memory_size_per_cs = mv_ddr_get_memory_size_per_cs_in_bits();
total_memory_size = (uint64_t)max_cs * memory_size_per_cs;
return temp;
return total_memory_size;
}
/*
* Name: ddr3_get_static_ddr_mode - Init Memory controller with
* static parameters
* Desc: Use this routine to init the controller without the HW training
* procedure.
* User must provide compatible header file with registers data.
* Args: None.
* Notes:
* Returns: None.
*/
u32 ddr3_get_static_ddr_mode(void)
int ddr3_if_ecc_enabled(void)
{
u32 chip_board_rev, i;
u32 size;
/* Valid only for A380 only, MSYS using dynamic controller config */
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
/*
* Customer boards select DDR mode according to
* board ID & Sample@Reset
*/
chip_board_rev = mv_board_id_get();
#else
/* Marvell boards select DDR mode according to Sample@Reset only */
chip_board_rev = MARVELL_BOARD;
#endif
size = ARRAY_SIZE(ddr_modes);
for (i = 0; i < size; i++) {
if ((ddr3_get_cpu_freq() == ddr_modes[i].cpu_freq) &&
(ddr3_get_fab_opt() == ddr_modes[i].fab_freq) &&
(chip_board_rev == ddr_modes[i].chip_board_rev))
return i;
}
DEBUG_INIT_S("\n*** Error: ddr3_get_static_ddr_mode: No match for requested DDR mode. ***\n\n");
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
return 1;
else
return 0;
}
/******************************************************************************
* Name: ddr3_get_cs_num_from_reg
/*
* Name: mv_ddr_training_params_set
* Desc:
* Args:
* Notes:
* Notes: sets internal training params
* Returns:
*/
/*
 * ddr3_get_cs_num_from_reg() - count enabled chip-selects
 *
 * Return: number of bits set in the CS-enable mask read from the rank
 *         control register (0..MAX_CS).
 */
u32 ddr3_get_cs_num_from_reg(void)
{
	u32 enabled = sys_env_get_cs_ena_from_reg();
	u32 count = 0;
	u32 idx;

	for (idx = 0; idx < MAX_CS; idx++)
		if (enabled & (1 << idx))
			count++;

	return count;
}
/*
 * get_target_freq() - translate a S@R frequency mode into DDR settings
 * @freq_mode:	sample-at-reset frequency mode field
 * @ddr_freq:	out: DDR frequency define (0 for an unknown mode)
 * @hclk_ps:	out: HCLK period in picoseconds (0 for an unknown mode)
 *
 * Fix: the original fell through after the default case and executed
 * "*ddr_freq = tmp;" with 'tmp' uninitialized - an undefined-behavior read
 * (CERT EXP33-C).  Unknown modes now report zeros and return immediately.
 */
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps)
{
	u32 tmp, hclk = 200;

	switch (freq_mode) {
	case 4:
		tmp = 1;	/* DDR_400; */
		hclk = 200;
		break;
	case 0x8:
		tmp = 1;	/* DDR_666; */
		hclk = 333;
		break;
	case 0xc:
		tmp = 1;	/* DDR_800; */
		hclk = 400;
		break;
	default:
		/* unknown mode: report zeros, never read 'tmp' */
		*ddr_freq = 0;
		*hclk_ps = 0;
		return;
	}

	*ddr_freq = tmp;		/* DDR freq define */
	*hclk_ps = 1000000 / hclk;	/* values are 1/HCLK in ps */
}
/*
 * ddr3_new_tip_dlb_config() - program and enable the DRAM DLB unit
 *
 * Applies the platform's DLB register table (terminated by a zero
 * address entry), then turns the DLB on with write coalescing and
 * AXI/MBUS prefetching enabled.
 */
void ddr3_new_tip_dlb_config(void)
{
	struct dlb_config *cfg = sys_env_dlb_config_ptr_get();
	u32 ctrl;
	u32 idx;

	/* apply the configuration table; a zero address terminates it */
	for (idx = 0; cfg[idx].reg_addr != 0; idx++)
		reg_write(cfg[idx].reg_addr, cfg[idx].reg_data);

	/* enable the DLB with coalescing and prefetch features */
	ctrl = reg_read(REG_STATIC_DRAM_DLB_CONTROL);
	ctrl |= DLB_ENABLE | DLB_WRITE_COALESING | DLB_AXI_PREFETCH_EN |
		DLB_MBUS_PREFETCH_EN | PREFETCH_N_LN_SZ_TR;
	reg_write(REG_STATIC_DRAM_DLB_CONTROL, ctrl);
}
/*
 * ddr3_fast_path_dynamic_cs_size_config() - open fast-path windows per CS
 * @cs_ena: bitmask of enabled chip selects
 *
 * For every enabled chip select, compute its memory size and program a
 * fast-path address-decode window (control + base registers), then set
 * the L2 address-filtering end register to the accumulated total size.
 *
 * Returns MV_OK on success, MV_FAIL if a CS size cannot be computed.
 */
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	u32 mem_total_size = 0;
	u32 cs_mem_size = 0;
	u32 mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	/*
	 * NOTE(review): this branch still uses the pre-sync API names
	 * (struct hws_topology_map / ddr3_get_topology_map) which the rest
	 * of this sync renamed to mv_ddr_topology_map / mv_ddr_topology_map_get
	 * - confirm this #ifdef path still builds when
	 * DEVICE_MAX_DRAM_ADDRESS_SIZE is defined.
	 */
	struct hws_topology_map *tm = ddr3_get_topology_map();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
				return MV_FAIL;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if number of address pins doesn't allow to use max
			 * mem size that is defined in topology
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				/* clamp to what the address pins can reach */
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to %x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/*Open fast path Window */
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G the summ may
			 * be more than 32 bit word,
			 * so to estimate the result divide mem_total_size and
			 * cs_mem_size by 0x10000 (it is equal to >> 16)
			 */
			mem_total_size_c = mem_total_size >> 16;
			cs_mem_size_c = cs_mem_size >> 16;
			/* if the sum less than 2 G - calculate the value */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else /* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}
/*
 * ddr3_get_bus_width() - report the DDR bus width in bits
 *
 * Bit 15 of the SDRAM configuration register selects the bus width:
 * clear means a 16-bit bus, set means a 32-bit bus.
 */
u32 ddr3_get_bus_width(void)
{
	u32 cfg = reg_read(REG_SDRAM_CONFIG_ADDR);

	if ((cfg & 0x8000) >> REG_SDRAM_CONFIG_WIDTH_OFFS)
		return 32;

	return 16;
}
/*
 * ddr3_get_device_width() - report DRAM device width for a chip select
 * @cs: chip select index
 *
 * Extracts the per-CS two-bit device-structure field from the SDRAM
 * address control register: 0 means an x8 device, otherwise x16.
 */
u32 ddr3_get_device_width(u32 cs)
{
	u32 field;

	field = reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR) >>
		(REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs);
	field &= 0x3;

	return field ? 16 : 8;
}
/*
 * ddr3_get_device_size() - decode the DRAM device size for a chip select
 * @cs: chip select index
 *
 * Reads the split size field (two low bits plus one high bit) for @cs
 * from the SDRAM address control register and returns the device size
 * in Mbit. Returns 0 for a reserved encoding, which makes the caller
 * (ddr3_calc_mem_cs_size()) flag the error.
 */
static int ddr3_get_device_size(u32 cs)
{
	u32 lo_bits, hi_bit, encoded;
	u32 ctrl, lo_shift, hi_shift;

	lo_shift = REG_SDRAM_ADDRESS_SIZE_OFFS + cs * 4;
	hi_shift = REG_SDRAM_ADDRESS_SIZE_OFFS +
		   REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS + cs;

	ctrl = reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR);
	lo_bits = (ctrl >> lo_shift) & 0x3;
	hi_bit = (ctrl >> hi_shift) & 0x1;
	encoded = lo_bits | (hi_bit << 2);

	if (encoded == 0)
		return 2048;
	if (encoded == 2)
		return 512;
	if (encoded == 3)
		return 1024;
	if (encoded == 4)
		return 4096;
	if (encoded == 5)
		return 8192;

	/* encoding 1 (and anything else) is reserved */
	DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
	/*
	 * Small value will give wrong emem size in
	 * ddr3_calc_mem_cs_size
	 */
	return 0;
}
/*
 * ddr3_calc_mem_cs_size() - compute the memory size behind a chip select
 * @cs:      chip select index
 * @cs_size: out: size in bytes (MiB value shifted left by 20)
 *
 * Returns MV_OK on success, MV_BAD_VALUE for an implausible size.
 */
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size)
{
	int mem_mib;

	/* size in MiB: (bus width / device width) devices, Mbit -> MiB */
	mem_mib = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		   ddr3_get_device_size(cs)) / 8;

	/*
	 * Scale by the controller bus width multiplier (x2 for 64 bit):
	 * bit 15 in register 0x1400 only tells whether the whole bus or
	 * half of it is used, which has a different meaning on 32-bit
	 * versus 64-bit SoC controllers.
	 */
	mem_mib *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if (!mem_mib || (mem_mib == 64) || (mem_mib == 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = mem_mib << 20;

	return MV_OK;
}
/*
* Name: ddr3_hws_tune_training_params
* Desc:
* Args:
* Notes: Tune internal training params
* Returns:
*/
static int ddr3_hws_tune_training_params(u8 dev_num)
static int mv_ddr_training_params_set(u8 dev_num)
{
struct tune_train_params params;
int status;
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
u32 if_id;
u32 cs_num;
CHECK_STATUS(ddr3_tip_get_first_active_if
(dev_num, tm->if_act_mask,
&if_id));
CHECK_STATUS(calc_cs_num(dev_num, if_id, &cs_num));
/* NOTE: do not remove any field initialization */
params.ck_delay = TUNE_TRAINING_PARAMS_CK_DELAY;
params.ck_delay_16 = TUNE_TRAINING_PARAMS_CK_DELAY_16;
params.p_finger = TUNE_TRAINING_PARAMS_PFINGER;
params.n_finger = TUNE_TRAINING_PARAMS_NFINGER;
params.phy_reg3_val = TUNE_TRAINING_PARAMS_PHYREG3VAL;
params.g_zpri_data = TUNE_TRAINING_PARAMS_PRI_DATA;
params.g_znri_data = TUNE_TRAINING_PARAMS_NRI_DATA;
params.g_zpri_ctrl = TUNE_TRAINING_PARAMS_PRI_CTRL;
params.g_znri_ctrl = TUNE_TRAINING_PARAMS_NRI_CTRL;
params.g_znodt_data = TUNE_TRAINING_PARAMS_N_ODT_DATA;
params.g_zpodt_ctrl = TUNE_TRAINING_PARAMS_P_ODT_CTRL;
params.g_znodt_ctrl = TUNE_TRAINING_PARAMS_N_ODT_CTRL;
params.g_zpodt_data = TUNE_TRAINING_PARAMS_P_ODT_DATA;
params.g_dic = TUNE_TRAINING_PARAMS_DIC;
params.g_rtt_nom = TUNE_TRAINING_PARAMS_RTT_NOM;
if (cs_num == 1) {
params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_1CS;
params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS;
} else {
params.g_rtt_wr = TUNE_TRAINING_PARAMS_RTT_WR_2CS;
params.g_odt_config = TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS;
}
status = ddr3_tip_tune_training_params(dev_num, &params);
if (MV_OK != status) {

View file

@ -6,12 +6,12 @@
#ifndef _DDR3_INIT_H
#define _DDR3_INIT_H
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_topology.h"
#include "ddr_ml_wrapper.h"
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#include "mv_ddr_plat.h"
#endif
#include "ddr3_hws_hw_training.h"
#include "ddr3_hws_sil_training.h"
#include "seq_exec.h"
#include "ddr3_logging_def.h"
#include "ddr3_training_hw_algo.h"
#include "ddr3_training_ip.h"
@ -20,119 +20,9 @@
#include "ddr3_training_ip_flow.h"
#include "ddr3_training_ip_pbs.h"
#include "ddr3_training_ip_prv_if.h"
#include "ddr3_training_ip_static.h"
#include "ddr3_training_leveling.h"
#include "xor.h"
/*
* MV_DEBUG_INIT need to be defines, otherwise the output of the
* DDR2 training code is not complete and misleading
*/
#define MV_DEBUG_INIT
#ifdef MV_DEBUG_INIT
#define DEBUG_INIT_S(s) puts(s)
#define DEBUG_INIT_D(d, l) printf("%x", d)
#define DEBUG_INIT_D_10(d, l) printf("%d", d)
#else
#define DEBUG_INIT_S(s)
#define DEBUG_INIT_D(d, l)
#define DEBUG_INIT_D_10(d, l)
#endif
#ifdef MV_DEBUG_INIT_FULL
#define DEBUG_INIT_FULL_S(s) puts(s)
#define DEBUG_INIT_FULL_D(d, l) printf("%x", d)
#define DEBUG_INIT_FULL_D_10(d, l) printf("%d", d)
#define DEBUG_WR_REG(reg, val) \
{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#define DEBUG_RD_REG(reg, val) \
{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#else
#define DEBUG_INIT_FULL_S(s)
#define DEBUG_INIT_FULL_D(d, l)
#define DEBUG_INIT_FULL_D_10(d, l)
#define DEBUG_WR_REG(reg, val)
#define DEBUG_RD_REG(reg, val)
#endif
#define DEBUG_INIT_FULL_C(s, d, l) \
{ DEBUG_INIT_FULL_S(s); \
DEBUG_INIT_FULL_D(d, l); \
DEBUG_INIT_FULL_S("\n"); }
#define DEBUG_INIT_C(s, d, l) \
{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
/*
* Debug (Enable/Disable modules) and Error report
*/
#ifdef BASIC_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS_RESULTS
#endif
#ifdef FULL_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS
#define MV_DEBUG_PBS
#define MV_DEBUG_DFS
#define MV_DEBUG_MAIN_FULL
#define MV_DEBUG_DFS_FULL
#define MV_DEBUG_DQS_FULL
#define MV_DEBUG_RL_FULL
#define MV_DEBUG_WL_FULL
#endif
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_topology.h"
#endif
/* The following is a list of Marvell status */
#define MV_ERROR (-1)
#define MV_OK (0x00) /* Operation succeeded */
#define MV_FAIL (0x01) /* Operation failed */
#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
#define MV_BAD_PTR (0x05) /* Illegal pointer value */
#define MV_BAD_SIZE (0x06) /* Illegal size */
#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
#define MV_SET_ERROR (0x08) /* Set operation failed */
#define MV_GET_ERROR (0x09) /* Get operation failed */
#define MV_CREATE_ERROR (0x0a) /* Fail while creating an item */
#define MV_NOT_FOUND (0x0b) /* Item not found */
#define MV_NO_MORE (0x0c) /* No more items found */
#define MV_NO_SUCH (0x0d) /* No such item */
#define MV_TIMEOUT (0x0e) /* Time Out */
#define MV_NO_CHANGE (0x0f) /* Parameter(s) is already in this value */
#define MV_NOT_SUPPORTED (0x10) /* This request is not support */
#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/
#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
#define MV_INIT_ERROR (0x16) /* Error occurred while INIT process */
#define MV_HW_ERROR (0x17) /* Hardware error */
#define MV_TX_ERROR (0x18) /* Transmit operation not succeeded */
#define MV_RX_ERROR (0x19) /* Receive operation not succeeded */
#define MV_NOT_READY (0x1a) /* The other side is not ready yet */
#define MV_ALREADY_EXIST (0x1b) /* Tried to create existing item */
#define MV_OUT_OF_CPU_MEM (0x1c) /* Cpu memory allocation failed. */
#define MV_NOT_STARTED (0x1d) /* Not started yet */
#define MV_BUSY (0x1e) /* Item is busy. */
#define MV_TERMINATE (0x1f) /* Item terminates it's work. */
#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
#define MV_WRITE_PROTECT (0x22) /* Write protected */
#define MV_INVALID (int)(-1)
/* For checking function return values */
#define CHECK_STATUS(orig_func) \
{ \
@ -142,6 +32,14 @@
return status; \
}
#define GET_MAX_VALUE(x, y) \
((x) > (y)) ? (x) : (y)
#define SUB_VERSION 0
/* max number of devices supported by driver */
#define MAX_DEVICE_NUM 1
enum log_level {
MV_LOG_LEVEL_0,
MV_LOG_LEVEL_1,
@ -150,28 +48,27 @@ enum log_level {
};
/* Globals */
extern u8 debug_training;
extern u8 debug_training, debug_calibration, debug_ddr4_centralization,
debug_tap_tuning, debug_dm_tuning;
extern u8 is_reg_dump;
extern u8 generic_init_controller;
extern u32 freq_val[];
/* list of allowed frequency listed in order of enum hws_ddr_freq */
extern u32 freq_val[DDR_FREQ_LAST];
extern u32 is_pll_old;
extern struct cl_val_per_freq cas_latency_table[];
extern struct pattern_info pattern_table[];
extern struct cl_val_per_freq cas_write_latency_table[];
extern u8 debug_training;
extern u8 debug_centralization, debug_training_ip, debug_training_bist,
debug_pbs, debug_training_static, debug_leveling;
extern u32 pipe_multicast_mask;
extern struct hws_tip_config_func_db config_func_info[];
extern u8 cs_mask_reg[];
extern u8 twr_mask_table[];
extern u8 cl_mask_table[];
extern u8 cwl_mask_table[];
extern u16 rfc_table[];
extern u32 speed_bin_table_t_rc[];
extern u32 speed_bin_table_t_rcd_t_rp[];
extern u32 ck_delay, ck_delay_16;
extern u32 vref_init_val;
extern u32 g_zpri_data;
extern u32 g_znri_data;
extern u32 g_zpri_ctrl;
@ -181,39 +78,28 @@ extern u32 g_znodt_data;
extern u32 g_zpodt_ctrl;
extern u32 g_znodt_ctrl;
extern u32 g_dic;
extern u32 g_odt_config_2cs;
extern u32 g_odt_config_1cs;
extern u32 g_odt_config;
extern u32 g_rtt_nom;
extern u32 g_rtt_wr;
extern u32 g_rtt_park;
extern u8 debug_training_access;
extern u8 debug_training_a38x;
extern u32 first_active_if;
extern enum hws_ddr_freq init_freq;
extern u32 delay_enable, ck_delay, ck_delay_16, ca_delay;
extern u32 delay_enable, ck_delay, ca_delay;
extern u32 mask_tune_func;
extern u32 rl_version;
extern int rl_mid_freq_wa;
extern u8 calibration_update_control; /* 2 external only, 1 is internal only */
extern enum hws_ddr_freq medium_freq;
extern u32 ck_delay, ck_delay_16;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern u32 first_active_if;
extern u32 mask_tune_func;
extern u32 freq_val[];
extern enum hws_ddr_freq init_freq;
extern enum hws_ddr_freq low_freq;
extern enum hws_ddr_freq medium_freq;
extern u8 generic_init_controller;
extern enum auto_tune_stage training_stage;
extern u32 is_pll_before_init;
extern u32 is_adll_calib_before_init;
extern u32 is_dfs_in_init;
extern int wl_debug_delay;
extern u32 silicon_delay[HWS_MAX_DEVICE_NUM];
extern u32 p_finger;
extern u32 n_finger;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u32 silicon_delay[MAX_DEVICE_NUM];
extern u32 start_pattern, end_pattern;
extern u32 phy_reg0_val;
extern u32 phy_reg1_val;
@ -221,172 +107,93 @@ extern u32 phy_reg2_val;
extern u32 phy_reg3_val;
extern enum hws_pattern sweep_pattern;
extern enum hws_pattern pbs_pattern;
extern u8 is_rzq6;
extern u32 znri_data_phy_val;
extern u32 zpri_data_phy_val;
extern u32 znri_ctrl_phy_val;
extern u32 zpri_ctrl_phy_val;
extern u8 debug_training_access;
extern u32 g_znri_data;
extern u32 g_zpri_data;
extern u32 g_znri_ctrl;
extern u32 g_zpri_ctrl;
extern u32 finger_test, p_finger_start, p_finger_end, n_finger_start,
n_finger_end, p_finger_step, n_finger_step;
extern u32 mode2_t;
extern u32 mode_2t;
extern u32 xsb_validate_type;
extern u32 xsb_validation_base_address;
extern u32 odt_additional;
extern u32 debug_mode;
extern u32 delay_enable;
extern u32 ca_delay;
extern u32 debug_dunit;
extern u32 clamp_tbl[];
extern u32 freq_mask[HWS_MAX_DEVICE_NUM][DDR_FREQ_LIMIT];
extern u32 start_pattern, end_pattern;
extern u32 freq_mask[MAX_DEVICE_NUM][DDR_FREQ_LAST];
extern u32 maxt_poll_tries;
extern u32 is_bist_reset_bit;
extern u8 debug_training_bist;
extern u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
extern u32 debug_mode;
extern u32 effective_cs;
extern int ddr3_tip_centr_skip_min_win_check;
extern u32 *dq_map_table;
extern enum auto_tune_stage training_stage;
extern u8 debug_centralization;
extern u32 delay_enable;
extern u32 start_pattern, end_pattern;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u8 debug_training_hw_alg;
extern enum auto_tune_stage training_stage;
extern u8 debug_training_ip;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 effective_cs;
extern u8 debug_leveling;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 rl_version;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 start_xsb_offset;
extern u32 debug_mode;
extern u32 odt_config;
extern u32 effective_cs;
extern u32 phy_reg1_val;
extern u8 debug_pbs;
extern u32 effective_cs;
extern u16 mask_results_dq_reg_map[];
extern enum hws_ddr_freq medium_freq;
extern u32 freq_val[];
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 debug_mode;
extern u32 *dq_map_table;
extern u32 vref;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 target_freq;
extern struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
extern u32 clamp_tbl[];
extern u32 init_freq;
/* list of allowed frequency listed in order of enum hws_ddr_freq */
extern u32 freq_val[];
extern u8 debug_training_static;
extern u32 first_active_if;
extern u32 dfs_low_freq;
extern u32 mem_size[];
extern u32 nominal_avs;
extern u32 extension_avs;
/* Prototypes */
int ddr3_init(void);
int ddr3_tip_enable_init_sequence(u32 dev_num);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int ddr3_hws_hw_training(void);
int ddr3_silicon_pre_init(void);
int ddr3_hws_hw_training(enum hws_algo_type algo_mode);
int mv_ddr_early_init(void);
int mv_ddr_early_init2(void);
int ddr3_silicon_post_init(void);
int ddr3_post_run_alg(void);
int ddr3_if_ecc_enabled(void);
void ddr3_new_tip_ecc_scrub(void);
void ddr3_print_version(void);
void ddr3_new_tip_dlb_config(void);
struct hws_topology_map *ddr3_get_topology_map(void);
void mv_ddr_ver_print(void);
struct mv_ddr_topology_map *mv_ddr_topology_map_get(void);
int ddr3_if_ecc_enabled(void);
int ddr3_tip_reg_write(u32 dev_num, u32 reg_addr, u32 data);
int ddr3_tip_reg_read(u32 dev_num, u32 reg_addr, u32 *data, u32 reg_mask);
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info
*freq_config_info);
int ddr3_a38x_update_topology_map(u32 dev_num,
struct hws_topology_map *topology_map);
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask);
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data, u32 mask);
int ddr3_tip_a38x_get_device_info(u8 dev_num,
struct ddr3_device_info *info_ptr);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]);
int print_ph(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]);
int read_phase_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int write_leveling_value(u32 dev_num, u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 pup_ph_values[MAX_INTERFACE_NUM * MAX_BUS_NUM], int reg_addr);
int ddr3_tip_restore_dunit_regs(u32 dev_num);
void print_topology(struct hws_topology_map *topology_db);
void print_topology(struct mv_ddr_topology_map *tm);
u32 mv_board_id_get(void);
int ddr3_load_topology_map(void);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
u32 ddr3_tip_get_init_freq(void);
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level);
void mv_ddr_user_log_level_set(enum ddr_lib_debug_block block);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps);
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena);
void ddr3_fast_path_static_cs_size_config(u32 cs_ena);
u32 ddr3_get_device_width(u32 cs);
u32 mv_board_id_index_get(u32 board_id);
u32 mv_board_id_get(void);
u32 ddr3_get_bus_width(void);
void ddr3_set_log_level(u32 n_log_level);
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size);
int calc_cs_num(u32 dev_num, u32 if_id, u32 *cs_num);
int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr);
int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode);
int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode);
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
struct trip_delay_element *table_ptr,
int is_wl, u32 *round_trip_delay_arr);
u32 hws_ddr3_tip_max_cs_get(void);
/*
* Accessor functions for the registers
*/
/* write @val to the SoC internal register at offset @addr from INTER_REGS_BASE */
static inline void reg_write(u32 addr, u32 val)
{
	writel(val, INTER_REGS_BASE + addr);
}

/* read the SoC internal register at offset @addr from INTER_REGS_BASE */
static inline u32 reg_read(u32 addr)
{
	return readl(INTER_REGS_BASE + addr);
}

/* set the bits of @mask in the SoC internal register at offset @addr */
static inline void reg_bit_set(u32 addr, u32 mask)
{
	setbits_le32(INTER_REGS_BASE + addr, mask);
}

/* clear the bits of @mask in the SoC internal register at offset @addr */
static inline void reg_bit_clr(u32 addr, u32 mask)
{
	clrbits_le32(INTER_REGS_BASE + addr, mask);
}
u32 mv_ddr_init_freq_get(void);
void mv_ddr_mc_config(void);
int mv_ddr_mc_init(void);
void mv_ddr_set_calib_controller(void);
#endif /* _DDR3_INIT_H */

View file

@ -73,10 +73,14 @@
#endif
#endif
/* Logging defines */
#define DEBUG_LEVEL_TRACE 1
#define DEBUG_LEVEL_INFO 2
#define DEBUG_LEVEL_ERROR 3
enum mv_ddr_debug_level {
DEBUG_LEVEL_TRACE = 1,
DEBUG_LEVEL_INFO = 2,
DEBUG_LEVEL_ERROR = 3,
DEBUG_LEVEL_LAST
};
enum ddr_lib_debug_block {
DEBUG_BLOCK_STATIC,

View file

@ -6,6 +6,7 @@
#ifndef __DDR3_PATTERNS_64_H
#define __DDR3_PATTERNS_64_H
#define FAB_OPT 21
/*
* Patterns Declarations
*/

View file

@ -6,8 +6,9 @@
#ifndef _DDR3_TOPOLOGY_DEF_H
#define _DDR3_TOPOLOGY_DEF_H
/* TOPOLOGY */
#define DEV_NUM_0 0
/* TOPOLOGY */
enum hws_speed_bin {
SPEED_BIN_DDR_800D,
SPEED_BIN_DDR_800E,
@ -53,7 +54,8 @@ enum hws_ddr_freq {
DDR_FREQ_900,
DDR_FREQ_360,
DDR_FREQ_1000,
DDR_FREQ_LIMIT
DDR_FREQ_LAST,
DDR_FREQ_SAR
};
enum speed_bin_table_elements {

File diff suppressed because it is too large Load diff

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
static u32 bist_offset = 32;
@ -24,7 +18,7 @@ static int ddr3_tip_bist_operation(u32 dev_num,
*/
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
enum hws_access_type access_type, u32 if_num,
enum hws_dir direction,
enum hws_dir dir,
enum hws_stress_jump addr_stress_jump,
enum hws_pattern_duration duration,
enum hws_bist_operation oper_type,
@ -32,103 +26,43 @@ int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
{
u32 tx_burst_size;
u32 delay_between_burst;
u32 rd_mode, val;
u32 poll_cnt = 0, max_poll = 1000, i, start_if, end_if;
u32 rd_mode;
struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
/* ODPG Write enable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0x1, 0x1));
/* ODPG Read enable/disable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG,
(direction == OPER_READ) ?
0x2 : 0, 0x2));
CHECK_STATUS(ddr3_tip_load_pattern_to_odpg(dev_num, access_type, if_num,
pattern, offset));
/* odpg bist write enable */
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
(ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
(ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_BUF_SIZE_REG,
pattern_addr_length, MASK_ALL_BITS));
tx_burst_size = (direction == OPER_WRITE) ?
/* odpg bist read enable/disable */
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
(dir == OPER_READ) ? (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
(ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
(ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));
ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_length, MASK_ALL_BITS);
tx_burst_size = (dir == OPER_WRITE) ?
pattern_table[pattern].tx_burst_size : 0;
delay_between_burst = (direction == OPER_WRITE) ? 2 : 0;
rd_mode = (direction == OPER_WRITE) ? 1 : 0;
CHECK_STATUS(ddr3_tip_configure_odpg
(dev_num, access_type, if_num, direction,
delay_between_burst = (dir == OPER_WRITE) ? 2 : 0;
rd_mode = (dir == OPER_WRITE) ? 1 : 0;
ddr3_tip_configure_odpg(0, access_type, 0, dir,
pattern_table[pattern].num_of_phases_tx, tx_burst_size,
pattern_table[pattern].num_of_phases_rx,
delay_between_burst,
rd_mode, cs_num, addr_stress_jump, duration));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_PATTERN_ADDR_OFFSET_REG,
offset, MASK_ALL_BITS));
rd_mode, cs_num, addr_stress_jump, duration);
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG, offset, MASK_ALL_BITS);
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_STOP));
ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
} else {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_START));
if (duration != DURATION_CONT) {
/*
 * This delay is a WA, because polling reports "done"
 * even though the ODPG has not finished its task
 */
if (access_type == ACCESS_TYPE_MULTICAST) {
start_if = 0;
end_if = MAX_INTERFACE_NUM - 1;
} else {
start_if = if_num;
end_if = if_num;
}
for (i = start_if; i <= end_if; i++) {
VALIDATE_ACTIVE(tm->
if_act_mask, i);
for (poll_cnt = 0; poll_cnt < max_poll;
poll_cnt++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
ACCESS_TYPE_UNICAST,
if_num, ODPG_BIST_DONE,
read_data,
MASK_ALL_BITS));
val = read_data[i];
if ((val & 0x1) == 0x0) {
/*
* In SOC type devices this bit
* is self clear so, if it was
* cleared all good
*/
break;
}
}
if (poll_cnt >= max_poll) {
DEBUG_TRAINING_BIST_ENGINE
(DEBUG_LEVEL_ERROR,
("Bist poll failure 2\n"));
CHECK_STATUS(ddr3_tip_if_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
return MV_FAIL;
ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
}
}
CHECK_STATUS(ddr3_tip_bist_operation
(dev_num, access_type, if_num, BIST_STOP));
}
}
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);
return MV_OK;
}
@ -141,34 +75,34 @@ int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
{
int ret;
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0)
return MV_NOT_SUPPORTED;
DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
("ddr3_tip_bist_read_result if_id %d\n",
if_id));
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_HI_REG, read_data,
ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_high = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_LOW_REG, read_data,
ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_low = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_LAST_FAIL_ADDR_REG, read_data,
ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_last_fail_addr = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_DATA_ERROR_COUNTER_REG, read_data,
ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
@ -187,10 +121,10 @@ int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 i = 0;
u32 win_base;
struct bist_result st_bist_result;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
VALIDATE_ACTIVE(tm->if_act_mask, i);
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
ret = ddr3_tip_bist_activate(dev_num, pattern,
ACCESS_TYPE_UNICAST,
@ -233,13 +167,10 @@ static int ddr3_tip_bist_operation(u32 dev_num,
enum hws_access_type access_type,
u32 if_id, enum hws_bist_operation oper_type)
{
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1 << 8, 1 << 8));
} else {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1, 1));
}
if (oper_type == BIST_STOP)
mv_ddr_odpg_disable();
else
mv_ddr_odpg_enable();
return MV_OK;
}
@ -253,11 +184,10 @@ void ddr3_tip_print_bist_res(void)
u32 i;
struct bist_result st_bist_result[MAX_INTERFACE_NUM];
int res;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) == 0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
if (res != MV_OK) {
@ -273,9 +203,7 @@ void ddr3_tip_print_bist_res(void)
("interface | error_cnt | fail_low | fail_high | fail_addr\n"));
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) ==
0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
DEBUG_TRAINING_BIST_ENGINE(
DEBUG_LEVEL_INFO,
@ -286,3 +214,389 @@ void ddr3_tip_print_bist_res(void)
st_bist_result[i].bist_last_fail_addr));
}
}
enum {
PASS,
FAIL
};
#define TIP_ITERATION_NUM 31
/*
 * mv_ddr_tip_bist() - run one training-IP BIST pass and collect results
 * @dir:     read or write direction
 * @val:     value passed through to the training IP run
 * @pattern: data pattern to exercise
 * @cs:      chip select to test
 * @result:  out: one FAIL bit per subphy that did not lock
 */
static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern, u32 cs, u32 *result)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_training_ip_stat tip_result;
	u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	u32 octets = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 sphy, status;

	/* kick the training IP state machine over all active interfaces */
	ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			     RESULT_PER_BYTE, HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir, tm->if_act_mask, val,
			     TIP_ITERATION_NUM, pattern, EDGE_FP, CS_SINGLE, cs, &tip_result);

	/* record a FAIL bit for every subphy whose block did not lock */
	for (sphy = 0; sphy < octets; sphy++) {
		ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[sphy], &status, MASK_ALL_BITS);
		if (((status >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) == BLOCK_STATUS_NOT_LOCKED)
			*result |= (FAIL << sphy);
	}

	return MV_OK;
}
/*
 * Circular search interval over a per-position PASS/FAIL vector.
 * The markers may exceed rendpnt to express wrap-around; consumers
 * index the vector modulo size (see interval_proc()).
 */
struct interval {
	u8 *vector;	/* PASS/FAIL byte per position */
	u8 lendpnt; /* interval's left endpoint */
	u8 rendpnt; /* interval's right endpoint */
	u8 size; /* interval's size */
	u8 lmarker; /* left marker */
	u8 rmarker; /* right marker */
	u8 pass_lendpnt; /* left endpoint of internal pass interval */
	u8 pass_rendpnt; /* right endpoint of internal pass interval */
};
/*
 * interval_init() - validate parameters and populate a search interval
 *
 * Checks each parameter in turn and stores it into @intrvl as soon as
 * it is accepted; on a failed check the fields validated so far remain
 * set and MV_FAIL is returned. rmarker may exceed rendpnt by up to the
 * interval size to express wrap-around.
 */
static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt,
			 u8 lmarker, u8 rmarker, struct interval *intrvl)
{
	u8 span;

	if (!intrvl) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	if (!vector) {
		printf("%s: NULL vector pointer found\n", __func__);
		return MV_FAIL;
	}
	intrvl->vector = vector;

	if (lendpnt >= rendpnt) {
		printf("%s: incorrect lendpnt and/or rendpnt parameters found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lendpnt = lendpnt;
	intrvl->rendpnt = rendpnt;
	span = rendpnt - lendpnt + 1;
	intrvl->size = span;

	if ((lmarker < lendpnt) || (lmarker > rendpnt)) {
		printf("%s: incorrect lmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lmarker = lmarker;

	if ((rmarker < lmarker) || (rmarker > (rendpnt + span))) {
		printf("%s: incorrect rmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->rmarker = rmarker;

	return MV_OK;
}
/* interval_set() - record the computed pass sub-interval endpoints */
static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl)
{
	if (!intrvl) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	intrvl->pass_lendpnt = pass_lendpnt;
	intrvl->pass_rendpnt = pass_rendpnt;

	return MV_OK;
}
/*
 * interval_proc() - locate the contiguous pass sub-interval
 *
 * Scans the PASS/FAIL vector of @intrvl, treating it as circular
 * (indices are taken modulo the interval size so markers past rendpnt
 * wrap around). Starting from rmarker it finds the right endpoint of
 * the pass run, then walks left to find its left endpoint, and records
 * both via interval_set().
 *
 * Returns MV_FAIL if the vector is all-fail or a required pass/fail
 * transition cannot be found; MV_OK otherwise (all-pass records the
 * whole interval).
 */
static int interval_proc(struct interval *intrvl)
{
	int curr;
	int pass_lendpnt, pass_rendpnt;
	int lmt;
	int fcnt = 0, pcnt = 0;

	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	/* count fails and passes */
	curr = intrvl->lendpnt;
	while (curr <= intrvl->rendpnt) {
		if (intrvl->vector[curr] == PASS)
			pcnt++;
		else
			fcnt++;
		curr++;
	}

	/* check for all fail */
	if (fcnt == intrvl->size) {
		printf("%s: no pass found\n", __func__);
		return MV_FAIL;
	}

	/* check for all pass */
	if (pcnt == intrvl->size) {
		if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK)
			return MV_FAIL;
		return MV_OK;
	}

	/* proceed with rmarker */
	curr = intrvl->rmarker;
	if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */
		/* search for fail on right */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rendpnt + intrvl->size;
		else
			lmt = intrvl->rmarker + intrvl->size - 1;
		while ((curr <= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == PASS))
			curr++;
		if (curr > lmt) { /* fail not found */
			printf("%s: rmarker: fail following pass not found\n", __func__);
			return MV_FAIL;
		}
		/* fail found */
		pass_rendpnt = curr - 1;
	} else { /* fail at rmarker */
		/* search for pass on left */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rmarker - intrvl->size + 1;
		else
			lmt = intrvl->lendpnt;
		while ((curr >= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == FAIL))
			curr--;
		if (curr < lmt) { /* pass not found */
			printf("%s: rmarker: pass preceding fail not found\n", __func__);
			return MV_FAIL;
		}
		/* pass found */
		pass_rendpnt = curr;
	}

	/* search for fail on left */
	curr = pass_rendpnt;
	if (pass_rendpnt > intrvl->rendpnt)
		lmt = pass_rendpnt - intrvl->size + 1;
	else
		lmt = intrvl->lendpnt;
	while ((curr >= lmt) &&
	       (intrvl->vector[curr % intrvl->size] == PASS))
		curr--;
	if (curr < lmt) { /* fail not found */
		printf("%s: rmarker: fail preceding pass not found\n", __func__);
		return MV_FAIL;
	}
	/* fail found */
	pass_lendpnt = curr + 1;

	if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
#define ADLL_TAPS_PER_PERIOD 64
/*
 * mv_ddr_dm_to_dq_diff_get - compute the offsets of the pass sub-interval
 * found in the per-tap pass/fail vector relative to the caller's window hint.
 * @vw_sphy_hi_lmt:	high (right) limit hint of the subphy valid window
 * @vw_sphy_lo_lmt:	low (left) limit hint of the subphy valid window
 * @vw_vector:		pass/fail vector, ADLL_TAPS_PER_PERIOD entries
 * @vw_sphy_hi_diff:	out: pass right endpoint minus vw_sphy_hi_lmt
 * @vw_sphy_lo_diff:	out: vw_sphy_lo_lmt minus pass left endpoint
 *
 * Returns MV_OK on success; MV_FAIL on invalid parameters, when no pass
 * sub-interval can be isolated, or when the vector contains no fail at all.
 */
int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt, u8 *vw_vector,
			     int *vw_sphy_hi_diff, int *vw_sphy_lo_diff)
{
	struct interval intrvl;

	/* guard output pointers; vw_vector itself is checked by interval_init() */
	if (vw_sphy_hi_diff == NULL || vw_sphy_lo_diff == NULL) {
		printf("%s: NULL output pointer found\n", __func__);
		return MV_FAIL;
	}

	/* init interval structure */
	if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1,
			  vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK)
		return MV_FAIL;

	/* find pass sub-interval */
	if (interval_proc(&intrvl) != MV_OK)
		return MV_FAIL;

	/* check for all pass; at least one fail is needed to bound the window */
	if ((intrvl.pass_rendpnt == intrvl.rendpnt) &&
	    (intrvl.pass_lendpnt == intrvl.lendpnt)) {
		printf("%s: no fail found\n", __func__);
		return MV_FAIL;
	}

	*vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt;
	*vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt;

	return MV_OK;
}
/*
 * Run one odpg bist transmit cycle: clear the done indication, start bist,
 * poll for completion, then stop bist and clear the odpg data control
 * register. Returns MV_FAIL if the completion poll times out (bist is left
 * started in that case, matching the original flow), MV_OK otherwise.
 */
static int mv_ddr_bist_tx(enum hws_access_type access_type)
{
	int rc;

	mv_ddr_odpg_done_clr();

	ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
	rc = mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS);
	if (rc != MV_OK)
		return MV_FAIL;
	ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);

	return MV_OK;
}
/* prepare odpg for bist operation */
#define WR_OP_ODPG_DATA_CMD_BURST_DLY 2
/*
 * Prepare the odpg unit for a bist run: enable the write buffer, set the
 * read buffer according to the operation direction, load the requested
 * pattern, program the buffer size, and configure the odpg burst
 * parameters. Always returns MV_OK.
 */
static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern, enum hws_access_type access_type,
				    enum hws_dir dir, enum hws_stress_jump stress_jump_addr,
				    enum hws_pattern_duration duration, u32 offset, u32 cs,
				    u32 pattern_addr_len, enum dm_direction dm_dir)
{
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	/* read-direction defaults; overridden below for a write operation */
	u32 tx_burst_size = 0;
	u32 burst_delay = 0;
	u32 rd_mode = ODPG_MODE_RX;
	u32 rd_ctrl;

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS,
			  ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS);

	/* odpg bist read enable/disable */
	if (dir == OPER_READ)
		rd_ctrl = ODPG_WRBUF_RD_CTRL_ENA;
	else
		rd_ctrl = ODPG_WRBUF_RD_CTRL_DIS;
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  rd_ctrl << ODPG_WRBUF_RD_CTRL_OFFS,
			  ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS);

	/* plain 0x00/0xff patterns go through the generic loader */
	if (pattern == PATTERN_00 || pattern == PATTERN_FF)
		ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);
	else
		mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG, pattern_addr_len, MASK_ALL_BITS);

	if (dir == OPER_WRITE) {
		tx_burst_size = pattern_table[pattern].tx_burst_size;
		burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY;
		rd_mode = ODPG_MODE_TX;
	}

	ddr3_tip_configure_odpg(0, access_type, 0, dir, pattern_table[pattern].num_of_phases_tx,
				tx_burst_size, pattern_table[pattern].num_of_phases_rx, burst_delay,
				rd_mode, cs, stress_jump_addr, duration);

	return MV_OK;
}
#define BYTES_PER_BURST_64BIT 0x20
#define BYTES_PER_BURST_32BIT 0x10
/*
 * mv_ddr_dm_vw_get - collect the per-tap dm valid-window pass/fail vector
 * @pattern:	base pattern to write and compare against
 * @cs:		chip select under test
 * @vw_vector:	out: ADLL_TAPS_PER_PERIOD entries per subphy; a bit is OR'ed
 *		in for each tap whose bist compare reports a failure
 *
 * Sweeps the tx adll over a full period: for each tap the base pattern is
 * written to a tap-specific odpg address, then the vref pattern is written
 * with the tx adll set to that tap, and finally the memory is read back and
 * compared against the base pattern. The tx/rx adll values saved at entry
 * are restored before returning. Returns MV_OK.
 */
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 adll_tap;
	u32 wr_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 rd_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 subphy;
	u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 odpg_addr = 0x0;
	u32 result;
	u32 idx;
	/* burst length in bytes */
	u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ?
			 BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT);

	/* save dqs values to restore after algorithm's run */
	ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS);
	ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS);

	/* fill memory with base pattern */
	ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0, MASK_ALL_BITS);
	mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* change target odpg address; each tap gets its own burst slot */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);
		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));
		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));
		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* fill memory with vref pattern to increment addr using odpg bist */
	mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
				 bist_offset, cs, pattern_table[pattern].num_of_phases_tx,
				 (pattern == PATTERN_00) ? DM_DIR_DIRECT : DM_DIR_INVERSE);

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* move the tx adll to the tap under test before writing */
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_MULTICAST, 0,
				   DDR_PHY_DATA, CTX_PHY_REG(cs), adll_tap);
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE, DURATION_SINGLE);
		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));
		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));
		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* restore subphy's tx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CTX_PHY_REG(cs),
				   wr_ctrl_adll[subphy]);
	}

	/* read and validate bist (comparing with the base pattern) */
	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		result = 0;
		odpg_addr = adll_tap * burst_len;
		/* change addr to fit write */
		mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr);
		mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result);
		for (subphy = 0; subphy < subphy_max; subphy++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
			/* one vector slot per (subphy, tap) pair */
			idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap;
			vw_vector[idx] |= ((result >> subphy) & 0x1);
		}
	}

	/* restore subphy's rx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0, ACCESS_TYPE_UNICAST,
				   subphy, DDR_PHY_DATA, CRX_PHY_REG(cs),
				   rd_ctrl_adll[subphy]);
	}

	return MV_OK;
}

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VALIDATE_WIN_LENGTH(e1, e2, maxsize) \
@ -22,6 +16,7 @@
#define NUM_OF_CENTRAL_TYPES 2
u32 start_pattern = PATTERN_KILLER_DQ0, end_pattern = PATTERN_KILLER_DQ7;
u32 start_if = 0, end_if = (MAX_INTERFACE_NUM - 1);
u8 bus_end_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 bus_start_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
@ -65,7 +60,8 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
u8 current_window[BUS_WIDTH_IN_BITS];
u8 opt_window, waste_window, start_window_skew, end_window_skew;
u8 final_pup_window[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS];
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
enum hws_training_result result_type = RESULT_PER_BIT;
enum hws_dir direction;
u32 *result[HWS_SEARCH_DIR_LIMIT];
@ -81,33 +77,33 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
u8 cons_tap = (mode == CENTRAL_TX) ? (64) : (0);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3)));
}
if (mode == CENTRAL_TX) {
max_win_size = MAX_WINDOW_SIZE_TX;
reg_phy_off = WRITE_CENTRALIZATION_PHY_REG + (effective_cs * 4);
reg_phy_off = CTX_PHY_REG(effective_cs);
direction = OPER_WRITE;
} else {
max_win_size = MAX_WINDOW_SIZE_RX;
reg_phy_off = READ_CENTRALIZATION_PHY_REG + (effective_cs * 4);
reg_phy_off = CRX_PHY_REG(effective_cs);
direction = OPER_READ;
}
/* DB initialization */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id < tm->num_of_bus_per_interface; bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
bus_id < octets_per_if_num; bus_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
centralization_state[if_id][bus_id] = 0;
bus_end_window[mode][if_id][bus_id] =
(max_win_size - 1) + cons_tap;
@ -133,11 +129,11 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id <= tm->num_of_bus_per_interface - 1;
bus_id <= octets_per_if_num - 1;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
@ -336,24 +332,25 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
[if_id][bus_id]));
centralization_state[if_id]
[bus_id] = 1;
if (debug_mode == 0)
if (debug_mode == 0) {
flow_result[if_id] = TEST_FAILED;
return MV_FAIL;
}
}
} /* ddr3_tip_centr_skip_min_win_check */
} /* pup */
} /* interface */
} /* pattern */
for (if_id = start_if; if_id <= end_if; if_id++) {
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
continue;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
is_if_fail = 0;
flow_result[if_id] = TEST_SUCCESS;
for (bus_id = 0;
bus_id <= (tm->num_of_bus_per_interface - 1); bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
bus_id <= (octets_per_if_num - 1); bus_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
/* continue only if lock */
if (centralization_state[if_id][bus_id] != 1) {
@ -440,21 +437,21 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
ddr3_tip_bus_read(dev_num, if_id,
ACCESS_TYPE_UNICAST, bus_id,
DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
RESULT_PHY_REG +
effective_cs, &reg);
reg = (reg & (~0x1f <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET))))
(RESULT_PHY_TX_OFFS) :
(RESULT_PHY_RX_OFFS))))
| pup_win_length <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET));
(RESULT_PHY_TX_OFFS) :
(RESULT_PHY_RX_OFFS));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
bus_id, DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
RESULT_PHY_REG +
effective_cs, reg));
/* offset per CS is calculated earlier */
@ -480,9 +477,9 @@ static int ddr3_tip_centralization(u32 dev_num, u32 mode)
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
@ -508,29 +505,30 @@ int ddr3_tip_special_rx(u32 dev_num)
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u32 temp = 0;
int pad_num = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (ddr3_tip_special_rx_run_once_flag != 0)
if ((ddr3_tip_special_rx_run_once_flag & (1 << effective_cs)) == (1 << effective_cs))
return MV_OK;
ddr3_tip_special_rx_run_once_flag = 1;
ddr3_tip_special_rx_run_once_flag |= (1 << effective_cs);
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
cs_enable_reg_val,
MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
if_id, DUAL_DUNIT_CFG_REG,
(1 << 3), (1 << 3)));
}
max_win_size = MAX_WINDOW_SIZE_RX;
direction = OPER_READ;
pattern_id = PATTERN_VREF;
pattern_id = PATTERN_FULL_SSO1;
/* start flow */
ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST,
@ -544,10 +542,10 @@ int ddr3_tip_special_rx(u32 dev_num)
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup_id = 0;
pup_id <= tm->num_of_bus_per_interface; pup_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup_id);
pup_id <= octets_per_if_num; pup_id++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
@ -620,13 +618,12 @@ int ddr3_tip_special_rx(u32 dev_num)
BUS_WIDTH_IN_BITS +
if_id *
BUS_WIDTH_IN_BITS *
tm->
num_of_bus_per_interface];
MAX_BUS_NUM];
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
PBS_RX_PHY_REG(effective_cs, pad_num),
&temp));
temp = (temp + 0xa > 31) ?
(31) : (temp + 0xa);
@ -636,7 +633,7 @@ int ddr3_tip_special_rx(u32 dev_num)
if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
PBS_RX_PHY_REG(effective_cs, pad_num),
temp));
}
DEBUG_CENTRALIZATION_ENGINE(
@ -649,25 +646,29 @@ int ddr3_tip_special_rx(u32 dev_num)
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 4,
DDR_PHY_DATA,
PBS_RX_PHY_REG(effective_cs, 4),
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 4, temp));
PBS_RX_PHY_REG(effective_cs, 4),
temp));
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 5,
DDR_PHY_DATA,
PBS_RX_PHY_REG(effective_cs, 5),
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 5, temp));
PBS_RX_PHY_REG(effective_cs, 5),
temp));
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("Special: PBS:: I/F# %d , Bus# %d fix align to the right\n",
@ -694,15 +695,16 @@ int ddr3_tip_special_rx(u32 dev_num)
int ddr3_tip_print_centralization_result(u32 dev_num)
{
u32 if_id = 0, bus_id = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
printf("Centralization Results\n");
printf("I/F0 Result[0 - success 1-fail 2 - state_2 3 - state_3] ...\n");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0; bus_id < octets_per_if_num;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, bus_id);
printf("%d ,\n", centralization_state[if_id][bus_id]);
}
}

View file

@ -3,16 +3,25 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* Device attributes structures */
enum mv_ddr_dev_attribute ddr_dev_attributes[MAX_DEVICE_NUM][MV_ATTR_LAST];
int ddr_dev_attr_init_done[MAX_DEVICE_NUM] = { 0 };
static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index);
static inline u32 pattern_table_get_sso_word(u8 sso, u8 index);
static inline u32 pattern_table_get_vref_word(u8 index);
static inline u32 pattern_table_get_vref_word16(u8 index);
static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index);
static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index);
static inline u32 pattern_table_get_isi_word(u8 index);
static inline u32 pattern_table_get_isi_word16(u8 index);
/* List of allowed frequency listed in order of enum hws_ddr_freq */
u32 freq_val[DDR_FREQ_LIMIT] = {
u32 freq_val[DDR_FREQ_LAST] = {
0, /*DDR_FREQ_LOW_FREQ */
400, /*DDR_FREQ_400, */
533, /*DDR_FREQ_533, */
@ -209,7 +218,11 @@ u16 rfc_table[] = {
110, /* 1G */
160, /* 2G */
260, /* 4G */
350 /* 8G */
350, /* 8G */
0, /* TODO: placeholder for 16-Mbit dev width */
0, /* TODO: placeholder for 32-Mbit dev width */
0, /* TODO: placeholder for 12-Mbit dev width */
0 /* TODO: placeholder for 24-Mbit dev width */
};
u32 speed_bin_table_t_rc[] = {
@ -233,7 +246,7 @@ u32 speed_bin_table_t_rc[] = {
43285,
44220,
45155,
46900
46090
};
u32 speed_bin_table_t_rcd_t_rp[] = {
@ -255,7 +268,7 @@ u32 speed_bin_table_t_rcd_t_rp[] = {
12840,
13910,
10285,
11022,
11220,
12155,
13090,
};
@ -356,13 +369,13 @@ u32 speed_bin_table(u8 index, enum speed_bin_table_elements element)
result = speed_bin_table_t_rcd_t_rp[index];
break;
case SPEED_BIN_TRAS:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 37500;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 36000;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 35000;
else if (index < 18)
else if (index < SPEED_BIN_DDR_1866M)
result = 34000;
else
result = 33000;
@ -371,49 +384,49 @@ u32 speed_bin_table(u8 index, enum speed_bin_table_elements element)
result = speed_bin_table_t_rc[index];
break;
case SPEED_BIN_TRRD1K:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 10000;
else if (index < 6)
result = 7005;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1066G)
result = 7500;
else if (index < SPEED_BIN_DDR_1600K)
result = 6000;
else
result = 5000;
break;
case SPEED_BIN_TRRD2K:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 10000;
else if (index < 14)
result = 7005;
else if (index < SPEED_BIN_DDR_1600K)
result = 7500;
else
result = 6000;
break;
case SPEED_BIN_TPD:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 7500;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 5625;
else
result = 5000;
break;
case SPEED_BIN_TFAW1K:
if (index < 3)
if (index < SPEED_BIN_DDR_800E)
result = 40000;
else if (index < 6)
else if (index < SPEED_BIN_DDR_1066G)
result = 37500;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 30000;
else if (index < 18)
else if (index < SPEED_BIN_DDR_1866M)
result = 27000;
else
result = 25000;
break;
case SPEED_BIN_TFAW2K:
if (index < 6)
if (index < SPEED_BIN_DDR_1066G)
result = 50000;
else if (index < 10)
else if (index < SPEED_BIN_DDR_1333J)
result = 45000;
else if (index < 14)
else if (index < SPEED_BIN_DDR_1600K)
result = 40000;
else
result = 35000;
@ -465,14 +478,7 @@ static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index)
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte0 |= pattern_killer_pattern_table_map[index * 2][role] << i;
}
for (i = 0; i < 8; i++) {
role = (i == dqs) ?
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte1 |= pattern_killer_pattern_table_map
[index * 2 + 1][role] << i;
byte1 |= pattern_killer_pattern_table_map[index * 2 + 1][role] << i;
}
return byte0 | (byte0 << 8) | (byte1 << 16) | (byte1 << 24);
@ -488,6 +494,79 @@ static inline u32 pattern_table_get_sso_word(u8 sso, u8 index)
return 0xffffffff;
}
static inline u32 pattern_table_get_sso_full_xtalk_word(u8 bit, u8 index)
{
u8 byte = (1 << bit);
if ((index & 1) == 1)
byte = ~byte;
return byte | (byte << 8) | (byte << 16) | (byte << 24);
}
static inline u32 pattern_table_get_sso_xtalk_free_word(u8 bit, u8 index)
{
u8 byte = (1 << bit);
if ((index & 1) == 1)
byte = 0;
return byte | (byte << 8) | (byte << 16) | (byte << 24);
}
static inline u32 pattern_table_get_isi_word(u8 index)
{
u8 i0 = index % 32;
u8 i1 = index % 8;
u32 word;
if (i0 > 15)
word = ((i1 == 5) | (i1 == 7)) ? 0xffffffff : 0x0;
else
word = (i1 == 6) ? 0xffffffff : 0x0;
word = ((i0 % 16) > 7) ? ~word : word;
return word;
}
static inline u32 pattern_table_get_sso_full_xtalk_word16(u8 bit, u8 index)
{
u8 byte = (1 << bit);
if ((index & 1) == 1)
byte = ~byte;
return byte | (byte << 8) | ((~byte) << 16) | ((~byte) << 24);
}
static inline u32 pattern_table_get_sso_xtalk_free_word16(u8 bit, u8 index)
{
u8 byte = (1 << bit);
if ((index & 1) == 0)
return (byte << 16) | (byte << 24);
else
return byte | (byte << 8);
}
static inline u32 pattern_table_get_isi_word16(u8 index)
{
u8 i0 = index % 16;
u8 i1 = index % 4;
u32 word;
if (i0 > 7)
word = (i1 > 1) ? 0x0000ffff : 0x0;
else
word = (i1 == 3) ? 0xffff0000 : 0x0;
word = ((i0 % 8) > 3) ? ~word : word;
return word;
}
static inline u32 pattern_table_get_vref_word(u8 index)
{
if (0 == ((pattern_vref_pattern_table_map[index / 8] >>
@ -527,13 +606,13 @@ static inline u32 pattern_table_get_static_pbs_word(u8 index)
return temp | (temp << 8) | (temp << 16) | (temp << 24);
}
inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
{
u32 pattern;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 0) {
/* 32bit patterns */
/* 32/64-bit patterns */
switch (type) {
case PATTERN_PBS1:
case PATTERN_PBS2:
@ -577,9 +656,9 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
break;
case PATTERN_TEST:
if (index > 1 && index < 6)
pattern = PATTERN_20;
else
pattern = PATTERN_00;
else
pattern = PATTERN_FF;
break;
case PATTERN_FULL_SSO0:
case PATTERN_FULL_SSO1:
@ -591,7 +670,34 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
case PATTERN_VREF:
pattern = pattern_table_get_vref_word(index);
break;
case PATTERN_SSO_FULL_XTALK_DQ0:
case PATTERN_SSO_FULL_XTALK_DQ1:
case PATTERN_SSO_FULL_XTALK_DQ2:
case PATTERN_SSO_FULL_XTALK_DQ3:
case PATTERN_SSO_FULL_XTALK_DQ4:
case PATTERN_SSO_FULL_XTALK_DQ5:
case PATTERN_SSO_FULL_XTALK_DQ6:
case PATTERN_SSO_FULL_XTALK_DQ7:
pattern = pattern_table_get_sso_full_xtalk_word(
(u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index);
break;
case PATTERN_SSO_XTALK_FREE_DQ0:
case PATTERN_SSO_XTALK_FREE_DQ1:
case PATTERN_SSO_XTALK_FREE_DQ2:
case PATTERN_SSO_XTALK_FREE_DQ3:
case PATTERN_SSO_XTALK_FREE_DQ4:
case PATTERN_SSO_XTALK_FREE_DQ5:
case PATTERN_SSO_XTALK_FREE_DQ6:
case PATTERN_SSO_XTALK_FREE_DQ7:
pattern = pattern_table_get_sso_xtalk_free_word(
(u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index);
break;
case PATTERN_ISI_XTALK_FREE:
pattern = pattern_table_get_isi_word(index);
break;
default:
DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("Error: %s: pattern type [%d] not supported\n",
__func__, (int)type));
pattern = 0;
break;
}
@ -630,7 +736,10 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
pattern = PATTERN_01;
break;
case PATTERN_TEST:
pattern = PATTERN_0080;
if ((index == 0) || (index == 3))
pattern = 0x00000000;
else
pattern = 0xFFFFFFFF;
break;
case PATTERN_FULL_SSO0:
pattern = 0x0000ffff;
@ -644,7 +753,34 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
case PATTERN_VREF:
pattern = pattern_table_get_vref_word16(index);
break;
case PATTERN_SSO_FULL_XTALK_DQ0:
case PATTERN_SSO_FULL_XTALK_DQ1:
case PATTERN_SSO_FULL_XTALK_DQ2:
case PATTERN_SSO_FULL_XTALK_DQ3:
case PATTERN_SSO_FULL_XTALK_DQ4:
case PATTERN_SSO_FULL_XTALK_DQ5:
case PATTERN_SSO_FULL_XTALK_DQ6:
case PATTERN_SSO_FULL_XTALK_DQ7:
pattern = pattern_table_get_sso_full_xtalk_word16(
(u8)(type - PATTERN_SSO_FULL_XTALK_DQ0), index);
break;
case PATTERN_SSO_XTALK_FREE_DQ0:
case PATTERN_SSO_XTALK_FREE_DQ1:
case PATTERN_SSO_XTALK_FREE_DQ2:
case PATTERN_SSO_XTALK_FREE_DQ3:
case PATTERN_SSO_XTALK_FREE_DQ4:
case PATTERN_SSO_XTALK_FREE_DQ5:
case PATTERN_SSO_XTALK_FREE_DQ6:
case PATTERN_SSO_XTALK_FREE_DQ7:
pattern = pattern_table_get_sso_xtalk_free_word16(
(u8)(type - PATTERN_SSO_XTALK_FREE_DQ0), index);
break;
case PATTERN_ISI_XTALK_FREE:
pattern = pattern_table_get_isi_word16(index);
break;
default:
DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("Error: %s: pattern type [%d] not supported\n",
__func__, (int)type));
pattern = 0;
break;
}
@ -652,3 +788,30 @@ inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
return pattern;
}
/* Device attribute functions */
void ddr3_tip_dev_attr_init(u32 dev_num)
{
u32 attr_id;
for (attr_id = 0; attr_id < MV_ATTR_LAST; attr_id++)
ddr_dev_attributes[dev_num][attr_id] = 0xFF;
ddr_dev_attr_init_done[dev_num] = 1;
}
u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id)
{
if (ddr_dev_attr_init_done[dev_num] == 0)
ddr3_tip_dev_attr_init(dev_num);
return ddr_dev_attributes[dev_num][attr_id];
}
void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value)
{
if (ddr_dev_attr_init_done[dev_num] == 0)
ddr3_tip_dev_attr_init(dev_num);
ddr_dev_attributes[dev_num][attr_id] = value;
}

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VREF_INITIAL_STEP 3
@ -16,9 +10,8 @@
#define VREF_MAX_INDEX 7
#define MAX_VALUE (1024 - 1)
#define MIN_VALUE (-MAX_VALUE)
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0x1f)
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0xf)
u32 ck_delay = (u32)-1, ck_delay_16 = (u32)-1;
u32 ca_delay;
int ddr3_tip_centr_skip_min_win_check = 0;
u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
@ -48,45 +41,41 @@ static u32 rd_sample_mask[] = {
*/
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
{
u32 cs_num = 0, max_cs = 0, max_read_sample = 0, min_read_sample = 0x1f;
u32 cs_num = 0, max_read_sample = 0, min_read_sample = 0x1f;
u32 data_read[MAX_INTERFACE_NUM] = { 0 };
u32 read_sample[MAX_CS_NUM];
u32 val;
u32 pup_index;
int max_phase = MIN_VALUE, current_phase;
enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
DUNIT_ODT_CONTROL_REG,
DUNIT_ODT_CTRL_REG,
0 << 8, 0x3 << 8));
CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id,
READ_DATA_SAMPLE_DELAY,
RD_DATA_SMPL_DLYS_REG,
data_read, MASK_ALL_BITS));
val = data_read[if_id];
max_cs = hws_ddr3_tip_max_cs_get();
for (cs_num = 0; cs_num < max_cs; cs_num++) {
for (cs_num = 0; cs_num < MAX_CS_NUM; cs_num++) {
read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num);
/* find maximum of read_samples */
if (read_sample[cs_num] >= max_read_sample) {
if (read_sample[cs_num] == max_read_sample) {
/* search for max phase */;
} else {
max_read_sample = read_sample[cs_num];
if (read_sample[cs_num] == max_read_sample)
max_phase = MIN_VALUE;
}
else
max_read_sample = read_sample[cs_num];
for (pup_index = 0;
pup_index < tm->num_of_bus_per_interface;
pup_index < octets_per_if_num;
pup_index++) {
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_index,
DDR_PHY_DATA,
RL_PHY_REG + CS_REG_VALUE(cs_num),
RL_PHY_REG(cs_num),
&val));
current_phase = ((int)val & 0xe0) >> 6;
@ -100,21 +89,19 @@ int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
min_read_sample = read_sample[cs_num];
}
if (min_read_sample <= tm->interface_params[if_id].cas_l) {
min_read_sample = (int)tm->interface_params[if_id].cas_l;
}
min_read_sample = min_read_sample - 1;
max_read_sample = max_read_sample + 4 + (max_phase + 1) / 2 + 1;
if (min_read_sample >= 0xf)
min_read_sample = 0xf;
if (max_read_sample >= 0x1f)
max_read_sample = 0x1f;
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
DDR_ODT_TIMING_LOW_REG,
((min_read_sample - 1) << 12),
0xf << 12));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
DDR_ODT_TIMING_LOW_REG,
(max_read_sample << 16),
0x1f << 16));
@ -123,7 +110,7 @@ int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
{
u32 reg_pup = RESULT_DB_PHY_REG_ADDR;
u32 reg_pup = RESULT_PHY_REG;
u32 reg_data;
u32 cs_num;
int i;
@ -138,7 +125,7 @@ int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
ACCESS_TYPE_UNICAST, i,
DDR_PHY_DATA, reg_pup,
&reg_data));
res[i] = (reg_data >> RESULT_DB_PHY_REG_RX_OFFSET) & 0x1f;
res[i] = (reg_data >> RESULT_PHY_RX_OFFS) & 0x1f;
}
return 0;
@ -176,7 +163,8 @@ int ddr3_tip_vref(u32 dev_num)
u32 copy_start_pattern, copy_end_pattern;
enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
u8 res[4];
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
CHECK_STATUS(ddr3_tip_special_rx(dev_num));
@ -190,9 +178,9 @@ int ddr3_tip_vref(u32 dev_num)
/* init params */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
pup < octets_per_if_num; pup++) {
current_vref[pup][if_id] = 0;
last_vref[pup][if_id] = 0;
lim_vref[pup][if_id] = 0;
@ -228,7 +216,7 @@ int ddr3_tip_vref(u32 dev_num)
}
/* TODO: Set number of active interfaces */
num_pup = tm->num_of_bus_per_interface * MAX_INTERFACE_NUM;
num_pup = octets_per_if_num * MAX_INTERFACE_NUM;
while ((algo_run_flag <= num_pup) & (while_count < 10)) {
while_count++;
@ -239,13 +227,13 @@ int ddr3_tip_vref(u32 dev_num)
/* Read Valid window results only for non converge pups */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (interface_state[if_id] != 4) {
get_valid_win_rx(dev_num, if_id, res);
for (pup = 0;
pup < tm->num_of_bus_per_interface;
pup < octets_per_if_num;
pup++) {
VALIDATE_ACTIVE
VALIDATE_BUS_ACTIVE
(tm->bus_act_mask, pup);
if (pup_st[pup]
[if_id] ==
@ -263,14 +251,14 @@ int ddr3_tip_vref(u32 dev_num)
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_TRACE,
("current_valid_window: IF[ %d ] - ", if_id));
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("%d ",
current_valid_window
@ -281,10 +269,10 @@ int ddr3_tip_vref(u32 dev_num)
/* Compare results and respond as function of state */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n",
if_id, pup,
@ -609,10 +597,10 @@ int ddr3_tip_vref(u32 dev_num)
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
@ -640,7 +628,7 @@ int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
{
u32 if_id = 0;
u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
/*
* ck_delay_table is delaying the of the clock signal only.
@ -653,22 +641,18 @@ int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
*/
/* Calc ADLL Tap */
if ((ck_delay == -1) || (ck_delay_16 == -1)) {
if (ck_delay == PARAM_UNDEFINED)
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_ERROR,
("ERROR: One of ck_delay values not initialized!!!\n"));
}
("ERROR: ck_delay is not initialized!\n"));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* Calc delay ps in ADLL tap */
if (tm->interface_params[if_id].bus_width ==
BUS_WIDTH_16)
ck_num_adll_tap = ck_delay_16 / adll_tap;
else
ck_num_adll_tap = ck_delay / adll_tap;
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* Calc delay ps in ADLL tap */
ck_num_adll_tap = ck_delay / adll_tap;
ca_num_adll_tap = ca_delay / adll_tap;
data = (ck_num_adll_tap & 0x3f) +
((ca_num_adll_tap & 0x3f) << 10);

View file

@ -10,11 +10,10 @@
#include "ddr_topology_def.h"
#include "ddr_training_ip_db.h"
#define DDR3_TIP_VERSION_STRING "DDR3 Training Sequence - Ver TIP-1.29."
#define MAX_CS_NUM 4
#define MAX_TOTAL_BUS_NUM (MAX_INTERFACE_NUM * MAX_BUS_NUM)
#define MAX_DQ_NUM 40
#define TIP_ENG_LOCK 0x02000000
#define TIP_TX_DLL_RANGE_MAX 64
#define GET_MIN(arg1, arg2) ((arg1) < (arg2)) ? (arg1) : (arg2)
#define GET_MAX(arg1, arg2) ((arg1) < (arg2)) ? (arg2) : (arg1)
@ -38,11 +37,15 @@
#define READ_LEVELING_TF_MASK_BIT 0x00010000
#define WRITE_LEVELING_SUPP_TF_MASK_BIT 0x00020000
#define DM_PBS_TX_MASK_BIT 0x00040000
#define RL_DQS_BURST_MASK_BIT 0x00080000
#define CENTRALIZATION_RX_MASK_BIT 0x00100000
#define CENTRALIZATION_TX_MASK_BIT 0x00200000
#define TX_EMPHASIS_MASK_BIT 0x00400000
#define PER_BIT_READ_LEVELING_TF_MASK_BIT 0x00800000
#define VREF_CALIBRATION_MASK_BIT 0x01000000
#define WRITE_LEVELING_LF_MASK_BIT 0x02000000
/* DDR4 Specific Training Mask bits */
enum hws_result {
TEST_FAILED = 0,
@ -79,6 +82,7 @@ enum auto_tune_stage {
TX_EMPHASIS,
LOAD_PATTERN_HIGH,
PER_BIT_READ_LEVELING_TF,
WRITE_LEVELING_LF,
MAX_STAGE_LIMIT
};
@ -110,7 +114,7 @@ struct pattern_info {
/* CL value for each frequency */
struct cl_val_per_freq {
u8 cl_val[DDR_FREQ_LIMIT];
u8 cl_val[DDR_FREQ_LAST];
};
struct cs_element {
@ -167,11 +171,14 @@ int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable);
int hws_ddr3_tip_init_controller(u32 dev_num,
struct init_cntr_param *init_cntr_prm);
int hws_ddr3_tip_load_topology_map(u32 dev_num,
struct hws_topology_map *topology);
struct mv_ddr_topology_map *topology);
int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type);
int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info);
int hws_ddr3_tip_read_training_result(u32 dev_num,
enum hws_result result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]);
int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode);
u8 ddr3_tip_get_buf_min(u8 *buf_ptr);
u8 ddr3_tip_get_buf_max(u8 *buf_ptr);
uint64_t mv_ddr_get_memory_size_per_cs_in_bits(void);
uint64_t mv_ddr_get_total_memory_size_in_bits(void);
#endif /* _DDR3_TRAINING_IP_H_ */

View file

@ -45,9 +45,13 @@ int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 cs_num);
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
u32 mode);
int ddr3_tip_run_leveling_sweep_test(int dev_num, u32 repeat_num,
u32 direction, u32 mode);
int ddr3_tip_print_regs(u32 dev_num);
int ddr3_tip_reg_dump(u32 dev_num);
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, u32 read_type,
u32 burst_length);
int mv_ddr_dm_to_dq_diff_get(u8 adll_byte_high, u8 adll_byte_low, u8 *vw_vector,
int *delta_h_adll, int *delta_l_adll);
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector);
#endif /* _DDR3_TRAINING_IP_BIST_H_ */

View file

@ -9,7 +9,10 @@
enum hws_pattern {
PATTERN_PBS1,
PATTERN_PBS2,
PATTERN_PBS3,
PATTERN_TEST,
PATTERN_RL,
PATTERN_RL2,
PATTERN_STATIC_PBS,
PATTERN_KILLER_DQ0,
PATTERN_KILLER_DQ1,
@ -19,15 +22,73 @@ enum hws_pattern {
PATTERN_KILLER_DQ5,
PATTERN_KILLER_DQ6,
PATTERN_KILLER_DQ7,
PATTERN_PBS3,
PATTERN_RL2,
PATTERN_TEST,
PATTERN_VREF,
PATTERN_FULL_SSO0,
PATTERN_FULL_SSO1,
PATTERN_FULL_SSO2,
PATTERN_FULL_SSO3,
PATTERN_VREF,
PATTERN_LIMIT
PATTERN_LAST,
PATTERN_SSO_FULL_XTALK_DQ0,
PATTERN_SSO_FULL_XTALK_DQ1,
PATTERN_SSO_FULL_XTALK_DQ2,
PATTERN_SSO_FULL_XTALK_DQ3,
PATTERN_SSO_FULL_XTALK_DQ4,
PATTERN_SSO_FULL_XTALK_DQ5,
PATTERN_SSO_FULL_XTALK_DQ6,
PATTERN_SSO_FULL_XTALK_DQ7,
PATTERN_SSO_XTALK_FREE_DQ0,
PATTERN_SSO_XTALK_FREE_DQ1,
PATTERN_SSO_XTALK_FREE_DQ2,
PATTERN_SSO_XTALK_FREE_DQ3,
PATTERN_SSO_XTALK_FREE_DQ4,
PATTERN_SSO_XTALK_FREE_DQ5,
PATTERN_SSO_XTALK_FREE_DQ6,
PATTERN_SSO_XTALK_FREE_DQ7,
PATTERN_ISI_XTALK_FREE
};
enum mv_wl_supp_mode {
WRITE_LEVELING_SUPP_REG_MODE,
WRITE_LEVELING_SUPP_ECC_MODE_DATA_PUPS,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP4,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP3,
WRITE_LEVELING_SUPP_ECC_MODE_ECC_PUP8
};
enum mv_ddr_dev_attribute {
MV_ATTR_TIP_REV,
MV_ATTR_PHY_EDGE,
MV_ATTR_OCTET_PER_INTERFACE,
MV_ATTR_PLL_BEFORE_INIT,
MV_ATTR_TUNE_MASK,
MV_ATTR_INIT_FREQ,
MV_ATTR_MID_FREQ,
MV_ATTR_DFS_LOW_FREQ,
MV_ATTR_DFS_LOW_PHY,
MV_ATTR_DELAY_ENABLE,
MV_ATTR_CK_DELAY,
MV_ATTR_CA_DELAY,
MV_ATTR_INTERLEAVE_WA,
MV_ATTR_LAST
};
enum mv_ddr_tip_revison {
MV_TIP_REV_NA,
MV_TIP_REV_1, /* NP5 */
MV_TIP_REV_2, /* BC2 */
MV_TIP_REV_3, /* AC3 */
MV_TIP_REV_4, /* A-380/A-390 */
MV_TIP_REV_LAST
};
enum mv_ddr_phy_edge {
MV_DDR_PHY_EDGE_POSITIVE,
MV_DDR_PHY_EDGE_NEGATIVE
};
/* Device attribute functions */
void ddr3_tip_dev_attr_init(u32 dev_num);
u32 ddr3_tip_dev_attr_get(u32 dev_num, enum mv_ddr_dev_attribute attr_id);
void ddr3_tip_dev_attr_set(u32 dev_num, enum mv_ddr_dev_attribute attr_id, u32 value);
#endif /* _DDR3_TRAINING_IP_DB_H_ */

View file

@ -6,8 +6,6 @@
#ifndef _DDR3_TRAINING_IP_DEF_H
#define _DDR3_TRAINING_IP_DEF_H
#include "silicon_if.h"
#define PATTERN_55 0x55555555
#define PATTERN_AA 0xaaaaaaaa
#define PATTERN_80 0x80808080
@ -35,6 +33,7 @@
#define ADLL_RX_LENGTH 32
#define PARAM_NOT_CARE 0
#define PARAM_UNDEFINED 0xffffffff
#define READ_LEVELING_PHY_OFFSET 2
#define WRITE_LEVELING_PHY_OFFSET 0
@ -99,6 +98,8 @@
#define _1G 0x40000000
#define _2G 0x80000000
#define _4G 0x100000000
#define _8G 0x200000000
#define ADDR_SIZE_512MB 0x04000000
#define ADDR_SIZE_1GB 0x08000000
@ -163,10 +164,33 @@ enum hws_wl_supp {
ALIGN_SHIFT
};
enum mv_ddr_tip_bit_state {
BIT_LOW_UI,
BIT_HIGH_UI,
BIT_SPLIT_IN,
BIT_SPLIT_OUT,
BIT_STATE_LAST
};
enum mv_ddr_tip_byte_state{
BYTE_NOT_DEFINED,
BYTE_HOMOGENEOUS_LOW = 0x1,
BYTE_HOMOGENEOUS_HIGH = 0x2,
BYTE_HOMOGENEOUS_SPLIT_IN = 0x4,
BYTE_HOMOGENEOUS_SPLIT_OUT = 0x8,
BYTE_SPLIT_OUT_MIX = 0x10,
BYTE_STATE_LAST
};
struct reg_data {
u32 reg_addr;
u32 reg_data;
u32 reg_mask;
unsigned int reg_addr;
unsigned int reg_data;
unsigned int reg_mask;
};
enum dm_direction {
DM_DIR_INVERSE,
DM_DIR_DIRECT
};
#endif /* _DDR3_TRAINING_IP_DEF_H */

File diff suppressed because it is too large Load diff

View file

@ -37,8 +37,6 @@ int ddr3_tip_training_ip_test(u32 dev_num, enum hws_training_result result_type,
u32 num_of_iterations, u32 start_pattern,
u32 end_pattern);
int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern pattern);
int ddr3_tip_load_pattern_to_mem_by_cpu(u32 dev_num, enum hws_pattern pattern,
u32 offset);
int ddr3_tip_load_all_pattern_to_mem(u32 dev_num);
int ddr3_tip_read_training_result(u32 dev_num, u32 if_id,
enum hws_access_type pup_access_type,
@ -75,10 +73,13 @@ int ddr3_tip_ip_training_wrapper(u32 dev_num, enum hws_access_type access_type,
enum hws_edge_compare edge_comp,
enum hws_ddr_cs train_cs_type, u32 cs_num,
enum hws_training_ip_stat *train_status);
int is_odpg_access_done(u32 dev_num, u32 if_id);
u8 mv_ddr_tip_sub_phy_byte_status_get(u32 if_id, u32 subphy_id);
void mv_ddr_tip_sub_phy_byte_status_set(u32 if_id, u32 subphy_id, u8 byte_status_data);
void ddr3_tip_print_bist_res(void);
struct pattern_info *ddr3_tip_get_pattern_table(void);
u16 *ddr3_tip_get_mask_results_dq_reg(void);
u16 *ddr3_tip_get_mask_results_pup_reg_map(void);
int mv_ddr_load_dm_pattern_to_odpg(enum hws_access_type access_type, enum hws_pattern pattern,
enum dm_direction dm_dir);
int mv_ddr_pattern_start_addr_set(struct pattern_info *pattern_tbl, enum hws_pattern pattern, u32 addr);
#endif /* _DDR3_TRAINING_IP_ENGINE_H_ */

View file

@ -8,45 +8,73 @@
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_pbs.h"
#define MRS0_CMD 0x3
#define MRS1_CMD 0x4
#define MRS2_CMD 0x8
#define MRS3_CMD 0x9
/*
* Definitions of INTERFACE registers
*/
#define READ_BUFFER_SELECT 0x14a4
/*
* Definitions of PHY registers
*/
#include "mv_ddr_regs.h"
#define KILLER_PATTERN_LENGTH 32
#define EXT_ACCESS_BURST_LENGTH 8
#define IS_ACTIVE(if_mask , if_id) \
((if_mask) & (1 << (if_id)))
#define IS_ACTIVE(mask, id) \
((mask) & (1 << (id)))
#define VALIDATE_ACTIVE(mask, id) \
{ \
if (IS_ACTIVE(mask, id) == 0) \
continue; \
}
#define GET_TOPOLOGY_NUM_OF_BUSES() \
(ddr3_get_topology_map()->num_of_bus_per_interface)
#define IS_IF_ACTIVE(if_mask, if_id) \
((if_mask) & (1 << (if_id)))
#define VALIDATE_IF_ACTIVE(mask, id) \
{ \
if (IS_IF_ACTIVE(mask, id) == 0) \
continue; \
}
#define IS_BUS_ACTIVE(if_mask , if_id) \
(((if_mask) >> (if_id)) & 1)
#define VALIDATE_BUS_ACTIVE(mask, id) \
{ \
if (IS_BUS_ACTIVE(mask, id) == 0) \
continue; \
}
#define DDR3_IS_ECC_PUP3_MODE(if_mask) \
(((if_mask) == 0xb) ? 1 : 0)
#define DDR3_IS_ECC_PUP4_MODE(if_mask) \
(((((if_mask) & 0x10) == 0)) ? 0 : 1)
#define DDR3_IS_16BIT_DRAM_MODE(mask) \
(((((mask) & 0x4) == 0)) ? 1 : 0)
(((if_mask) == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0)
#define DDR3_IS_ECC_PUP4_MODE(if_mask) \
((if_mask == BUS_MASK_32BIT_ECC || if_mask == BUS_MASK_16BIT_ECC) ? 1 : 0)
#define DDR3_IS_16BIT_DRAM_MODE(mask) \
((mask == BUS_MASK_16BIT || mask == BUS_MASK_16BIT_ECC || mask == BUS_MASK_16BIT_ECC_PUP3) ? 1 : 0)
#define DDR3_IS_ECC_PUP8_MODE(if_mask) \
((if_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK || if_mask == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0)
#define MV_DDR_IS_64BIT_DRAM_MODE(mask) \
((((mask) & MV_DDR_64BIT_BUS_MASK) == MV_DDR_64BIT_BUS_MASK) || \
(((mask) & MV_DDR_64BIT_ECC_PUP8_BUS_MASK) == MV_DDR_64BIT_ECC_PUP8_BUS_MASK) ? 1 : 0)
#define MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, octets_per_if_num/* FIXME: get from ATF */) \
((octets_per_if_num == 9/* FIXME: get from ATF */) && \
((mask == BUS_MASK_32BIT) || \
(mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)) ? 1 : 0)
#define MV_DDR_IS_HALF_BUS_DRAM_MODE(mask, octets_per_if_num/* FIXME: get from ATF */) \
(MV_DDR_IS_32BIT_IN_64BIT_DRAM_MODE(mask, octets_per_if_num) || DDR3_IS_16BIT_DRAM_MODE(mask))
#define ECC_READ_BUS_0 0
#define ECC_PHY_ACCESS_3 3
#define ECC_PHY_ACCESS_4 4
#define ECC_PHY_ACCESS_8 8
#define MEGA 1000000
#define BUS_WIDTH_IN_BITS 8
#define MAX_POLLING_ITERATIONS 1000000
#define NUM_OF_CS 4
#define ADLL_LENGTH 32
#define GP_RSVD0_REG 0x182e0
/*
* DFX address Space
@ -66,205 +94,20 @@
/* nsec */
#define TREFI_LOW 7800
#define TREFI_HIGH 3900
#define TR2R_VALUE_REG 0x180
#define TR2R_MASK_REG 0x180
#define TRFC_MASK_REG 0x7f
#define TR2W_MASK_REG 0x600
#define TW2W_HIGH_VALUE_REG 0x1800
#define TW2W_HIGH_MASK_REG 0xf800
#define TRFC_HIGH_VALUE_REG 0x20000
#define TRFC_HIGH_MASK_REG 0x70000
#define TR2R_HIGH_VALUE_REG 0x0
#define TR2R_HIGH_MASK_REG 0x380000
#define TMOD_VALUE_REG 0x16000000
#define TMOD_MASK_REG 0x1e000000
#define T_VALUE_REG 0x40000000
#define T_MASK_REG 0xc0000000
#define AUTO_ZQC_TIMING 15384
#define WRITE_XBAR_PORT1 0xc03f8077
#define READ_XBAR_PORT1 0xc03f8073
#define DISABLE_DDR_TUNING_DATA 0x02294285
#define ENABLE_DDR_TUNING_DATA 0x12294285
#define ODPG_TRAINING_STATUS_REG 0x18488
#define ODPG_TRAINING_TRIGGER_REG 0x1030
#define ODPG_STATUS_DONE_REG 0x16fc
#define ODPG_ENABLE_REG 0x186d4
#define ODPG_ENABLE_OFFS 0
#define ODPG_DISABLE_OFFS 8
enum mr_number {
MR_CMD0,
MR_CMD1,
MR_CMD2,
MR_CMD3,
MR_LAST
};
#define ODPG_TRAINING_CONTROL_REG 0x1034
#define ODPG_OBJ1_OPCODE_REG 0x103c
#define ODPG_OBJ1_ITER_CNT_REG 0x10b4
#define CALIB_OBJ_PRFA_REG 0x10c4
#define ODPG_WRITE_LEVELING_DONE_CNTR_REG 0x10f8
#define ODPG_WRITE_READ_MODE_ENABLE_REG 0x10fc
#define TRAINING_OPCODE_1_REG 0x10b4
#define SDRAM_CONFIGURATION_REG 0x1400
#define DDR_CONTROL_LOW_REG 0x1404
#define SDRAM_TIMING_LOW_REG 0x1408
#define SDRAM_TIMING_HIGH_REG 0x140c
#define SDRAM_ACCESS_CONTROL_REG 0x1410
#define SDRAM_OPEN_PAGE_CONTROL_REG 0x1414
#define SDRAM_OPERATION_REG 0x1418
#define DUNIT_CONTROL_HIGH_REG 0x1424
#define ODT_TIMING_LOW 0x1428
#define DDR_TIMING_REG 0x142c
#define ODT_TIMING_HI_REG 0x147c
#define SDRAM_INIT_CONTROL_REG 0x1480
#define SDRAM_ODT_CONTROL_HIGH_REG 0x1498
#define DUNIT_ODT_CONTROL_REG 0x149c
#define READ_BUFFER_SELECT_REG 0x14a4
#define DUNIT_MMASK_REG 0x14b0
#define CALIB_MACHINE_CTRL_REG 0x14cc
#define DRAM_DLL_TIMING_REG 0x14e0
#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4
#define DRAM_ZQ_TIMING_REG 0x14e8
#define DFS_REG 0x1528
#define READ_DATA_SAMPLE_DELAY 0x1538
#define READ_DATA_READY_DELAY 0x153c
#define TRAINING_REG 0x15b0
#define TRAINING_SW_1_REG 0x15b4
#define TRAINING_SW_2_REG 0x15b8
#define TRAINING_PATTERN_BASE_ADDRESS_REG 0x15bc
#define TRAINING_DBG_1_REG 0x15c0
#define TRAINING_DBG_2_REG 0x15c4
#define TRAINING_DBG_3_REG 0x15c8
#define RANK_CTRL_REG 0x15e0
#define TIMING_REG 0x15e4
#define DRAM_PHY_CONFIGURATION 0x15ec
#define MR0_REG 0x15d0
#define MR1_REG 0x15d4
#define MR2_REG 0x15d8
#define MR3_REG 0x15dc
#define TIMING_REG 0x15e4
#define ODPG_CTRL_CONTROL_REG 0x1600
#define ODPG_DATA_CONTROL_REG 0x1630
#define ODPG_PATTERN_ADDR_OFFSET_REG 0x1638
#define ODPG_DATA_BUF_SIZE_REG 0x163c
#define PHY_LOCK_STATUS_REG 0x1674
#define PHY_REG_FILE_ACCESS 0x16a0
#define TRAINING_WRITE_LEVELING_REG 0x16ac
#define ODPG_PATTERN_ADDR_REG 0x16b0
#define ODPG_PATTERN_DATA_HI_REG 0x16b4
#define ODPG_PATTERN_DATA_LOW_REG 0x16b8
#define ODPG_BIST_LAST_FAIL_ADDR_REG 0x16bc
#define ODPG_BIST_DATA_ERROR_COUNTER_REG 0x16c0
#define ODPG_BIST_FAILED_DATA_HI_REG 0x16c4
#define ODPG_BIST_FAILED_DATA_LOW_REG 0x16c8
#define ODPG_WRITE_DATA_ERROR_REG 0x16cc
#define CS_ENABLE_REG 0x16d8
#define WR_LEVELING_DQS_PATTERN_REG 0x16dc
#define ODPG_BIST_DONE 0x186d4
#define ODPG_BIST_DONE_BIT_OFFS 0
#define ODPG_BIST_DONE_BIT_VALUE 0
#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830
#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834
#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838
#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c
#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0
#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4
#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8
#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc
#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0
#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4
#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8
#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc
#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0
#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4
#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8
#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc
#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930
#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934
#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938
#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c
#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0
#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4
#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8
#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc
#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0
#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4
#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8
#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc
#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0
#define RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4
#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8
#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc
#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30
#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34
#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38
#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c
#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0
#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4
#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8
#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc
#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0
#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4
#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8
#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc
#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0
#define WL_PHY_REG 0x0
#define WRITE_CENTRALIZATION_PHY_REG 0x1
#define RL_PHY_REG 0x2
#define READ_CENTRALIZATION_PHY_REG 0x3
#define PBS_RX_PHY_REG 0x50
#define PBS_TX_PHY_REG 0x10
#define PHY_CONTROL_PHY_REG 0x90
#define BW_PHY_REG 0x92
#define RATE_PHY_REG 0x94
#define CMOS_CONFIG_PHY_REG 0xa2
#define PAD_ZRI_CALIB_PHY_REG 0xa4
#define PAD_ODT_CALIB_PHY_REG 0xa6
#define PAD_CONFIG_PHY_REG 0xa8
#define PAD_PRE_DISABLE_PHY_REG 0xa9
#define TEST_ADLL_REG 0xbf
#define CSN_IOB_VREF_REG(cs) (0xdb + (cs * 12))
#define CSN_IO_BASE_VREF_REG(cs) (0xd0 + (cs * 12))
#define RESULT_DB_PHY_REG_ADDR 0xc0
#define RESULT_DB_PHY_REG_RX_OFFSET 5
#define RESULT_DB_PHY_REG_TX_OFFSET 0
/* TBD - for NP5 use only CS 0 */
#define PHY_WRITE_DELAY(cs) WL_PHY_REG
/*( ( _cs_ == 0 ) ? 0x0 : 0x4 )*/
/* TBD - for NP5 use only CS 0 */
#define PHY_READ_DELAY(cs) RL_PHY_REG
#define DDR0_ADDR_1 0xf8258
#define DDR0_ADDR_2 0xf8254
#define DDR1_ADDR_1 0xf8270
#define DDR1_ADDR_2 0xf8270
#define DDR2_ADDR_1 0xf825c
#define DDR2_ADDR_2 0xf825c
#define DDR3_ADDR_1 0xf8264
#define DDR3_ADDR_2 0xf8260
#define DDR4_ADDR_1 0xf8274
#define DDR4_ADDR_2 0xf8274
#define GENERAL_PURPOSE_RESERVED0_REG 0x182e0
#define GET_BLOCK_ID_MAX_FREQ(dev_num, block_id) 800000
#define CS0_RD_LVL_REF_DLY_OFFS 0
#define CS0_RD_LVL_REF_DLY_LEN 0
#define CS0_RD_LVL_PH_SEL_OFFS 0
#define CS0_RD_LVL_PH_SEL_LEN 0
#define CS_REGISTER_ADDR_OFFSET 4
#define CALIBRATED_OBJECTS_REG_ADDR_OFFSET 0x10
#define MAX_POLLING_ITERATIONS 100000
#define PHASE_REG_OFFSET 32
#define NUM_BYTES_IN_BURST 31
#define NUM_OF_CS 4
#define CS_REG_VALUE(cs_num) (cs_mask_reg[cs_num])
#define ADLL_LENGTH 32
struct mv_ddr_mr_data {
u32 cmd;
u32 reg_addr;
};
struct write_supp_result {
enum hws_wl_supp stage;
@ -314,10 +157,11 @@ int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *addr);
int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 ui_freq);
int mv_ddr_rl_dqs_burst(u32 dev_num, u32 if_id, u32 freq);
int ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num);
int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 ui_freq);
int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling(u32 dev_num, int phase_remove);
int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num);
int ddr3_tip_static_init_controller(u32 dev_num);
int ddr3_tip_configure_phy(u32 dev_num);
@ -331,18 +175,21 @@ int ddr3_tip_configure_odpg(u32 dev_num, enum hws_access_type access_type,
u32 delay_between_burst, u32 rd_mode, u32 cs_num,
u32 addr_stress_jump, u32 single_pattern);
int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value);
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, u32 data,
u32 mask);
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, enum mr_number mr_num, u32 data, u32 mask);
int ddr3_tip_write_cs_result(u32 dev_num, u32 offset);
int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, u32 *if_id);
int ddr3_tip_reset_fifo_ptr(u32 dev_num);
int read_pup_value(int pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int ddr3_tip_read_pup_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr);
int ddr3_tip_read_adll_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 reg_addr, u32 mask);
int ddr3_tip_write_adll_value(u32 dev_num,
u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
u32 reg_addr);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
struct page_element *mv_ddr_page_tbl_get(void);
#endif /* _DDR3_TRAINING_IP_FLOW_H_ */

View file

@ -62,7 +62,7 @@ typedef int (*HWS_TRAINING_IP_PBS_TX_FUNC_PTR)(u32 dev_num);
typedef int (*HWS_TRAINING_IP_SELECT_CONTROLLER_FUNC_PTR)(
u32 dev_num, int enable);
typedef int (*HWS_TRAINING_IP_TOPOLOGY_MAP_LOAD_FUNC_PTR)(
u32 dev_num, struct hws_topology_map *topology_map);
u32 dev_num, struct mv_ddr_topology_map *tm);
typedef int (*HWS_TRAINING_IP_STATIC_CONFIG_FUNC_PTR)(
u32 dev_num, enum hws_ddr_freq frequency,
enum hws_static_config_type static_config_type, u32 if_id);
@ -83,16 +83,27 @@ typedef int (*HWS_TRAINING_IP_LOAD_TOPOLOGY)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_READ_LEVELING)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_WRITE_LEVELING)(u32 dev_num, u32 config_num);
typedef u32 (*HWS_TRAINING_IP_GET_TEMP)(u8 dev_num);
typedef u8 (*HWS_TRAINING_IP_GET_RATIO)(u32 freq);
struct hws_tip_config_func_db {
HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR tip_dunit_mux_select_func;
HWS_TIP_DUNIT_REG_READ_FUNC_PTR tip_dunit_read_func;
HWS_TIP_DUNIT_REG_WRITE_FUNC_PTR tip_dunit_write_func;
void (*mv_ddr_dunit_read)(u32 addr, u32 mask, u32 *data);
void (*mv_ddr_dunit_write)(u32 addr, u32 mask, u32 data);
HWS_TIP_GET_FREQ_CONFIG_INFO tip_get_freq_config_info_func;
HWS_TIP_GET_DEVICE_INFO tip_get_device_info_func;
HWS_SET_FREQ_DIVIDER_FUNC_PTR tip_set_freq_divider_func;
HWS_GET_CS_CONFIG_FUNC_PTR tip_get_cs_config_info;
HWS_TRAINING_IP_GET_TEMP tip_get_temperature;
HWS_TRAINING_IP_GET_RATIO tip_get_clock_ratio;
HWS_TRAINING_IP_EXTERNAL_READ_PTR tip_external_read;
HWS_TRAINING_IP_EXTERNAL_WRITE_PTR tip_external_write;
int (*mv_ddr_phy_read)(enum hws_access_type phy_access,
u32 phy, enum hws_ddr_phy phy_type,
u32 reg_addr, u32 *data);
int (*mv_ddr_phy_write)(enum hws_access_type phy_access,
u32 phy, enum hws_ddr_phy phy_type,
u32 reg_addr, u32 data,
enum hws_operation op_type);
};
int ddr3_tip_init_config_func(u32 dev_num,

View file

@ -1,30 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR3_TRAINING_IP_STATIC_H_
#define _DDR3_TRAINING_IP_STATIC_H_
#include "ddr3_training_ip_def.h"
#include "ddr3_training_ip.h"
struct trip_delay_element {
u32 dqs_delay; /* DQS delay (m_sec) */
u32 ck_delay; /* CK Delay (m_sec) */
};
struct hws_tip_static_config_info {
u32 silicon_delay;
struct trip_delay_element *package_trace_arr;
struct trip_delay_element *board_trace_arr;
};
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq);
int ddr3_tip_init_static_config_db(
u32 dev_num, struct hws_tip_static_config_info *static_config_info);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
int ddr3_tip_static_phy_init_controller(u32 dev_num);
#endif /* _DDR3_TRAINING_IP_STATIC_H_ */

File diff suppressed because it is too large Load diff

View file

@ -11,6 +11,6 @@
int ddr3_tip_print_wl_supp_result(u32 dev_num);
int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs,
u32 *cs_mask);
u32 hws_ddr3_tip_max_cs_get(void);
u32 ddr3_tip_max_cs_get(u32 dev_num);
#endif /* _DDR3_TRAINING_LEVELING_H_ */

View file

@ -3,12 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define TYPICAL_PBS_VALUE 12
@ -23,7 +17,7 @@ u8 max_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 max_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM][MAX_CS_NUM];
u8 adll_shift_lock[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 adll_shift_val[MAX_INTERFACE_NUM][MAX_BUS_NUM];
enum hws_pattern pbs_pattern = PATTERN_VREF;
@ -49,34 +43,33 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
int init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
enum hws_edge_compare search_edge = EDGE_FP;
u32 pup = 0, bit = 0, if_id = 0, all_lock = 0, cs_num = 0;
int reg_addr = 0;
u32 reg_addr = 0;
u32 validation_val = 0;
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
u8 temp = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
/* save current cs enable reg val */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
DUAL_DUNIT_CFG_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
DUAL_DUNIT_CFG_REG, (1 << 3), (1 << 3)));
}
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET)) :
(WRITE_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET));
read_adll_value(nominal_adll, reg_addr, MASK_ALL_BITS);
CRX_PHY_REG(effective_cs) :
CTX_PHY_REG(effective_cs);
ddr3_tip_read_adll_value(dev_num, nominal_adll, reg_addr, MASK_ALL_BITS);
/* stage 1 shift ADLL */
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
@ -87,10 +80,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
pbs_pattern, search_edge, CS_SINGLE, cs_num,
train_status);
validation_val = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
min_adll_per_pup[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
pup_state[if_id][pup] = 0x3;
@ -100,8 +93,8 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* EBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_MULTICAST,
@ -111,7 +104,7 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
res0, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup,
@ -176,10 +169,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* EEBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (pup_state[if_id][pup] != 4)
continue;
@ -335,10 +328,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
}
/* Print Stage result */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, ADLL Shift for EBA: pup[%d] Lock status = %d Lock Val = %d,%d\n",
if_id, pup,
@ -350,10 +343,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("Update ADLL Shift of all pups:\n"));
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1)
continue;
/* if pup not locked continue to next pup */
@ -373,10 +366,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* PBS EEBA&EBA */
/* Start the Per Bit Skew search */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
max_pbs_per_pup[if_id][pup] = 0x0;
min_pbs_per_pup[if_id][pup] = 0x1f;
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
@ -400,10 +393,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
iterations, pbs_pattern, search_edge,
CS_SINGLE, cs_num, train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1) {
/* if pup not lock continue to next pup */
continue;
@ -461,10 +454,10 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* Check all Pup lock */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
all_lock = all_lock * adll_shift_lock[if_id][pup];
}
}
@ -478,11 +471,11 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
search_dir = (pbs_mode == PBS_RX_MODE) ? HWS_LOW2HIGH :
HWS_HIGH2LOW;
init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] == 1) {
/*if pup lock continue to next pup */
continue;
@ -627,11 +620,11 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
search_edge, CS_SINGLE, cs_num,
train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
@ -696,7 +689,7 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* Check all Pup state */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
for (pup = 0; pup < octets_per_if_num; pup++) {
/*
* DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
* ("pup_state[%d][%d] = %d\n",if_id,pup,pup_state
@ -707,12 +700,12 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* END OF SBA */
/* Norm */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
/* if pup not lock continue to next pup */
if (adll_shift_lock[if_id][pup] != 1) {
DEBUG_PBS_ENGINE(
@ -753,9 +746,9 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
/* DQ PBS register update with the final result */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
@ -771,28 +764,32 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
pad_num = dq_map_table[
bit + pup * BUS_WIDTH_IN_BITS +
if_id * BUS_WIDTH_IN_BITS *
tm->num_of_bus_per_interface];
MAX_BUS_NUM];
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("result_mat: %d ",
result_mat[if_id][pup]
[bit]));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
PBS_RX_PHY_REG(effective_cs, 0) :
PBS_TX_PHY_REG(effective_cs, 0);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr + pad_num,
result_mat[if_id][pup][bit]));
}
pbsdelay_per_pup[pbs_mode][if_id][pup] =
(max_pbs_per_pup[if_id][pup] ==
min_pbs_per_pup[if_id][pup]) ?
TYPICAL_PBS_VALUE :
((max_adll_per_pup[if_id][pup] -
min_adll_per_pup[if_id][pup]) * adll_tap /
if (max_pbs_per_pup[if_id][pup] == min_pbs_per_pup[if_id][pup]) {
temp = TYPICAL_PBS_VALUE;
} else {
temp = ((max_adll_per_pup[if_id][pup] -
min_adll_per_pup[if_id][pup]) *
adll_tap /
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]));
}
pbsdelay_per_pup[pbs_mode]
[if_id][pup][effective_cs] = temp;
/* RX results ready, write RX also */
if (pbs_mode == PBS_TX_MODE) {
@ -842,18 +839,18 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
(", PBS tap=%d [psec] ==> skew observed = %d\n",
pbsdelay_per_pup[pbs_mode][if_id][pup],
temp,
((max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) *
pbsdelay_per_pup[pbs_mode][if_id][pup])));
temp)));
}
}
/* Write back to the phy the default values */
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG + effective_cs * 4) :
(WRITE_CENTRALIZATION_PHY_REG + effective_cs * 4);
write_adll_value(nominal_adll, reg_addr);
CRX_PHY_REG(effective_cs) :
CTX_PHY_REG(effective_cs);
ddr3_tip_write_adll_value(dev_num, nominal_adll, reg_addr);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
reg_addr = (pbs_mode == PBS_RX_MODE) ?
@ -865,25 +862,30 @@ int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
0));
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val[if_id],
DUAL_DUNIT_CFG_REG, cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
/* exit test mode */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ODPG_WRITE_READ_MODE_ENABLE_REG, 0xffff, MASK_ALL_BITS));
ODPG_WR_RD_MODE_ENA_REG, 0xffff, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
/*
* meaning that there is no VW exist at all (No lock at
* the EBA ADLL shift at EBS)
* no valid window found
* (no lock at EBA ADLL shift at EBS)
*/
if (pup_state[if_id][pup] == 1)
return MV_FAIL;
}
}
return MV_OK;
}
@ -912,14 +914,14 @@ int ddr3_tip_pbs_tx(u32 uidev_num)
return ddr3_tip_pbs(uidev_num, PBS_TX_MODE);
}
#ifndef EXCLUDE_SWITCH_DEBUG
#ifdef DDR_VIEWER_TOOL
/*
* Print PBS Result
*/
int ddr3_tip_print_all_pbs_result(u32 dev_num)
{
u32 curr_cs;
u32 max_cs = hws_ddr3_tip_max_cs_get();
u32 max_cs = ddr3_tip_max_cs_get(dev_num);
for (curr_cs = 0; curr_cs < max_cs; curr_cs++) {
ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_RX_MODE);
@ -936,21 +938,33 @@ int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
{
u32 data_value = 0, bit = 0, if_id = 0, pup = 0;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + cs_num * 0x10) :
(PBS_TX_PHY_REG + cs_num * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
PBS_RX_PHY_REG(cs_num, 0) :
PBS_TX_PHY_REG(cs_num , 0);
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
printf("%s,CS%d,PBS,ADLLRATIO,,,",
(pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx", cs_num);
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < octets_per_if_num; pup++) {
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
printf("%d,",
pbsdelay_per_pup[pbs_mode][if_id][pup][cs_num]);
}
}
printf("CS%d, %s ,PBS\n", cs_num,
(pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
printf("%s, DQ", (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
printf("%d ,PBS,,, ", bit);
for (pup = 0; pup <= tm->num_of_bus_per_interface;
for (pup = 0; pup <= octets_per_if_num;
pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
VALIDATE_BUS_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
@ -965,7 +979,7 @@ int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
return MV_OK;
}
#endif
#endif /* DDR_VIEWER_TOOL */
/*
* Fixup PBS Result
@ -974,13 +988,14 @@ int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode)
{
u32 if_id, pup, bit;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
PBS_RX_PHY_REG(effective_cs, 0) :
PBS_TX_PHY_REG(effective_cs, 0);
u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup <= tm->num_of_bus_per_interface; pup++) {
VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup <= octets_per_if_num; pup++) {
for (bit = 0; bit <= BUS_WIDTH_IN_BITS + 3; bit++) {
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,

View file

@ -1,100 +0,0 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* Design Guidelines parameters */
u32 g_zpri_data = 123; /* controller data - P drive strength */
u32 g_znri_data = 123; /* controller data - N drive strength */
u32 g_zpri_ctrl = 74; /* controller C/A - P drive strength */
u32 g_znri_ctrl = 74; /* controller C/A - N drive strength */
u32 g_zpodt_data = 45; /* controller data - P ODT */
u32 g_znodt_data = 45; /* controller data - N ODT */
u32 g_zpodt_ctrl = 45; /* controller data - P ODT */
u32 g_znodt_ctrl = 45; /* controller data - N ODT */
u32 g_odt_config_2cs = 0x120012;
u32 g_odt_config_1cs = 0x10000;
u32 g_rtt_nom = 0x44;
u32 g_dic = 0x2;
/*
* Configure phy (called by static init controller) for static flow
*/
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	struct hws_topology_map *tm = ddr3_get_topology_map();

	/* data pads: P/N drive strength, 7 bits each (g_zpri_data/g_znri_data) */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CALIB_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	/* control (C/A) pads: P/N drive strength */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CALIB_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	/* data pads: P/N ODT, 6 bits each (g_zpodt_data/g_znodt_data) */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CALIB_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	/* control pads: P/N ODT */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CALIB_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
	/* write 0 to pre-emphasis disable and CMOS config registers */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	/* per-interface, per-phy pad configuration */
	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < tm->num_of_bus_per_interface;
		     phy_id++) {
			VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
			/* Vref & clamp */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CONFIG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CONFIG_PHY_REG, 0x4, 0x7));
		}
	}

	/*
	 * NOTE(review): magic phy register 0x90 / value 0x6002 — purpose
	 * not documented here; confirm against Marvell PHY register spec
	 */
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0x90,
		      0x6002));

	return MV_OK;
}

View file

@ -0,0 +1,148 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _DDR_ML_WRAPPER_H
#define _DDR_ML_WRAPPER_H
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
#endif
/*
 * MV_DEBUG_INIT needs to be defined, otherwise the output of the
 * DDR training code is not complete and misleading
 */
#define MV_DEBUG_INIT
#ifdef MV_DEBUG_INIT
#define DEBUG_INIT_S(s) puts(s)
#define DEBUG_INIT_D(d, l) printf("%x", d)
#define DEBUG_INIT_D_10(d, l) printf("%d", d)
#else
#define DEBUG_INIT_S(s)
#define DEBUG_INIT_D(d, l)
#define DEBUG_INIT_D_10(d, l)
#endif
#ifdef MV_DEBUG_INIT_FULL
#define DEBUG_INIT_FULL_S(s) puts(s)
#define DEBUG_INIT_FULL_D(d, l) printf("%x", d)
#define DEBUG_INIT_FULL_D_10(d, l) printf("%d", d)
#define DEBUG_WR_REG(reg, val) \
{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#define DEBUG_RD_REG(reg, val) \
{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#else
#define DEBUG_INIT_FULL_S(s)
#define DEBUG_INIT_FULL_D(d, l)
#define DEBUG_INIT_FULL_D_10(d, l)
#define DEBUG_WR_REG(reg, val)
#define DEBUG_RD_REG(reg, val)
#endif
#define DEBUG_INIT_FULL_C(s, d, l) \
{ DEBUG_INIT_FULL_S(s); \
DEBUG_INIT_FULL_D(d, l); \
DEBUG_INIT_FULL_S("\n"); }
#define DEBUG_INIT_C(s, d, l) \
{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
/*
* Debug (Enable/Disable modules) and Error report
*/
#ifdef BASIC_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS_RESULTS
#endif
#ifdef FULL_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS
#define MV_DEBUG_PBS
#define MV_DEBUG_DFS
#define MV_DEBUG_MAIN_FULL
#define MV_DEBUG_DFS_FULL
#define MV_DEBUG_DQS_FULL
#define MV_DEBUG_RL_FULL
#define MV_DEBUG_WL_FULL
#endif
/* The following is a list of Marvell status */
#define MV_ERROR (-1)
#define MV_OK (0x00) /* Operation succeeded */
#define MV_FAIL (0x01) /* Operation failed */
#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
#define MV_BAD_PTR (0x05) /* Illegal pointer value */
#define MV_BAD_SIZE (0x06) /* Illegal size */
#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
#define MV_SET_ERROR (0x08) /* Set operation failed */
#define MV_GET_ERROR (0x09) /* Get operation failed */
#define MV_CREATE_ERROR (0x0a) /* Fail while creating an item */
#define MV_NOT_FOUND (0x0b) /* Item not found */
#define MV_NO_MORE (0x0c) /* No more items found */
#define MV_NO_SUCH (0x0d) /* No such item */
#define MV_TIMEOUT (0x0e) /* Time Out */
#define MV_NO_CHANGE (0x0f) /* Parameter(s) is already in this value */
#define MV_NOT_SUPPORTED	(0x10)	/* This request is not supported */
#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/
#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
#define MV_INIT_ERROR		(0x16)	/* Error occurred while INIT process */
#define MV_HW_ERROR (0x17) /* Hardware error */
#define MV_TX_ERROR (0x18) /* Transmit operation not succeeded */
#define MV_RX_ERROR		(0x19)	/* Receive operation not succeeded */
#define MV_NOT_READY (0x1a) /* The other side is not ready yet */
#define MV_ALREADY_EXIST (0x1b) /* Tried to create existing item */
#define MV_OUT_OF_CPU_MEM (0x1c) /* Cpu memory allocation failed. */
#define MV_NOT_STARTED (0x1d) /* Not started yet */
#define MV_BUSY (0x1e) /* Item is busy. */
#define MV_TERMINATE		(0x1f)	/* Item terminates its work. */
#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
#define MV_WRITE_PROTECT (0x22) /* Write protected */
#define MV_INVALID (int)(-1)
/*
* Accessor functions for the registers
*/
/* write a 32-bit value to the SoC register at offset addr from INTER_REGS_BASE */
static inline void reg_write(u32 addr, u32 val)
{
	writel(val, INTER_REGS_BASE + addr);
}
/* read the 32-bit SoC register at offset addr from INTER_REGS_BASE */
static inline u32 reg_read(u32 addr)
{
	return readl(INTER_REGS_BASE + addr);
}
/* set the bits in mask (read-modify-write OR) in the SoC register at addr */
static inline void reg_bit_set(u32 addr, u32 mask)
{
	setbits_le32(INTER_REGS_BASE + addr, mask);
}
/* clear the bits in mask (read-modify-write AND-NOT) in the SoC register at addr */
static inline void reg_bit_clr(u32 addr, u32 mask)
{
	clrbits_le32(INTER_REGS_BASE + addr, mask);
}
#endif /* _DDR_ML_WRAPPER_H */

View file

@ -9,38 +9,13 @@
#include "ddr3_training_ip_def.h"
#include "ddr3_topology_def.h"
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
#include "mv_ddr_plat.h"
#endif
/* bus width in bits */
enum hws_bus_width {
BUS_WIDTH_4,
BUS_WIDTH_8,
BUS_WIDTH_16,
BUS_WIDTH_32
};
enum hws_temperature {
HWS_TEMP_LOW,
HWS_TEMP_NORMAL,
HWS_TEMP_HIGH
};
enum hws_mem_size {
MEM_512M,
MEM_1G,
MEM_2G,
MEM_4G,
MEM_8G,
MEM_SIZE_LAST
};
enum hws_timing {
HWS_TIM_DEFAULT,
HWS_TIM_1T,
HWS_TIM_2T
};
#include "mv_ddr_topology.h"
#include "mv_ddr_spd.h"
#include "ddr3_logging_def.h"
struct bus_params {
/* Chip Select (CS) bitmask (bits 0-CS0, bit 1- CS1 ...) */
@ -66,11 +41,11 @@ struct if_params {
/* Speed Bin Table */
enum hws_speed_bin speed_bin_index;
/* bus width of memory */
enum hws_bus_width bus_width;
/* sdram device width */
enum mv_ddr_dev_width bus_width;
/* Bus memory size (MBit) */
enum hws_mem_size memory_size;
/* total sdram capacity per die, megabits */
enum mv_ddr_die_capacity memory_size;
/* The DDR frequency for each interfaces */
enum hws_ddr_freq memory_freq;
@ -88,33 +63,49 @@ struct if_params {
u8 cas_l;
/* operation temperature */
enum hws_temperature interface_temp;
/* 2T vs 1T mode (by default computed from number of CSs) */
enum hws_timing timing;
enum mv_ddr_temperature interface_temp;
};
struct hws_topology_map {
struct mv_ddr_topology_map {
/* debug level configuration */
enum mv_ddr_debug_level debug_level;
/* Number of interfaces (default is 12) */
u8 if_act_mask;
/* Controller configuration per interface */
struct if_params interface_params[MAX_INTERFACE_NUM];
/* BUS per interface (default is 4) */
u8 num_of_bus_per_interface;
/* Bit mask for active buses */
u8 bus_act_mask;
u16 bus_act_mask;
/* source of ddr configuration data */
enum mv_ddr_cfg_src cfg_src;
/* raw spd data */
union mv_ddr_spd_data spd_data;
/* timing parameters */
unsigned int timing_data[MV_DDR_TDATA_LAST];
};
/* DDR3 training global configuration parameters */
struct tune_train_params {
	u32 ck_delay;
	u32 ck_delay_16;	/* CK delay for 16-bit bus width */
	u32 p_finger;
	u32 n_finger;
	u32 phy_reg3_val;
	u32 g_zpri_data;	/* controller data - P drive strength */
	u32 g_znri_data;	/* controller data - N drive strength */
	u32 g_zpri_ctrl;	/* controller C/A - P drive strength */
	u32 g_znri_ctrl;	/* controller C/A - N drive strength */
	u32 g_zpodt_data;	/* controller data - P ODT */
	u32 g_znodt_data;	/* controller data - N ODT */
	u32 g_zpodt_ctrl;	/* controller ctrl - P ODT; TODO confirm (older source commented "data") */
	u32 g_znodt_ctrl;	/* controller ctrl - N ODT; TODO confirm (older source commented "data") */
	u32 g_dic;
	u32 g_odt_config;
	u32 g_rtt_nom;
	u32 g_rtt_wr;
	u32 g_rtt_park;
};
#endif /* _DDR_TOPOLOGY_DEF_H */

View file

@ -0,0 +1 @@
const char mv_ddr_build_message[] = ""; const char mv_ddr_version_string[] = "mv_ddr: mv_ddr-armada-17.10.4";

View file

@ -0,0 +1,47 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_common.h"
#include "ddr_ml_wrapper.h"
/* print the mv_ddr version string followed by the build message to the console */
void mv_ddr_ver_print(void)
{
	printf("%s %s\n", mv_ddr_version_string, mv_ddr_build_message);
}
/* ceiling division for positive integers */
unsigned int ceil_div(unsigned int x, unsigned int y)
{
	/* round the quotient up when there is a remainder; avoids the
	 * (x + y - 1) form, which could overflow for large x
	 */
	unsigned int q = x / y;

	if (x % y)
		q++;

	return q;
}
/*
* time to number of clocks calculation based on the rounding algorithm
* using 97.4% inverse factor per JEDEC Standard No. 21-C, 4.1.2.L-4:
* Serial Presence Detect (SPD) for DDR4 SDRAM Modules
*/
unsigned int time_to_nclk(unsigned int t, unsigned int tclk)
{
	/* t & tclk parameters are in ps */
	unsigned long clks = (unsigned long)t * 1000 / tclk;

	/* apply the 97.4% inverse-factor rounding (add 974 before /1000) */
	return (clks + 974) / 1000;
}
/* round division of two positive integers to the nearest whole number */
/*
 * Round the division of two positive integers to the nearest whole number.
 * Returns MV_FAIL on a NULL quotient pointer or a zero divisor; MV_OK
 * otherwise, with the rounded result stored through quotient.
 */
int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient)
{
	/* guard clauses: validate output pointer and divisor up front */
	if (quotient == NULL) {
		printf("%s: error: NULL quotient pointer found\n", __func__);
		return MV_FAIL;
	}

	if (divisor == 0) {
		printf("%s: error: division by zero\n", __func__);
		return MV_FAIL;
	}

	/* add half the divisor before dividing to round to nearest */
	*quotient = (dividend + divisor / 2) / divisor;

	return MV_OK;
}

View file

@ -0,0 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_COMMON_H
#define _MV_DDR_COMMON_H
extern const char mv_ddr_build_message[];
extern const char mv_ddr_version_string[];
#define MV_DDR_NUM_BITS_IN_BYTE 8
#define MV_DDR_MEGA_BITS (1024 * 1024)
#define MV_DDR_32_BITS_MASK 0xffffffff
unsigned int ceil_div(unsigned int x, unsigned int y);
unsigned int time_to_nclk(unsigned int t, unsigned int tclk);
int round_div(unsigned int dividend, unsigned int divisor, unsigned int *quotient);
#endif /* _MV_DDR_COMMON_H */

File diff suppressed because it is too large Load diff

View file

@ -0,0 +1,235 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_PLAT_H
#define _MV_DDR_PLAT_H
#define MAX_INTERFACE_NUM 1
#define MAX_BUS_NUM 5
#define DDR_IF_CTRL_SUBPHYS_NUM 3
#define DFS_LOW_FREQ_VALUE 120
#define SDRAM_CS_SIZE 0xfffffff /* FIXME: implement a function for cs size for each platform */
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
#define AP_INT_REG_START_ADDR 0xd0000000
#define AP_INT_REG_END_ADDR 0xd0100000
/* Controller bus divider: 1 for 32 bit, 2 for 64 bit */
#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER 1
/* Tune internal training params values */
#define TUNE_TRAINING_PARAMS_CK_DELAY 160
#define TUNE_TRAINING_PARAMS_PHYREG3VAL 0xA
#define TUNE_TRAINING_PARAMS_PRI_DATA 123
#define TUNE_TRAINING_PARAMS_NRI_DATA 123
#define TUNE_TRAINING_PARAMS_PRI_CTRL 74
#define TUNE_TRAINING_PARAMS_NRI_CTRL 74
#define TUNE_TRAINING_PARAMS_P_ODT_DATA 45
#define TUNE_TRAINING_PARAMS_N_ODT_DATA 45
#define TUNE_TRAINING_PARAMS_P_ODT_CTRL 45
#define TUNE_TRAINING_PARAMS_N_ODT_CTRL 45
#define TUNE_TRAINING_PARAMS_DIC 0x2
#define TUNE_TRAINING_PARAMS_ODT_CONFIG_2CS 0x120012
#define TUNE_TRAINING_PARAMS_ODT_CONFIG_1CS 0x10000
#define TUNE_TRAINING_PARAMS_RTT_NOM 0x44
#define TUNE_TRAINING_PARAMS_RTT_WR_1CS 0x0 /*off*/
#define TUNE_TRAINING_PARAMS_RTT_WR_2CS 0x0 /*off*/
#define MARVELL_BOARD MARVELL_BOARD_ID_BASE
#define REG_DEVICE_SAR1_ADDR 0xe4204
#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET 17
#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK 0x1f
#define DEVICE_SAMPLE_AT_RESET2_REG 0x18604
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET 0
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ 0
#define DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_40MHZ 1
/* DRAM Windows */
#define REG_XBAR_WIN_5_CTRL_ADDR 0x20050
#define REG_XBAR_WIN_5_BASE_ADDR 0x20054
/* DRAM Windows */
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0
#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4
#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win))
#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win))
#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100))
#define CPU_MRVL_ID_OFFSET 0x10
#define SAR1_CPU_CORE_MASK 0x00000018
#define SAR1_CPU_CORE_OFFSET 3
/* SatR defined too change topology busWidth and ECC configuration */
#define DDR_SATR_CONFIG_MASK_WIDTH 0x8
#define DDR_SATR_CONFIG_MASK_ECC 0x10
#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20
#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600
#define MV_BOARD_REFCLK_25MHZ 25000000
#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ
#define MAX_DQ_NUM 40
/* dram line buffer registers */
#define DLB_CTRL_REG 0x1700
#define DLB_EN_OFFS 0
#define DLB_EN_MASK 0x1
#define DLB_EN_ENA 1
#define DLB_EN_DIS 0
#define WR_COALESCE_EN_OFFS 2
#define WR_COALESCE_EN_MASK 0x1
#define WR_COALESCE_EN_ENA 1
#define WR_COALESCE_EN_DIS 0
#define AXI_PREFETCH_EN_OFFS 3
#define AXI_PREFETCH_EN_MASK 0x1
#define AXI_PREFETCH_EN_ENA 1
#define AXI_PREFETCH_EN_DIS 0
#define MBUS_PREFETCH_EN_OFFS 4
#define MBUS_PREFETCH_EN_MASK 0x1
#define MBUS_PREFETCH_EN_ENA 1
#define MBUS_PREFETCH_EN_DIS 0
#define PREFETCH_NXT_LN_SZ_TRIG_OFFS 6
#define PREFETCH_NXT_LN_SZ_TRIG_MASK 0x1
#define PREFETCH_NXT_LN_SZ_TRIG_ENA 1
#define PREFETCH_NXT_LN_SZ_TRIG_DIS 0
#define DLB_BUS_OPT_WT_REG 0x1704
#define DLB_AGING_REG 0x1708
#define DLB_EVICTION_CTRL_REG 0x170c
#define DLB_EVICTION_TIMERS_REG 0x1710
#define DLB_USER_CMD_REG 0x1714
#define DLB_WTS_DIFF_CS_REG 0x1770
#define DLB_WTS_DIFF_BG_REG 0x1774
#define DLB_WTS_SAME_BG_REG 0x1778
#define DLB_WTS_CMDS_REG 0x177c
#define DLB_WTS_ATTR_PRIO_REG 0x1780
#define DLB_QUEUE_MAP_REG 0x1784
#define DLB_SPLIT_REG 0x1788
/* Subphy result control per byte registers */
#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830
#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834
#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838
#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c
#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0
/* Subphy result control per bit registers */
#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4
#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8
#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc
#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0
#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4
#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8
#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc
#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0
#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4
#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8
#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc
#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930
#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934
#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938
#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c
#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0
#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4
#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8
#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc
#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0
#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4
#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8
#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc
#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0
#define RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4
#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8
#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc
#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30
#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34
#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38
#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c
#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0
#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4
#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8
#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc
#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0
#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4
#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8
#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc
#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0
/* CPU */
#define REG_BOOTROM_ROUTINE_ADDR 0x182d0
#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12
/* Matrix enables DRAM modes (bus width/ECC) per boardId */
#define TOPOLOGY_UPDATE_32BIT 0
#define TOPOLOGY_UPDATE_32BIT_ECC 1
#define TOPOLOGY_UPDATE_16BIT 2
#define TOPOLOGY_UPDATE_16BIT_ECC 3
#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4
#define TOPOLOGY_UPDATE { \
/* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \
{1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \
{1, 1, 1, 1, 1}, /* DB_68XX_ID */ \
{1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \
{0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \
{1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \
};
enum {
CPU_1066MHZ_DDR_400MHZ,
CPU_RESERVED_DDR_RESERVED0,
CPU_667MHZ_DDR_667MHZ,
CPU_800MHZ_DDR_800MHZ,
CPU_RESERVED_DDR_RESERVED1,
CPU_RESERVED_DDR_RESERVED2,
CPU_RESERVED_DDR_RESERVED3,
LAST_FREQ
};
/* struct used for DLB configuration array */
struct dlb_config {
u32 reg_addr;
u32 reg_data;
};
#define ACTIVE_INTERFACE_MASK 0x1
extern u32 dmin_phy_reg_table[][2];
extern u16 odt_slope[];
extern u16 odt_intercept[];
int mv_ddr_pre_training_soc_config(const char *ddr_type);
int mv_ddr_post_training_soc_config(const char *ddr_type);
void mv_ddr_mem_scrubbing(void);
void mv_ddr_odpg_enable(void);
void mv_ddr_odpg_disable(void);
void mv_ddr_odpg_done_clr(void);
int mv_ddr_is_odpg_done(u32 count);
void mv_ddr_training_enable(void);
int mv_ddr_is_training_done(u32 count, u32 *result);
u32 mv_ddr_dm_pad_get(void);
int mv_ddr_pre_training_fixup(void);
int mv_ddr_post_training_fixup(void);
int mv_ddr_manual_cal_do(void);
#endif /* _MV_DDR_PLAT_H */

View file

@ -0,0 +1,446 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_REGS_H
#define _MV_DDR_REGS_H
#define GLOB_CTRL_STATUS_REG 0x1030
#define TRAINING_TRIGGER_OFFS 0
#define TRAINING_TRIGGER_MASK 0x1
#define TRAINING_TRIGGER_ENA 1
#define TRAINING_DONE_OFFS 1
#define TRAINING_DONE_MASK 0x1
#define TRAINING_DONE_DONE 1
#define TRAINING_DONE_NOT_DONE 0
#define TRAINING_RESULT_OFFS 2
#define TRAINING_RESULT_MASK 0x1
#define TRAINING_RESULT_PASS 0
#define TRAINING_RESULT_FAIL 1
#define GENERAL_TRAINING_OPCODE_REG 0x1034
#define OPCODE_REG0_BASE 0x1038
#define OPCODE_REG0_REG(obj) (OPCODE_REG0_BASE + (obj) * 0x4)
#define OPCODE_REG1_BASE 0x10b0
#define OPCODE_REG1_REG(obj) (OPCODE_REG1_BASE + (obj) * 0x4)
#define CAL_PHY_BASE 0x10c0
#define CAL_PHY_REG(obj) (CAL_PHY_BASE + (obj) * 0x4)
#define WL_DONE_CNTR_REF_REG 0x10f8
#define ODPG_WR_RD_MODE_ENA_REG 0x10fc
#define SDRAM_CFG_REG 0x1400
#define REFRESH_OFFS 0
#define REFRESH_MASK 0x3fff
#define DRAM_TYPE_OFFS 14
#define DRAM_TYPE_MASK 0x1
#define BUS_IN_USE_OFFS 15
#define BUS_IN_USE_MASK 0x1
#define CPU_2DRAM_WR_BUFF_CUT_TH_OFFS 16
#define CPU_2DRAM_WR_BUFF_CUT_TH_MASK 0x1
#define REG_DIMM_OFFS 17
#define REG_DIMM_MASK 0x1
#define ECC_OFFS 18
#define ECC_MASK 0x1
#define IGNORE_ERRORS_OFFS 19
#define IGNORE_ERRORS_MASK 0x1
#define DRAM_TYPE_HIGH_OFFS 20
#define DRAM_TYPE_HIGH_MASK 0x1
#define SELF_REFRESH_MODE_OFFS 24
#define SELF_REFRESH_MODE_MASK 0x1
#define CPU_RD_PER_PROP_OFFS 25
#define CPU_RD_PER_PROP_MASK 0x1
#define DDR4_EMULATION_OFFS 26
#define DDR4_EMULATION_MASK 0x1
#define PHY_RF_RST_OFFS 27
#define PHY_RF_RST_MASK 0x1
#define PUP_RST_DIVIDER_OFFS 28
#define PUP_RST_DIVIDER_MASK 0x1
#define DATA_PUP_WR_RESET_OFFS 29
#define DATA_PUP_WR_RESET_MASK 0x1
#define DATA_PUP_RD_RESET_OFFS 30
#define DATA_PUP_RD_RESET_MASK 0x1
#define DATA_PUP_RD_RESET_ENA 0x0
#define DATA_PUP_RD_RESET_DIS 0x1
#define IO_BIST_OFFS 31
/*
 * Fixed: this line was a verbatim duplicate of DATA_PUP_RD_RESET_MASK
 * (benign identical redefinition, but clearly a copy/paste slip); the
 * bit-31 IO_BIST field gets its own mask macro instead.
 */
#define IO_BIST_MASK 0x1
#define DUNIT_CTRL_LOW_REG 0x1404
#define SDRAM_TIMING_LOW_REG 0x1408
#define SDRAM_TIMING_LOW_TRAS_OFFS 0
#define SDRAM_TIMING_LOW_TRAS_MASK 0xf
#define SDRAM_TIMING_LOW_TRCD_OFFS 4
#define SDRAM_TIMING_LOW_TRCD_MASK 0xf
#define SDRAM_TIMING_HIGH_TRCD_OFFS 22
#define SDRAM_TIMING_HIGH_TRCD_MASK 0x1
#define SDRAM_TIMING_LOW_TRP_OFFS 8
#define SDRAM_TIMING_LOW_TRP_MASK 0xf
#define SDRAM_TIMING_HIGH_TRP_OFFS 23
#define SDRAM_TIMING_HIGH_TRP_MASK 0x1
#define SDRAM_TIMING_LOW_TWR_OFFS 12
#define SDRAM_TIMING_LOW_TWR_MASK 0xf
#define SDRAM_TIMING_LOW_TWTR_OFFS 16
#define SDRAM_TIMING_LOW_TWTR_MASK 0xf
#define SDRAM_TIMING_LOW_TRAS_HIGH_OFFS 20
#define SDRAM_TIMING_LOW_TRAS_HIGH_MASK 0x3
#define SDRAM_TIMING_LOW_TRRD_OFFS 24
#define SDRAM_TIMING_LOW_TRRD_MASK 0xf
#define SDRAM_TIMING_LOW_TRTP_OFFS 28
#define SDRAM_TIMING_LOW_TRTP_MASK 0xf
#define SDRAM_TIMING_HIGH_REG 0x140c
#define SDRAM_TIMING_HIGH_TRFC_OFFS 0
#define SDRAM_TIMING_HIGH_TRFC_MASK 0x7f
#define SDRAM_TIMING_HIGH_TR2R_OFFS 7
#define SDRAM_TIMING_HIGH_TR2R_MASK 0x3
#define SDRAM_TIMING_HIGH_TR2W_W2R_OFFS 9
#define SDRAM_TIMING_HIGH_TR2W_W2R_MASK 0x3
#define SDRAM_TIMING_HIGH_TW2W_OFFS 11
#define SDRAM_TIMING_HIGH_TW2W_MASK 0x1f
#define SDRAM_TIMING_HIGH_TRFC_HIGH_OFFS 16
#define SDRAM_TIMING_HIGH_TRFC_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TR2R_HIGH_OFFS 19
#define SDRAM_TIMING_HIGH_TR2R_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_OFFS 22
#define SDRAM_TIMING_HIGH_TR2W_W2R_HIGH_MASK 0x7
#define SDRAM_TIMING_HIGH_TMOD_OFFS 25
#define SDRAM_TIMING_HIGH_TMOD_MASK 0xf
#define SDRAM_TIMING_HIGH_TMOD_HIGH_OFFS 30
#define SDRAM_TIMING_HIGH_TMOD_HIGH_MASK 0x3
#define SDRAM_ADDR_CTRL_REG 0x1410
#define CS_STRUCT_BASE 0
#define CS_STRUCT_OFFS(cs) (CS_STRUCT_BASE + (cs) * 4)
#define CS_STRUCT_MASK 0x3
#define CS_SIZE_BASE 2
#define CS_SIZE_OFFS(cs) (CS_SIZE_BASE + (cs) * 4)
#define CS_SIZE_MASK 0x3
#define CS_SIZE_HIGH_BASE 20
#define CS_SIZE_HIGH_OFFS(cs) (CS_SIZE_HIGH_BASE + (cs))
#define CS_SIZE_HIGH_MASK 0x1
#define T_FAW_OFFS 24
#define T_FAW_MASK 0x7f
#define SDRAM_OPEN_PAGES_CTRL_REG 0x1414
#define SDRAM_OP_REG 0x1418
#define SDRAM_OP_CMD_OFFS 0
#define SDRAM_OP_CMD_MASK 0x1f
#define SDRAM_OP_CMD_CS_BASE 8
#define SDRAM_OP_CMD_CS_OFFS(cs) (SDRAM_OP_CMD_CS_BASE + (cs))
#define SDRAM_OP_CMD_CS_MASK 0x1
/*
 * Command opcodes for the SDRAM_OP_CMD field of SDRAM_OP_REG (0x1418).
 * Enumerator position encodes the opcode value: CMD_NORMAL = 0 through
 * DDR4_MPR_WR = 19. CMD_RES_* entries are reserved opcode slots.
 */
enum {
	CMD_NORMAL,
	CMD_PRECHARGE,
	CMD_REFRESH,
	CMD_DDR3_DDR4_MR0,	/* mode-register set commands (mr0..mr3) */
	CMD_DDR3_DDR4_MR1,
	CMD_NOP,
	CMD_RES_0X6,
	CMD_SELFREFRESH,
	CMD_DDR3_DDR4_MR2,
	CMD_DDR3_DDR4_MR3,
	CMD_ACT_PDE,		/* active power-down entry */
	CMD_PRE_PDE,		/* precharge power-down entry */
	CMD_ZQCL,		/* zq calibration long */
	CMD_ZQCS,		/* zq calibration short */
	CMD_CWA,
	CMD_RES_0XF,
	CMD_DDR4_MR4,
	CMD_DDR4_MR5,
	CMD_DDR4_MR6,
	DDR4_MPR_WR
};
#define DUNIT_CTRL_HIGH_REG 0x1424
#define CPU_INTERJECTION_ENA_OFFS 3
#define CPU_INTERJECTION_ENA_MASK 0x1
#define CPU_INTERJECTION_ENA_SPLIT_ENA 0
#define CPU_INTERJECTION_ENA_SPLIT_DIS 1
#define DDR_ODT_TIMING_LOW_REG 0x1428
#define DDR_TIMING_REG 0x142c
#define DDR_TIMING_TCCD_OFFS 18
#define DDR_TIMING_TCCD_MASK 0x7
#define DDR_TIMING_TPD_OFFS 0
#define DDR_TIMING_TPD_MASK 0xf
#define DDR_TIMING_TXPDLL_OFFS 4
#define DDR_TIMING_TXPDLL_MASK 0x1f
#define DDR_ODT_TIMING_HIGH_REG 0x147c
#define SDRAM_INIT_CTRL_REG 0x1480
#define DRAM_RESET_MASK_OFFS 1
#define DRAM_RESET_MASK_MASK 0x1
#define DRAM_RESET_MASK_NORMAL 0
#define DRAM_RESET_MASK_MASKED 1
#define SDRAM_ODT_CTRL_HIGH_REG 0x1498
#define DUNIT_ODT_CTRL_REG 0x149c
#define RD_BUFFER_SEL_REG 0x14a4
#define AXI_CTRL_REG 0x14a8
#define DUNIT_MMASK_REG 0x14b0
#define HORZ_SSTL_CAL_MACH_CTRL_REG 0x14c8
#define HORZ_POD_CAL_MACH_CTRL_REG 0x17c8
#define VERT_SSTL_CAL_MACH_CTRL_REG 0x1dc8
#define VERT_POD_CAL_MACH_CTRL_REG 0x1ec8
#define MAIN_PADS_CAL_MACH_CTRL_REG 0x14cc
#define DYN_PADS_CAL_ENABLE_OFFS 0
#define DYN_PADS_CAL_ENABLE_MASK 0x1
#define DYN_PADS_CAL_ENABLE_DIS 0
#define DYN_PADS_CAL_ENABLE_ENA 1
#define PADS_RECAL_OFFS 1
#define PADS_RECAL_MASK 0x1
#define DYN_PADS_CAL_BLOCK_OFFS 2
#define DYN_PADS_CAL_BLOCK_MASK 0x1
#define CAL_UPDATE_CTRL_OFFS 3
#define CAL_UPDATE_CTRL_MASK 0x3
#define CAL_UPDATE_CTRL_INT 1
#define CAL_UPDATE_CTRL_EXT 2
#define DYN_PADS_CAL_CNTR_OFFS 13
#define DYN_PADS_CAL_CNTR_MASK 0x3ffff
#define CAL_MACH_STATUS_OFFS 31
#define CAL_MACH_STATUS_MASK 0x1
#define CAL_MACH_BUSY 0
#define CAL_MACH_RDY 1
#define DRAM_DLL_TIMING_REG 0x14e0
#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4
#define DRAM_ZQ_TIMING_REG 0x14e8
#define DRAM_LONG_TIMING_REG 0x14ec
#define DDR4_TRRD_L_OFFS 0
#define DDR4_TRRD_L_MASK 0xf
#define DDR4_TWTR_L_OFFS 4
#define DDR4_TWTR_L_MASK 0xf
#define DDR_IO_REG 0x1524
#define DFS_REG 0x1528
#define RD_DATA_SMPL_DLYS_REG 0x1538
#define RD_SMPL_DLY_CS_BASE 0
#define RD_SMPL_DLY_CS_OFFS(cs) (RD_SMPL_DLY_CS_BASE + (cs) * 8)
#define RD_SMPL_DLY_CS_MASK 0x1f
#define RD_DATA_RDY_DLYS_REG 0x153c
#define RD_RDY_DLY_CS_BASE 0
#define RD_RDY_DLY_CS_OFFS(cs) (RD_RDY_DLY_CS_BASE + (cs) * 8)
#define RD_RDY_DLY_CS_MASK 0x1f
#define TRAINING_REG 0x15b0
#define TRN_START_OFFS 31
#define TRN_START_MASK 0x1
#define TRN_START_ENA 1
#define TRN_START_DIS 0
#define TRAINING_SW_1_REG 0x15b4
#define TRAINING_SW_2_REG 0x15b8
#define TRAINING_ECC_MUX_OFFS 1
#define TRAINING_ECC_MUX_MASK 0x1
#define TRAINING_ECC_MUX_DIS 0
#define TRAINING_ECC_MUX_ENA 1
#define TRAINING_SW_OVRD_OFFS 0
#define TRAINING_SW_OVRD_MASK 0x1
#define TRAINING_SW_OVRD_DIS 0
#define TRAINING_SW_OVRD_ENA 1
#define TRAINING_PATTERN_BASE_ADDR_REG 0x15bc
#define TRAINING_DBG_1_REG 0x15c0
#define TRAINING_DBG_2_REG 0x15c4
#define TRAINING_DBG_3_REG 0x15c8
#define TRN_DBG_RDY_INC_PH_2TO1_BASE 0
#define TRN_DBG_RDY_INC_PH_2TO1_OFFS(phase) (TRN_DBG_RDY_INC_PH_2TO1_BASE + (phase) * 3)
#define TRN_DBG_RDY_INC_PH_2TO1_MASK 0x7
#define DDR3_RANK_CTRL_REG 0x15e0
#define CS_EXIST_BASE 0
#define CS_EXIST_OFFS(cs) (CS_EXIST_BASE + (cs))
#define CS_EXIST_MASK 0x1
#define ZQC_CFG_REG 0x15e4
#define DRAM_PHY_CFG_REG 0x15ec
#define ODPG_CTRL_CTRL_REG 0x1600
#define ODPG_DATA_CTRL_REG 0x1630
#define ODPG_WRBUF_WR_CTRL_OFFS 0
#define ODPG_WRBUF_WR_CTRL_MASK 0x1
#define ODPG_WRBUF_WR_CTRL_DIS 0
#define ODPG_WRBUF_WR_CTRL_ENA 1
#define ODPG_WRBUF_RD_CTRL_OFFS 1
#define ODPG_WRBUF_RD_CTRL_MASK 0x1
#define ODPG_WRBUF_RD_CTRL_DIS 0
#define ODPG_WRBUF_RD_CTRL_ENA 1
#define ODPG_DATA_CBDEL_OFFS 15
#define ODPG_DATA_CBDEL_MASK 0x3f
#define ODPG_MODE_OFFS 25
#define ODPG_MODE_MASK 0x1
#define ODPG_MODE_RX 0
#define ODPG_MODE_TX 1
#define ODPG_DATA_CS_OFFS 26
#define ODPG_DATA_CS_MASK 0x3
#define ODPG_DISABLE_OFFS 30
#define ODPG_DISABLE_MASK 0x1
#define ODPG_DISABLE_DIS 1
#define ODPG_ENABLE_OFFS 31
#define ODPG_ENABLE_MASK 0x1
#define ODPG_ENABLE_ENA 1
#define ODPG_DATA_BUFFER_OFFS_REG 0x1638
#define ODPG_DATA_BUFFER_SIZE_REG 0x163c
#define PHY_LOCK_STATUS_REG 0x1674
#define PHY_REG_FILE_ACCESS_REG 0x16a0
#define PRFA_DATA_OFFS 0
#define PRFA_DATA_MASK 0xffff
#define PRFA_REG_NUM_OFFS 16
#define PRFA_REG_NUM_MASK 0x3f
#define PRFA_PUP_NUM_OFFS 22
#define PRFA_PUP_NUM_MASK 0xf
#define PRFA_PUP_CTRL_DATA_OFFS 26
#define PRFA_PUP_CTRL_DATA_MASK 0x1
#define PRFA_PUP_BCAST_WR_ENA_OFFS 27
#define PRFA_PUP_BCAST_WR_ENA_MASK 0x1
#define PRFA_REG_NUM_HI_OFFS 28
#define PRFA_REG_NUM_HI_MASK 0x3
#define PRFA_TYPE_OFFS 30
#define PRFA_TYPE_MASK 0x1
#define PRFA_REQ_OFFS 31
#define PRFA_REQ_MASK 0x1
#define PRFA_REQ_DIS 0x0
#define PRFA_REQ_ENA 0x1
#define TRAINING_WL_REG 0x16ac
#define ODPG_DATA_WR_ADDR_REG 0x16b0
#define ODPG_DATA_WR_ACK_OFFS 0
#define ODPG_DATA_WR_ACK_MASK 0x7f
#define ODPG_DATA_WR_DATA_OFFS 8
#define ODPG_DATA_WR_DATA_MASK 0xff
#define ODPG_DATA_WR_DATA_HIGH_REG 0x16b4
#define ODPG_DATA_WR_DATA_LOW_REG 0x16b8
#define ODPG_DATA_RX_WORD_ERR_ADDR_REG 0x16bc
#define ODPG_DATA_RX_WORD_ERR_CNTR_REG 0x16c0
#define ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG 0x16c4
#define ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG 0x16c8
#define ODPG_DATA_WR_DATA_ERR_REG 0x16cc
#define DUAL_DUNIT_CFG_REG 0x16d8
#define FC_SAMPLE_STAGES_OFFS 0
#define FC_SAMPLE_STAGES_MASK 0x7
#define SINGLE_CS_PIN_OFFS 3
#define SINGLE_CS_PIN_MASK 0x1
#define SINGLE_CS_ENA 1
#define TUNING_ACTIVE_SEL_OFFS 6
#define TUNING_ACTIVE_SEL_MASK 0x1
#define TUNING_ACTIVE_SEL_MC 0
#define TUNING_ACTIVE_SEL_TIP 1
#define WL_DQS_PATTERN_REG 0x16dc
#define ODPG_DONE_STATUS_REG 0x16fc
#define ODPG_DONE_STATUS_BIT_OFFS 0
#define ODPG_DONE_STATUS_BIT_MASK 0x1
#define ODPG_DONE_STATUS_BIT_CLR 0
#define ODPG_DONE_STATUS_BIT_SET 1
#define RESULT_CTRL_BASE 0x1830
#define BLOCK_STATUS_OFFS 25
#define BLOCK_STATUS_MASK 0x1
#define BLOCK_STATUS_LOCK 1
#define BLOCK_STATUS_NOT_LOCKED 0
#define MR0_REG 0x15d0
#define MR1_REG 0x15d4
#define MR2_REG 0x15d8
#define MR3_REG 0x15dc
#define MRS0_CMD 0x3
#define MRS1_CMD 0x4
#define MRS2_CMD 0x8
#define MRS3_CMD 0x9
#define DRAM_PINS_MUX_REG 0x19d4
#define CTRL_PINS_MUX_OFFS 0
#define CTRL_PINS_MUX_MASK 0x3
/*
 * Values for the CTRL_PINS_MUX field of DRAM_PINS_MUX_REG (0x19d4),
 * selecting dram pin muxing per memory type/form-factor.
 */
enum {
	DUNIT_DDR3_ON_BOARD,
	DUNIT_DDR3_DIMM,
	DUNIT_DDR4_ON_BOARD,
	DUNIT_DDR4_DIMM
};
/* ddr phy registers */
#define WL_PHY_BASE 0x0
#define WL_PHY_REG(cs) (WL_PHY_BASE + (cs) * 0x4)
#define WR_LVL_PH_SEL_OFFS 6
#define WR_LVL_PH_SEL_MASK 0x7
#define WR_LVL_PH_SEL_PHASE1 1
#define WR_LVL_REF_DLY_OFFS 0
#define WR_LVL_REF_DLY_MASK 0x1f
#define CTRL_CENTER_DLY_OFFS 10
#define CTRL_CENTER_DLY_MASK 0x1f
#define CTRL_CENTER_DLY_INV_OFFS 15
#define CTRL_CENTER_DLY_INV_MASK 0x1
#define CTX_PHY_BASE 0x1
#define CTX_PHY_REG(cs) (CTX_PHY_BASE + (cs) * 0x4)
#define RL_PHY_BASE 0x2
#define RL_PHY_REG(cs) (RL_PHY_BASE + (cs) * 0x4)
#define RL_REF_DLY_OFFS 0
#define RL_REF_DLY_MASK 0x1f
#define RL_PH_SEL_OFFS 6
#define RL_PH_SEL_MASK 0x7
#define CRX_PHY_BASE 0x3
#define CRX_PHY_REG(cs) (CRX_PHY_BASE + (cs) * 0x4)
#define PHY_CTRL_PHY_REG 0x90
#define ADLL_CFG0_PHY_REG 0x92
#define ADLL_CFG1_PHY_REG 0x93
#define ADLL_CFG2_PHY_REG 0x94
#define CMOS_CONFIG_PHY_REG 0xa2
#define PAD_ZRI_CAL_PHY_REG 0xa4
#define PAD_ODT_CAL_PHY_REG 0xa6
#define PAD_CFG_PHY_REG 0xa8
#define PAD_PRE_DISABLE_PHY_REG 0xa9
#define TEST_ADLL_PHY_REG 0xbf
#define VREF_PHY_BASE 0xd0
#define VREF_PHY_REG(cs, bit) (VREF_PHY_BASE + (cs) * 12 + bit)
/*
 * Pad indices for the 'bit' argument of VREF_PHY_REG(cs, bit):
 * presumably positive/negative dqs strobe pads — confirm against phy spec.
 */
enum {
	DQSP_PAD = 4,
	DQSN_PAD
};
#define VREF_BCAST_PHY_BASE 0xdb
#define VREF_BCAST_PHY_REG(cs) (VREF_BCAST_PHY_BASE + (cs) * 12)
#define PBS_TX_PHY_BASE 0x10
#define PBS_TX_PHY_REG(cs, bit) (PBS_TX_PHY_BASE + (cs) * 0x10 + (bit))
#define PBS_TX_BCAST_PHY_BASE 0x1f
#define PBS_TX_BCAST_PHY_REG(cs) (PBS_TX_BCAST_PHY_BASE + (cs) * 0x10)
#define PBS_RX_PHY_BASE 0x50
#define PBS_RX_PHY_REG(cs, bit) (PBS_RX_PHY_BASE + (cs) * 0x10 + (bit))
#define PBS_RX_BCAST_PHY_BASE 0x5f
#define PBS_RX_BCAST_PHY_REG(cs) (PBS_RX_BCAST_PHY_BASE + (cs) * 0x10)
#define RESULT_PHY_REG 0xc0
#define RESULT_PHY_RX_OFFS 5
#define RESULT_PHY_TX_OFFS 0
#endif /* _MV_DDR_REGS_H */

View file

@ -0,0 +1,377 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_spd.h"
#define MV_DDR_SPD_DATA_MTB 125 /* medium timebase, ps */
#define MV_DDR_SPD_DATA_FTB 1 /* fine timebase, ps */
#define MV_DDR_SPD_MSB_OFFS 8 /* most significant byte offset, bits */
/* number of cas-latency bits encoded in spd bytes 20..23 (3 * 8 + 6) */
#define MV_DDR_SPD_SUPPORTED_CLS_NUM 30
/* cache of supported cas latencies; filled by mv_ddr_spd_supported_cls_calc() */
static unsigned int mv_ddr_spd_supported_cls[MV_DDR_SPD_SUPPORTED_CLS_NUM];
int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data)
{
unsigned int byte, bit, start_cl;
start_cl = (spd_data->all_bytes[23] & 0x8) ? 23 : 7;
for (byte = 20; byte < 23; byte++) {
for (bit = 0; bit < 8; bit++) {
if (spd_data->all_bytes[byte] & (1 << bit))
mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit;
else
mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0;
}
}
for (byte = 23, bit = 0; bit < 6; bit++) {
if (spd_data->all_bytes[byte] & (1 << bit))
mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = start_cl + (byte - 20) * 8 + bit;
else
mv_ddr_spd_supported_cls[(byte - 20) * 8 + bit] = 0;
}
return 0;
}
/*
 * Return the smallest supported cas latency that is >= cl, scanning the
 * cache filled by mv_ddr_spd_supported_cls_calc(); 0 if none qualifies.
 * Unsupported slots hold 0 and are naturally skipped for cl > 0.
 */
unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl)
{
	int idx;

	for (idx = 0; idx < MV_DDR_SPD_SUPPORTED_CLS_NUM; idx++) {
		if (mv_ddr_spd_supported_cls[idx] >= cl)
			return mv_ddr_spd_supported_cls[idx];
	}

	return 0;
}
/*
 * Convert spd-encoded timing parameters to picoseconds.
 *
 * Fills timing_data[] (indexed by the MV_DDR_T* enum) from the raw spd
 * bytes: each value is <medium-timebase byte(s)> * 125 ps, optionally
 * plus a signed <fine-timebase byte> * 1 ps correction. Multi-byte
 * fields combine a low byte with a most-significant nibble/byte shifted
 * by MV_DDR_SPD_MSB_OFFS.
 *
 * Returns 0 on success, or 1 if any fine-corrected value computes
 * negative (inconsistent spd content).
 */
int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[])
{
	int calc_val;

	/* t ck avg min, ps */
	calc_val = spd_data->byte_fields.byte_18 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_125 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TCK_AVG_MIN] = calc_val;

	/* t aa min, ps */
	calc_val = spd_data->byte_fields.byte_24 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_123 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TAA_MIN] = calc_val;

	/* t rfc1 min, ps (16-bit mtb value, no fine correction) */
	timing_data[MV_DDR_TRFC1_MIN] = (spd_data->byte_fields.byte_30 +
		(spd_data->byte_fields.byte_31 << MV_DDR_SPD_MSB_OFFS)) * MV_DDR_SPD_DATA_MTB;

	/* t wr min, ps */
	timing_data[MV_DDR_TWR_MIN] = (spd_data->byte_fields.byte_42 +
		(spd_data->byte_fields.byte_41.bit_fields.t_wr_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twr to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWR_MIN] == 0)
		timing_data[MV_DDR_TWR_MIN] = 15000;

	/* t rcd min, ps */
	calc_val = spd_data->byte_fields.byte_25 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_122 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TRCD_MIN] = calc_val;

	/* t rp min, ps */
	calc_val = spd_data->byte_fields.byte_26 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_121 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TRP_MIN] = calc_val;

	/* t rc min, ps (low byte 29 + upper nibble from byte 27) */
	calc_val = (spd_data->byte_fields.byte_29 +
		(spd_data->byte_fields.byte_27.bit_fields.t_rc_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_120 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TRC_MIN] = calc_val;

	/* t ras min, ps */
	timing_data[MV_DDR_TRAS_MIN] = (spd_data->byte_fields.byte_28 +
		(spd_data->byte_fields.byte_27.bit_fields.t_ras_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;

	/* t rrd s min, ps */
	calc_val = spd_data->byte_fields.byte_38 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_119 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TRRD_S_MIN] = calc_val;

	/* t rrd l min, ps */
	calc_val = spd_data->byte_fields.byte_39 * MV_DDR_SPD_DATA_MTB +
		(signed char)spd_data->byte_fields.byte_118 * MV_DDR_SPD_DATA_FTB;
	if (calc_val < 0)
		return 1;
	timing_data[MV_DDR_TRRD_L_MIN] = calc_val;

	/* t faw min, ps */
	timing_data[MV_DDR_TFAW_MIN] = (spd_data->byte_fields.byte_37 +
		(spd_data->byte_fields.byte_36.bit_fields.t_faw_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;

	/* t wtr s min, ps */
	timing_data[MV_DDR_TWTR_S_MIN] = (spd_data->byte_fields.byte_44 +
		(spd_data->byte_fields.byte_43.bit_fields.t_wtr_s_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twtr_s to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWTR_S_MIN] == 0)
		timing_data[MV_DDR_TWTR_S_MIN] = 2500;

	/* t wtr l min, ps */
	timing_data[MV_DDR_TWTR_L_MIN] = (spd_data->byte_fields.byte_45 +
		(spd_data->byte_fields.byte_43.bit_fields.t_wtr_l_min_msn << MV_DDR_SPD_MSB_OFFS)) *
		MV_DDR_SPD_DATA_MTB;
	/* FIXME: wa: set twtr_l to a default value, if it's unset on spd */
	if (timing_data[MV_DDR_TWTR_L_MIN] == 0)
		timing_data[MV_DDR_TWTR_L_MIN] = 7500;

	return 0;
}
enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data)
{
unsigned char dev_width = spd_data->byte_fields.byte_12.bit_fields.device_width;
enum mv_ddr_dev_width ret_val;
switch (dev_width) {
case 0x00:
ret_val = MV_DDR_DEV_WIDTH_4BIT;
break;
case 0x01:
ret_val = MV_DDR_DEV_WIDTH_8BIT;
break;
case 0x02:
ret_val = MV_DDR_DEV_WIDTH_16BIT;
break;
case 0x03:
ret_val = MV_DDR_DEV_WIDTH_32BIT;
break;
default:
ret_val = MV_DDR_DEV_WIDTH_LAST;
}
return ret_val;
}
enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data)
{
unsigned char die_cap = spd_data->byte_fields.byte_4.bit_fields.die_capacity;
enum mv_ddr_die_capacity ret_val;
switch (die_cap) {
case 0x00:
ret_val = MV_DDR_DIE_CAP_256MBIT;
break;
case 0x01:
ret_val = MV_DDR_DIE_CAP_512MBIT;
break;
case 0x02:
ret_val = MV_DDR_DIE_CAP_1GBIT;
break;
case 0x03:
ret_val = MV_DDR_DIE_CAP_2GBIT;
break;
case 0x04:
ret_val = MV_DDR_DIE_CAP_4GBIT;
break;
case 0x05:
ret_val = MV_DDR_DIE_CAP_8GBIT;
break;
case 0x06:
ret_val = MV_DDR_DIE_CAP_16GBIT;
break;
case 0x07:
ret_val = MV_DDR_DIE_CAP_32GBIT;
break;
case 0x08:
ret_val = MV_DDR_DIE_CAP_12GBIT;
break;
case 0x09:
ret_val = MV_DDR_DIE_CAP_24GBIT;
break;
default:
ret_val = MV_DDR_DIE_CAP_LAST;
}
return ret_val;
}
/* Return spd byte 131, bit 0: rank-1 address mapping (mirroring) flag. */
unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data)
{
	return spd_data->byte_fields.byte_131.bit_fields.rank_1_mapping;
}
/*
 * Decode spd byte 13, bits [2:0]: primary bus width in bits (8..64).
 *
 * NOTE(review): the declared return type is enum mv_ddr_pkg_rank while
 * the result is actually enum mv_ddr_pri_bus_width; the value is
 * implicitly converted between enum types. The prototype in
 * mv_ddr_spd.h has the same mismatch — consider fixing both together.
 */
enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data)
{
	unsigned char pri_bus_width = spd_data->byte_fields.byte_13.bit_fields.primary_bus_width;
	enum mv_ddr_pri_bus_width ret_val;

	switch (pri_bus_width) {
	case 0x00:
		ret_val = MV_DDR_PRI_BUS_WIDTH_8;
		break;
	case 0x01:
		ret_val = MV_DDR_PRI_BUS_WIDTH_16;
		break;
	case 0x02:
		ret_val = MV_DDR_PRI_BUS_WIDTH_32;
		break;
	case 0x03:
		ret_val = MV_DDR_PRI_BUS_WIDTH_64;
		break;
	default:
		ret_val = MV_DDR_PRI_BUS_WIDTH_LAST;
	}

	return ret_val;
}
/*
 * Decode spd byte 13, bits [4:3]: bus width extension (ecc lanes),
 * 0 or 8 bits.
 *
 * NOTE(review): the declared return type is enum mv_ddr_pkg_rank while
 * the result is actually enum mv_ddr_bus_width_ext; the value is
 * implicitly converted between enum types. The prototype in
 * mv_ddr_spd.h has the same mismatch — consider fixing both together.
 */
enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data)
{
	unsigned char bus_width_ext = spd_data->byte_fields.byte_13.bit_fields.bus_width_ext;
	enum mv_ddr_bus_width_ext ret_val;

	switch (bus_width_ext) {
	case 0x00:
		ret_val = MV_DDR_BUS_WIDTH_EXT_0;
		break;
	case 0x01:
		ret_val = MV_DDR_BUS_WIDTH_EXT_8;
		break;
	default:
		ret_val = MV_DDR_BUS_WIDTH_EXT_LAST;
	}

	return ret_val;
}
static enum mv_ddr_pkg_rank mv_ddr_spd_pkg_rank_get(union mv_ddr_spd_data *spd_data)
{
unsigned char pkg_rank = spd_data->byte_fields.byte_12.bit_fields.dimm_pkg_ranks_num;
enum mv_ddr_pkg_rank ret_val;
switch (pkg_rank) {
case 0x00:
ret_val = MV_DDR_PKG_RANK_1;
break;
case 0x01:
ret_val = MV_DDR_PKG_RANK_2;
break;
case 0x02:
ret_val = MV_DDR_PKG_RANK_3;
break;
case 0x03:
ret_val = MV_DDR_PKG_RANK_4;
break;
case 0x04:
ret_val = MV_DDR_PKG_RANK_5;
break;
case 0x05:
ret_val = MV_DDR_PKG_RANK_6;
break;
case 0x06:
ret_val = MV_DDR_PKG_RANK_7;
break;
case 0x07:
ret_val = MV_DDR_PKG_RANK_8;
break;
default:
ret_val = MV_DDR_PKG_RANK_LAST;
}
return ret_val;
}
static enum mv_ddr_die_count mv_ddr_spd_die_count_get(union mv_ddr_spd_data *spd_data)
{
unsigned char die_count = spd_data->byte_fields.byte_6.bit_fields.die_count;
enum mv_ddr_die_count ret_val;
switch (die_count) {
case 0x00:
ret_val = MV_DDR_DIE_CNT_1;
break;
case 0x01:
ret_val = MV_DDR_DIE_CNT_2;
break;
case 0x02:
ret_val = MV_DDR_DIE_CNT_3;
break;
case 0x03:
ret_val = MV_DDR_DIE_CNT_4;
break;
case 0x04:
ret_val = MV_DDR_DIE_CNT_5;
break;
case 0x05:
ret_val = MV_DDR_DIE_CNT_6;
break;
case 0x06:
ret_val = MV_DDR_DIE_CNT_7;
break;
case 0x07:
ret_val = MV_DDR_DIE_CNT_8;
break;
default:
ret_val = MV_DDR_DIE_CNT_LAST;
}
return ret_val;
}
unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data)
{
unsigned char cs_bit_mask = 0x0;
enum mv_ddr_pkg_rank pkg_rank = mv_ddr_spd_pkg_rank_get(spd_data);
enum mv_ddr_die_count die_cnt = mv_ddr_spd_die_count_get(spd_data);
if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_1)
cs_bit_mask = 0x1;
else if (pkg_rank == MV_DDR_PKG_RANK_1 && die_cnt == MV_DDR_DIE_CNT_2)
cs_bit_mask = 0x3;
else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_1)
cs_bit_mask = 0x3;
else if (pkg_rank == MV_DDR_PKG_RANK_2 && die_cnt == MV_DDR_DIE_CNT_2)
cs_bit_mask = 0xf;
return cs_bit_mask;
}
/* Return spd byte 2: key byte / dram device type (0xc = ddr4 sdram). */
unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data)
{
	return spd_data->byte_fields.byte_2;
}
/* Return spd byte 3, bits [3:0]: base module type (udimm, so-dimm, ...). */
unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data)
{
	return spd_data->byte_fields.byte_3.bit_fields.module_type;
}

View file

@ -0,0 +1,289 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _MV_DDR_SPD_H
#define _MV_DDR_SPD_H
#include "mv_ddr_topology.h"
/*
* Based on JEDEC Standard No. 21-C, 4.1.2.L-4:
* Serial Presence Detect (SPD) for DDR4 SDRAM Modules
*/
/* block 0: base configuration and dram parameters */
#define MV_DDR_SPD_DATA_BLOCK0_SIZE 128
/* block 1: module specific parameters sub-block */
#define MV_DDR_SPD_DATA_BLOCK1M_SIZE 64
/* block 1: hybrid memory parameters sub-block */
#define MV_DDR_SPD_DATA_BLOCK1H_SIZE 64
/* block 2: extended function parameter block */
#define MV_DDR_SPD_DATA_BLOCK2E_SIZE 64
/* block 2: manufacturing information */
#define MV_DDR_SPD_DATA_BLOCK2M_SIZE 64
/* block 3: end user programmable */
#define MV_DDR_SPD_DATA_BLOCK3_SIZE 128
#define MV_DDR_SPD_DEV_TYPE_DDR4 0xc
#define MV_DDR_SPD_MODULE_TYPE_UDIMM 0x2
#define MV_DDR_SPD_MODULE_TYPE_SO_DIMM 0x3
#define MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM 0x6
#define MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM 0x9
#define MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM 0xc
#define MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM 0xd
/*
* TODO: For now, the struct contains block 0 & block 1 with module specific
* parameters for unbuffered memory module types only.
*/
union mv_ddr_spd_data {
unsigned char all_bytes[MV_DDR_SPD_DATA_BLOCK0_SIZE +
MV_DDR_SPD_DATA_BLOCK1M_SIZE];
struct {
/* block 0 */
union { /* num of bytes used/num of bytes in spd device/crc coverage */
unsigned char all_bits;
struct {
unsigned char spd_bytes_used:4,
spd_bytes_total:3,
reserved:1;
} bit_fields;
} byte_0;
union { /* spd revision */
unsigned char all_bits;
struct {
unsigned char addtions_level:4,
encoding_level:4;
} bit_fields;
} byte_1;
unsigned char byte_2; /* key_byte/dram device type */
union { /* key byte/module type */
unsigned char all_bits;
struct {
unsigned char module_type:4,
hybrid_media:3,
hybrid:1;
} bit_fields;
} byte_3;
union { /* sdram density & banks */
unsigned char all_bits;
struct {
unsigned char die_capacity:4,
bank_address:2,
bank_group:2;
} bit_fields;
} byte_4;
union { /* sdram addressing */
unsigned char all_bits;
struct {
unsigned char col_address:3,
row_address:3,
reserved:2;
} bit_fields;
} byte_5;
union { /* sdram package type */
unsigned char all_bits;
struct {
unsigned char signal_loading:2,
reserved:2,
die_count:3,
sdram_package_type:1;
} bit_fields;
} byte_6;
union { /* sdram optional features */
unsigned char all_bits;
struct {
unsigned char mac:4, /* max activate count */
t_maw:2, /* max activate window */
reserved:2; /* all 0s */
} bit_fields;
} byte_7;
unsigned char byte_8; /* sdram thermal & refresh options; reserved; 0x00 */
union { /* other sdram optional features */
unsigned char all_bits;
struct {
unsigned char reserved:5, /* all 0s */
soft_ppr:1,
ppr:2; /* post package repair */
} bit_fields;
} byte_9;
union { /* secondary sdram package type */
unsigned char all_bits;
struct {
unsigned char signal_loading:2,
density_ratio:2, /* dram density ratio */
die_count:3,
sdram_package_type:1;
} bit_fields;
} byte_10;
union { /* module nominal voltage, vdd */
unsigned char all_bits;
struct {
unsigned char operable:1,
endurant:1,
reserved:5; /* all 0s */
} bit_fields;
} byte_11;
union { /* module organization*/
unsigned char all_bits;
struct {
unsigned char device_width:3,
dimm_pkg_ranks_num:3, /* package ranks per dimm number */
rank_mix:1,
reserved:1; /* 0 */
} bit_fields;
} byte_12;
union { /* module memory bus width */
unsigned char all_bits;
struct {
unsigned char primary_bus_width:3, /* in bits */
bus_width_ext:2, /* in bits */
reserved:3; /* all 0s */
} bit_fields;
} byte_13;
union { /* module thernal sensor */
unsigned char all_bits;
struct {
unsigned char reserved:7,
thermal_sensor:1;
} bit_fields;
} byte_14;
union { /* extended module type */
unsigned char all_bits;
struct {
unsigned char ext_base_module_type:4,
reserved:4; /* all 0s */
} bit_fields;
} byte_15;
unsigned char byte_16; /* reserved; 0x00 */
union { /* timebases */
unsigned char all_bits;
struct {
unsigned char ftb:2, /* fine timebase */
mtb:2, /* medium timebase */
reserved:4; /* all 0s */
} bit_fields;
} byte_17;
unsigned char byte_18; /* sdram min cycle time (t ck avg min), mtb */
unsigned char byte_19; /* sdram max cycle time (t ck avg max), mtb */
unsigned char byte_20; /* cas latencies supported, first byte */
unsigned char byte_21; /* cas latencies supported, second byte */
unsigned char byte_22; /* cas latencies supported, third byte */
unsigned char byte_23; /* cas latencies supported, fourth byte */
unsigned char byte_24; /* min cas latency time (t aa min), mtb */
unsigned char byte_25; /* min ras to cas delay time (t rcd min), mtb */
unsigned char byte_26; /* min row precharge delay time (t rp min), mtb */
union { /* upper nibbles for t ras min & t rc min */
unsigned char all_bits;
struct {
unsigned char t_ras_min_msn:4, /* t ras min most significant nibble */
t_rc_min_msn:4; /* t rc min most significant nibble */
} bit_fields;
} byte_27;
unsigned char byte_28; /* min active to precharge delay time (t ras min), l-s-byte, mtb */
unsigned char byte_29; /* min active to active/refresh delay time (t rc min), l-s-byte, mtb */
unsigned char byte_30; /* min refresh recovery delay time (t rfc1 min), l-s-byte, mtb */
unsigned char byte_31; /* min refresh recovery delay time (t rfc1 min), m-s-byte, mtb */
unsigned char byte_32; /* min refresh recovery delay time (t rfc2 min), l-s-byte, mtb */
unsigned char byte_33; /* min refresh recovery delay time (t rfc2 min), m-s-byte, mtb */
unsigned char byte_34; /* min refresh recovery delay time (t rfc4 min), l-s-byte, mtb */
unsigned char byte_35; /* min refresh recovery delay time (t rfc4 min), m-s-byte, mtb */
union { /* upper nibble for t faw */
unsigned char all_bits;
struct {
unsigned char t_faw_min_msn:4, /* t faw min most significant nibble */
reserved:4;
} bit_fields;
} byte_36;
unsigned char byte_37; /* min four activate window delay time (t faw min), l-s-byte, mtb */
/* byte 38: min activate to activate delay time (t rrd_s min), diff bank group, mtb */
unsigned char byte_38;
/* byte 39: min activate to activate delay time (t rrd_l min), same bank group, mtb */
unsigned char byte_39;
unsigned char byte_40; /* min cas to cas delay time (t ccd_l min), same bank group, mtb */
union { /* upper nibble for t wr min */
unsigned char all_bits;
struct {
unsigned char t_wr_min_msn:4, /* t wr min most significant nibble */
reserved:4;
} bit_fields;
} byte_41;
unsigned char byte_42; /* min write recovery time (t wr min) */
union { /* upper nibbles for t wtr min */
unsigned char all_bits;
struct {
unsigned char t_wtr_s_min_msn:4, /* t wtr s min most significant nibble */
t_wtr_l_min_msn:4; /* t wtr l min most significant nibble */
} bit_fields;
} byte_43;
unsigned char byte_44; /* min write to read time (t wtr s min), diff bank group, mtb */
unsigned char byte_45; /* min write to read time (t wtr l min), same bank group, mtb */
unsigned char bytes_46_59[14]; /* reserved; all 0s */
unsigned char bytes_60_77[18]; /* TODO: connector to sdram bit mapping */
unsigned char bytes_78_116[39]; /* reserved; all 0s */
/* fine offset for min cas to cas delay time (t ccd_l min), same bank group, ftb */
unsigned char byte_117;
/* fine offset for min activate to activate delay time (t rrd_l min), same bank group, ftb */
unsigned char byte_118;
/* fine offset for min activate to activate delay time (t rrd_s min), diff bank group, ftb */
unsigned char byte_119;
/* fine offset for min active to active/refresh delay time (t rc min), ftb */
unsigned char byte_120;
unsigned char byte_121; /* fine offset for min row precharge delay time (t rp min), ftb */
unsigned char byte_122; /* fine offset for min ras to cas delay time (t rcd min), ftb */
unsigned char byte_123; /* fine offset for min cas latency time (t aa min), ftb */
unsigned char byte_124; /* fine offset for sdram max cycle time (t ck avg max), ftb */
unsigned char byte_125; /* fine offset for sdram min cycle time (t ck avg min), ftb */
unsigned char byte_126; /* crc for base configuration section, l-s-byte */
unsigned char byte_127; /* crc for base configuration section, m-s-byte */
/*
* block 1: module specific parameters for unbuffered memory module types only
*/
union { /* (unbuffered) raw card extension, module nominal height */
unsigned char all_bits;
struct {
unsigned char nom_height_max:5, /* in mm */
raw_cad_ext:3;
} bit_fields;
} byte_128;
union { /* (unbuffered) module maximum thickness */
unsigned char all_bits;
struct {
unsigned char front_thickness_max:4, /* in mm */
back_thickness_max:4; /* in mm */
} bit_fields;
} byte_129;
union { /* (unbuffered) reference raw card used */
unsigned char all_bits;
struct {
unsigned char ref_raw_card:5,
ref_raw_card_rev:2,
ref_raw_card_ext:1;
} bit_fields;
} byte_130;
union { /* (unbuffered) address mapping from edge connector to dram */
unsigned char all_bits;
struct {
unsigned char rank_1_mapping:1,
reserved:7;
} bit_fields;
} byte_131;
unsigned char bytes_132_191[60]; /* reserved; all 0s */
} byte_fields;
};
int mv_ddr_spd_timing_calc(union mv_ddr_spd_data *spd_data, unsigned int timing_data[]);
enum mv_ddr_dev_width mv_ddr_spd_dev_width_get(union mv_ddr_spd_data *spd_data);
enum mv_ddr_die_capacity mv_ddr_spd_die_capacity_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_mem_mirror_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_cs_bit_mask_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_dev_type_get(union mv_ddr_spd_data *spd_data);
unsigned char mv_ddr_spd_module_type_get(union mv_ddr_spd_data *spd_data);
int mv_ddr_spd_supported_cls_calc(union mv_ddr_spd_data *spd_data);
unsigned int mv_ddr_spd_supported_cl_get(unsigned int cl);
enum mv_ddr_pkg_rank mv_ddr_spd_pri_bus_width_get(union mv_ddr_spd_data *spd_data);
enum mv_ddr_pkg_rank mv_ddr_spd_bus_width_ext_get(union mv_ddr_spd_data *spd_data);
#endif /* _MV_DDR_SPD_H */

View file

@ -0,0 +1,102 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_regs.h"
#include "mv_ddr_sys_env_lib.h"
/*
 * Return the Marvell board id used to index the wakeup-gpio table.
 * Only the DB-88F6820-GP development board reports a real id; custom
 * boards return 0 since board ids are not meaningful for them.
 */
static u32 mv_ddr_board_id_get(void)
{
#if defined(CONFIG_TARGET_DB_88F6820_GP)
	return DB_GP_68XX_ID;
#else
	/*
	 * Return 0 here for custom board as this should not be used
	 * for custom boards.
	 */
	return 0;
#endif
}
/* Convert a Marvell board id into a zero-based table index. */
static u32 mv_ddr_board_id_index_get(u32 board_id)
{
	/*
	 * Marvell Boards use 0x10 as base for Board ID:
	 * mask MSB to receive index for board ID
	 */
	return board_id & (MARVELL_BOARD_ID_MASK - 1);
}
/*
 * read gpio input for suspend-wakeup indication
 * return indicating suspend wakeup status:
 * 0 - not supported,
 * 1 - supported: read magic word detect wakeup,
 * 2 - detected wakeup from gpio
 */
enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void)
{
	u32 reg, board_id_index, gpio;
	struct board_wakeup_gpio board_gpio[] = MV_BOARD_WAKEUP_GPIO_INFO;

	board_id_index = mv_ddr_board_id_index_get(mv_ddr_board_id_get());
	/* bounds-check the index against the wakeup-gpio table size */
	if (!(sizeof(board_gpio) / sizeof(struct board_wakeup_gpio) >
	      board_id_index)) {
		printf("\n_failed loading Suspend-Wakeup information (invalid board ID)\n");
		return SUSPEND_WAKEUP_DISABLED;
	}

	/*
	 * - Detect if Suspend-Wakeup is supported on current board
	 * - Fetch the GPIO number for wakeup status input indication
	 *   (gpio_num sentinels: -1 = unsupported, -2 = supported but
	 *    no gpio indication wired)
	 */
	if (board_gpio[board_id_index].gpio_num == -1) {
		/* Suspend to RAM is not supported */
		return SUSPEND_WAKEUP_DISABLED;
	} else if (board_gpio[board_id_index].gpio_num == -2) {
		/*
		 * Suspend to RAM is supported but GPIO indication is
		 * not implemented - Skip
		 */
		return SUSPEND_WAKEUP_ENABLED;
	} else {
		gpio = board_gpio[board_id_index].gpio_num;
	}

	/* Initialize MPP for GPIO (set MPP = 0x0) */
	reg = reg_read(MPP_CONTROL_REG(MPP_REG_NUM(gpio)));
	/* reset MPP21 to 0x0, keep rest of MPP settings*/
	reg &= ~MPP_MASK(gpio);
	reg_write(MPP_CONTROL_REG(MPP_REG_NUM(gpio)), reg);

	/* Initialize GPIO as input */
	reg = reg_read(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)));
	reg |= GPP_MASK(gpio);
	reg_write(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)), reg);

	/*
	 * Check GPP for input status from PIC: 0 - regular init,
	 * 1 - suspend wakeup
	 */
	reg = reg_read(GPP_DATA_IN_REG(GPP_REG_NUM(gpio)));

	/* if GPIO is ON: wakeup from S2RAM indication detected */
	return (reg & GPP_MASK(gpio)) ? SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED :
		SUSPEND_WAKEUP_DISABLED;
}
/*
 * get bit mask of enabled cs
 * return bit mask of enabled cs:
 * 1 - only cs0 enabled,
 * 3 - both cs0 and cs1 enabled
 */
u32 mv_ddr_sys_env_get_cs_ena_from_reg(void)
{
	u32 cs_field_mask = 0;
	unsigned int cs;

	/* collect the cs-exist fields of all four chip selects */
	for (cs = 0; cs < 4; cs++)
		cs_field_mask |= CS_EXIST_MASK << CS_EXIST_OFFS(cs);

	return reg_read(DDR3_RANK_CTRL_REG) & cs_field_mask;
}

View file

@ -0,0 +1,117 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#ifndef _MV_DDR_SYS_ENV_LIB_H
#define _MV_DDR_SYS_ENV_LIB_H

#include "ddr_ml_wrapper.h"

/* device revision */
#define DEV_ID_REG			0x18238
#define DEV_VERSION_ID_REG		0x1823c
#define REVISON_ID_OFFS			8
#define REVISON_ID_MASK			0xf00

/*
 * mpp/gpp register helpers: each mpp control register holds 8 pins
 * (4 bits per pin); each gpp register bank covers 32 pins.
 * All macro arguments are parenthesized and no macro carries a
 * trailing ';', so they are safe inside larger expressions.
 */
#define MPP_CONTROL_REG(id)		(0x18000 + ((id) * 4))
#define GPP_DATA_OUT_REG(grp)		(MV_GPP_REGS_BASE(grp) + 0x00)
#define GPP_DATA_OUT_EN_REG(grp)	(MV_GPP_REGS_BASE(grp) + 0x04)
#define GPP_DATA_IN_REG(grp)		(MV_GPP_REGS_BASE(grp) + 0x10)
#define MV_GPP_REGS_BASE(unit)		(0x18100 + ((unit) * 0x40))

#define MPP_REG_NUM(GPIO_NUM)		((GPIO_NUM) / 8)
/* 4-bit field of the pin inside its mpp control register */
#define MPP_MASK(GPIO_NUM)		(0xf << (4 * ((GPIO_NUM) - \
					(MPP_REG_NUM(GPIO_NUM) * 8))))
#define GPP_REG_NUM(GPIO_NUM)		((GPIO_NUM) / 32)
#define GPP_MASK(GPIO_NUM)		(1 << ((GPIO_NUM) % 32))

/* device ID */
/* Board ID numbers */
#define MARVELL_BOARD_ID_MASK		0x10

/* Customer boards for A38x */
#define A38X_CUSTOMER_BOARD_ID_BASE	0x0
#define A38X_CUSTOMER_BOARD_ID0		(A38X_CUSTOMER_BOARD_ID_BASE + 0)
#define A38X_CUSTOMER_BOARD_ID1		(A38X_CUSTOMER_BOARD_ID_BASE + 1)
#define A38X_MV_MAX_CUSTOMER_BOARD_ID	(A38X_CUSTOMER_BOARD_ID_BASE + 2)
#define A38X_MV_CUSTOMER_BOARD_NUM	(A38X_MV_MAX_CUSTOMER_BOARD_ID - \
					 A38X_CUSTOMER_BOARD_ID_BASE)

/* Marvell boards for A38x */
#define A38X_MARVELL_BOARD_ID_BASE	0x10
#define RD_NAS_68XX_ID			(A38X_MARVELL_BOARD_ID_BASE + 0)
#define DB_68XX_ID			(A38X_MARVELL_BOARD_ID_BASE + 1)
#define RD_AP_68XX_ID			(A38X_MARVELL_BOARD_ID_BASE + 2)
#define DB_AP_68XX_ID			(A38X_MARVELL_BOARD_ID_BASE + 3)
#define DB_GP_68XX_ID			(A38X_MARVELL_BOARD_ID_BASE + 4)
#define DB_BP_6821_ID			(A38X_MARVELL_BOARD_ID_BASE + 5)
#define DB_AMC_6820_ID			(A38X_MARVELL_BOARD_ID_BASE + 6)
#define A38X_MV_MAX_MARVELL_BOARD_ID	(A38X_MARVELL_BOARD_ID_BASE + 7)
#define A38X_MV_MARVELL_BOARD_NUM	(A38X_MV_MAX_MARVELL_BOARD_ID - \
					 A38X_MARVELL_BOARD_ID_BASE)

/* Marvell boards for A39x */
#define A39X_MARVELL_BOARD_ID_BASE	0x30
#define A39X_DB_69XX_ID			(A39X_MARVELL_BOARD_ID_BASE + 0)
#define A39X_RD_69XX_ID			(A39X_MARVELL_BOARD_ID_BASE + 1)
#define A39X_MV_MAX_MARVELL_BOARD_ID	(A39X_MARVELL_BOARD_ID_BASE + 2)
#define A39X_MV_MARVELL_BOARD_NUM	(A39X_MV_MAX_MARVELL_BOARD_ID - \
					 A39X_MARVELL_BOARD_ID_BASE)

/* one entry per board: which gpio (if any) signals suspend wakeup */
struct board_wakeup_gpio {
	u32 board_id;
	int gpio_num;
};

enum suspend_wakeup_status {
	SUSPEND_WAKEUP_DISABLED,
	SUSPEND_WAKEUP_ENABLED,
	SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED,
};

/*
 * GPIO status indication for Suspend Wakeup:
 * If suspend to RAM is supported and GPIO indication is implemented,
 * set the gpio number
 * If suspend to RAM is supported but GPIO indication is not implemented
 * set '-2'
 * If suspend to RAM is not supported set '-1'
 */
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
#ifdef CONFIG_ARMADA_38X
/*
 * NOTE(review): both entries use A38X_CUSTOMER_BOARD_ID0; the second
 * was presumably meant to be A38X_CUSTOMER_BOARD_ID1 - confirm with
 * upstream mv-ddr (gpio_num is -1 for both, so lookups are unaffected)
 */
#define MV_BOARD_WAKEUP_GPIO_INFO { \
	{A38X_CUSTOMER_BOARD_ID0, -1 }, \
	{A38X_CUSTOMER_BOARD_ID0, -1 }, \
}
#else
/*
 * NOTE(review): A39X_CUSTOMER_BOARD_ID0 is not defined in this header;
 * this branch cannot compile if ever selected - confirm with upstream
 */
#define MV_BOARD_WAKEUP_GPIO_INFO { \
	{A39X_CUSTOMER_BOARD_ID0, -1 }, \
	{A39X_CUSTOMER_BOARD_ID0, -1 }, \
}
#endif /* CONFIG_ARMADA_38X */
#else
#ifdef CONFIG_ARMADA_38X
#define MV_BOARD_WAKEUP_GPIO_INFO { \
	{RD_NAS_68XX_ID, -2 }, \
	{DB_68XX_ID, -1 }, \
	{RD_AP_68XX_ID, -2 }, \
	{DB_AP_68XX_ID, -2 }, \
	{DB_GP_68XX_ID, -2 }, \
	{DB_BP_6821_ID, -2 }, \
	{DB_AMC_6820_ID, -2 }, \
}
#else
#define MV_BOARD_WAKEUP_GPIO_INFO { \
	{A39X_RD_69XX_ID, -1 }, \
	{A39X_DB_69XX_ID, -1 }, \
}
#endif /* CONFIG_ARMADA_38X */
#endif /* CONFIG_CUSTOMER_BOARD_SUPPORT */

enum suspend_wakeup_status mv_ddr_sys_env_suspend_wakeup_check(void);
u32 mv_ddr_sys_env_get_cs_ena_from_reg(void);

#endif /* _MV_DDR_SYS_ENV_LIB_H */

View file

@ -0,0 +1,197 @@
// SPDX-License-Identifier: GPL-2.0
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include "mv_ddr_topology.h"
#include "mv_ddr_common.h"
#include "mv_ddr_spd.h"
#include "ddr3_init.h"
#include "ddr_topology_def.h"
#include "ddr3_training_ip_db.h"
#include "ddr3_training_ip.h"
/*
 * Derive the cas latency from the minimum cas latency time (taa_min)
 * and the clock period (tclk), both in the same time unit: round up
 * to a whole number of clocks, then snap to a cl the dimm supports.
 */
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	return mv_ddr_spd_supported_cl_get(ceil_div(taa_min, tclk));
}
/*
 * Map a clock period (tclk, picoseconds) to the matching cas write
 * latency. Thresholds are scanned from the slowest clock down; a tclk
 * below all thresholds yields 0, meaning "unsupported".
 */
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	static const struct {
		unsigned int tclk_min;	/* lowest tclk for this cwl */
		unsigned int cwl;
	} cwl_map[] = {
		{ 1250, 9 },
		{ 1071, 10 },
		{ 938, 11 },
		{ 833, 12 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(cwl_map) / sizeof(cwl_map[0]); i++) {
		if (tclk >= cwl_map[i].tclk_min)
			return cwl_map[i].cwl;
	}

	return 0;
}
/*
 * mv_ddr_topology_map_update() - finalize the board's ddr topology map
 *
 * Resolves a "sample at reset" frequency to the real boot frequency,
 * then fills in the fields that depend on the configuration source:
 * - MV_DDR_CFG_SPD: derive device width, die capacity, bus/cs/mirror
 *   masks and cas/cas-write latencies from the dimm's spd data;
 * - MV_DDR_CFG_DEFAULT: fill any unset cas/cas-write latencies from
 *   the speed-bin lookup tables.
 *
 * Return: pointer to the updated topology map, or NULL if the spd
 * content is unsupported or invalid.
 */
struct mv_ddr_topology_map *mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum hws_speed_bin speed_bin_index;
	enum hws_ddr_freq freq = DDR_FREQ_LAST;
	unsigned int tclk;
	unsigned char val = 0;
	int i;

	/* resolve "sample at reset" to the actual boot-time frequency */
	if (tm->interface_params[0].memory_freq == DDR_FREQ_SAR)
		tm->interface_params[0].memory_freq = mv_ddr_init_freq_get();

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/*
		 * check dram device type
		 * NOTE(review): only the DDR4 spd device type is accepted
		 * here although ddr4 support was compiled out in this
		 * port - confirm against the dimms this path serves
		 */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return NULL;
		}
		/* update topology map with timing data (>0 flags an error) */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return NULL;
		}
		/* update device width in topology map */
		tm->interface_params[0].bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);
		/* update die capacity in topology map */
		tm->interface_params[0].memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);
		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();
		/* update cs bit mask in topology map (same mask per octet) */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].cs_bitmask = val;
		}
		/* check dram module type; only unbuffered variants pass */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return NULL;
		}
		/*
		 * update mirror bit mask in topology map
		 * NOTE(review): the mirror flag is shifted to bit 1,
		 * i.e. applied to the second rank only - confirm intended
		 */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++) {
			tm->interface_params[0].as_bus_params[i].mirror_enable_bitmask = val << 1;
		}
		/* clock period in ps from the frequency lookup table */
		tclk = 1000000 / freq_val[tm->interface_params[0].memory_freq];
		/* update cas write latency (cwl); 0 means unsupported tclk */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_wl = val;
		/* update cas latency (cl) from the spd-supported set */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return NULL;
		}
		tm->interface_params[0].cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they unset */
		speed_bin_index = tm->interface_params[0].speed_bin_index;
		freq = tm->interface_params[0].memory_freq;

		if (tm->interface_params[0].cas_l == 0)
			tm->interface_params[0].cas_l =
				cas_latency_table[speed_bin_index].cl_val[freq];

		if (tm->interface_params[0].cas_wl == 0)
			tm->interface_params[0].cas_wl =
				cas_write_latency_table[speed_bin_index].cl_val[freq];
	}

	return tm;
}
/*
 * Build the active-bus bit mask from the spd's primary/extension bus
 * widths. Returns 0 when the topology is not spd-driven or the primary
 * width is not one of 16/32/64 bits.
 */
unsigned short mv_ddr_bus_bit_mask_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	unsigned short mask = 0x0;
	enum mv_ddr_pri_bus_width pri_bus_width;
	enum mv_ddr_bus_width_ext bus_width_ext;

	if (tm->cfg_src != MV_DDR_CFG_SPD)
		return mask;

	pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
	bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);

	if (pri_bus_width == MV_DDR_PRI_BUS_WIDTH_16)
		mask = BUS_MASK_16BIT;
	else if (pri_bus_width == MV_DDR_PRI_BUS_WIDTH_32)
		mask = BUS_MASK_32BIT;
	else if (pri_bus_width == MV_DDR_PRI_BUS_WIDTH_64)
		mask = MV_DDR_64BIT_BUS_MASK;

	/* an 8-bit extension (ecc) turns on the last octet's bit */
	if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
		mask |= 1 << (octets_per_if_num - 1);

	return mask;
}
/*
 * Translate the topology's active-bus mask into a bus width in bits
 * (16, 32 or 64). An unrecognized mask is reported and yields 0.
 */
unsigned int mv_ddr_if_bus_width_get(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	unsigned int bus_width = 0;

	if (tm->bus_act_mask == BUS_MASK_16BIT ||
	    tm->bus_act_mask == BUS_MASK_16BIT_ECC ||
	    tm->bus_act_mask == BUS_MASK_16BIT_ECC_PUP3)
		bus_width = 16;
	else if (tm->bus_act_mask == BUS_MASK_32BIT ||
		 tm->bus_act_mask == BUS_MASK_32BIT_ECC ||
		 tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)
		bus_width = 32;
	else if (tm->bus_act_mask == MV_DDR_64BIT_BUS_MASK ||
		 tm->bus_act_mask == MV_DDR_64BIT_ECC_PUP8_BUS_MASK)
		bus_width = 64;
	else
		printf("mv_ddr: unsupported bus active mask parameter found\n");

	return bus_width;
}

View file

@ -0,0 +1,123 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#ifndef _MV_DDR_TOPOLOGY_H
#define _MV_DDR_TOPOLOGY_H

/* ddr bus masks: one bit per active octet (pup), ecc octet included */
#define BUS_MASK_32BIT			0xf
#define BUS_MASK_32BIT_ECC		0x1f
#define BUS_MASK_16BIT			0x3
#define BUS_MASK_16BIT_ECC		0x13
#define BUS_MASK_16BIT_ECC_PUP3		0xb
#define MV_DDR_64BIT_BUS_MASK		0xff
#define MV_DDR_64BIT_ECC_PUP8_BUS_MASK	0x1ff
#define MV_DDR_32BIT_ECC_PUP8_BUS_MASK	0x10f

/* source of ddr configuration data */
enum mv_ddr_cfg_src {
	MV_DDR_CFG_DEFAULT,	/* based on data in mv_ddr_topology_map structure */
	MV_DDR_CFG_SPD,		/* based on data in spd */
	MV_DDR_CFG_USER,	/* based on data from user */
	MV_DDR_CFG_STATIC,	/* based on data from user in register-value format */
	MV_DDR_CFG_LAST
};

enum mv_ddr_num_of_sub_phys_per_ddr_unit {
	SINGLE_SUB_PHY = 1,
	TWO_SUB_PHYS = 2
};

enum mv_ddr_temperature {
	MV_DDR_TEMP_LOW,
	MV_DDR_TEMP_NORMAL,
	MV_DDR_TEMP_HIGH
};

/* indices into the topology map's timing_data[] array */
enum mv_ddr_timing_data {
	MV_DDR_TCK_AVG_MIN, /* sdram min cycle time (t ck avg min) */
	MV_DDR_TAA_MIN, /* min cas latency time (t aa min) */
	MV_DDR_TRFC1_MIN, /* min refresh recovery delay time (t rfc1 min) */
	MV_DDR_TWR_MIN, /* min write recovery time (t wr min) */
	MV_DDR_TRCD_MIN, /* min ras to cas delay time (t rcd min) */
	MV_DDR_TRP_MIN, /* min row precharge delay time (t rp min) */
	MV_DDR_TRC_MIN, /* min active to active/refresh delay time (t rc min) */
	MV_DDR_TRAS_MIN, /* min active to precharge delay time (t ras min) */
	MV_DDR_TRRD_S_MIN, /* min activate to activate delay time (t rrd_s min), diff bank group */
	MV_DDR_TRRD_L_MIN, /* min activate to activate delay time (t rrd_l min), same bank group */
	MV_DDR_TFAW_MIN, /* min four activate window delay time (t faw min) */
	MV_DDR_TWTR_S_MIN, /* min write to read time (t wtr s min), diff bank group */
	MV_DDR_TWTR_L_MIN, /* min write to read time (t wtr l min), same bank group */
	MV_DDR_TDATA_LAST
};

enum mv_ddr_dev_width { /* sdram device width */
	MV_DDR_DEV_WIDTH_4BIT,
	MV_DDR_DEV_WIDTH_8BIT,
	MV_DDR_DEV_WIDTH_16BIT,
	MV_DDR_DEV_WIDTH_32BIT,
	MV_DDR_DEV_WIDTH_LAST
};

enum mv_ddr_die_capacity { /* total sdram capacity per die, megabits */
	MV_DDR_DIE_CAP_256MBIT,
	/*
	 * NOTE(review): 512MBIT is forced back to 0, aliasing 256MBIT.
	 * This appears to preserve the numeric values of the legacy
	 * MEM_512M..MEM_8G enum this one replaces - confirm before
	 * renumbering; existing topology tables depend on these values.
	 */
	MV_DDR_DIE_CAP_512MBIT = 0,
	MV_DDR_DIE_CAP_1GBIT,
	MV_DDR_DIE_CAP_2GBIT,
	MV_DDR_DIE_CAP_4GBIT,
	MV_DDR_DIE_CAP_8GBIT,
	MV_DDR_DIE_CAP_16GBIT,
	MV_DDR_DIE_CAP_32GBIT,
	MV_DDR_DIE_CAP_12GBIT,
	MV_DDR_DIE_CAP_24GBIT,
	MV_DDR_DIE_CAP_LAST
};

enum mv_ddr_pkg_rank { /* number of package ranks per dimm */
	MV_DDR_PKG_RANK_1,
	MV_DDR_PKG_RANK_2,
	MV_DDR_PKG_RANK_3,
	MV_DDR_PKG_RANK_4,
	MV_DDR_PKG_RANK_5,
	MV_DDR_PKG_RANK_6,
	MV_DDR_PKG_RANK_7,
	MV_DDR_PKG_RANK_8,
	MV_DDR_PKG_RANK_LAST
};

enum mv_ddr_pri_bus_width { /* number of primary bus width bits */
	MV_DDR_PRI_BUS_WIDTH_8,
	MV_DDR_PRI_BUS_WIDTH_16,
	MV_DDR_PRI_BUS_WIDTH_32,
	MV_DDR_PRI_BUS_WIDTH_64,
	MV_DDR_PRI_BUS_WIDTH_LAST
};

enum mv_ddr_bus_width_ext { /* number of extension bus width bits */
	MV_DDR_BUS_WIDTH_EXT_0,
	MV_DDR_BUS_WIDTH_EXT_8,
	MV_DDR_BUS_WIDTH_EXT_LAST
};

enum mv_ddr_die_count {
	MV_DDR_DIE_CNT_1,
	MV_DDR_DIE_CNT_2,
	MV_DDR_DIE_CNT_3,
	MV_DDR_DIE_CNT_4,
	MV_DDR_DIE_CNT_5,
	MV_DDR_DIE_CNT_6,
	MV_DDR_DIE_CNT_7,
	MV_DDR_DIE_CNT_8,
	MV_DDR_DIE_CNT_LAST
};

/* cas latency from taa_min and tclk; see mv_ddr_topology.c */
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk);
/* cas write latency from tclk; 0 means unsupported clock period */
unsigned int mv_ddr_cwl_calc(unsigned int tclk);
struct mv_ddr_topology_map *mv_ddr_topology_map_update(void);
struct dram_config *mv_ddr_dram_config_update(void);
unsigned short mv_ddr_bus_bit_mask_get(void);
unsigned int mv_ddr_if_bus_width_get(void);

#endif /* _MV_DDR_TOPOLOGY_H */

View file

@ -0,0 +1,64 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef _SEQ_EXEC_H
#define _SEQ_EXEC_H
#define NA 0xff
#define DEFAULT_PARAM 0
#define MV_BOARD_TCLK_ERROR 0xffffffff
#define NO_DATA 0xffffffff
#define MAX_DATA_ARRAY 5
#define FIRST_CELL 0
/* Operation types */
enum mv_op {
WRITE_OP,
DELAY_OP,
POLL_OP,
};
/* Operation parameters */
struct op_params {
u32 unit_base_reg;
u32 unit_offset;
u32 mask;
u32 data[MAX_DATA_ARRAY]; /* data array */
u8 wait_time; /* msec */
u16 num_of_loops; /* for polling only */
};
/*
* Sequence parameters. Each sequence contains:
* 1. Sequence id.
* 2. Sequence size (total amount of operations during the sequence)
* 3. a series of operations. operations can be write, poll or delay
* 4. index in the data array (the entry where the relevant data sits)
*/
struct cfg_seq {
struct op_params *op_params_ptr;
u8 cfg_seq_size;
u8 data_arr_idx;
};
extern struct cfg_seq serdes_seq_db[];
/*
* A generic function type for executing an operation (write, poll or delay)
*/
typedef int (*op_execute_func_ptr)(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
/* Specific functions for executing each operation */
int write_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int delay_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int poll_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx);
enum mv_op get_cfg_seq_op(struct op_params *params);
int mv_seq_exec(u32 serdes_num, u32 seq_id);
#endif /*_SEQ_EXEC_H*/

View file

@ -1,16 +0,0 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#ifndef __silicon_if_H
#define __silicon_if_H
/* max number of devices supported by driver */
#ifdef CO_CPU_RUN
#define HWS_MAX_DEVICE_NUM (1)
#else
#define HWS_MAX_DEVICE_NUM (16)
#endif
#endif /* __silicon_if_H */

View file

@ -3,13 +3,6 @@
* Copyright (C) Marvell International Ltd. and its affiliates
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "xor_regs.h"
@ -21,39 +14,48 @@
#endif
static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS];
static u32 ui_xor_regs_mask_backup[MAX_CS];
static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, u32 cs_size, u32 base_delta)
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
u32 reg, ui, base, cs_count;
u32 reg, ui, cs_count;
uint64_t base, size_mask;
ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
ui_xor_regs_base_backup[ui] =
reg_read(XOR_BASE_ADDR_REG(0, ui));
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
ui_xor_regs_mask_backup[ui] =
reg_read(XOR_SIZE_MASK_REG(0, ui));
reg = 0;
for (ui = 0; ui < (num_of_cs); ui++) {
for (ui = 0, cs_count = 0;
(cs_count < num_of_cs) && (ui < 8);
ui++, cs_count++) {
if (cs_ena & (1 << ui)) {
/* Enable Window x for each CS */
reg |= (0x1 << (ui));
/* Enable Window x for each CS */
reg |= (0x3 << ((ui * 2) + 16));
}
}
reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);
cs_count = 0;
for (ui = 0; ui < num_of_cs; ui++) {
for (ui = 0, cs_count = 0;
(cs_count < num_of_cs) && (ui < 8);
ui++, cs_count++) {
if (cs_ena & (1 << ui)) {
/*
* window x - Base - 0x00000000,
* Attribute 0x0e - DRAM
*/
base = cs_size * ui + base_delta;
/* fixed size 2GB for each CS */
size_mask = 0x7FFF0000;
switch (ui) {
case 0:
base |= 0xe00;
@ -67,13 +69,19 @@ void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, u32 cs_size, u32 base_delta)
case 3:
base |= 0x700;
break;
case 4: /* SRAM */
base = 0x40000000;
/* configure as shared transaction */
base |= 0x1F00;
size_mask = 0xF0000;
break;
}
reg_write(XOR_BASE_ADDR_REG(0, cs_count), base);
reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
size_mask = (cs_size / _64K) - 1;
size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) & XESMRX_SIZE_MASK_MASK;
/* window x - Size */
reg_write(XOR_SIZE_MASK_REG(0, cs_count), 0x7fff0000);
cs_count++;
reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
}
}
@ -87,10 +95,10 @@ void mv_sys_xor_finish(void)
u32 ui;
reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
reg_write(XOR_BASE_ADDR_REG(0, ui),
ui_xor_regs_base_backup[ui]);
for (ui = 0; ui < MAX_CS; ui++)
for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
reg_write(XOR_SIZE_MASK_REG(0, ui),
ui_xor_regs_mask_backup[ui]);
@ -153,11 +161,14 @@ int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
return MV_OK;
}
int mv_xor_mem_init(u32 chan, u32 start_ptr, u32 block_size,
int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
u32 init_val_high, u32 init_val_low)
{
u32 temp;
if (block_size == _4G)
block_size -= 1;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN)
return MV_BAD_PARAM;
@ -328,28 +339,123 @@ void ddr3_new_tip_ecc_scrub(void)
{
u32 cs_c, max_cs;
u32 cs_ena = 0;
u32 dev_num = 0;
uint64_t total_mem_size, cs_mem_size = 0;
printf("DDR3 Training Sequence - Start scrubbing\n");
max_cs = hws_ddr3_tip_max_cs_get();
printf("DDR Training Sequence - Start scrubbing\n");
max_cs = ddr3_tip_max_cs_get(dev_num);
for (cs_c = 0; cs_c < max_cs; cs_c++)
cs_ena |= 1 << cs_c;
mv_sys_xor_init(max_cs, cs_ena, 0x80000000, 0);
mv_xor_mem_init(0, 0x00000000, 0x80000000, 0xdeadbeef, 0xdeadbeef);
mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
total_mem_size = max_cs * cs_mem_size;
mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
mv_xor_mem_init(0, 0x80000000, 0x40000000, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
/* Return XOR State */
mv_sys_xor_finish();
printf("DDR3 Training Sequence - End scrubbing\n");
}
/*
 * mv_xor_transfer - Transfer data from source to destination in one of
 * three modes: XOR, CRC32 or DMA
 *
 * DESCRIPTION:
 *       This function initiates XOR channel, according to function parameters,
 *       in order to perform XOR, CRC32 or DMA transaction.
 *       To gain maximum performance the user is asked to keep the following
 *       restrictions:
 *       1) Selected engine is available (not busy).
 *       2) This module does not take into consideration CPU MMU issues.
 *          In order for the XOR engine to access the appropriate source
 *          and destination, address parameters must be given in system
 *          physical mode.
 *       3) This API does not take care of cache coherency issues. The source,
 *          destination and, in case of chain, the descriptor list are assumed
 *          to be cache coherent.
 *       4) Parameters validity.
 *
 * INPUT:
 *       chan          - XOR channel number.
 *       type          - One of three: XOR, CRC32 and DMA operations.
 *       xor_chain_ptr - address of chain pointer
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       MV_BAD_PARAM if parameters to function invalid, MV_OK otherwise.
 *
 *******************************************************************************/
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
{
	u32 temp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}
	/* refuse to restart a channel that is still processing a chain */
	if (mv_xor_state_get(chan) == MV_ACTIVE) {
		DB(printf("%s: ERR. Channel is already active\n", __func__));
		return MV_BUSY;
	}
	if (xor_chain_ptr == 0x0) {
		DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n", __func__));
		return MV_BAD_PARAM;
	}

	/* read configuration register and mask the operation mode field */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (type) {
	case MV_XOR:
		/* xor-mode descriptors must have bits [5:0] of the pointer clear */
		if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to XOR */
		temp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		/* dma-mode descriptors must have bits [4:0] of the pointer clear */
		if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to DMA */
		temp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		/* crc-mode descriptors must have bits [4:0] of the pointer clear */
		if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to CRC32 */
		temp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* write the operation mode to the register */
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
	/*
	 * update the NextDescPtr field in the XOR Engine [0..1] Next Descriptor
	 * Pointer Register (XExNDPR)
	 */
	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  xor_chain_ptr);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}

View file

@ -8,8 +8,6 @@
#define SRAM_BASE 0x40000000
#include "ddr3_hws_hw_training_def.h"
#define MV_XOR_MAX_UNIT 2 /* XOR unit == XOR engine */
#define MV_XOR_MAX_CHAN 4 /* total channels for all units */
#define MV_XOR_MAX_CHAN_PER_UNIT 2 /* channels for units */
@ -87,5 +85,6 @@ int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl);
int mv_xor_command_set(u32 chan, enum mv_command command);
int mv_xor_override_set(u32 chan, enum xor_override_target target, u32 win_num,
int enable);
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr);
#endif