
Merge git://git.denx.de/u-boot-marvell

Tom Rini 2015-07-23 09:02:28 -04:00
commit 3c9cc70d71
98 changed files with 22783 additions and 10 deletions


@@ -887,7 +887,7 @@ MKIMAGEFLAGS_u-boot.kwb = -n $(srctree)/$(CONFIG_SYS_KWD_CONFIG:"%"=%) \
-T kwbimage -a $(CONFIG_SYS_TEXT_BASE) -e $(CONFIG_SYS_TEXT_BASE)
MKIMAGEFLAGS_u-boot-spl.kwb = -n $(srctree)/$(CONFIG_SYS_KWD_CONFIG:"%"=%) \
-T kwbimage -a $(CONFIG_SYS_TEXT_BASE) -e $(CONFIG_SYS_TEXT_BASE)
-T kwbimage -a $(CONFIG_SPL_TEXT_BASE) -e $(CONFIG_SPL_TEXT_BASE)
MKIMAGEFLAGS_u-boot.pbl = -n $(srctree)/$(CONFIG_SYS_FSL_PBL_RCW:"%"=%) \
-R $(srctree)/$(CONFIG_SYS_FSL_PBL_PBI:"%"=%) -T pblimage


@@ -20,5 +20,7 @@ obj-y += timer.o
obj-$(CONFIG_SPL_BUILD) += spl.o
obj-$(CONFIG_SPL_BUILD) += lowlevel_spl.o
obj-y += serdes/
obj-$(CONFIG_SYS_MVEBU_DDR_A38X) += serdes/a38x/
obj-$(CONFIG_SYS_MVEBU_DDR_AXP) += serdes/axp/
endif


@@ -163,6 +163,14 @@ static void update_sdram_window_sizes(void)
}
}
void mmu_disable(void)
{
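/* Read SCTLR via CP15, clear the M bit (bit 0) and write it back to turn the MMU off */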
asm volatile(
"mrc p15, 0, r0, c1, c0, 0\n"
"bic r0, #1\n"
"mcr p15, 0, r0, c1, c0, 0\n");
}
#ifdef CONFIG_ARCH_CPU_INIT
static void set_cbar(u32 addr)
{
@@ -172,6 +180,16 @@ static void set_cbar(u32 addr)
int arch_cpu_init(void)
{
#ifndef CONFIG_SPL_BUILD
/*
* The base register address on Armada 38x can only be switched
* while the MMU is disabled. Without this, the SDRAM located at
* >= 0x4000.0000 is also not accessible, as it is still locked
* to cache.
*/
mmu_disable();
#endif
/* Linux expects the internal registers to be at 0xf1000000 */
writel(SOC_REGS_PHY_BASE, INTREG_BASE_ADDR_REG);
set_cbar(SOC_REGS_PHY_BASE + 0xC000);


@@ -125,7 +125,7 @@ int serdes_phy_config(void);
/*
* DDR3 init / training code ported from Marvell bin_hdr. Now
* available in mainline U-Boot in:
* drivers/ddr/mvebu/
* drivers/ddr/marvell
*/
int ddr3_init(void);
#endif /* __ASSEMBLY__ */


@@ -28,7 +28,17 @@
/* SOC specific definations */
#define INTREG_BASE 0xd0000000
#define INTREG_BASE_ADDR_REG (INTREG_BASE + 0x20080)
#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SYS_MVEBU_DDR_A38X)
/*
* On A38x, switching the regs base address while not running from
* SDRAM doesn't seem to work. So let the SPL still use the
* default base address and switch to the new address later in the
* main U-Boot.
*/
#define SOC_REGS_PHY_BASE 0xd0000000
#else
#define SOC_REGS_PHY_BASE 0xf1000000
#endif
#define MVEBU_REGISTER(x) (SOC_REGS_PHY_BASE + x)
#define MVEBU_SDRAM_SCRATCH (MVEBU_REGISTER(0x01504))
@@ -52,6 +62,7 @@
#define MVEBU_USB20_BASE (MVEBU_REGISTER(0x58000))
#define MVEBU_EGIGA0_BASE (MVEBU_REGISTER(0x70000))
#define MVEBU_EGIGA1_BASE (MVEBU_REGISTER(0x74000))
#define MVEBU_AXP_SATA_BASE (MVEBU_REGISTER(0xa0000))
#define MVEBU_SATA0_BASE (MVEBU_REGISTER(0xa8000))
#define MVEBU_SDIO_BASE (MVEBU_REGISTER(0xd8000))
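All on-chip units are addressed through the relocatable MVEBU_REGISTER() window defined above. As a minimal sketch (assuming readl() from asm/io.h; the helper name and offset handling are purely illustrative):
#include <asm/io.h>

/* Illustrative only: read one word from the SDIO controller's register
 * block through the currently selected register window. */
static inline u32 sdio_read_reg(u32 offset)
{
	return readl(MVEBU_SDIO_BASE + offset);
}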


@@ -0,0 +1,10 @@
#
# SPDX-License-Identifier: GPL-2.0+
#
obj-$(CONFIG_SPL_BUILD) = ctrl_pex.o
obj-$(CONFIG_SPL_BUILD) += high_speed_env_spec.o
obj-$(CONFIG_SPL_BUILD) += high_speed_env_spec-38x.o
obj-$(CONFIG_SPL_BUILD) += high_speed_topology_spec-38x.o
obj-$(CONFIG_SPL_BUILD) += seq_exec.o
obj-$(CONFIG_SPL_BUILD) += sys_env_lib.o


@@ -0,0 +1,347 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ctrl_pex.h"
#include "sys_env_lib.h"
int hws_pex_config(struct serdes_map *serdes_map)
{
u32 pex_idx, tmp, next_busno, first_busno, temp_pex_reg,
temp_reg, addr, dev_id, ctrl_mode;
enum serdes_type serdes_type;
u32 idx, max_lane_num;
DEBUG_INIT_FULL_S("\n### hws_pex_config ###\n");
max_lane_num = hws_serdes_get_max_lane();
for (idx = 0; idx < max_lane_num; idx++) {
serdes_type = serdes_map[idx].serdes_type;
/* configuration for PEX only */
if ((serdes_type != PEX0) && (serdes_type != PEX1) &&
(serdes_type != PEX2) && (serdes_type != PEX3))
continue;
if ((serdes_type != PEX0) &&
((serdes_map[idx].serdes_mode == PEX_ROOT_COMPLEX_X4) ||
(serdes_map[idx].serdes_mode == PEX_END_POINT_X4))) {
/* for PEX by4 - relevant for the first port only */
continue;
}
pex_idx = serdes_type - PEX0;
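/* Set the Device/Port Type field (bits [23:20] of the PCIe capability dword) to Root Port (0x4) */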
tmp = reg_read(PEX_CAPABILITIES_REG(pex_idx));
tmp &= ~(0xf << 20);
tmp |= (0x4 << 20);
reg_write(PEX_CAPABILITIES_REG(pex_idx), tmp);
}
tmp = reg_read(SOC_CTRL_REG);
tmp &= ~0x03;
for (idx = 0; idx < max_lane_num; idx++) {
serdes_type = serdes_map[idx].serdes_type;
if ((serdes_type != PEX0) &&
((serdes_map[idx].serdes_mode == PEX_ROOT_COMPLEX_X4) ||
(serdes_map[idx].serdes_mode == PEX_END_POINT_X4))) {
/* for PEX by4 - relevant for the first port only */
continue;
}
switch (serdes_type) {
case PEX0:
tmp |= 0x1 << PCIE0_ENABLE_OFFS;
break;
case PEX1:
tmp |= 0x1 << PCIE1_ENABLE_OFFS;
break;
case PEX2:
tmp |= 0x1 << PCIE2_ENABLE_OFFS;
break;
case PEX3:
tmp |= 0x1 << PCIE3_ENABLE_OFFS;
break;
default:
break;
}
}
reg_write(SOC_CTRL_REG, tmp);
/* Support gen1/gen2 */
DEBUG_INIT_FULL_S("Support gen1/gen2\n");
next_busno = 0;
mdelay(150);
for (idx = 0; idx < max_lane_num; idx++) {
serdes_type = serdes_map[idx].serdes_type;
DEBUG_INIT_FULL_S(" serdes_type=0x");
DEBUG_INIT_FULL_D(serdes_type, 8);
DEBUG_INIT_FULL_S("\n");
DEBUG_INIT_FULL_S(" idx=0x");
DEBUG_INIT_FULL_D(idx, 8);
DEBUG_INIT_FULL_S("\n");
/* Configuration for PEX only */
if ((serdes_type != PEX0) && (serdes_type != PEX1) &&
(serdes_type != PEX2) && (serdes_type != PEX3))
continue;
if ((serdes_type != PEX0) &&
((serdes_map[idx].serdes_mode == PEX_ROOT_COMPLEX_X4) ||
(serdes_map[idx].serdes_mode == PEX_END_POINT_X4))) {
/* for PEX by4 - relevant for the first port only */
continue;
}
pex_idx = serdes_type - PEX0;
tmp = reg_read(PEX_DBG_STATUS_REG(pex_idx));
first_busno = next_busno;
if ((tmp & 0x7f) != 0x7e) {
DEBUG_INIT_S("PCIe, Idx ");
DEBUG_INIT_D(pex_idx, 1);
DEBUG_INIT_S(": detected no link\n");
continue;
}
next_busno++;
temp_pex_reg = reg_read((PEX_CFG_DIRECT_ACCESS
(pex_idx, PEX_LINK_CAPABILITY_REG)));
temp_pex_reg &= 0xf;
if (temp_pex_reg != 0x2)
continue;
temp_reg = (reg_read(PEX_CFG_DIRECT_ACCESS(
pex_idx,
PEX_LINK_CTRL_STAT_REG)) &
0xf0000) >> 16;
/* Check if the link established is GEN1 */
DEBUG_INIT_FULL_S
("Checking if the link established is gen1\n");
if (temp_reg != 0x1)
continue;
pex_local_bus_num_set(pex_idx, first_busno);
pex_local_dev_num_set(pex_idx, 1);
DEBUG_INIT_FULL_S("PCIe, Idx ");
DEBUG_INIT_FULL_D(pex_idx, 1);
DEBUG_INIT_S(":** Link is Gen1, check the EP capability\n");
/* link is Gen1, check the EP capability */
addr = pex_config_read(pex_idx, first_busno, 0, 0, 0x34) & 0xff;
DEBUG_INIT_FULL_C("pex_config_read: return addr=0x%x", addr, 4);
if (addr == 0xff) {
DEBUG_INIT_FULL_C
("pex_config_read: return 0xff -->PCIe (%d): Detected No Link.",
pex_idx, 1);
continue;
}
while ((pex_config_read(pex_idx, first_busno, 0, 0, addr)
& 0xff) != 0x10) {
addr = (pex_config_read(pex_idx, first_busno, 0,
0, addr) & 0xff00) >> 8;
}
/* Check for Gen2 and above */
if ((pex_config_read(pex_idx, first_busno, 0, 0,
addr + 0xc) & 0xf) < 0x2) {
DEBUG_INIT_S("PCIe, Idx ");
DEBUG_INIT_D(pex_idx, 1);
DEBUG_INIT_S(": remains Gen1\n");
continue;
}
tmp = reg_read(PEX_LINK_CTRL_STATUS2_REG(pex_idx));
DEBUG_RD_REG(PEX_LINK_CTRL_STATUS2_REG(pex_idx), tmp);
tmp &= ~(BIT(0) | BIT(1));
tmp |= BIT(1);
tmp |= BIT(6); /* Select de-emphasis (-3.5 dB) */
reg_write(PEX_LINK_CTRL_STATUS2_REG(pex_idx), tmp);
DEBUG_WR_REG(PEX_LINK_CTRL_STATUS2_REG(pex_idx), tmp);
tmp = reg_read(PEX_CTRL_REG(pex_idx));
DEBUG_RD_REG(PEX_CTRL_REG(pex_idx), tmp);
tmp |= BIT(10);
reg_write(PEX_CTRL_REG(pex_idx), tmp);
DEBUG_WR_REG(PEX_CTRL_REG(pex_idx), tmp);
/*
* We need to wait 10ms before reading the PEX_DBG_STATUS_REG
* in order not to read the status of the former state
*/
mdelay(10);
DEBUG_INIT_S("PCIe, Idx ");
DEBUG_INIT_D(pex_idx, 1);
DEBUG_INIT_S
(": Link upgraded to Gen2 based on client cpabilities\n");
}
/* Update pex DEVICE ID */
ctrl_mode = sys_env_model_get();
for (idx = 0; idx < max_lane_num; idx++) {
serdes_type = serdes_map[idx].serdes_type;
/* configuration for PEX only */
if ((serdes_type != PEX0) && (serdes_type != PEX1) &&
(serdes_type != PEX2) && (serdes_type != PEX3))
continue;
if ((serdes_type != PEX0) &&
((serdes_map[idx].serdes_mode == PEX_ROOT_COMPLEX_X4) ||
(serdes_map[idx].serdes_mode == PEX_END_POINT_X4))) {
/* for PEX by4 - relevant for the first port only */
continue;
}
pex_idx = serdes_type - PEX0;
dev_id = reg_read(PEX_CFG_DIRECT_ACCESS
(pex_idx, PEX_DEVICE_AND_VENDOR_ID));
dev_id &= 0xffff;
dev_id |= ((ctrl_mode << 16) & 0xffff0000);
reg_write(PEX_CFG_DIRECT_ACCESS
(pex_idx, PEX_DEVICE_AND_VENDOR_ID), dev_id);
}
DEBUG_INIT_FULL_C("Update PEX Device ID ", ctrl_mode, 4);
return MV_OK;
}
int pex_local_bus_num_set(u32 pex_if, u32 bus_num)
{
u32 pex_status;
DEBUG_INIT_FULL_S("\n### pex_local_bus_num_set ###\n");
if (bus_num >= MAX_PEX_BUSSES) {
DEBUG_INIT_C("pex_local_bus_num_set: Illegal bus number %d\n",
bus_num, 4);
return MV_BAD_PARAM;
}
pex_status = reg_read(PEX_STATUS_REG(pex_if));
pex_status &= ~PXSR_PEX_BUS_NUM_MASK;
pex_status |=
(bus_num << PXSR_PEX_BUS_NUM_OFFS) & PXSR_PEX_BUS_NUM_MASK;
reg_write(PEX_STATUS_REG(pex_if), pex_status);
return MV_OK;
}
int pex_local_dev_num_set(u32 pex_if, u32 dev_num)
{
u32 pex_status;
DEBUG_INIT_FULL_S("\n### pex_local_dev_num_set ###\n");
pex_status = reg_read(PEX_STATUS_REG(pex_if));
pex_status &= ~PXSR_PEX_DEV_NUM_MASK;
pex_status |=
(dev_num << PXSR_PEX_DEV_NUM_OFFS) & PXSR_PEX_DEV_NUM_MASK;
reg_write(PEX_STATUS_REG(pex_if), pex_status);
return MV_OK;
}
/*
* pex_config_read - Read from configuration space
*
* DESCRIPTION:
* This function performs a 32 bit read from PEX configuration space.
* It supports both type 0 and type 1 of Configuration Transactions
* (local and over bridge). In order to read from local bus segment, use
* bus number retrieved from pex_local_bus_num_get(). Other bus numbers
* will result in a type 1 configuration transaction (over bridge).
*
* INPUT:
* pex_if - PEX interface number.
* bus - PEX segment bus number.
* dev - PEX device number.
* func - Function number.
* reg_offs - Register offset.
*
* OUTPUT:
* None.
*
* RETURN:
* 32bit register data, 0xffffffff on error
*/
u32 pex_config_read(u32 pex_if, u32 bus, u32 dev, u32 func, u32 reg_off)
{
u32 pex_data = 0;
u32 local_dev, local_bus;
u32 pex_status;
pex_status = reg_read(PEX_STATUS_REG(pex_if));
local_dev =
((pex_status & PXSR_PEX_DEV_NUM_MASK) >> PXSR_PEX_DEV_NUM_OFFS);
local_bus =
((pex_status & PXSR_PEX_BUS_NUM_MASK) >> PXSR_PEX_BUS_NUM_OFFS);
/*
* In PCI Express there is only one valid device number per bus,
* and it is the first device number encountered other than
* local_dev. The PEX spec defines what is returned on config
* read/write accesses to any other device.
*/
if (bus == local_bus) {
if (local_dev == 0) {
/*
* if local dev is 0 then the first number we encounter
* after 0 is 1
*/
if ((dev != 1) && (dev != local_dev))
return MV_ERROR;
} else {
/*
* if local dev is not 0 then the first number we
* encounter is 0
*/
if ((dev != 0) && (dev != local_dev))
return MV_ERROR;
}
}
/* Creating PEX address to be passed */
pex_data = (bus << PXCAR_BUS_NUM_OFFS);
pex_data |= (dev << PXCAR_DEVICE_NUM_OFFS);
pex_data |= (func << PXCAR_FUNC_NUM_OFFS);
/* Legacy register space */
pex_data |= (reg_off & PXCAR_REG_NUM_MASK);
/* Extended register space */
pex_data |= (((reg_off & PXCAR_REAL_EXT_REG_NUM_MASK) >>
PXCAR_REAL_EXT_REG_NUM_OFFS) << PXCAR_EXT_REG_NUM_OFFS);
pex_data |= PXCAR_CONFIG_EN;
/* Write the address to the PEX configuration address register */
reg_write(PEX_CFG_ADDR_REG(pex_if), pex_data);
/*
* In order to let the PEX controller absorbed the address
* of the read transaction we perform a validity check that
* the address was written
*/
if (pex_data != reg_read(PEX_CFG_ADDR_REG(pex_if)))
return MV_ERROR;
/* Cleaning Master Abort */
reg_bit_set(PEX_CFG_DIRECT_ACCESS(pex_if, PEX_STATUS_AND_COMMAND),
PXSAC_MABORT);
/* Read the Data returned in the PEX Data register */
pex_data = reg_read(PEX_CFG_DATA_REG(pex_if));
DEBUG_INIT_FULL_C(" --> ", pex_data, 4);
return pex_data;
}
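For illustration only, a sketch of how the helpers above fit together, mirroring the call pattern used in hws_pex_config(); the interface and bus numbers are assumptions, not taken from this patch:
/* Sketch: after local bus 0 and local device 1 have been programmed for
 * PEX interface 0, the endpoint behind the link is read as
 * bus 0 / dev 0 / func 0. */
static u32 example_read_ep_id(void)
{
	pex_local_bus_num_set(0, 0);
	pex_local_dev_num_set(0, 1);

	return pex_config_read(0, 0, 0, 0, PEX_DEVICE_AND_VENDOR_ID);
}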


@@ -0,0 +1,86 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _CTRL_PEX_H
#define _CTRL_PEX_H
#include "high_speed_env_spec.h"
/* Sample at Reset */
#define MPP_SAMPLE_AT_RESET(id) (0xe4200 + (id * 4))
/* PCI Express Control and Status Registers */
#define MAX_PEX_BUSSES 256
#define MISC_REGS_OFFSET 0x18200
#define MV_MISC_REGS_BASE MISC_REGS_OFFSET
#define SOC_CTRL_REG (MV_MISC_REGS_BASE + 0x4)
#define PEX_IF_REGS_OFFSET(if) ((if) > 0 ? \
(0x40000 + ((if) - 1) * 0x4000) : \
0x80000)
#define PEX_IF_REGS_BASE(if) (PEX_IF_REGS_OFFSET(if))
#define PEX_CAPABILITIES_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x60)
#define PEX_LINK_CTRL_STATUS2_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x90)
#define PEX_CTRL_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x1a00)
#define PEX_STATUS_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x1a04)
#define PEX_DBG_STATUS_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x1a64)
#define PEX_LINK_CAPABILITY_REG 0x6c
#define PEX_LINK_CTRL_STAT_REG 0x70
#define PXSR_PEX_DEV_NUM_OFFS 16 /* Device Number Indication */
#define PXSR_PEX_DEV_NUM_MASK (0x1f << PXSR_PEX_DEV_NUM_OFFS)
#define PXSR_PEX_BUS_NUM_OFFS 8 /* Bus Number Indication */
#define PXSR_PEX_BUS_NUM_MASK (0xff << PXSR_PEX_BUS_NUM_OFFS)
/* PEX_CAPABILITIES_REG fields */
#define PCIE0_ENABLE_OFFS 0
#define PCIE0_ENABLE_MASK (0x1 << PCIE0_ENABLE_OFFS)
#define PCIE1_ENABLE_OFFS 1
#define PCIE1_ENABLE_MASK (0x1 << PCIE1_ENABLE_OFFS)
#define PCIE2_ENABLE_OFFS 2
#define PCIE2_ENABLE_MASK (0x1 << PCIE2_ENABLE_OFFS)
#define PCIE3_ENABLE_OFFS 3
#define PCIE3_ENABLE_MASK (0x1 << PCIE3_ENABLE_OFFS)
/* Controller revision info */
#define PEX_DEVICE_AND_VENDOR_ID 0x000
/* PCI Express Configuration Address Register */
#define PXCAR_REG_NUM_OFFS 2
#define PXCAR_REG_NUM_MAX 0x3f
#define PXCAR_REG_NUM_MASK (PXCAR_REG_NUM_MAX << \
PXCAR_REG_NUM_OFFS)
#define PXCAR_FUNC_NUM_OFFS 8
#define PXCAR_FUNC_NUM_MAX 0x7
#define PXCAR_FUNC_NUM_MASK (PXCAR_FUNC_NUM_MAX << \
PXCAR_FUNC_NUM_OFFS)
#define PXCAR_DEVICE_NUM_OFFS 11
#define PXCAR_DEVICE_NUM_MAX 0x1f
#define PXCAR_DEVICE_NUM_MASK (PXCAR_DEVICE_NUM_MAX << \
PXCAR_DEVICE_NUM_OFFS)
#define PXCAR_BUS_NUM_OFFS 16
#define PXCAR_BUS_NUM_MAX 0xff
#define PXCAR_BUS_NUM_MASK (PXCAR_BUS_NUM_MAX << \
PXCAR_BUS_NUM_OFFS)
#define PXCAR_EXT_REG_NUM_OFFS 24
#define PXCAR_EXT_REG_NUM_MAX 0xf
#define PEX_CFG_ADDR_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x18f8)
#define PEX_CFG_DATA_REG(if) ((PEX_IF_REGS_BASE(if)) + 0x18fc)
#define PXCAR_REAL_EXT_REG_NUM_OFFS 8
#define PXCAR_REAL_EXT_REG_NUM_MASK (0xf << PXCAR_REAL_EXT_REG_NUM_OFFS)
#define PXCAR_CONFIG_EN BIT(31)
#define PEX_STATUS_AND_COMMAND 0x004
#define PXSAC_MABORT BIT(29) /* Received Master Abort */
int hws_pex_config(struct serdes_map *serdes_map);
int pex_local_bus_num_set(u32 pex_if, u32 bus_num);
int pex_local_dev_num_set(u32 pex_if, u32 dev_num);
u32 pex_config_read(u32 pex_if, u32 bus, u32 dev, u32 func, u32 reg_off);
#endif


@@ -0,0 +1,158 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "high_speed_env_spec.h"
#include "sys_env_lib.h"
#define SERDES_VERION "2.0"
u8 selectors_serdes_rev1_map[LAST_SERDES_TYPE][MAX_SERDES_LANES] = {
/* 0 1 2 3 4 5 */
{0x1, 0x1, NA, NA, NA, NA}, /* PEX0 */
{NA, 0x2, 0x1, NA, 0x1, NA}, /* PEX1 */
{NA, NA, 0x2, NA, NA, 0x1}, /* PEX2 */
{NA, NA, NA, 0x1, NA, NA}, /* PEX3 */
{0x2, 0x3, NA, NA, NA, NA}, /* SATA0 */
{NA, NA, 0x3, NA, 0x2, NA}, /* SATA1 */
{NA, NA, NA, NA, 0x6, 0x2}, /* SATA2 */
{NA, NA, NA, 0x3, NA, NA}, /* SATA3 */
{0x3, 0x4, NA, NA, NA, NA}, /* SGMII0 */
{NA, 0x5, 0x4, NA, 0x3, NA}, /* SGMII1 */
{NA, NA, NA, 0x4, NA, 0x3}, /* SGMII2 */
{NA, 0x7, NA, NA, NA, NA}, /* QSGMII */
{NA, 0x6, NA, NA, 0x4, NA}, /* USB3_HOST0 */
{NA, NA, NA, 0x5, NA, 0x4}, /* USB3_HOST1 */
{NA, NA, NA, 0x6, 0x5, 0x5}, /* USB3_DEVICE */
{0x0, 0x0, 0x0, 0x0, 0x0, 0x0} /* DEFAULT_SERDES */
};
int hws_serdes_seq_init(void)
{
DEBUG_INIT_FULL_S("\n### serdes_seq_init ###\n");
if (hws_serdes_seq_db_init() != MV_OK) {
printf("hws_serdes_seq_init: Error: Serdes initialization fail\n");
return MV_FAIL;
}
return MV_OK;
}
int serdes_power_up_ctrl_ext(u32 serdes_num, int serdes_power_up,
enum serdes_type serdes_type,
enum serdes_speed baud_rate,
enum serdes_mode serdes_mode,
enum ref_clock ref_clock)
{
return MV_NOT_SUPPORTED;
}
u32 hws_serdes_silicon_ref_clock_get(void)
{
DEBUG_INIT_FULL_S("\n### hws_serdes_silicon_ref_clock_get ###\n");
return REF_CLOCK_25MHZ;
}
u32 hws_serdes_get_max_lane(void)
{
switch (sys_env_device_id_get()) {
case MV_6811: /* A381/A382: 6811/6821: single/dual CPU */
return 4;
case MV_6810:
return 5;
case MV_6820:
case MV_6828:
return 6;
default: /* not the right module */
printf("%s: Device ID Error, using 4 SerDes lanes\n",
__func__);
return 4;
}
return 6;
}
int hws_is_serdes_active(u8 lane_num)
{
int ret = 1;
/* Maximum lane count for A388 (6828) is 6 */
if (lane_num > 6)
ret = 0;
/* The 4th lane (#4) on device 6810 is not active */
if (sys_env_device_id_get() == MV_6810 && lane_num == 4) {
printf("%s: Error: Lane#4 on Device 6810 is not Active.\n",
__func__);
return 0;
}
/*
* 6th Lane (#5) on Device 6810 is Active, even though 6810
* has only 5 lanes
*/
if (sys_env_device_id_get() == MV_6810 && lane_num == 5)
return 1;
if (lane_num >= hws_serdes_get_max_lane())
ret = 0;
return ret;
}
int hws_get_ext_base_addr(u32 serdes_num, u32 base_addr, u32 unit_base_offset,
u32 *unit_base_reg, u32 *unit_offset)
{
*unit_base_reg = base_addr;
*unit_offset = unit_base_offset;
return MV_OK;
}
/*
* hws_serdes_get_phy_selector_val
*
* DESCRIPTION: Get the mapping of Serdes Selector values according to the
* Serdes revision number
* INPUT: serdes_num - Serdes number
* serdes_type - Serdes type
* OUTPUT: None
* RETURN:
* Mapping of Serdes Selector values
*/
u32 hws_serdes_get_phy_selector_val(int serdes_num,
enum serdes_type serdes_type)
{
if (serdes_type >= LAST_SERDES_TYPE)
return 0xff;
if (hws_ctrl_serdes_rev_get() == MV_SERDES_REV_1_2) {
return selectors_serdes_rev1_map
[serdes_type][serdes_num];
} else
return selectors_serdes_rev2_map
[serdes_type][serdes_num];
}
u32 hws_get_physical_serdes_num(u32 serdes_num)
{
if ((serdes_num == 4) && (sys_env_device_id_get() == MV_6810)) {
/*
* For 6810, there are 5 Serdes and Serdes Num 4 doesn't
* exist. Instead Serdes Num 5 is connected.
*/
return 5;
} else {
return serdes_num;
}
}
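To make the selector table above concrete, a hedged example of a lookup (the lane number is chosen purely for illustration):
/* Illustration: on a rev-1.2 (A38x-Z1) SerDes, lane 1 configured as PEX1
 * maps to selector value 0x2 in selectors_serdes_rev1_map above. */
static void example_selector_lookup(void)
{
	u32 sel = hws_serdes_get_phy_selector_val(1, PEX1);

	(void)sel;	/* 0x2 on SerDes rev 1.2 */
}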

File diff suppressed because it is too large.


@@ -0,0 +1,251 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _HIGH_SPEED_ENV_SPEC_H
#define _HIGH_SPEED_ENV_SPEC_H
#include "seq_exec.h"
/*
* For setting or clearing a certain bit (bit is a number between 0 and 31)
* in the data
*/
#define SET_BIT(data, bit) ((data) | (0x1 << (bit)))
#define CLEAR_BIT(data, bit) ((data) & (~(0x1 << (bit))))
#define MAX_SERDES_LANES 7 /* as in a39x */
/* Serdes revision */
/* Serdes revision 1.2 (for A38x-Z1) */
#define MV_SERDES_REV_1_2 0x0
/* Serdes revision 2.1 (for A39x-Z1, A38x-A0) */
#define MV_SERDES_REV_2_1 0x1
#define MV_SERDES_REV_NA 0xff
#define SERDES_REGS_LANE_BASE_OFFSET(lane) (0x800 * (lane))
#define PEX_X4_ENABLE_OFFS \
(hws_ctrl_serdes_rev_get() == MV_SERDES_REV_1_2 ? 18 : 31)
/* Serdes lane types */
enum serdes_type {
PEX0,
PEX1,
PEX2,
PEX3,
SATA0,
SATA1,
SATA2,
SATA3,
SGMII0,
SGMII1,
SGMII2,
QSGMII,
USB3_HOST0,
USB3_HOST1,
USB3_DEVICE,
SGMII3,
XAUI,
RXAUI,
DEFAULT_SERDES,
LAST_SERDES_TYPE
};
/* Serdes baud rates */
enum serdes_speed {
SERDES_SPEED_1_25_GBPS,
SERDES_SPEED_1_5_GBPS,
SERDES_SPEED_2_5_GBPS,
SERDES_SPEED_3_GBPS,
SERDES_SPEED_3_125_GBPS,
SERDES_SPEED_5_GBPS,
SERDES_SPEED_6_GBPS,
SERDES_SPEED_6_25_GBPS,
LAST_SERDES_SPEED
};
/* Serdes modes */
enum serdes_mode {
PEX_ROOT_COMPLEX_X1,
PEX_ROOT_COMPLEX_X4,
PEX_END_POINT_X1,
PEX_END_POINT_X4,
SERDES_DEFAULT_MODE, /* not pex */
SERDES_LAST_MODE
};
struct serdes_map {
enum serdes_type serdes_type;
enum serdes_speed serdes_speed;
enum serdes_mode serdes_mode;
int swap_rx;
int swap_tx;
};
/* Serdes ref clock options */
enum ref_clock {
REF_CLOCK_25MHZ,
REF_CLOCK_100MHZ,
REF_CLOCK_40MHZ,
REF_CLOCK_UNSUPPORTED
};
/* Serdes sequences */
enum serdes_seq {
SATA_PORT_0_ONLY_POWER_UP_SEQ,
SATA_PORT_1_ONLY_POWER_UP_SEQ,
SATA_POWER_UP_SEQ,
SATA_1_5_SPEED_CONFIG_SEQ,
SATA_3_SPEED_CONFIG_SEQ,
SATA_6_SPEED_CONFIG_SEQ,
SATA_ELECTRICAL_CONFIG_SEQ,
SATA_TX_CONFIG_SEQ1,
SATA_PORT_0_ONLY_TX_CONFIG_SEQ,
SATA_PORT_1_ONLY_TX_CONFIG_SEQ,
SATA_TX_CONFIG_SEQ2,
SGMII_POWER_UP_SEQ,
SGMII_1_25_SPEED_CONFIG_SEQ,
SGMII_3_125_SPEED_CONFIG_SEQ,
SGMII_ELECTRICAL_CONFIG_SEQ,
SGMII_TX_CONFIG_SEQ1,
SGMII_TX_CONFIG_SEQ2,
PEX_POWER_UP_SEQ,
PEX_2_5_SPEED_CONFIG_SEQ,
PEX_5_SPEED_CONFIG_SEQ,
PEX_ELECTRICAL_CONFIG_SEQ,
PEX_TX_CONFIG_SEQ1,
PEX_TX_CONFIG_SEQ2,
PEX_TX_CONFIG_SEQ3,
PEX_BY_4_CONFIG_SEQ,
PEX_CONFIG_REF_CLOCK_25MHZ_SEQ,
PEX_CONFIG_REF_CLOCK_100MHZ_SEQ,
PEX_CONFIG_REF_CLOCK_40MHZ_SEQ,
USB3_POWER_UP_SEQ,
USB3_HOST_SPEED_CONFIG_SEQ,
USB3_DEVICE_SPEED_CONFIG_SEQ,
USB3_ELECTRICAL_CONFIG_SEQ,
USB3_TX_CONFIG_SEQ1,
USB3_TX_CONFIG_SEQ2,
USB3_TX_CONFIG_SEQ3,
USB3_DEVICE_CONFIG_SEQ,
USB2_POWER_UP_SEQ,
SERDES_POWER_DOWN_SEQ,
SGMII3_POWER_UP_SEQ,
SGMII3_1_25_SPEED_CONFIG_SEQ,
SGMII3_TX_CONFIG_SEQ1,
SGMII3_TX_CONFIG_SEQ2,
QSGMII_POWER_UP_SEQ,
QSGMII_5_SPEED_CONFIG_SEQ,
QSGMII_ELECTRICAL_CONFIG_SEQ,
QSGMII_TX_CONFIG_SEQ1,
QSGMII_TX_CONFIG_SEQ2,
XAUI_POWER_UP_SEQ,
XAUI_3_125_SPEED_CONFIG_SEQ,
XAUI_ELECTRICAL_CONFIG_SEQ,
XAUI_TX_CONFIG_SEQ1,
XAUI_TX_CONFIG_SEQ2,
RXAUI_POWER_UP_SEQ,
RXAUI_6_25_SPEED_CONFIG_SEQ,
RXAUI_ELECTRICAL_CONFIG_SEQ,
RXAUI_TX_CONFIG_SEQ1,
RXAUI_TX_CONFIG_SEQ2,
SERDES_LAST_SEQ
};
/* The different sequence types for PEX and USB3 */
enum {
PEX,
USB3,
LAST_PEX_USB_SEQ_TYPE
};
enum {
PEXSERDES_SPEED_2_5_GBPS,
PEXSERDES_SPEED_5_GBPS,
USB3SERDES_SPEED_5_GBPS_HOST,
USB3SERDES_SPEED_5_GBPS_DEVICE,
LAST_PEX_USB_SPEED_SEQ_TYPE
};
/* The different sequence types for SATA and SGMII */
enum {
SATA,
SGMII,
SGMII_3_125,
LAST_SATA_SGMII_SEQ_TYPE
};
enum {
QSGMII_SEQ_IDX,
LAST_QSGMII_SEQ_TYPE
};
enum {
XAUI_SEQ_IDX,
RXAUI_SEQ_IDX,
LAST_XAUI_RXAUI_SEQ_TYPE
};
enum {
SATASERDES_SPEED_1_5_GBPS,
SATASERDES_SPEED_3_GBPS,
SATASERDES_SPEED_6_GBPS,
SGMIISERDES_SPEED_1_25_GBPS,
SGMIISERDES_SPEED_3_125_GBPS,
LAST_SATA_SGMII_SPEED_SEQ_TYPE
};
extern u8 selectors_serdes_rev1_map[LAST_SERDES_TYPE][MAX_SERDES_LANES];
extern u8 selectors_serdes_rev2_map[LAST_SERDES_TYPE][MAX_SERDES_LANES];
u8 hws_ctrl_serdes_rev_get(void);
int mv_update_serdes_select_phy_mode_seq(void);
int hws_board_topology_load(struct serdes_map *serdes_map_array);
enum serdes_seq serdes_type_and_speed_to_speed_seq(enum serdes_type serdes_type,
enum serdes_speed baud_rate);
int hws_serdes_seq_init(void);
int hws_serdes_seq_db_init(void);
int hws_power_up_serdes_lanes(struct serdes_map *serdes_config_map);
int hws_ctrl_high_speed_serdes_phy_config(void);
int serdes_power_up_ctrl(u32 serdes_num, int serdes_power_up,
enum serdes_type serdes_type,
enum serdes_speed baud_rate,
enum serdes_mode serdes_mode,
enum ref_clock ref_clock);
int serdes_power_up_ctrl_ext(u32 serdes_num, int serdes_power_up,
enum serdes_type serdes_type,
enum serdes_speed baud_rate,
enum serdes_mode serdes_mode,
enum ref_clock ref_clock);
u32 hws_serdes_silicon_ref_clock_get(void);
int hws_serdes_pex_ref_clock_get(enum serdes_type serdes_type,
enum ref_clock *ref_clock);
int hws_ref_clock_set(u32 serdes_num, enum serdes_type serdes_type,
enum ref_clock ref_clock);
int hws_update_serdes_phy_selectors(struct serdes_map *serdes_config_map);
u32 hws_serdes_get_phy_selector_val(int serdes_num,
enum serdes_type serdes_type);
u32 hws_serdes_get_ref_clock_val(enum serdes_type serdes_type);
u32 hws_serdes_get_max_lane(void);
int hws_get_ext_base_addr(u32 serdes_num, u32 base_addr, u32 unit_base_offset,
u32 *unit_base_reg, u32 *unit_offset);
int hws_pex_tx_config_seq(struct serdes_map *serdes_map);
u32 hws_get_physical_serdes_num(u32 serdes_num);
int hws_is_serdes_active(u8 lane_num);
#endif /* _HIGH_SPEED_ENV_SPEC_H */

File diff suppressed because it is too large.


@@ -0,0 +1,124 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _HIGHSPEED_TOPOLOGY_SPEC_H
#define _HIGHSPEED_TOPOLOGY_SPEC_H
#include "high_speed_env_spec.h"
/* Topology map options for the DB_A38X_BP board */
enum topology_config_db {
DB_CONFIG_SLM1363_C,
DB_CONFIG_SLM1363_D,
DB_CONFIG_SLM1363_E,
DB_CONFIG_SLM1363_F,
DB_CONFIG_SLM1364_D,
DB_CONFIG_SLM1364_E,
DB_CONFIG_SLM1364_F,
DB_CONFIG_DEFAULT,
DB_NO_TOPOLOGY
};
/*
* This enum must be kept aligned with the topology_config_db_381
* array; every update to this enum requires a corresponding update
* to that array.
*/
enum topology_config_db381 {
DB_CONFIG_SLM1427, /* enum for db_config_slm1427 */
DB_CONFIG_SLM1426, /* enum for db_config_slm1426 */
DB_381_CONFIG_DEFAULT,
DB_381_NO_TOPOLOGY
};
/* A generic function pointer for loading the board topology map */
typedef int (*load_topology_func_ptr)(struct serdes_map *serdes_map_array);
extern load_topology_func_ptr load_topology_func_arr[];
/*
* topology_config_db_mode_get -
*
* DESCRIPTION: Gets the relevant topology mode (index).
* for load_topology_db use only.
* INPUT: None.
* OUTPUT: None.
* RETURNS: the topology mode
*/
u8 topology_config_db_mode_get(void);
/*
* load_topology_xxx -
*
* DESCRIPTION: Loads the board topology for the XXX board
* INPUT: serdes_map_array - The struct that will contain
* the board topology map
* OUTPUT: The board topology map.
* RETURNS: MV_OK for success
* MV_FAIL for failure (a wrong topology mode was read
* from the board)
*/
/* load_topology_db - Loads the board topology for DB Board */
int load_topology_db(struct serdes_map *serdes_map_array);
/* load_topology_rd - Loads the board topology for RD Board */
int load_topology_rd(struct serdes_map *serdes_map_array);
/* load_topology_rd_nas - Loads the board topology for RD NAS Board */
int load_topology_rd_nas(struct serdes_map *serdes_map_array);
/* load_topology_rd_ap - Loads the board topology for RD Ap Board */
int load_topology_rd_ap(struct serdes_map *serdes_map_array);
/* load_topology_db_ap - Loads the board topology for DB-AP Board */
int load_topology_db_ap(struct serdes_map *serdes_map_array);
/* load_topology_db_gp - Loads the board topology for DB GP Board */
int load_topology_db_gp(struct serdes_map *serdes_map_array);
/* load_topology_db_381 - Loads the board topology for 381 DB-BP Board */
int load_topology_db_381(struct serdes_map *serdes_map_array);
/* load_topology_db_amc - Loads the board topology for DB-AMC Board */
int load_topology_db_amc(struct serdes_map *serdes_map_array);
/*
* hws_update_device_toplogy
* DESCRIPTION: Update the default board topology for a specific device ID
* INPUT:
* topology_config_ptr - pointer to the Serdes mapping
* topology_mode - topology mode (index)
* OUTPUT: None
* RETURNS:
* MV_OK - if updating the board topology success
* MV_BAD_PARAM - if the input parameter is wrong
*/
int hws_update_device_toplogy(struct serdes_map *topology_config_ptr,
enum topology_config_db topology_mode);
/*
* load_topology_rd_sgmii_usb -
*
* DESCRIPTION: For RD board check if lane 4 is USB3 or SGMII
* INPUT: None
* OUTPUT: is_sgmii - return 1 if lane 4 is SGMII
* return 0 if lane 4 is USB.
* RETURNS: MV_OK for success
*/
int load_topology_rd_sgmii_usb(int *is_sgmii);
/*
* load_topology_usb_mode_get -
*
* DESCRIPTION: For DB board, check whether USB3.0 mode is selected
* INPUT: None
* OUTPUT: twsi_data - return data read from S@R via I2C
* RETURNS: MV_OK for success
*/
int load_topology_usb_mode_get(u8 *twsi_data);
#endif /* _HIGHSPEED_TOPOLOGY_SPEC_H */
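To illustrate the load_topology_xxx contract documented above, a minimal hypothetical loader; the board, lane assignment and values are assumptions and not part of this patch:
/* Hypothetical board topology loader: fill serdes_map_array and
 * return MV_OK, as documented for the load_topology_xxx functions. */
static int load_topology_example_board(struct serdes_map *serdes_map_array)
{
	serdes_map_array[0].serdes_type  = PEX0;
	serdes_map_array[0].serdes_speed = SERDES_SPEED_5_GBPS;
	serdes_map_array[0].serdes_mode  = PEX_ROOT_COMPLEX_X1;
	serdes_map_array[0].swap_rx = 0;
	serdes_map_array[0].swap_tx = 0;

	serdes_map_array[1].serdes_type  = SATA0;
	serdes_map_array[1].serdes_speed = SERDES_SPEED_3_GBPS;
	serdes_map_array[1].serdes_mode  = SERDES_DEFAULT_MODE;
	serdes_map_array[1].swap_rx = 0;
	serdes_map_array[1].swap_tx = 0;

	return MV_OK;
}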


@@ -0,0 +1,170 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "seq_exec.h"
#include "high_speed_env_spec.h"
#include "../../../drivers/ddr/marvell/a38x/ddr3_init.h"
#if defined(MV_DEBUG_INIT_FULL) || defined(MV_DEBUG)
#define DB(x) x
#else
#define DB(x)
#endif
/* Array for mapping the operation (write, poll or delay) functions */
op_execute_func_ptr op_execute_func_arr[] = {
write_op_execute,
delay_op_execute,
poll_op_execute
};
int write_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx)
{
u32 unit_base_reg, unit_offset, data, mask, reg_data, reg_addr;
/* Getting write op params from the input parameter */
data = params->data[data_arr_idx];
mask = params->mask;
/* an empty operation */
if (data == NO_DATA)
return MV_OK;
/* get updated base address since it can be different between Serdes */
CHECK_STATUS(hws_get_ext_base_addr(serdes_num, params->unit_base_reg,
params->unit_offset,
&unit_base_reg, &unit_offset));
/* Address calculation */
reg_addr = unit_base_reg + unit_offset * serdes_num;
#ifdef SEQ_DEBUG
printf("Write: 0x%x: 0x%x (mask 0x%x) - ", reg_addr, data, mask);
#endif
/* Reading old value */
reg_data = reg_read(reg_addr);
reg_data &= (~mask);
/* Writing new data */
data &= mask;
reg_data |= data;
reg_write(reg_addr, reg_data);
#ifdef SEQ_DEBUG
printf(" - 0x%x\n", reg_data);
#endif
return MV_OK;
}
int delay_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx)
{
u32 delay;
/* Getting delay op params from the input parameter */
delay = params->wait_time;
#ifdef SEQ_DEBUG
printf("Delay: %d\n", delay);
#endif
mdelay(delay);
return MV_OK;
}
int poll_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx)
{
u32 unit_base_reg, unit_offset, data, mask, num_of_loops, wait_time;
u32 poll_counter = 0;
u32 reg_addr, reg_data;
/* Getting poll op params from the input parameter */
data = params->data[data_arr_idx];
mask = params->mask;
num_of_loops = params->num_of_loops;
wait_time = params->wait_time;
/* an empty operation */
if (data == NO_DATA)
return MV_OK;
/* get updated base address since it can be different between Serdes */
CHECK_STATUS(hws_get_ext_base_addr(serdes_num, params->unit_base_reg,
params->unit_offset,
&unit_base_reg, &unit_offset));
/* Address calculation */
reg_addr = unit_base_reg + unit_offset * serdes_num;
/* Polling */
#ifdef SEQ_DEBUG
printf("Poll: 0x%x: 0x%x (mask 0x%x)\n", reg_addr, data, mask);
#endif
do {
reg_data = reg_read(reg_addr) & mask;
poll_counter++;
udelay(wait_time);
} while ((reg_data != data) && (poll_counter < num_of_loops));
if ((poll_counter >= num_of_loops) && (reg_data != data)) {
DEBUG_INIT_S("poll_op_execute: TIMEOUT\n");
return MV_TIMEOUT;
}
return MV_OK;
}
enum mv_op get_cfg_seq_op(struct op_params *params)
{
if (params->wait_time == 0)
return WRITE_OP;
else if (params->num_of_loops == 0)
return DELAY_OP;
return POLL_OP;
}
int mv_seq_exec(u32 serdes_num, u32 seq_id)
{
u32 seq_idx;
struct op_params *seq_arr;
u32 seq_size;
u32 data_arr_idx;
enum mv_op curr_op;
DB(printf("\n### mv_seq_exec ###\n"));
DB(printf("seq id: %d\n", seq_id));
if (hws_is_serdes_active(serdes_num) != 1) {
printf("mv_seq_exec_ext:Error: SerDes lane %d is not valid\n",
serdes_num);
return MV_BAD_PARAM;
}
seq_arr = serdes_seq_db[seq_id].op_params_ptr;
seq_size = serdes_seq_db[seq_id].cfg_seq_size;
data_arr_idx = serdes_seq_db[seq_id].data_arr_idx;
DB(printf("seq_size: %d\n", seq_size));
DB(printf("data_arr_idx: %d\n", data_arr_idx));
/* Executing the sequence operations */
for (seq_idx = 0; seq_idx < seq_size; seq_idx++) {
curr_op = get_cfg_seq_op(&seq_arr[seq_idx]);
op_execute_func_arr[curr_op](serdes_num, &seq_arr[seq_idx],
data_arr_idx);
}
return MV_OK;
}


@@ -0,0 +1,65 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _SEQ_EXEC_H
#define _SEQ_EXEC_H
#define NA 0xff
#define DEFAULT_PARAM 0
#define MV_BOARD_TCLK_ERROR 0xffffffff
#define NO_DATA 0xffffffff
#define MAX_DATA_ARRAY 5
#define FIRST_CELL 0
/* Operation types */
enum mv_op {
WRITE_OP,
DELAY_OP,
POLL_OP,
};
/* Operation parameters */
struct op_params {
u32 unit_base_reg;
u32 unit_offset;
u32 mask;
u32 data[MAX_DATA_ARRAY]; /* data array */
u8 wait_time; /* msec */
u16 num_of_loops; /* for polling only */
};
/*
* Sequence parameters. Each sequence contains:
* 1. Sequence id.
* 2. Sequence size (total number of operations in the sequence)
* 3. A series of operations; each operation can be write, poll or delay
* 4. Index in the data array (the entry where the relevant data sits)
*/
struct cfg_seq {
struct op_params *op_params_ptr;
u8 cfg_seq_size;
u8 data_arr_idx;
};
extern struct cfg_seq serdes_seq_db[];
/*
* A generic function type for executing an operation (write, poll or delay)
*/
typedef int (*op_execute_func_ptr)(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
/* Specific functions for executing each operation */
int write_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int delay_op_execute(u32 serdes_num, struct op_params *params,
u32 data_arr_idx);
int poll_op_execute(u32 serdes_num, struct op_params *params, u32 data_arr_idx);
enum mv_op get_cfg_seq_op(struct op_params *params);
int mv_seq_exec(u32 serdes_num, u32 seq_id);
#endif /*_SEQ_EXEC_H*/
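As a sketch of how the op_params fields steer get_cfg_seq_op() (the register address, per-lane stride and values below are illustrative only):
/* How get_cfg_seq_op() classifies an entry:
 *   wait_time == 0                     -> WRITE_OP
 *   wait_time != 0, num_of_loops == 0  -> DELAY_OP
 *   both non-zero                      -> POLL_OP */
static struct op_params example_poll_op = {
	.unit_base_reg = 0xa0004,	/* illustrative register */
	.unit_offset   = 0x800,		/* illustrative per-lane stride */
	.mask          = 0x1,
	.data          = { 0x1, NO_DATA, NO_DATA, NO_DATA, NO_DATA },
	.wait_time     = 10,
	.num_of_loops  = 100,		/* non-zero -> polled operation */
};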


@@ -0,0 +1,388 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "seq_exec.h"
#include "sys_env_lib.h"
#include "../../../drivers/ddr/marvell/a38x/ddr3_a38x.h"
#ifdef CONFIG_ARMADA_38X
enum unit_id sys_env_soc_unit_nums[MAX_UNITS_ID][MAX_DEV_ID_NUM] = {
/* 6820 6810 6811 6828 */
/* PEX_UNIT_ID */ { 4, 3, 3, 4},
/* ETH_GIG_UNIT_ID */ { 3, 2, 3, 3},
/* USB3H_UNIT_ID */ { 2, 2, 2, 2},
/* USB3D_UNIT_ID */ { 1, 1, 1, 1},
/* SATA_UNIT_ID */ { 2, 2, 2, 4},
/* QSGMII_UNIT_ID */ { 1, 0, 0, 1},
/* XAUI_UNIT_ID */ { 0, 0, 0, 0},
/* RXAUI_UNIT_ID */ { 0, 0, 0, 0}
};
#else /* if (CONFIG_ARMADA_39X) */
enum unit_id sys_env_soc_unit_nums[MAX_UNITS_ID][MAX_DEV_ID_NUM] = {
/* 6920 6928 */
/* PEX_UNIT_ID */ { 4, 4},
/* ETH_GIG_UNIT_ID */ { 3, 4},
/* USB3H_UNIT_ID */ { 1, 2},
/* USB3D_UNIT_ID */ { 0, 1},
/* SATA_UNIT_ID */ { 0, 4},
/* QSGMII_UNIT_ID */ { 0, 1},
/* XAUI_UNIT_ID */ { 1, 1},
/* RXAUI_UNIT_ID */ { 1, 1}
};
#endif
u32 g_dev_id = -1;
u32 mv_board_id_get(void)
{
#if defined(CONFIG_DB_88F6820_GP)
return DB_GP_68XX_ID;
#else
/*
* Return 0 here for custom board as this should not be used
* for custom boards.
*/
return 0;
#endif
}
u32 mv_board_tclk_get(void)
{
u32 value;
value = (reg_read(DEVICE_SAMPLE_AT_RESET1_REG) >> 15) & 0x1;
switch (value) {
case (0x0):
return 250000000;
case (0x1):
return 200000000;
default:
return 0xffffffff;
}
}
u32 mv_board_id_index_get(u32 board_id)
{
/*
* Marvell Boards use 0x10 as base for Board ID:
* mask the MSB to obtain the index for the board ID
*/
return board_id & (MARVELL_BOARD_ID_MASK - 1);
}
/*
* sys_env_suspend_wakeup_check
* DESCRIPTION: Reads GPIO input for suspend-wakeup indication.
* INPUT: None.
* OUTPUT:
* RETURNS: u32 indicating suspend wakeup status:
* 0 - Not supported,
* 1 - supported: read magic word to detect wakeup,
* 2 - detected wakeup from GPIO.
*/
enum suspend_wakeup_status sys_env_suspend_wakeup_check(void)
{
u32 reg, board_id_index, gpio;
struct board_wakeup_gpio board_gpio[] = MV_BOARD_WAKEUP_GPIO_INFO;
board_id_index = mv_board_id_index_get(mv_board_id_get());
if (!(sizeof(board_gpio) / sizeof(struct board_wakeup_gpio) >
board_id_index)) {
printf("\n_failed loading Suspend-Wakeup information (invalid board ID)\n");
return SUSPEND_WAKEUP_DISABLED;
}
/*
* - Detect if Suspend-Wakeup is supported on current board
* - Fetch the GPIO number for wakeup status input indication
*/
if (board_gpio[board_id_index].gpio_num == -1) {
/* Suspend to RAM is not supported */
return SUSPEND_WAKEUP_DISABLED;
} else if (board_gpio[board_id_index].gpio_num == -2) {
/*
* Suspend to RAM is supported but GPIO indication is
* not implemented - Skip
*/
return SUSPEND_WAKEUP_ENABLED;
} else {
gpio = board_gpio[board_id_index].gpio_num;
}
/* Initialize MPP for GPIO (set MPP = 0x0) */
reg = reg_read(MPP_CONTROL_REG(MPP_REG_NUM(gpio)));
/* reset MPP21 to 0x0, keep rest of MPP settings*/
reg &= ~MPP_MASK(gpio);
reg_write(MPP_CONTROL_REG(MPP_REG_NUM(gpio)), reg);
/* Initialize GPIO as input */
reg = reg_read(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)));
reg |= GPP_MASK(gpio);
reg_write(GPP_DATA_OUT_EN_REG(GPP_REG_NUM(gpio)), reg);
/*
* Check GPP for input status from PIC: 0 - regular init,
* 1 - suspend wakeup
*/
reg = reg_read(GPP_DATA_IN_REG(GPP_REG_NUM(gpio)));
/* if GPIO is ON: wakeup from S2RAM indication detected */
return (reg & GPP_MASK(gpio)) ? SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED :
SUSPEND_WAKEUP_DISABLED;
}
/*
* sys_env_id_index_get
*
* DESCRIPTION: Return the SoC device index
* INPUT: None
* OUTPUT: None
* RETURN:
* the SoC device index
*/
u32 sys_env_id_index_get(u32 ctrl_model)
{
switch (ctrl_model) {
case MV_6820_DEV_ID:
return MV_6820_INDEX;
case MV_6810_DEV_ID:
return MV_6810_INDEX;
case MV_6811_DEV_ID:
return MV_6811_INDEX;
case MV_6828_DEV_ID:
return MV_6828_INDEX;
case MV_6920_DEV_ID:
return MV_6920_INDEX;
case MV_6928_DEV_ID:
return MV_6928_INDEX;
default:
return MV_6820_INDEX;
}
}
u32 sys_env_unit_max_num_get(enum unit_id unit)
{
u32 dev_id_index;
if (unit >= MAX_UNITS_ID) {
printf("%s: Error: Wrong unit type (%u)\n", __func__, unit);
return 0;
}
dev_id_index = sys_env_id_index_get(sys_env_model_get());
return sys_env_soc_unit_nums[unit][dev_id_index];
}
/*
* sys_env_model_get
* DESCRIPTION: Returns a 16-bit value describing the device model (ID),
* as defined in the Vendor ID configuration register
*/
u16 sys_env_model_get(void)
{
u32 default_ctrl_id, ctrl_id = reg_read(DEV_ID_REG);
ctrl_id = (ctrl_id & (DEV_ID_REG_DEVICE_ID_MASK)) >>
DEV_ID_REG_DEVICE_ID_OFFS;
switch (ctrl_id) {
case MV_6820_DEV_ID:
case MV_6810_DEV_ID:
case MV_6811_DEV_ID:
case MV_6828_DEV_ID:
case MV_6920_DEV_ID:
case MV_6928_DEV_ID:
return ctrl_id;
default:
/* Device ID Default for A38x: 6820 , for A39x: 6920 */
#ifdef CONFIG_ARMADA_38X
default_ctrl_id = MV_6820_DEV_ID;
#else
default_ctrl_id = MV_6920_DEV_ID;
#endif
printf("%s: Error retrieving device ID (%x), using default ID = %x\n",
__func__, ctrl_id, default_ctrl_id);
return default_ctrl_id;
}
}
/*
* sys_env_device_id_get
* DESCRIPTION: Returns enum (0..7) index of the device model (ID)
*/
u32 sys_env_device_id_get(void)
{
char *device_id_str[7] = {
"6810", "6820", "6811", "6828", "NONE", "6920", "6928"
};
if (g_dev_id != -1)
return g_dev_id;
g_dev_id = reg_read(DEVICE_SAMPLE_AT_RESET1_REG);
g_dev_id = g_dev_id >> SAR_DEV_ID_OFFS & SAR_DEV_ID_MASK;
printf("Detected Device ID %s\n", device_id_str[g_dev_id]);
return g_dev_id;
}
#ifdef MV_DDR_TOPOLOGY_UPDATE_FROM_TWSI
/*
* sys_env_get_topology_update_info
* DESCRIPTION: Read TWSI fields to update DDR topology structure
* INPUT: None
* OUTPUT: None
* RETURN:
* Bit mask of changed topology features; 0 means no topology update
*/
#ifdef CONFIG_ARMADA_39X
u32 sys_env_get_topology_update_info(
struct topology_update_info *tui)
{
/* Set 16/32 bit configuration*/
tui->update_width = 1;
tui->width = TOPOLOGY_UPDATE_WIDTH_32BIT;
#ifdef CONFIG_DDR3
if (1 == sys_env_config_get(MV_CONFIG_DDR_BUSWIDTH)) {
/* 16bit */
tui->width = TOPOLOGY_UPDATE_WIDTH_16BIT;
} else {
/* 32bit */
tui->width = TOPOLOGY_UPDATE_WIDTH_32BIT;
}
#endif
/* Set ECC/no ECC bit configuration */
tui->update_ecc = 1;
if (0 == sys_env_config_get(MV_CONFIG_DDR_ECC_EN)) {
/* NO ECC */
tui->ecc = TOPOLOGY_UPDATE_ECC_OFF;
} else {
/* ECC */
tui->ecc = TOPOLOGY_UPDATE_ECC_ON;
}
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset = TOPOLOGY_UPDATE_ECC_OFFSET_PUP4;
return MV_OK;
}
#else /*CONFIG_ARMADA_38X*/
u32 sys_env_get_topology_update_info(
struct topology_update_info *tui)
{
u8 config_val;
u8 ecc_mode[A38X_MV_MAX_MARVELL_BOARD_ID -
A38X_MARVELL_BOARD_ID_BASE][5] = TOPOLOGY_UPDATE;
u8 board_id = mv_board_id_get();
int ret;
board_id = mv_board_id_index_get(board_id);
ret = i2c_read(EEPROM_I2C_ADDR, 0, 2, &config_val, 1);
if (ret) {
DEBUG_INIT_S("sys_env_get_topology_update_info: TWSI Read failed\n");
return 0;
}
/* Set 16/32 bit configuration */
if ((0 == (config_val & DDR_SATR_CONFIG_MASK_WIDTH)) ||
(ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT] == 0)) {
/* 16bit selected by SatR, or 32bit mode not supported for the board */
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT] != 0)) {
tui->update_width = 1;
tui->width = TOPOLOGY_UPDATE_WIDTH_16BIT;
}
} else {
/* 32bit */
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT] != 0)) {
tui->update_width = 1;
tui->width = TOPOLOGY_UPDATE_WIDTH_32BIT;
}
}
/* Set ECC/no ECC bit configuration */
if (0 == (config_val & DDR_SATR_CONFIG_MASK_ECC)) {
/* NO ECC */
tui->update_ecc = 1;
tui->ecc = TOPOLOGY_UPDATE_ECC_OFF;
} else {
/* ECC */
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT_ECC] != 0) ||
(ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT_ECC] != 0) ||
(ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT_ECC_PUP3] != 0)) {
tui->update_ecc = 1;
tui->ecc = TOPOLOGY_UPDATE_ECC_ON;
}
}
/* Set ECC pup bit configuration */
if (0 == (config_val & DDR_SATR_CONFIG_MASK_ECC_PUP)) {
/* PUP3 */
/*
* Check if the PUP3 configuration is allowed; if not,
* force PUP4 with a warning message
*/
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT_ECC_PUP3] != 0)) {
if (tui->width == TOPOLOGY_UPDATE_WIDTH_16BIT) {
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset =
TOPOLOGY_UPDATE_ECC_OFFSET_PUP3;
} else {
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT_ECC] != 0)) {
printf("DDR Topology Update: ECC PUP3 not valid for 32bit mode, force ECC in PUP4\n");
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset =
TOPOLOGY_UPDATE_ECC_OFFSET_PUP4;
}
}
} else {
if (ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT_ECC] !=
0) {
printf("DDR Topology Update: ECC on PUP3 not supported, force ECC on PUP4\n");
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset =
TOPOLOGY_UPDATE_ECC_OFFSET_PUP4;
}
}
} else {
/* PUP4 */
if ((ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT_ECC] != 0) ||
(ecc_mode[board_id][TOPOLOGY_UPDATE_16BIT_ECC] != 0)) {
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset =
TOPOLOGY_UPDATE_ECC_OFFSET_PUP4;
}
}
/*
* Check for a forbidden ECC mode: if the default width and PUP
* selection result in 32bit ECC mode and this mode is not supported
* for the board, configure 16bit with ECC on PUP3.
*/
if ((tui->ecc == TOPOLOGY_UPDATE_ECC_ON) &&
(tui->width == TOPOLOGY_UPDATE_WIDTH_32BIT)) {
if (ecc_mode[board_id][TOPOLOGY_UPDATE_32BIT_ECC] == 0) {
printf("DDR Topology Update: 32bit mode with ECC not allowed on this board, forced 16bit with ECC on PUP3\n");
tui->width = TOPOLOGY_UPDATE_WIDTH_16BIT;
tui->update_ecc_pup3_mode = 1;
tui->ecc_pup_mode_offset =
TOPOLOGY_UPDATE_ECC_OFFSET_PUP3;
}
}
return MV_OK;
}
#endif /* CONFIG_ARMADA_38X */
#endif /* MV_DDR_TOPOLOGY_UPDATE_FROM_TWSI */


@@ -0,0 +1,371 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _SYS_ENV_LIB_H
#define _SYS_ENV_LIB_H
#include "../../../drivers/ddr/marvell/a38x/ddr3_init.h"
#include "../../../drivers/ddr/marvell/a38x/ddr3_hws_hw_training.h"
/* Serdes definitions */
#define COMMON_PHY_BASE_ADDR 0x18300
#define DEVICE_CONFIGURATION_REG0 0x18284
#define DEVICE_CONFIGURATION_REG1 0x18288
#define COMMON_PHY_CONFIGURATION1_REG 0x18300
#define COMMON_PHY_CONFIGURATION2_REG 0x18304
#define COMMON_PHY_CONFIGURATION4_REG 0x1830c
#define COMMON_PHY_STATUS1_REG 0x18318
#define COMMON_PHYS_SELECTORS_REG 0x183fc
#define SOC_CONTROL_REG1 0x18204
#define GENERAL_PURPOSE_RESERVED0_REG 0x182e0
#define GBE_CONFIGURATION_REG 0x18460
#define DEVICE_SAMPLE_AT_RESET1_REG 0x18600
#define DEVICE_SAMPLE_AT_RESET2_REG 0x18604
#define DEV_ID_REG 0x18238
#define CORE_PLL_PARAMETERS_REG 0xe42e0
#define CORE_PLL_CONFIG_REG 0xe42e4
#define QSGMII_CONTROL_REG1 0x18494
#define DEV_ID_REG_DEVICE_ID_OFFS 16
#define DEV_ID_REG_DEVICE_ID_MASK 0xffff0000
#define SAR_DEV_ID_OFFS 27
#define SAR_DEV_ID_MASK 0x7
#define POWER_AND_PLL_CTRL_REG 0xa0004
#define CALIBRATION_CTRL_REG 0xa0008
#define DFE_REG0 0xa001c
#define DFE_REG3 0xa0028
#define RESET_DFE_REG 0xa0148
#define LOOPBACK_REG 0xa008c
#define SYNC_PATTERN_REG 0xa0090
#define INTERFACE_REG 0xa0094
#define ISOLATE_REG 0xa0098
#define MISC_REG 0xa013c
#define GLUE_REG 0xa0140
#define GENERATION_DIVIDER_FORCE_REG 0xa0144
#define PCIE_REG0 0xa0120
#define LANE_ALIGN_REG0 0xa0124
#define SQUELCH_FFE_SETTING_REG 0xa0018
#define G1_SETTINGS_0_REG 0xa0034
#define G1_SETTINGS_1_REG 0xa0038
#define G1_SETTINGS_3_REG 0xa0440
#define G1_SETTINGS_4_REG 0xa0444
#define G2_SETTINGS_0_REG 0xa003c
#define G2_SETTINGS_1_REG 0xa0040
#define G2_SETTINGS_2_REG 0xa00f8
#define G2_SETTINGS_3_REG 0xa0448
#define G2_SETTINGS_4_REG 0xa044c
#define G3_SETTINGS_0_REG 0xa0044
#define G3_SETTINGS_1_REG 0xa0048
#define G3_SETTINGS_3_REG 0xa0450
#define G3_SETTINGS_4_REG 0xa0454
#define VTHIMPCAL_CTRL_REG 0xa0104
#define REF_REG0 0xa0134
#define CAL_REG6 0xa0168
#define RX_REG2 0xa0184
#define RX_REG3 0xa0188
#define PCIE_REG1 0xa0288
#define PCIE_REG3 0xa0290
#define LANE_CFG1_REG 0xa0604
#define LANE_CFG4_REG 0xa0620
#define LANE_CFG5_REG 0xa0624
#define GLOBAL_CLK_CTRL 0xa0704
#define GLOBAL_MISC_CTRL 0xa0718
#define GLOBAL_CLK_SRC_HI 0xa0710
#define GLOBAL_CLK_CTRL 0xa0704
#define GLOBAL_MISC_CTRL 0xa0718
#define GLOBAL_PM_CTRL 0xa0740
/* SATA registers */
#define SATA_CTRL_REG_IND_ADDR 0xa80a0
#define SATA_CTRL_REG_IND_DATA 0xa80a4
#define SATA_VENDOR_PORT_0_REG_ADDR 0xa8178
#define SATA_VENDOR_PORT_1_REG_ADDR 0xa81f8
#define SATA_VENDOR_PORT_0_REG_DATA 0xa817c
#define SATA_VENDOR_PORT_1_REG_DATA 0xa81fc
/* Reference clock values and mask */
#define POWER_AND_PLL_CTRL_REG_100MHZ_VAL 0x0
#define POWER_AND_PLL_CTRL_REG_25MHZ_VAL_1 0x1
#define POWER_AND_PLL_CTRL_REG_25MHZ_VAL_2 0x2
#define POWER_AND_PLL_CTRL_REG_40MHZ_VAL 0x3
#define GLOBAL_PM_CTRL_REG_25MHZ_VAL 0x7
#define GLOBAL_PM_CTRL_REG_40MHZ_VAL 0xc
#define LANE_CFG4_REG_25MHZ_VAL 0x200
#define LANE_CFG4_REG_40MHZ_VAL 0x300
#define POWER_AND_PLL_CTRL_REG_MASK (~(0x1f))
#define GLOBAL_PM_CTRL_REG_MASK (~(0xff))
#define LANE_CFG4_REG_MASK (~(0x1f00))
#define REF_CLK_SELECTOR_VAL_PEX0(reg_val) (reg_val >> 2) & 0x1
#define REF_CLK_SELECTOR_VAL_PEX1(reg_val) (reg_val >> 3) & 0x1
#define REF_CLK_SELECTOR_VAL_PEX2(reg_val) (reg_val >> 30) & 0x1
#define REF_CLK_SELECTOR_VAL_PEX3(reg_val) (reg_val >> 31) & 0x1
#define REF_CLK_SELECTOR_VAL(reg_val) (reg_val & 0x1)
#define MAX_SELECTOR_VAL 10
/* TWSI addresses */
/* starting from A38x A0, i2c address of EEPROM is 0x57 */
#ifdef CONFIG_ARMADA_39X
#define EEPROM_I2C_ADDR 0x50
#else
#define EEPROM_I2C_ADDR (sys_env_device_rev_get() == \
MV_88F68XX_Z1_ID ? 0x50 : 0x57)
#endif
#define RD_GET_MODE_ADDR 0x4c
#define DB_GET_MODE_SLM1363_ADDR 0x25
#define DB_GET_MODE_SLM1364_ADDR 0x24
#define DB381_GET_MODE_SLM1426_1427_ADDR 0x56
/* DB-BP Board 'SatR' mapping */
#define SATR_DB_LANE1_MAX_OPTIONS 7
#define SATR_DB_LANE1_CFG_MASK 0x7
#define SATR_DB_LANE1_CFG_OFFSET 0
#define SATR_DB_LANE2_MAX_OPTIONS 4
#define SATR_DB_LANE2_CFG_MASK 0x38
#define SATR_DB_LANE2_CFG_OFFSET 3
/* GP Board 'SatR' mapping */
#define SATR_GP_LANE1_CFG_MASK 0x4
#define SATR_GP_LANE1_CFG_OFFSET 2
#define SATR_GP_LANE2_CFG_MASK 0x8
#define SATR_GP_LANE2_CFG_OFFSET 3
/* For setting MPP2 and MPP3 to be TWSI mode and MPP 0,1 to UART mode */
#define MPP_CTRL_REG 0x18000
#define MPP_SET_MASK (~(0xffff))
#define MPP_SET_DATA (0x1111)
#define MPP_UART1_SET_MASK (~(0xff000))
#define MPP_UART1_SET_DATA (0x66000)
#define AVS_DEBUG_CNTR_REG 0xe4124
#define AVS_DEBUG_CNTR_DEFAULT_VALUE 0x08008073
#define AVS_ENABLED_CONTROL 0xe4130
#define AVS_LOW_VDD_LIMIT_OFFS 4
#define AVS_LOW_VDD_LIMIT_MASK (0xff << AVS_LOW_VDD_LIMIT_OFFS)
#define AVS_LOW_VDD_LIMIT_VAL (0x27 << AVS_LOW_VDD_LIMIT_OFFS)
#define AVS_HIGH_VDD_LIMIT_OFFS 12
#define AVS_HIGH_VDD_LIMIT_MASK (0xff << AVS_HIGH_VDD_LIMIT_OFFS)
#define AVS_HIGH_VDD_LIMIT_VAL (0x27 << AVS_HIGH_VDD_LIMIT_OFFS)
/* Board ID numbers */
#define MARVELL_BOARD_ID_MASK 0x10
/* Customer boards for A38x */
#define A38X_CUSTOMER_BOARD_ID_BASE 0x0
#define A38X_CUSTOMER_BOARD_ID0 (A38X_CUSTOMER_BOARD_ID_BASE + 0)
#define A38X_CUSTOMER_BOARD_ID1 (A38X_CUSTOMER_BOARD_ID_BASE + 1)
#define A38X_MV_MAX_CUSTOMER_BOARD_ID (A38X_CUSTOMER_BOARD_ID_BASE + 2)
#define A38X_MV_CUSTOMER_BOARD_NUM (A38X_MV_MAX_CUSTOMER_BOARD_ID - \
A38X_CUSTOMER_BOARD_ID_BASE)
/* Marvell boards for A38x */
#define A38X_MARVELL_BOARD_ID_BASE 0x10
#define RD_NAS_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 0)
#define DB_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 1)
#define RD_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 2)
#define DB_AP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 3)
#define DB_GP_68XX_ID (A38X_MARVELL_BOARD_ID_BASE + 4)
#define DB_BP_6821_ID (A38X_MARVELL_BOARD_ID_BASE + 5)
#define DB_AMC_6820_ID (A38X_MARVELL_BOARD_ID_BASE + 6)
#define A38X_MV_MAX_MARVELL_BOARD_ID (A38X_MARVELL_BOARD_ID_BASE + 7)
#define A38X_MV_MARVELL_BOARD_NUM (A38X_MV_MAX_MARVELL_BOARD_ID - \
A38X_MARVELL_BOARD_ID_BASE)
/* Customer boards for A39x */
#define A39X_CUSTOMER_BOARD_ID_BASE 0x20
#define A39X_CUSTOMER_BOARD_ID0 (A39X_CUSTOMER_BOARD_ID_BASE + 0)
#define A39X_CUSTOMER_BOARD_ID1 (A39X_CUSTOMER_BOARD_ID_BASE + 1)
#define A39X_MV_MAX_CUSTOMER_BOARD_ID (A39X_CUSTOMER_BOARD_ID_BASE + 2)
#define A39X_MV_CUSTOMER_BOARD_NUM (A39X_MV_MAX_CUSTOMER_BOARD_ID - \
A39X_CUSTOMER_BOARD_ID_BASE)
/* Marvell boards for A39x */
#define A39X_MARVELL_BOARD_ID_BASE 0x30
#define A39X_DB_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 0)
#define A39X_RD_69XX_ID (A39X_MARVELL_BOARD_ID_BASE + 1)
#define A39X_MV_MAX_MARVELL_BOARD_ID (A39X_MARVELL_BOARD_ID_BASE + 2)
#define A39X_MV_MARVELL_BOARD_NUM (A39X_MV_MAX_MARVELL_BOARD_ID - \
A39X_MARVELL_BOARD_ID_BASE)
#ifdef CONFIG_ARMADA_38X
#define CUTOMER_BOARD_ID_BASE A38X_CUSTOMER_BOARD_ID_BASE
#define CUSTOMER_BOARD_ID0 A38X_CUSTOMER_BOARD_ID0
#define CUSTOMER_BOARD_ID1 A38X_CUSTOMER_BOARD_ID1
#define MV_MAX_CUSTOMER_BOARD_ID A38X_MV_MAX_CUSTOMER_BOARD_ID
#define MV_CUSTOMER_BOARD_NUM A38X_MV_CUSTOMER_BOARD_NUM
#define MARVELL_BOARD_ID_BASE A38X_MARVELL_BOARD_ID_BASE
#define MV_MAX_MARVELL_BOARD_ID A38X_MV_MAX_MARVELL_BOARD_ID
#define MV_MARVELL_BOARD_NUM A38X_MV_MARVELL_BOARD_NUM
#define MV_DEFAULT_BOARD_ID DB_68XX_ID
#define MV_DEFAULT_DEVICE_ID MV_6811
#elif defined(CONFIG_ARMADA_39X)
#define CUTOMER_BOARD_ID_BASE A39X_CUSTOMER_BOARD_ID_BASE
#define CUSTOMER_BOARD_ID0 A39X_CUSTOMER_BOARD_ID0
#define CUSTOMER_BOARD_ID1 A39X_CUSTOMER_BOARD_ID1
#define MV_MAX_CUSTOMER_BOARD_ID A39X_MV_MAX_CUSTOMER_BOARD_ID
#define MV_CUSTOMER_BOARD_NUM A39X_MV_CUSTOMER_BOARD_NUM
#define MARVELL_BOARD_ID_BASE A39X_MARVELL_BOARD_ID_BASE
#define MV_MAX_MARVELL_BOARD_ID A39X_MV_MAX_MARVELL_BOARD_ID
#define MV_MARVELL_BOARD_NUM A39X_MV_MARVELL_BOARD_NUM
#define MV_DEFAULT_BOARD_ID A39X_DB_69XX_ID
#define MV_DEFAULT_DEVICE_ID MV_6920
#endif
#define MV_INVALID_BOARD_ID 0xffffffff
/* device revision */
#define DEV_VERSION_ID_REG 0x1823c
#define REVISON_ID_OFFS 8
#define REVISON_ID_MASK 0xf00
/* A38x revisions */
#define MV_88F68XX_Z1_ID 0x0
#define MV_88F68XX_A0_ID 0x4
/* A39x revisions */
#define MV_88F69XX_Z1_ID 0x2
#define MPP_CONTROL_REG(id) (0x18000 + (id * 4))
#define GPP_DATA_OUT_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x00)
#define GPP_DATA_OUT_EN_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x04)
#define GPP_DATA_IN_REG(grp) (MV_GPP_REGS_BASE(grp) + 0x10)
#define MV_GPP_REGS_BASE(unit) (0x18100 + ((unit) * 0x40))
#define MPP_REG_NUM(GPIO_NUM) (GPIO_NUM / 8)
#define MPP_MASK(GPIO_NUM) (0xf << 4 * (GPIO_NUM - \
(MPP_REG_NUM(GPIO_NUM) * 8)))
#define GPP_REG_NUM(GPIO_NUM) (GPIO_NUM / 32)
#define GPP_MASK(GPIO_NUM) (1 << GPIO_NUM % 32)
/* device ID */
/* Armada 38x Family */
#define MV_6810_DEV_ID 0x6810
#define MV_6811_DEV_ID 0x6811
#define MV_6820_DEV_ID 0x6820
#define MV_6828_DEV_ID 0x6828
/* Armada 39x Family */
#define MV_6920_DEV_ID 0x6920
#define MV_6928_DEV_ID 0x6928
enum {
MV_6810,
MV_6820,
MV_6811,
MV_6828,
MV_NONE,
MV_6920,
MV_6928,
MV_MAX_DEV_ID,
};
#define MV_6820_INDEX 0
#define MV_6810_INDEX 1
#define MV_6811_INDEX 2
#define MV_6828_INDEX 3
#define MV_6920_INDEX 0
#define MV_6928_INDEX 1
#ifdef CONFIG_ARMADA_38X
#define MAX_DEV_ID_NUM 4
#else
#define MAX_DEV_ID_NUM 2
#endif
#define MV_6820_INDEX 0
#define MV_6810_INDEX 1
#define MV_6811_INDEX 2
#define MV_6828_INDEX 3
#define MV_6920_INDEX 0
#define MV_6928_INDEX 1
enum unit_id {
PEX_UNIT_ID,
ETH_GIG_UNIT_ID,
USB3H_UNIT_ID,
USB3D_UNIT_ID,
SATA_UNIT_ID,
QSGMII_UNIT_ID,
XAUI_UNIT_ID,
RXAUI_UNIT_ID,
MAX_UNITS_ID
};
struct board_wakeup_gpio {
u32 board_id;
int gpio_num;
};
enum suspend_wakeup_status {
SUSPEND_WAKEUP_DISABLED,
SUSPEND_WAKEUP_ENABLED,
SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED,
};
/*
* GPIO status indication for Suspend Wakeup:
* If suspend to RAM is supported and GPIO indication is implemented,
* set the GPIO number.
* If suspend to RAM is supported but GPIO indication is not implemented,
* set '-2'.
* If suspend to RAM is not supported, set '-1'.
*/
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
#ifdef CONFIG_ARMADA_38X
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A38X_CUSTOMER_BOARD_ID0, -1 }, \
{A38X_CUSTOMER_BOARD_ID0, -1 }, \
};
#else
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A39X_CUSTOMER_BOARD_ID0, -1 }, \
{A39X_CUSTOMER_BOARD_ID0, -1 }, \
};
#endif /* CONFIG_ARMADA_38X */
#else
#ifdef CONFIG_ARMADA_38X
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{RD_NAS_68XX_ID, -2 }, \
{DB_68XX_ID, -1 }, \
{RD_AP_68XX_ID, -2 }, \
{DB_AP_68XX_ID, -2 }, \
{DB_GP_68XX_ID, -2 }, \
{DB_BP_6821_ID, -2 }, \
{DB_AMC_6820_ID, -2 }, \
};
#else
#define MV_BOARD_WAKEUP_GPIO_INFO { \
{A39X_RD_69XX_ID, -1 }, \
{A39X_DB_69XX_ID, -1 }, \
};
#endif /* CONFIG_ARMADA_38X */
#endif /* CONFIG_CUSTOMER_BOARD_SUPPORT */
u32 mv_board_tclk_get(void);
u32 mv_board_id_get(void);
u32 mv_board_id_index_get(u32 board_id);
u32 sys_env_unit_max_num_get(enum unit_id unit);
enum suspend_wakeup_status sys_env_suspend_wakeup_check(void);
u8 sys_env_device_rev_get(void);
u32 sys_env_device_id_get(void);
u16 sys_env_model_get(void);
struct dlb_config *sys_env_dlb_config_ptr_get(void);
u32 sys_env_get_topology_update_info(
struct topology_update_info *topology_update_info);
u32 sys_env_get_cs_ena_from_reg(void);
#endif /* _SYS_ENV_LIB_H */
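As an aside, a hypothetical custom-board wakeup entry following the -1/-2/GPIO-number convention documented above (the macro name and GPIO number are made up for illustration):
/* Hypothetical custom-board entry: suspend-to-RAM supported, wakeup
 * indication wired to GPIO 21 (board ID and GPIO number are examples). */
#define MY_CUSTOM_BOARD_WAKEUP_GPIO_INFO { \
	{A38X_CUSTOMER_BOARD_ID0, 21 }, \
}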


@@ -7,7 +7,7 @@
#ifndef __HIGHSPEED_ENV_SPEC_H
#define __HIGHSPEED_ENV_SPEC_H
#include "../../../drivers/ddr/mvebu/ddr3_hw_training.h"
#include "../../../drivers/ddr/marvell/axp/ddr3_hw_training.h"
typedef enum {
SERDES_UNIT_UNCONNECTED = 0x0,


@@ -26,8 +26,17 @@ void board_init_f(ulong dummy)
/* Linux expects the internal registers to be at 0xf1000000 */
arch_cpu_init();
/*
* Pin muxing needs to be done before UART output, since
* on A38x the UART pins need some re-muxing for output
* to work.
*/
board_early_init_f();
preloader_console_init();
timer_init();
/* First init the serdes PHY's */
serdes_phy_config();


@@ -41,6 +41,8 @@
#define timestamp gd->arch.tbl
#define lastdec gd->arch.lastinc
static int init_done;
/* Timer reload and current value registers */
struct kwtmr_val {
u32 reload; /* Timer reload reg */
@@ -112,6 +114,11 @@ void __udelay(unsigned long usec)
*/
int timer_init(void)
{
/* Only init the timer once */
if (init_done)
return 0;
init_done = 1;
/* load value into timer */
writel(TIMER_LOAD_VAL, CNTMR_RELOAD_REG(UBOOT_CNTR));
writel(TIMER_LOAD_VAL, CNTMR_VAL_REG(UBOOT_CNTR));


@@ -0,0 +1,18 @@
Update from original Marvell U-Boot to mainline U-Boot:
-------------------------------------------------------
The resulting image, including the SPL binary with the
full DDR setup, is "u-boot-spl.kwb".
To update the SPI NOR flash, please use the following
command:
=> sf probe;tftpboot 2000000 db-88f6820-gp/u-boot-spl.kwb;\
sf update 2000000 0 60000
Note that the original Marvell U-Boot seems to have
problems with the "sf update" command; it does not
work reliably. Use this command sequence instead:
=> sf probe;tftpboot 2000000 db-88f6820-gp/u-boot-spl.kwb;\
sf erase 0 60000;sf write 2000000 0 60000


@@ -11,6 +11,8 @@
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "../drivers/ddr/marvell/a38x/ddr3_a38x_topology.h"
DECLARE_GLOBAL_DATA_PTR;
#define BIT(nr) (1UL << (nr))
@@ -54,6 +56,35 @@ static struct marvell_io_exp io_exp[] = {
{ 0x21, 3, 0xC0 } /* Output Data, register#1 */
};
/*
* Define the DDR layout / topology here in the board file. This will
* be used by the DDR3 init code in the SPL U-Boot version to configure
* the DDR3 controller.
*/
static struct hws_topology_map board_topology_map = {
0x1, /* active interfaces */
/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
{ { { {0x1, 0, 0, 0},
{0x1, 0, 0, 0},
{0x1, 0, 0, 0},
{0x1, 0, 0, 0},
{0x1, 0, 0, 0} },
SPEED_BIN_DDR_1866L, /* speed_bin */
BUS_WIDTH_8, /* memory_width */
MEM_4G, /* mem_size */
DDR_FREQ_800, /* frequency */
0, 0, /* cas_l cas_wl */
HWS_TEMP_LOW} }, /* temperature */
5, /* Num Of Bus Per Interface*/
BUS_MASK_32BIT /* Busses mask */
};
struct hws_topology_map *ddr3_get_topology_map(void)
{
/* Return the board topology as defined in the board code */
return &board_topology_map;
}
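As a hypothetical sketch only (not part of this patch): a board wired for a
16-bit DDR bus with ECC would keep the same structure and mainly change the
bus mask field, assuming the BUS_MASK_16BIT_ECC define from
ddr3_a38x_topology.h (included via the header above):
static struct hws_topology_map board_topology_map_16bit_ecc = {
	0x1, /* active interfaces */
	/* cs_mask, mirror, dqs_swap, ck_swap X PUPs */
	{ { { {0x1, 0, 0, 0},
	      {0x1, 0, 0, 0},
	      {0x1, 0, 0, 0},
	      {0x1, 0, 0, 0},
	      {0x1, 0, 0, 0} },
	    SPEED_BIN_DDR_1866L,	/* speed_bin */
	    BUS_WIDTH_8,		/* memory_width */
	    MEM_4G,			/* mem_size */
	    DDR_FREQ_800,		/* frequency */
	    0, 0,			/* cas_l cas_wl */
	    HWS_TEMP_LOW} },		/* temperature */
	5,				/* Num Of Bus Per Interface */
	BUS_MASK_16BIT_ECC		/* Busses mask */
};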
int board_early_init_f(void)
{
/* Configure MPP */

View File

@ -9,4 +9,4 @@ VERSION 1
BOOT_FROM spi
# Binary Header (bin_hdr) with DDR3 training code
BINARY board/Marvell/db-88f6820-gp/binary.0 0000005b 00000068
BINARY spl/u-boot-spl.bin 0000005b 00000068

View File

@ -11,8 +11,8 @@
#include <asm/arch/soc.h>
#include <linux/mbus.h>
#include "../drivers/ddr/mvebu/ddr3_hw_training.h"
#include "../arch/arm/mach-mvebu/serdes/high_speed_env_spec.h"
#include "../drivers/ddr/marvell/axp/ddr3_hw_training.h"
#include "../arch/arm/mach-mvebu/serdes/axp/high_speed_env_spec.h"
DECLARE_GLOBAL_DATA_PTR;

View File

@ -1,3 +1,4 @@
CONFIG_SPL=y
CONFIG_ARM=y
CONFIG_TARGET_DB_88F6820_GP=y
# CONFIG_CMD_IMLS is not set

View File

@ -13,6 +13,8 @@
#include <asm/arch/orion5x.h>
#elif defined(CONFIG_KIRKWOOD)
#include <asm/arch/soc.h>
#elif defined(CONFIG_ARMADA_XP)
#include <linux/mbus.h>
#endif
/* SATA port registers */
@ -89,6 +91,41 @@ struct mvsata_port_registers {
#define MVSATA_STATUS_OK 0
#define MVSATA_STATUS_TIMEOUT -1
/*
* Registers for SATA MBUS memory windows
*/
#define MVSATA_WIN_CONTROL(w) (MVEBU_AXP_SATA_BASE + 0x30 + ((w) << 4))
#define MVSATA_WIN_BASE(w) (MVEBU_AXP_SATA_BASE + 0x34 + ((w) << 4))
/*
* Initialize SATA memory windows for Armada XP
*/
#ifdef CONFIG_ARMADA_XP
static void mvsata_ide_conf_mbus_windows(void)
{
const struct mbus_dram_target_info *dram;
int i;
dram = mvebu_mbus_dram_info();
/* Disable windows, Set Size/Base to 0 */
for (i = 0; i < 4; i++) {
writel(0, MVSATA_WIN_CONTROL(i));
writel(0, MVSATA_WIN_BASE(i));
}
for (i = 0; i < dram->num_cs; i++) {
const struct mbus_dram_window *cs = dram->cs + i;
writel(((cs->size - 1) & 0xffff0000) | (cs->mbus_attr << 8) |
(dram->mbus_dram_target_id << 4) | 1,
MVSATA_WIN_CONTROL(i));
writel(cs->base & 0xffff0000, MVSATA_WIN_BASE(i));
}
}
#endif
/*
* Initialize one MVSATAHC port: set SControl's IPM to "always active"
* and DET to "reset", then wait for SStatus's DET to become "device and
@ -137,6 +174,10 @@ int ide_preinit(void)
int ret = MVSATA_STATUS_TIMEOUT;
int status;
#ifdef CONFIG_ARMADA_XP
mvsata_ide_conf_mbus_windows();
#endif
/* Enable ATA port 0 (could be SATA port 0 or 1) if declared */
#if defined(CONFIG_SYS_ATA_IDE0_OFFSET)
status = mvsata_ide_initialize_port(

View File

@ -0,0 +1,19 @@
#
# SPDX-License-Identifier: GPL-2.0+
#
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x.o
obj-$(CONFIG_SPL_BUILD) += ddr3_a38x_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_debug.o
obj-$(CONFIG_SPL_BUILD) += ddr3_hws_hw_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_init.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_bist.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_centralization.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_db.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_hw_algo.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_ip_engine.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_leveling.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_pbs.o
obj-$(CONFIG_SPL_BUILD) += ddr3_training_static.o
obj-$(CONFIG_SPL_BUILD) += xor.o

View File

@ -0,0 +1,741 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define A38X_NUMBER_OF_INTERFACES 5
#define SAR_DEV_ID_OFFS 27
#define SAR_DEV_ID_MASK 0x7
/* Thermal Sensor Registers */
#define TSEN_STATE_REG 0xe4070
#define TSEN_STATE_OFFSET 31
#define TSEN_STATE_MASK (0x1 << TSEN_STATE_OFFSET)
#define TSEN_CONF_REG 0xe4074
#define TSEN_CONF_RST_OFFSET 8
#define TSEN_CONF_RST_MASK (0x1 << TSEN_CONF_RST_OFFSET)
#define TSEN_STATUS_REG 0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET 10
#define TSEN_STATUS_READOUT_VALID_MASK (0x1 << \
TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET 0
#define TSEN_STATUS_TEMP_OUT_MASK (0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)
static struct dfx_access interface_map[] = {
/* Pipe Client */
{ 0, 17 },
{ 1, 7 },
{ 1, 11 },
{ 0, 3 },
{ 1, 25 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 }
};
/* This array holds the board round trip delay (DQ and CK) per <interface,bus> */
struct trip_delay_element a38x_board_round_trip_delay_array[] = {
/* 1st board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 }, /* ECC PUP */
/* 2nd board */
/* Interface bus DQS-delay CK-delay */
{ 3952, 5060 },
{ 3192, 4493 },
{ 4785, 6677 },
{ 3413, 7267 },
{ 4282, 6086 }, /* ECC PUP */
{ 3952, 5134 },
{ 3192, 4567 },
{ 4785, 6751 },
{ 3413, 7341 },
{ 4282, 6160 } /* ECC PUP */
};
#ifdef STATIC_ALGO_SUPPORT
/* package trace */
static struct trip_delay_element a38x_package_round_trip_delay_array[] = {
/* IF BUS DQ_DELAY CK_DELAY */
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 },
{ 0, 0 }
};
static int a38x_silicon_delay_offset[] = {
/* board 0 */
0,
/* board 1 */
0,
/* board 2 */
0
};
#endif
static u8 a38x_bw_per_freq[DDR_FREQ_LIMIT] = {
0x3, /* DDR_FREQ_100 */
0x4, /* DDR_FREQ_400 */
0x4, /* DDR_FREQ_533 */
0x5, /* DDR_FREQ_667 */
0x5, /* DDR_FREQ_800 */
0x5, /* DDR_FREQ_933 */
0x5, /* DDR_FREQ_1066 */
0x3, /* DDR_FREQ_311 */
0x3, /* DDR_FREQ_333 */
0x4, /* DDR_FREQ_467 */
0x5, /* DDR_FREQ_850 */
0x5, /* DDR_FREQ_600 */
0x3, /* DDR_FREQ_300 */
0x5, /* DDR_FREQ_900 */
0x3, /* DDR_FREQ_360 */
0x5 /* DDR_FREQ_1000 */
};
static u8 a38x_rate_per_freq[DDR_FREQ_LIMIT] = {
/*TBD*/ 0x1, /* DDR_FREQ_100 */
0x2, /* DDR_FREQ_400 */
0x2, /* DDR_FREQ_533 */
0x2, /* DDR_FREQ_667 */
0x2, /* DDR_FREQ_800 */
0x3, /* DDR_FREQ_933 */
0x3, /* DDR_FREQ_1066 */
0x1, /* DDR_FREQ_311 */
0x1, /* DDR_FREQ_333 */
0x2, /* DDR_FREQ_467 */
0x2, /* DDR_FREQ_850 */
0x2, /* DDR_FREQ_600 */
0x1, /* DDR_FREQ_300 */
0x2, /* DDR_FREQ_900 */
0x1, /* DDR_FREQ_360 */
0x2 /* DDR_FREQ_1000 */
};
static u16 a38x_vco_freq_per_sar[] = {
666, /* 0 */
1332,
800,
1600,
1066,
2132,
1200,
2400,
1332,
1332,
1500,
1500,
1600, /* 12 */
1600,
1700,
1700,
1866,
1866,
1800, /* 18 */
2000,
2000,
4000,
2132,
2132,
2300,
2300,
2400,
2400,
2500,
2500,
800
};
u32 pipe_multicast_mask;
u32 dq_bit_map_2_phy_pin[] = {
1, 0, 2, 6, 9, 8, 3, 7, /* 0 */
8, 9, 1, 7, 2, 6, 3, 0, /* 1 */
3, 9, 7, 8, 1, 0, 2, 6, /* 2 */
1, 0, 6, 2, 8, 3, 7, 9, /* 3 */
0, 1, 2, 9, 7, 8, 3, 6, /* 4 */
};
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
enum hws_ddr_freq freq);
/*
* Read temperature TJ value
*/
u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
int reg = 0;
/* Initiates TSEN hardware reset once */
if ((reg_read(TSEN_CONF_REG) & TSEN_CONF_RST_MASK) == 0)
reg_bit_set(TSEN_CONF_REG, TSEN_CONF_RST_MASK);
mdelay(10);
/* Check if the readout field is valid */
if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
printf("%s: TSEN not ready\n", __func__);
return 0;
}
reg = reg_read(TSEN_STATUS_REG);
reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;
/* Convert the raw readout to Celsius: T = reg * 10000 / 21445 - 272.674 */
return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}
/*
* Name: ddr3_tip_a38x_get_freq_config.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info
*freq_config_info)
{
if (a38x_bw_per_freq[freq] == 0xff)
return MV_NOT_SUPPORTED;
if (freq_config_info == NULL)
return MV_BAD_PARAM;
freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
freq_config_info->is_supported = 1;
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_pipe_enable.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_pipe_enable(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, int enable)
{
u32 data_value, pipe_enable_mask = 0;
if (enable == 0) {
pipe_enable_mask = 0;
} else {
if (interface_access == ACCESS_TYPE_MULTICAST)
pipe_enable_mask = pipe_multicast_mask;
else
pipe_enable_mask = (1 << interface_map[if_id].pipe);
}
CHECK_STATUS(ddr3_tip_reg_read
(dev_num, PIPE_ENABLE_ADDR, &data_value, MASK_ALL_BITS));
data_value = (data_value & (~0xff)) | pipe_enable_mask;
CHECK_STATUS(ddr3_tip_reg_write(dev_num, PIPE_ENABLE_ADDR, data_value));
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_write.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data_value,
u32 mask)
{
u32 ui_data_read;
if (mask != MASK_ALL_BITS) {
CHECK_STATUS(ddr3_tip_a38x_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id, reg_addr,
&ui_data_read, MASK_ALL_BITS));
data_value = (ui_data_read & (~mask)) | (data_value & mask);
}
reg_write(reg_addr, data_value);
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_if_read.
* Desc:
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask)
{
*data = reg_read(reg_addr) & mask;
return MV_OK;
}
/*
* Name: ddr3_tip_a38x_select_ddr_controller.
* Desc: Enable/Disable access to Marvell's server.
* Args: dev_num - device number
* enable - whether to enable or disable the server
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
u32 reg;
reg = reg_read(CS_ENABLE_REG);
if (enable)
reg |= (1 << 6);
else
reg &= ~(1 << 6);
reg_write(CS_ENABLE_REG, reg);
return MV_OK;
}
/*
* Name: ddr3_tip_init_a38x_silicon.
* Desc: init Training SW DB.
* Args:
* Notes:
* Returns: MV_OK if success, other error code if fail.
*/
static int ddr3_tip_init_a38x_silicon(u32 dev_num, u32 board_id)
{
struct hws_tip_config_func_db config_func;
enum hws_ddr_freq ddr_freq;
int status;
struct hws_topology_map *tm = ddr3_get_topology_map();
/* new read leveling version */
config_func.tip_dunit_read_func = ddr3_tip_a38x_if_read;
config_func.tip_dunit_write_func = ddr3_tip_a38x_if_write;
config_func.tip_dunit_mux_select_func =
ddr3_tip_a38x_select_ddr_controller;
config_func.tip_get_freq_config_info_func =
ddr3_tip_a38x_get_freq_config;
config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
ddr3_tip_init_config_func(dev_num, &config_func);
ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);
#ifdef STATIC_ALGO_SUPPORT
{
struct hws_tip_static_config_info static_config;
u32 board_offset =
board_id * A38X_NUMBER_OF_INTERFACES *
tm->num_of_bus_per_interface;
static_config.silicon_delay =
a38x_silicon_delay_offset[board_id];
static_config.package_trace_arr =
a38x_package_round_trip_delay_array;
static_config.board_trace_arr =
&a38x_board_round_trip_delay_array[board_offset];
ddr3_tip_init_static_config_db(dev_num, &static_config);
}
#endif
status = ddr3_tip_a38x_get_init_freq(dev_num, &ddr_freq);
if (MV_OK != status) {
DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
("DDR3 silicon get target frequency - FAILED 0x%x\n",
status));
return status;
}
rl_version = 1;
mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
LOAD_PATTERN_MASK_BIT |
SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
/* LOAD_PATTERN_2_MASK_BIT | */
WRITE_LEVELING_SUPP_MASK_BIT |
READ_LEVELING_MASK_BIT |
PBS_RX_MASK_BIT |
PBS_TX_MASK_BIT |
SET_TARGET_FREQ_MASK_BIT |
WRITE_LEVELING_TF_MASK_BIT |
WRITE_LEVELING_SUPP_TF_MASK_BIT |
READ_LEVELING_TF_MASK_BIT |
CENTRALIZATION_RX_MASK_BIT |
CENTRALIZATION_TX_MASK_BIT);
rl_mid_freq_wa = 1;
if ((ddr_freq == DDR_FREQ_333) || (ddr_freq == DDR_FREQ_400)) {
mask_tune_func = (WRITE_LEVELING_MASK_BIT |
LOAD_PATTERN_2_MASK_BIT |
WRITE_LEVELING_SUPP_MASK_BIT |
READ_LEVELING_MASK_BIT |
PBS_RX_MASK_BIT |
PBS_TX_MASK_BIT |
CENTRALIZATION_RX_MASK_BIT |
CENTRALIZATION_TX_MASK_BIT);
rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
}
/* Supplementary not supported for ECC modes */
if (1 == ddr3_if_ecc_enabled()) {
mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
mask_tune_func &= ~PBS_TX_MASK_BIT;
mask_tune_func &= ~PBS_RX_MASK_BIT;
}
if (ck_delay == -1)
ck_delay = 160;
if (ck_delay_16 == -1)
ck_delay_16 = 160;
ca_delay = 0;
delay_enable = 1;
calibration_update_control = 1;
init_freq = tm->interface_params[first_active_if].memory_freq;
ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
return MV_OK;
}
int ddr3_a38x_update_topology_map(u32 dev_num, struct hws_topology_map *tm)
{
u32 if_id = 0;
enum hws_ddr_freq freq;
ddr3_tip_a38x_get_init_freq(dev_num, &freq);
tm->interface_params[if_id].memory_freq = freq;
/*
* re-calc topology parameters according to topology updates
* (if needed)
*/
CHECK_STATUS(hws_ddr3_tip_load_topology_map(dev_num, tm));
return MV_OK;
}
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
if (NULL == tm)
return MV_FAIL;
ddr3_a38x_update_topology_map(dev_num, tm);
ddr3_tip_init_a38x_silicon(dev_num, board_id);
return MV_OK;
}
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq)
{
u32 reg;
/* Read sample at reset setting */
reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
switch (reg) {
case 0x0:
case 0x1:
*freq = DDR_FREQ_333;
break;
case 0x2:
case 0x3:
*freq = DDR_FREQ_400;
break;
case 0x4:
case 0xd:
*freq = DDR_FREQ_533;
break;
case 0x6:
*freq = DDR_FREQ_600;
break;
case 0x8:
case 0x11:
case 0x14:
*freq = DDR_FREQ_667;
break;
case 0xc:
case 0x15:
case 0x1b:
*freq = DDR_FREQ_800;
break;
case 0x10:
*freq = DDR_FREQ_933;
break;
case 0x12:
*freq = DDR_FREQ_900;
break;
case 0x13:
*freq = DDR_FREQ_900;
break;
default:
*freq = 0;
return MV_NOT_SUPPORTED;
}
return MV_OK;
}
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq)
{
u32 reg;
/* Read sample at reset setting */
reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
switch (reg) {
case 0x0:
case 0x1:
/* Medium is same as TF to run PBS in this freq */
*freq = DDR_FREQ_333;
break;
case 0x2:
case 0x3:
/* Medium is same as TF to run PBS in this freq */
*freq = DDR_FREQ_400;
break;
case 0x4:
case 0xd:
*freq = DDR_FREQ_533;
break;
case 0x8:
case 0x11:
case 0x14:
*freq = DDR_FREQ_333;
break;
case 0xc:
case 0x15:
case 0x1b:
*freq = DDR_FREQ_400;
break;
case 0x6:
*freq = DDR_FREQ_300;
break;
case 0x12:
*freq = DDR_FREQ_360;
break;
case 0x13:
*freq = DDR_FREQ_400;
break;
default:
*freq = 0;
return MV_NOT_SUPPORTED;
}
return MV_OK;
}
u32 ddr3_tip_get_init_freq(void)
{
enum hws_ddr_freq freq;
ddr3_tip_a38x_get_init_freq(0, &freq);
return freq;
}
static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
enum hws_ddr_freq frequency)
{
u32 divider = 0;
u32 sar_val;
if (if_id != 0) {
DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
("A38x does not support interface 0x%x\n",
if_id));
return MV_BAD_PARAM;
}
/* get VCO freq index */
sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;
/* e.g. a 1600 MHz VCO with a DDR target of 800 MHz yields a divider of 2 */
divider = a38x_vco_freq_per_sar[sar_val] / freq_val[frequency];
/* Set Sync mode */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x20220, 0x0,
0x1000));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe42f4, 0x0,
0x200));
/* cpupll_clkdiv_reset_mask */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0x1f,
0xff));
/* cpupll_clkdiv_reload_smooth */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
(0x2 << 8), (0xff << 8)));
/* cpupll_clkdiv_relax_en */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260,
(0x2 << 24), (0xff << 24)));
/* write the divider */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4268,
(divider << 8), (0x3f << 8)));
/* set cpupll_clkdiv_reload_ratio */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264,
(1 << 8), (1 << 8)));
/* unset cpupll_clkdiv_reload_ratio */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
(1 << 8)));
/* clear cpupll_clkdiv_reload_force */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
(0xff << 8)));
/* clear cpupll_clkdiv_relax_en */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4260, 0,
(0xff << 24)));
/* clear cpupll_clkdiv_reset_mask */
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0xe4264, 0,
0xff));
/* Dunit training clock + 1:1 mode */
if ((frequency == DDR_FREQ_LOW_FREQ) || (freq_val[frequency] <= 400)) {
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
(1 << 16), (1 << 16)));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
(0 << 15), (1 << 15)));
} else {
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x18488,
0, (1 << 16)));
CHECK_STATUS(ddr3_tip_a38x_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1524,
(1 << 15), (1 << 15)));
}
return MV_OK;
}
/*
* external read from memory
*/
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *data)
{
u32 burst_num;
for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
data[burst_num] = readl(reg_addr + 4 * burst_num);
return MV_OK;
}
/*
* external write to memory
*/
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *data) {
u32 burst_num;
for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
writel(data[burst_num], reg_addr + 4 * burst_num);
return MV_OK;
}
int ddr3_silicon_pre_init(void)
{
int result;
result = ddr3_silicon_init();
return result;
}
int ddr3_post_run_alg(void)
{
return MV_OK;
}
int ddr3_silicon_post_init(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
/* Set half bus width */
if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_CONFIG_ADDR, 0x0, 0x8000));
}
return MV_OK;
}
int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
info_ptr->device_id = 0x6800;
info_ptr->ck_delay = ck_delay;
return MV_OK;
}

View File

@ -0,0 +1,98 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_A38X_H
#define _DDR3_A38X_H
#define MAX_INTERFACE_NUM 1
#define MAX_BUS_NUM 5
#include "ddr3_hws_hw_training_def.h"
/* Allow topology update from board TWSI device */
#if !defined(CONFIG_CUSTOMER_BOARD_SUPPORT)
#define MV_DDR_TOPOLOGY_UPDATE_FROM_TWSI
#endif
#define ECC_SUPPORT
/* right now, we're not supporting this in mainline */
#undef SUPPORT_STATIC_DUNIT_CONFIG
/* Controller bus divider 1 for 32 bit, 2 for 64 bit */
#define DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER 1
/* Tune internal training params values */
#define TUNE_TRAINING_PARAMS_CK_DELAY 160
#define TUNE_TRAINING_PARAMS_CK_DELAY_16 160
#define TUNE_TRAINING_PARAMS_PFINGER 41
#define TUNE_TRAINING_PARAMS_NFINGER 43
#define TUNE_TRAINING_PARAMS_PHYREG3VAL 0xa
#define MARVELL_BOARD MARVELL_BOARD_ID_BASE
#define REG_DEVICE_SAR1_ADDR 0xe4204
#define RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET 17
#define RST2_CPU_DDR_CLOCK_SELECT_IN_MASK 0x1f
/* DRAM Windows */
#define REG_XBAR_WIN_5_CTRL_ADDR 0x20050
#define REG_XBAR_WIN_5_BASE_ADDR 0x20054
/* DRAM Windows */
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
#define REG_XBAR_WIN_16_CTRL_ADDR 0x200d0
#define REG_XBAR_WIN_16_BASE_ADDR 0x200d4
#define REG_XBAR_WIN_16_REMAP_ADDR 0x200dc
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_FASTPATH_WIN_BASE_ADDR(win) (0x20180 + (0x8 * win))
#define REG_FASTPATH_WIN_CTRL_ADDR(win) (0x20184 + (0x8 * win))
/* SatR defines used to change the topology bus width and ECC configuration */
#define DDR_SATR_CONFIG_MASK_WIDTH 0x8
#define DDR_SATR_CONFIG_MASK_ECC 0x10
#define DDR_SATR_CONFIG_MASK_ECC_PUP 0x20
#define REG_SAMPLE_RESET_HIGH_ADDR 0x18600
#define MV_BOARD_REFCLK MV_BOARD_REFCLK_25MHZ
/* Matrix enables DRAM modes (bus width/ECC) per boardId */
#define TOPOLOGY_UPDATE_32BIT 0
#define TOPOLOGY_UPDATE_32BIT_ECC 1
#define TOPOLOGY_UPDATE_16BIT 2
#define TOPOLOGY_UPDATE_16BIT_ECC 3
#define TOPOLOGY_UPDATE_16BIT_ECC_PUP3 4
#define TOPOLOGY_UPDATE { \
/* 32Bit, 32bit ECC, 16bit, 16bit ECC PUP4, 16bit ECC PUP3 */ \
{1, 1, 1, 1, 1}, /* RD_NAS_68XX_ID */ \
{1, 1, 1, 1, 1}, /* DB_68XX_ID */ \
{1, 0, 1, 0, 1}, /* RD_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_AP_68XX_ID */ \
{1, 0, 1, 0, 1}, /* DB_GP_68XX_ID */ \
{0, 0, 1, 1, 0}, /* DB_BP_6821_ID */ \
{1, 1, 1, 1, 1} /* DB_AMC_6820_ID */ \
};
enum {
CPU_1066MHZ_DDR_400MHZ,
CPU_RESERVED_DDR_RESERVED0,
CPU_667MHZ_DDR_667MHZ,
CPU_800MHZ_DDR_800MHZ,
CPU_RESERVED_DDR_RESERVED1,
CPU_RESERVED_DDR_RESERVED2,
CPU_RESERVED_DDR_RESERVED3,
LAST_FREQ
};
#define ACTIVE_INTERFACE_MASK 0x1
#endif /* _DDR3_A38X_H */

View File

@ -0,0 +1,226 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_A38X_MC_STATIC_H
#define _DDR3_A38X_MC_STATIC_H
#include "ddr3_a38x.h"
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
static struct reg_data ddr3_customer_800[] = {
/* parameters for customer board (based on 800MHZ) */
{0x1400, 0x7b00cc30, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x5415baab, 0xffffffff},
{0x140c, 0x38411def, 0xffffffff},
{0x1410, 0x18300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0011a940, 0xffffffff},
{0x142c, 0x28c5134, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000d771, 0xffffffff},
{0x1494, 0x00030000, 0xffffffff},
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000b0b, 0xffffffff},
{0x153c, 0x00000c0c, 0xffffffff},
{0x15d0, 0x00000670, 0xffffffff},
{0x15d4, 0x00000046, 0xffffffff},
{0x15d8, 0x00000010, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000023, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
#else /* CONFIG_CUSTOMER_BOARD_SUPPORT */
struct reg_data ddr3_a38x_933[MV_MAX_DDR3_STATIC_SIZE] = {
/* parameters for 933MHZ */
{0x1400, 0x7b00ce3a, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x7417eccf, 0xffffffff},
{0x140c, 0x3e421f98, 0xffffffff},
{0x1410, 0x1a300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0013ca50, 0xffffffff},
{0x142c, 0x028c5165, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000e871, 0xffffffff},
{0x1494, 0x00010000, 0xffffffff},
{0x149c, 0x00000001, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xffffffe1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000d0d, 0xffffffff},
{0x153c, 0x00000d0d, 0xffffffff},
{0x15d0, 0x00000608, 0xffffffff},
{0x15d4, 0x00000044, 0xffffffff},
{0x15d8, 0x00000020, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000021, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_800[] = {
/* parameters for 800MHZ */
{0x1400, 0x7b00cc30, 0xffffffff},
{0x1404, 0x36301820, 0xffffffff},
{0x1408, 0x5415baab, 0xffffffff},
{0x140c, 0x38411def, 0xffffffff},
{0x1410, 0x18300000, 0xffffffff},
{0x1414, 0x00000700, 0xffffffff},
{0x1424, 0x0060f3ff, 0xffffffff},
{0x1428, 0x0011a940, 0xffffffff},
{0x142c, 0x28c5134, 0xffffffff},
{0x1474, 0x00000000, 0xffffffff},
{0x147c, 0x0000d771, 0xffffffff},
{0x1494, 0x00030000, 0xffffffff},
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff},
{0x14cc, 0xbd09000d, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff},
{0x150c, 0xffffffe5, 0xffffffff},
{0x1514, 0x00000000, 0xffffffff},
{0x151c, 0x00000000, 0xffffffff},
{0x1538, 0x00000b0b, 0xffffffff},
{0x153c, 0x00000c0c, 0xffffffff},
{0x15d0, 0x00000670, 0xffffffff},
{0x15d4, 0x00000046, 0xffffffff},
{0x15d8, 0x00000010, 0xffffffff},
{0x15dc, 0x00000000, 0xffffffff},
{0x15e0, 0x00000023, 0xffffffff},
{0x15e4, 0x00203c18, 0xffffffff},
{0x15ec, 0xf8000019, 0xffffffff},
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_667[] = {
/* parameters for 667MHZ */
/* DDR SDRAM Configuration Register */
{0x1400, 0x7b00ca28, 0xffffffff},
/* Dunit Control Low Register - kw28 bit12 low (disable CLK1) */
{0x1404, 0x36301820, 0xffffffff},
/* DDR SDRAM Timing (Low) Register */
{0x1408, 0x43149997, 0xffffffff},
/* DDR SDRAM Timing (High) Register */
{0x140c, 0x38411bc7, 0xffffffff},
/* DDR SDRAM Address Control Register */
{0x1410, 0x14330000, 0xffffffff},
/* DDR SDRAM Open Pages Control Register */
{0x1414, 0x00000700, 0xffffffff},
/* Dunit Control High Register (2 :1 - bits 15:12 = 0xd) */
{0x1424, 0x0060f3ff, 0xffffffff},
/* Dunit Control High Register */
{0x1428, 0x000f8830, 0xffffffff},
/* Dunit Control High Register (2:1 - bit 29 = '1') */
{0x142c, 0x28c50f8, 0xffffffff},
{0x147c, 0x0000c671, 0xffffffff},
/* DDR SDRAM ODT Control (Low) Register */
{0x1494, 0x00030000, 0xffffffff},
/* DDR SDRAM ODT Control (High) Register, will be configured at WL */
{0x1498, 0x00000000, 0xffffffff},
/* DDR Dunit ODT Control Register */
{0x149c, 0x00000300, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff}, /* */
{0x14cc, 0xbd09000d, 0xffffffff}, /* */
{0x1474, 0x00000000, 0xffffffff},
/* Read Data Sample Delays Register */
{0x1538, 0x00000009, 0xffffffff},
/* Read Data Ready Delay Register */
{0x153c, 0x0000000c, 0xffffffff},
{0x1504, 0xfffffff1, 0xffffffff}, /* */
{0x150c, 0xffffffe5, 0xffffffff}, /* */
{0x1514, 0x00000000, 0xffffffff}, /* */
{0x151c, 0x0, 0xffffffff}, /* */
{0x15d0, 0x00000650, 0xffffffff}, /* MR0 */
{0x15d4, 0x00000046, 0xffffffff}, /* MR1 */
{0x15d8, 0x00000010, 0xffffffff}, /* MR2 */
{0x15dc, 0x00000000, 0xffffffff}, /* MR3 */
{0x15e0, 0x23, 0xffffffff}, /* */
{0x15e4, 0x00203c18, 0xffffffff}, /* ZQC Configuration Register */
{0x15ec, 0xf8000019, 0xffffffff}, /* DDR PHY */
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
static struct reg_data ddr3_a38x_533[] = {
/* parameters for 533MHZ */
/* DDR SDRAM Configuration Register */
{0x1400, 0x7b00d040, 0xffffffff},
/* Dunit Control Low Register - kw28 bit12 low (disable CLK1) */
{0x1404, 0x36301820, 0xffffffff},
/* DDR SDRAM Timing (Low) Register */
{0x1408, 0x33137772, 0xffffffff},
/* DDR SDRAM Timing (High) Register */
{0x140c, 0x3841199f, 0xffffffff},
/* DDR SDRAM Address Control Register */
{0x1410, 0x10330000, 0xffffffff},
/* DDR SDRAM Open Pages Control Register */
{0x1414, 0x00000700, 0xffffffff},
/* Dunit Control High Register (2 :1 - bits 15:12 = 0xd) */
{0x1424, 0x0060f3ff, 0xffffffff},
/* Dunit Control High Register */
{0x1428, 0x000d6720, 0xffffffff},
/* Dunit Control High Register (2:1 - bit 29 = '1') */
{0x142c, 0x028c50c3, 0xffffffff},
{0x147c, 0x0000b571, 0xffffffff},
/* DDR SDRAM ODT Control (Low) Register */
{0x1494, 0x00030000, 0xffffffff},
/* DDR SDRAM ODT Control (High) Register, will be configured at WL */
{0x1498, 0x00000000, 0xffffffff},
/* DDR Dunit ODT Control Register */
{0x149c, 0x00000003, 0xffffffff},
{0x14a8, 0x00000000, 0xffffffff}, /* */
{0x14cc, 0xbd09000d, 0xffffffff}, /* */
{0x1474, 0x00000000, 0xffffffff},
/* Read Data Sample Delays Register */
{0x1538, 0x00000707, 0xffffffff},
/* Read Data Ready Delay Register */
{0x153c, 0x00000707, 0xffffffff},
{0x1504, 0xffffffe1, 0xffffffff}, /* */
{0x150c, 0xffffffe5, 0xffffffff}, /* */
{0x1514, 0x00000000, 0xffffffff}, /* */
{0x151c, 0x00000000, 0xffffffff}, /* */
{0x15d0, 0x00000630, 0xffffffff}, /* MR0 */
{0x15d4, 0x00000046, 0xffffffff}, /* MR1 */
{0x15d8, 0x00000008, 0xffffffff}, /* MR2 */
{0x15dc, 0x00000000, 0xffffffff}, /* MR3 */
{0x15e0, 0x00000023, 0xffffffff}, /* */
{0x15e4, 0x00203c18, 0xffffffff}, /* ZQC Configuration Register */
{0x15ec, 0xf8000019, 0xffffffff}, /* DDR PHY */
{0x16a0, 0xcc000006, 0xffffffff}, /* Clock Delay */
{0xe4124, 0x08008073, 0xffffffff}, /* AVS BG default */
{0, 0, 0}
};
#endif /* CONFIG_CUSTOMER_BOARD_SUPPORT */
#endif /* SUPPORT_STATIC_DUNIT_CONFIG */
#endif /* _DDR3_A38X_MC_STATIC_H */

View File

@ -0,0 +1,22 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_A38X_TOPOLOGY_H
#define _DDR3_A38X_TOPOLOGY_H
#include "ddr_topology_def.h"
/* Bus mask variants */
#define BUS_MASK_32BIT 0xf
#define BUS_MASK_32BIT_ECC 0x1f
#define BUS_MASK_16BIT 0x3
#define BUS_MASK_16BIT_ECC 0x13
#define BUS_MASK_16BIT_ECC_PUP3 0xb
#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING
#endif /* _DDR3_A38X_TOPOLOGY_H */

View File

@ -0,0 +1,40 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/*
* Name: ddr3_silicon_init
* Desc: initialize silicon parameters
* Args:
* Notes:
* Returns: required value
*/
int ddr3_silicon_init(void)
{
int status;
static int init_done;
if (init_done == 1)
return MV_OK;
status = ddr3_tip_init_a38x(0, 0);
if (MV_OK != status) {
printf("DDR3 A38x silicon init - FAILED 0x%x\n", status);
return status;
}
init_done = 1;
return MV_OK;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,148 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
int ddr3_if_ecc_enabled(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))
return 1;
else
return 0;
}
int ddr3_pre_algo_config(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
/* Set Bus3 ECC training mode */
if (DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask)) {
/* Set Bus3 ECC MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_PINS_MUX, 0x100, 0x100));
}
/* Set regular ECC training mode (bus4 and bus 3) */
if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
(DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
/* Enable ECC Write MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x100, 0x100));
/* General ECC enable */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
REG_SDRAM_CONFIG_ADDR, 0x40000, 0x40000));
/* Disable Read Data ECC MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x0, 0x2));
}
return MV_OK;
}
int ddr3_post_algo_config(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
int status;
status = ddr3_post_run_alg();
if (MV_OK != status) {
printf("DDR3 Post Run Alg - FAILED 0x%x\n", status);
return status;
}
/* Un_set ECC training mode */
if ((DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask)) ||
(DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask))) {
/* Disable ECC Write MUX */
CHECK_STATUS(ddr3_tip_if_write
(0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
TRAINING_SW_2_REG, 0x0, 0x100));
/* General ECC and Bus3 ECC MUX remains enabled */
}
return MV_OK;
}
int ddr3_hws_hw_training(void)
{
enum hws_algo_type algo_mode = ALGO_TYPE_DYNAMIC;
int status;
struct init_cntr_param init_param;
status = ddr3_silicon_pre_init();
if (MV_OK != status) {
printf("DDR3 Pre silicon Config - FAILED 0x%x\n", status);
return status;
}
init_param.do_mrs_phy = 1;
#if defined(CONFIG_ARMADA_38X) || defined(CONFIG_ARMADA_39X)
init_param.is_ctrl64_bit = 0;
#else
init_param.is_ctrl64_bit = 1;
#endif
#if defined(CONFIG_ALLEYCAT3) || defined(CONFIG_ARMADA_38X) || \
defined(CONFIG_ARMADA_39X)
init_param.init_phy = 1;
#else
init_param.init_phy = 0;
#endif
init_param.msys_init = 1;
status = hws_ddr3_tip_init_controller(0, &init_param);
if (MV_OK != status) {
printf("DDR3 init controller - FAILED 0x%x\n", status);
return status;
}
status = ddr3_silicon_post_init();
if (MV_OK != status) {
printf("DDR3 Post Init - FAILED 0x%x\n", status);
return status;
}
status = ddr3_pre_algo_config();
if (MV_OK != status) {
printf("DDR3 Pre Algo Config - FAILED 0x%x\n", status);
return status;
}
/* run algorithm in order to configure the PHY */
status = hws_ddr3_tip_run_alg(0, algo_mode);
if (MV_OK != status) {
printf("DDR3 run algorithm - FAILED 0x%x\n", status);
return status;
}
status = ddr3_post_algo_config();
if (MV_OK != status) {
printf("DDR3 Post Algo Config - FAILED 0x%x\n", status);
return status;
}
return MV_OK;
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_HWS_HW_TRAINING_H
#define _DDR3_HWS_HW_TRAINING_H
/* struct used for DLB configuration array */
struct dlb_config {
u32 reg_addr;
u32 reg_data;
};
/* Topology update structure */
struct topology_update_info {
int update_ecc;
u8 ecc;
int update_width;
u8 width;
int update_ecc_pup3_mode;
u8 ecc_pup_mode_offset;
};
/* Topology update defines */
#define TOPOLOGY_UPDATE_WIDTH_16BIT 1
#define TOPOLOGY_UPDATE_WIDTH_32BIT 0
#define TOPOLOGY_UPDATE_WIDTH_32BIT_MASK 0xf
#define TOPOLOGY_UPDATE_WIDTH_16BIT_MASK 0x3
#define TOPOLOGY_UPDATE_ECC_ON 1
#define TOPOLOGY_UPDATE_ECC_OFF 0
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP4 4
#define TOPOLOGY_UPDATE_ECC_OFFSET_PUP3 3
/*
* 1. The L2 filter should be set in the binary header to 0xd0000000,
* to avoid a conflict with the internal register I/O.
* 2. U-Boot moves the internal registers base to 0xf1000000,
* and should then update the L2 filter accordingly to 0xf0000000 (3.75 GB)
*/
/* temporary limit l2 filter to 3GiB (LSP issue) */
#define L2_FILTER_FOR_MAX_MEMORY_SIZE 0xc0000000
#define ADDRESS_FILTERING_END_REGISTER 0x8c04
#define SUB_VERSION 0
#endif /* _DDR3_HWS_HW_TRAINING_H */

View File

@ -0,0 +1,467 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_HWS_HW_TRAINING_DEF_H
#define _DDR3_HWS_HW_TRAINING_DEF_H
#define SAR_DDR3_FREQ_MASK 0xfe00000
#define SAR_CPU_FAB_GET(cpu, fab) (((cpu & 0x7) << 21) | \
((fab & 0xf) << 24))
#define MAX_CS 4
#define MIN_DIMM_ADDR 0x50
#define FAR_END_DIMM_ADDR 0x50
#define MAX_DIMM_ADDR 0x60
#define SDRAM_CS_SIZE 0xfffffff
#define SDRAM_CS_BASE 0x0
#define SDRAM_DIMM_SIZE 0x80000000
#define CPU_CONFIGURATION_REG(id) (0x21800 + (id * 0x100))
#define CPU_MRVL_ID_OFFSET 0x10
#define SAR1_CPU_CORE_MASK 0x00000018
#define SAR1_CPU_CORE_OFFSET 3
#define NEW_FABRIC_TWSI_ADDR 0x4e
#ifdef DB_784MP_GP
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4e
#else
#define BUS_WIDTH_ECC_TWSI_ADDR 0x4f
#endif
#define MV_MAX_DDR3_STATIC_SIZE 50
#define MV_DDR3_MODES_NUMBER 30
#define RESUME_RL_PATTERNS_ADDR 0xfe0000
#define RESUME_RL_PATTERNS_SIZE 0x100
#define RESUME_TRAINING_VALUES_ADDR (RESUME_RL_PATTERNS_ADDR + \
RESUME_RL_PATTERNS_SIZE)
#define RESUME_TRAINING_VALUES_MAX 0xcd0
#define BOOT_INFO_ADDR (RESUME_RL_PATTERNS_ADDR + 0x1000)
#define CHECKSUM_RESULT_ADDR (BOOT_INFO_ADDR + 0x1000)
#define NUM_OF_REGISTER_ADDR (CHECKSUM_RESULT_ADDR + 4)
#define SUSPEND_MAGIC_WORD 0xdeadb002
#define REGISTER_LIST_END 0xffffffff
/* MISC */
#define INTER_REGS_BASE SOC_REGS_PHY_BASE
/* DDR */
#define REG_SDRAM_CONFIG_ADDR 0x1400
#define REG_SDRAM_CONFIG_MASK 0x9fffffff
#define REG_SDRAM_CONFIG_RFRS_MASK 0x3fff
#define REG_SDRAM_CONFIG_WIDTH_OFFS 15
#define REG_SDRAM_CONFIG_REGDIMM_OFFS 17
#define REG_SDRAM_CONFIG_ECC_OFFS 18
#define REG_SDRAM_CONFIG_IERR_OFFS 19
#define REG_SDRAM_CONFIG_PUPRSTDIV_OFFS 28
#define REG_SDRAM_CONFIG_RSTRD_OFFS 30
#define REG_SDRAM_PINS_MUX 0x19d4
#define REG_DUNIT_CTRL_LOW_ADDR 0x1404
#define REG_DUNIT_CTRL_LOW_2T_OFFS 3
#define REG_DUNIT_CTRL_LOW_2T_MASK 0x3
#define REG_DUNIT_CTRL_LOW_DPDE_OFFS 14
#define REG_SDRAM_TIMING_LOW_ADDR 0x1408
#define REG_SDRAM_TIMING_HIGH_ADDR 0x140c
#define REG_SDRAM_TIMING_H_R2R_OFFS 7
#define REG_SDRAM_TIMING_H_R2R_MASK 0x3
#define REG_SDRAM_TIMING_H_R2W_W2R_OFFS 9
#define REG_SDRAM_TIMING_H_R2W_W2R_MASK 0x3
#define REG_SDRAM_TIMING_H_W2W_OFFS 11
#define REG_SDRAM_TIMING_H_W2W_MASK 0x1f
#define REG_SDRAM_TIMING_H_R2R_H_OFFS 19
#define REG_SDRAM_TIMING_H_R2R_H_MASK 0x7
#define REG_SDRAM_TIMING_H_R2W_W2R_H_OFFS 22
#define REG_SDRAM_TIMING_H_R2W_W2R_H_MASK 0x7
#define REG_SDRAM_ADDRESS_CTRL_ADDR 0x1410
#define REG_SDRAM_ADDRESS_SIZE_OFFS 2
#define REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS 18
#define REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS 4
#define REG_SDRAM_OPEN_PAGES_ADDR 0x1414
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_SDRAM_OPERATION_ADDR 0x1418
#define REG_SDRAM_OPERATION_CWA_DELAY_SEL_OFFS 24
#define REG_SDRAM_OPERATION_CWA_DATA_OFFS 20
#define REG_SDRAM_OPERATION_CWA_DATA_MASK 0xf
#define REG_SDRAM_OPERATION_CWA_RC_OFFS 16
#define REG_SDRAM_OPERATION_CWA_RC_MASK 0xf
#define REG_SDRAM_OPERATION_CMD_MR0 0xf03
#define REG_SDRAM_OPERATION_CMD_MR1 0xf04
#define REG_SDRAM_OPERATION_CMD_MR2 0xf08
#define REG_SDRAM_OPERATION_CMD_MR3 0xf09
#define REG_SDRAM_OPERATION_CMD_RFRS 0xf02
#define REG_SDRAM_OPERATION_CMD_CWA 0xf0e
#define REG_SDRAM_OPERATION_CMD_RFRS_DONE 0xf
#define REG_SDRAM_OPERATION_CMD_MASK 0xf
#define REG_SDRAM_OPERATION_CS_OFFS 8
#define REG_OUDDR3_TIMING_ADDR 0x142c
#define REG_SDRAM_MODE_ADDR 0x141c
#define REG_SDRAM_EXT_MODE_ADDR 0x1420
#define REG_DDR_CONT_HIGH_ADDR 0x1424
#define REG_ODT_TIME_LOW_ADDR 0x1428
#define REG_ODT_ON_CTL_RD_OFFS 12
#define REG_ODT_OFF_CTL_RD_OFFS 16
#define REG_SDRAM_ERROR_ADDR 0x1454
#define REG_SDRAM_AUTO_PWR_SAVE_ADDR 0x1474
#define REG_ODT_TIME_HIGH_ADDR 0x147c
#define REG_SDRAM_INIT_CTRL_ADDR 0x1480
#define REG_SDRAM_INIT_CTRL_OFFS 0
#define REG_SDRAM_INIT_CKE_ASSERT_OFFS 2
#define REG_SDRAM_INIT_RESET_DEASSERT_OFFS 3
#define REG_SDRAM_INIT_RESET_MASK_OFFS 1
#define REG_SDRAM_ODT_CTRL_LOW_ADDR 0x1494
#define REG_SDRAM_ODT_CTRL_HIGH_ADDR 0x1498
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_MASK 0x0
#define REG_SDRAM_ODT_CTRL_HIGH_OVRD_ENA 0x3
#define REG_DUNIT_ODT_CTRL_ADDR 0x149c
#define REG_DUNIT_ODT_CTRL_OVRD_OFFS 8
#define REG_DUNIT_ODT_CTRL_OVRD_VAL_OFFS 9
#define REG_DRAM_FIFO_CTRL_ADDR 0x14a0
#define REG_DRAM_AXI_CTRL_ADDR 0x14a8
#define REG_DRAM_AXI_CTRL_AXIDATABUSWIDTH_OFFS 0
#define REG_METAL_MASK_ADDR 0x14b0
#define REG_METAL_MASK_MASK 0xdfffffff
#define REG_METAL_MASK_RETRY_OFFS 0
#define REG_DRAM_ADDR_CTRL_DRIVE_STRENGTH_ADDR 0x14c0
#define REG_DRAM_DATA_DQS_DRIVE_STRENGTH_ADDR 0x14c4
#define REG_DRAM_VER_CAL_MACHINE_CTRL_ADDR 0x14c8
#define REG_DRAM_MAIN_PADS_CAL_ADDR 0x14cc
#define REG_DRAM_HOR_CAL_MACHINE_CTRL_ADDR 0x17c8
#define REG_CS_SIZE_SCRATCH_ADDR 0x1504
#define REG_DYNAMIC_POWER_SAVE_ADDR 0x1520
#define REG_DDR_IO_ADDR 0x1524
#define REG_DDR_IO_CLK_RATIO_OFFS 15
#define REG_DFS_ADDR 0x1528
#define REG_DFS_DLLNEXTSTATE_OFFS 0
#define REG_DFS_BLOCK_OFFS 1
#define REG_DFS_SR_OFFS 2
#define REG_DFS_ATSR_OFFS 3
#define REG_DFS_RECONF_OFFS 4
#define REG_DFS_CL_NEXT_STATE_OFFS 8
#define REG_DFS_CL_NEXT_STATE_MASK 0xf
#define REG_DFS_CWL_NEXT_STATE_OFFS 12
#define REG_DFS_CWL_NEXT_STATE_MASK 0x7
#define REG_READ_DATA_SAMPLE_DELAYS_ADDR 0x1538
#define REG_READ_DATA_SAMPLE_DELAYS_MASK 0x1f
#define REG_READ_DATA_SAMPLE_DELAYS_OFFS 8
#define REG_READ_DATA_READY_DELAYS_ADDR 0x153c
#define REG_READ_DATA_READY_DELAYS_MASK 0x1f
#define REG_READ_DATA_READY_DELAYS_OFFS 8
#define START_BURST_IN_ADDR 1
#define REG_DRAM_TRAINING_SHADOW_ADDR 0x18488
#define REG_DRAM_TRAINING_ADDR 0x15b0
#define REG_DRAM_TRAINING_LOW_FREQ_OFFS 0
#define REG_DRAM_TRAINING_PATTERNS_OFFS 4
#define REG_DRAM_TRAINING_MED_FREQ_OFFS 2
#define REG_DRAM_TRAINING_WL_OFFS 3
#define REG_DRAM_TRAINING_RL_OFFS 6
#define REG_DRAM_TRAINING_DQS_RX_OFFS 15
#define REG_DRAM_TRAINING_DQS_TX_OFFS 16
#define REG_DRAM_TRAINING_CS_OFFS 20
#define REG_DRAM_TRAINING_RETEST_OFFS 24
#define REG_DRAM_TRAINING_DFS_FREQ_OFFS 27
#define REG_DRAM_TRAINING_DFS_REQ_OFFS 29
#define REG_DRAM_TRAINING_ERROR_OFFS 30
#define REG_DRAM_TRAINING_AUTO_OFFS 31
#define REG_DRAM_TRAINING_RETEST_PAR 0x3
#define REG_DRAM_TRAINING_RETEST_MASK 0xf8ffffff
#define REG_DRAM_TRAINING_CS_MASK 0xff0fffff
#define REG_DRAM_TRAINING_PATTERNS_MASK 0xff0f0000
#define REG_DRAM_TRAINING_1_ADDR 0x15b4
#define REG_DRAM_TRAINING_1_TRNBPOINT_OFFS 16
#define REG_DRAM_TRAINING_2_ADDR 0x15b8
#define REG_DRAM_TRAINING_2_OVERRUN_OFFS 17
#define REG_DRAM_TRAINING_2_FIFO_RST_OFFS 4
#define REG_DRAM_TRAINING_2_RL_MODE_OFFS 3
#define REG_DRAM_TRAINING_2_WL_MODE_OFFS 2
#define REG_DRAM_TRAINING_2_ECC_MUX_OFFS 1
#define REG_DRAM_TRAINING_2_SW_OVRD_OFFS 0
#define REG_DRAM_TRAINING_PATTERN_BASE_ADDR 0x15bc
#define REG_DRAM_TRAINING_PATTERN_BASE_OFFS 3
#define REG_TRAINING_DEBUG_2_ADDR 0x15c4
#define REG_TRAINING_DEBUG_2_OFFS 16
#define REG_TRAINING_DEBUG_2_MASK 0x3
#define REG_TRAINING_DEBUG_3_ADDR 0x15c8
#define REG_TRAINING_DEBUG_3_OFFS 3
#define REG_TRAINING_DEBUG_3_MASK 0x7
#define MR_CS_ADDR_OFFS 4
#define REG_DDR3_MR0_ADDR 0x15d0
#define REG_DDR3_MR0_CS_ADDR 0x1870
#define REG_DDR3_MR0_CL_MASK 0x74
#define REG_DDR3_MR0_CL_OFFS 2
#define REG_DDR3_MR0_CL_HIGH_OFFS 3
#define CL_MASK 0xf
#define REG_DDR3_MR1_ADDR 0x15d4
#define REG_DDR3_MR1_CS_ADDR 0x1874
#define REG_DDR3_MR1_RTT_MASK 0xfffffdbb
#define REG_DDR3_MR1_DLL_ENA_OFFS 0
#define REG_DDR3_MR1_RTT_DISABLED 0x0
#define REG_DDR3_MR1_RTT_RZQ2 0x40
#define REG_DDR3_MR1_RTT_RZQ4 0x2
#define REG_DDR3_MR1_RTT_RZQ6 0x42
#define REG_DDR3_MR1_RTT_RZQ8 0x202
#define REG_DDR3_MR1_RTT_RZQ12 0x4
/* WL-disabled, OB-enabled */
#define REG_DDR3_MR1_OUTBUF_WL_MASK 0xffffef7f
/* Output Buffer Disabled */
#define REG_DDR3_MR1_OUTBUF_DIS_OFFS 12
#define REG_DDR3_MR1_WL_ENA_OFFS 7
#define REG_DDR3_MR1_WL_ENA 0x80 /* WL Enabled */
#define REG_DDR3_MR1_ODT_MASK 0xfffffdbb
#define REG_DDR3_MR2_ADDR 0x15d8
#define REG_DDR3_MR2_CS_ADDR 0x1878
#define REG_DDR3_MR2_CWL_OFFS 3
#define REG_DDR3_MR2_CWL_MASK 0x7
#define REG_DDR3_MR2_ODT_MASK 0xfffff9ff
#define REG_DDR3_MR3_ADDR 0x15dc
#define REG_DDR3_MR3_CS_ADDR 0x187c
#define REG_DDR3_RANK_CTRL_ADDR 0x15e0
#define REG_DDR3_RANK_CTRL_CS_ENA_MASK 0xf
#define REG_DDR3_RANK_CTRL_MIRROR_OFFS 4
#define REG_ZQC_CONF_ADDR 0x15e4
#define REG_DRAM_PHY_CONFIG_ADDR 0x15ec
#define REG_DRAM_PHY_CONFIG_MASK 0x3fffffff
#define REG_ODPG_CNTRL_ADDR 0x1600
#define REG_ODPG_CNTRL_OFFS 21
#define REG_PHY_LOCK_MASK_ADDR 0x1670
#define REG_PHY_LOCK_MASK_MASK 0xfffff000
#define REG_PHY_LOCK_STATUS_ADDR 0x1674
#define REG_PHY_LOCK_STATUS_LOCK_OFFS 9
#define REG_PHY_LOCK_STATUS_LOCK_MASK 0xfff
#define REG_PHY_LOCK_APLL_ADLL_STATUS_MASK 0x7ff
#define REG_PHY_REGISTRY_FILE_ACCESS_ADDR 0x16a0
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_WR 0xc0000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_RD 0x80000000
#define REG_PHY_REGISTRY_FILE_ACCESS_OP_DONE 0x80000000
#define REG_PHY_BC_OFFS 27
#define REG_PHY_CNTRL_OFFS 26
#define REG_PHY_CS_OFFS 16
#define REG_PHY_DQS_REF_DLY_OFFS 10
#define REG_PHY_PHASE_OFFS 8
#define REG_PHY_PUP_OFFS 22
#define REG_TRAINING_WL_ADDR 0x16ac
#define REG_TRAINING_WL_CS_MASK 0xfffffffc
#define REG_TRAINING_WL_UPD_OFFS 2
#define REG_TRAINING_WL_CS_DONE_OFFS 3
#define REG_TRAINING_WL_RATIO_MASK 0xffffff0f
#define REG_TRAINING_WL_1TO1 0x50
#define REG_TRAINING_WL_2TO1 0x10
#define REG_TRAINING_WL_DELAYEXP_MASK 0x20000000
#define REG_TRAINING_WL_RESULTS_MASK 0x000001ff
#define REG_TRAINING_WL_RESULTS_OFFS 20
#define REG_REGISTERED_DRAM_CTRL_ADDR 0x16d0
#define REG_REGISTERED_DRAM_CTRL_SR_FLOAT_OFFS 15
#define REG_REGISTERED_DRAM_CTRL_PARITY_MASK 0x3f
/* DLB */
#define REG_STATIC_DRAM_DLB_CONTROL 0x1700
#define DLB_BUS_OPTIMIZATION_WEIGHTS_REG 0x1704
#define DLB_AGING_REGISTER 0x1708
#define DLB_EVICTION_CONTROL_REG 0x170c
#define DLB_EVICTION_TIMERS_REGISTER_REG 0x1710
#define DLB_USER_COMMAND_REG 0x1714
#define DLB_BUS_WEIGHTS_DIFF_CS 0x1770
#define DLB_BUS_WEIGHTS_DIFF_BG 0x1774
#define DLB_BUS_WEIGHTS_SAME_BG 0x1778
#define DLB_BUS_WEIGHTS_RD_WR 0x177c
#define DLB_BUS_WEIGHTS_ATTR_SYS_PRIO 0x1780
#define DLB_MAIN_QUEUE_MAP 0x1784
#define DLB_LINE_SPLIT 0x1788
#define DLB_ENABLE 0x1
#define DLB_WRITE_COALESING (0x1 << 2)
#define DLB_AXI_PREFETCH_EN (0x1 << 3)
#define DLB_MBUS_PREFETCH_EN (0x1 << 4)
#define PREFETCH_N_LN_SZ_TR (0x1 << 6)
#define DLB_INTERJECTION_ENABLE (0x1 << 3)
/* CPU */
#define REG_BOOTROM_ROUTINE_ADDR 0x182d0
#define REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS 12
#define REG_DRAM_INIT_CTRL_STATUS_ADDR 0x18488
#define REG_DRAM_INIT_CTRL_TRN_CLK_OFFS 16
#define REG_CPU_DIV_CLK_CTRL_0_NEW_RATIO 0x000200ff
#define REG_DRAM_INIT_CTRL_STATUS_2_ADDR 0x1488
#define REG_CPU_DIV_CLK_CTRL_0_ADDR 0x18700
#define REG_CPU_DIV_CLK_CTRL_1_ADDR 0x18704
#define REG_CPU_DIV_CLK_CTRL_2_ADDR 0x18708
#define REG_CPU_DIV_CLK_CTRL_3_ADDR 0x1870c
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_MASK 0xffffc0ff
#define REG_CPU_DIV_CLK_CTRL_3_FREQ_OFFS 8
#define REG_CPU_DIV_CLK_CTRL_4_ADDR 0x18710
#define REG_CPU_DIV_CLK_STATUS_0_ADDR 0x18718
#define REG_CPU_DIV_CLK_ALL_STABLE_OFFS 8
#define REG_CPU_PLL_CTRL_0_ADDR 0x1871c
#define REG_CPU_PLL_STATUS_0_ADDR 0x18724
#define REG_CORE_DIV_CLK_CTRL_ADDR 0x18740
#define REG_CORE_DIV_CLK_STATUS_ADDR 0x18744
#define REG_DDRPHY_APLL_CTRL_ADDR 0x18780
#define REG_DDRPHY_APLL_CTRL_2_ADDR 0x18784
#define REG_SFABRIC_CLK_CTRL_ADDR 0x20858
#define REG_SFABRIC_CLK_CTRL_SMPL_OFFS 8
/* DRAM Windows */
#define REG_XBAR_WIN_19_CTRL_ADDR 0x200e8
#define REG_XBAR_WIN_4_CTRL_ADDR 0x20040
#define REG_XBAR_WIN_4_BASE_ADDR 0x20044
#define REG_XBAR_WIN_4_REMAP_ADDR 0x20048
#define REG_FASTPATH_WIN_0_CTRL_ADDR 0x20184
#define REG_XBAR_WIN_7_REMAP_ADDR 0x20078
/* SRAM */
#define REG_CDI_CONFIG_ADDR 0x20220
#define REG_SRAM_WINDOW_0_ADDR 0x20240
#define REG_SRAM_WINDOW_0_ENA_OFFS 0
#define REG_SRAM_WINDOW_1_ADDR 0x20244
#define REG_SRAM_L2_ENA_ADDR 0x8500
#define REG_SRAM_CLEAN_BY_WAY_ADDR 0x87bc
/* Timers */
#define REG_TIMERS_CTRL_ADDR 0x20300
#define REG_TIMERS_EVENTS_ADDR 0x20304
#define REG_TIMER0_VALUE_ADDR 0x20314
#define REG_TIMER1_VALUE_ADDR 0x2031c
#define REG_TIMER0_ENABLE_MASK 0x1
#define MV_BOARD_REFCLK_25MHZ 25000000
#define CNTMR_RELOAD_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x10 + (tmr * 8))
#define CNTMR_VAL_REG(tmr) (REG_TIMERS_CTRL_ADDR + 0x14 + (tmr * 8))
#define CNTMR_CTRL_REG(tmr) (REG_TIMERS_CTRL_ADDR)
#define CTCR_ARM_TIMER_EN_OFFS(timer) (timer * 2)
#define CTCR_ARM_TIMER_EN_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_EN(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_OFFS(timer) (1 + (timer * 2))
#define CTCR_ARM_TIMER_AUTO_MASK(timer) (1 << CTCR_ARM_TIMER_EN_OFFS(timer))
#define CTCR_ARM_TIMER_AUTO_EN(timer) (1 << CTCR_ARM_TIMER_AUTO_OFFS(timer))
/* PMU */
#define REG_PMU_I_F_CTRL_ADDR 0x1c090
#define REG_PMU_DUNIT_BLK_OFFS 16
#define REG_PMU_DUNIT_RFRS_OFFS 20
#define REG_PMU_DUNIT_ACK_OFFS 24
/* MBUS */
#define MBUS_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x420)
#define FABRIC_UNITS_PRIORITY_CONTROL_REG (MBUS_REGS_OFFSET + 0x424)
#define MBUS_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x428)
#define FABRIC_UNITS_PREFETCH_CONTROL_REG (MBUS_REGS_OFFSET + 0x42c)
#define REG_PM_STAT_MASK_ADDR 0x2210c
#define REG_PM_STAT_MASK_CPU0_IDLE_MASK_OFFS 16
#define REG_PM_EVENT_STAT_MASK_ADDR 0x22120
#define REG_PM_EVENT_STAT_MASK_DFS_DONE_OFFS 17
#define REG_PM_CTRL_CONFIG_ADDR 0x22104
#define REG_PM_CTRL_CONFIG_DFS_REQ_OFFS 18
#define REG_FABRIC_LOCAL_IRQ_MASK_ADDR 0x218c4
#define REG_FABRIC_LOCAL_IRQ_PMU_MASK_OFFS 18
/* Controller revision info */
#define PCI_CLASS_CODE_AND_REVISION_ID 0x008
#define PCCRIR_REVID_OFFS 0 /* Revision ID */
#define PCCRIR_REVID_MASK (0xff << PCCRIR_REVID_OFFS)
/* Power Management Clock Gating Control Register */
#define POWER_MNG_CTRL_REG 0x18220
#define PEX_DEVICE_AND_VENDOR_ID 0x000
#define PEX_CFG_DIRECT_ACCESS(if, reg) (PEX_IF_REGS_BASE(if) + (reg))
#define PMC_PEXSTOPCLOCK_OFFS(p) ((p) < 8 ? (5 + (p)) : (18 + (p)))
#define PMC_PEXSTOPCLOCK_MASK(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_EN(p) (1 << PMC_PEXSTOPCLOCK_OFFS(p))
#define PMC_PEXSTOPCLOCK_STOP(p) (0 << PMC_PEXSTOPCLOCK_OFFS(p))
/* TWSI */
#define TWSI_DATA_ADDR_MASK 0x7
#define TWSI_DATA_ADDR_OFFS 1
/* General */
#define MAX_CS 4
/* Frequencies */
#define FAB_OPT 21
#define CLK_CPU 12
#define CLK_VCO (2 * CLK_CPU)
#define CLK_DDR 12
/* CPU Frequencies: */
#define CLK_CPU_1000 0
#define CLK_CPU_1066 1
#define CLK_CPU_1200 2
#define CLK_CPU_1333 3
#define CLK_CPU_1500 4
#define CLK_CPU_1666 5
#define CLK_CPU_1800 6
#define CLK_CPU_2000 7
#define CLK_CPU_600 8
#define CLK_CPU_667 9
#define CLK_CPU_800 0xa
/* Extra Cpu Frequencies: */
#define CLK_CPU_1600 11
#define CLK_CPU_2133 12
#define CLK_CPU_2200 13
#define CLK_CPU_2400 14
#define SAR1_CPU_CORE_MASK 0x00000018
#define SAR1_CPU_CORE_OFFSET 3
#endif /* _DDR3_HWS_HW_TRAINING_DEF_H */

View File

@ -0,0 +1,17 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_HWS_SIL_TRAINING_H
#define _DDR3_HWS_SIL_TRAINING_H
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_prv_if.h"
int ddr3_silicon_pre_config(void);
int ddr3_silicon_init(void);
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);
#endif /* _DDR3_HWS_SIL_TRAINING_H */

View File

@ -0,0 +1,852 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "../../../../arch/arm/mach-mvebu/serdes/a38x/sys_env_lib.h"
static struct dlb_config ddr3_dlb_config_table[] = {
{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
{DLB_AGING_REGISTER, 0x0f7f007f},
{DLB_EVICTION_CONTROL_REG, 0x0000129f},
{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
{DLB_MAIN_QUEUE_MAP, 0x00000543},
{DLB_LINE_SPLIT, 0x00000000},
{DLB_USER_COMMAND_REG, 0x00000000},
{0x0, 0x0}
};
static struct dlb_config ddr3_dlb_config_table_a0[] = {
{REG_STATIC_DRAM_DLB_CONTROL, 0x2000005c},
{DLB_BUS_OPTIMIZATION_WEIGHTS_REG, 0x00880000},
{DLB_AGING_REGISTER, 0x0f7f007f},
{DLB_EVICTION_CONTROL_REG, 0x0000129f},
{DLB_EVICTION_TIMERS_REGISTER_REG, 0x00ff0000},
{DLB_BUS_WEIGHTS_DIFF_CS, 0x04030802},
{DLB_BUS_WEIGHTS_DIFF_BG, 0x00000a02},
{DLB_BUS_WEIGHTS_SAME_BG, 0x09000a01},
{DLB_BUS_WEIGHTS_RD_WR, 0x00020005},
{DLB_BUS_WEIGHTS_ATTR_SYS_PRIO, 0x00060f10},
{DLB_MAIN_QUEUE_MAP, 0x00000543},
{DLB_LINE_SPLIT, 0x00000000},
{DLB_USER_COMMAND_REG, 0x00000000},
{0x0, 0x0}
};
#if defined(CONFIG_ARMADA_38X)
struct dram_modes {
char *mode_name;
u8 cpu_freq;
u8 fab_freq;
u8 chip_id;
u8 chip_board_rev;
struct reg_data *regs;
};
struct dram_modes ddr_modes[] = {
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
/* Conf name, CPUFreq, Fab_freq, Chip ID, Chip/Board, MC regs*/
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
{"a38x_customer_0_800", DDR_FREQ_800, 0, 0x0, A38X_CUSTOMER_BOARD_ID0,
ddr3_customer_800},
{"a38x_customer_1_800", DDR_FREQ_800, 0, 0x0, A38X_CUSTOMER_BOARD_ID1,
ddr3_customer_800},
#else
{"a38x_533", DDR_FREQ_533, 0, 0x0, MARVELL_BOARD, ddr3_a38x_533},
{"a38x_667", DDR_FREQ_667, 0, 0x0, MARVELL_BOARD, ddr3_a38x_667},
{"a38x_800", DDR_FREQ_800, 0, 0x0, MARVELL_BOARD, ddr3_a38x_800},
{"a38x_933", DDR_FREQ_933, 0, 0x0, MARVELL_BOARD, ddr3_a38x_933},
#endif
#endif
};
#endif /* defined(CONFIG_ARMADA_38X) */
/* Translates topology map definitions to real memory size in bits */
u32 mem_size[] = {
ADDR_SIZE_512MB, ADDR_SIZE_1GB, ADDR_SIZE_2GB, ADDR_SIZE_4GB,
ADDR_SIZE_8GB
};
static char *ddr_type = "DDR3";
/*
* Set 1 to use dynamic DUNIT configuration,
* set 0 (supported for A380 and AC3) to configure the DUNIT with the values set by
* ddr3_tip_init_specific_reg_config
*/
u8 generic_init_controller = 1;
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
static u32 ddr3_get_static_ddr_mode(void);
#endif
static int ddr3_hws_tune_training_params(u8 dev_num);
static int ddr3_update_topology_map(struct hws_topology_map *topology_map);
/* device revision */
#define DEV_VERSION_ID_REG 0x1823c
#define REVISON_ID_OFFS 8
#define REVISON_ID_MASK 0xf00
/* A38x revisions */
#define MV_88F68XX_Z1_ID 0x0
#define MV_88F68XX_A0_ID 0x4
/* A39x revisions */
#define MV_88F69XX_Z1_ID 0x2
/*
* sys_env_device_rev_get - Get Marvell controller device revision number
*
* DESCRIPTION:
* This function returns an 8-bit value describing the device revision as
* defined in the Revision ID Register.
*
* INPUT:
* None.
*
* OUTPUT:
* None.
*
* RETURN:
* 8-bit value describing the Marvell controller revision number
*/
u8 sys_env_device_rev_get(void)
{
u32 value;
value = reg_read(DEV_VERSION_ID_REG);
return (value & (REVISON_ID_MASK)) >> REVISON_ID_OFFS;
}
/*
* sys_env_dlb_config_ptr_get
*
* DESCRIPTION: returns a pointer to the DLB configuration table
*
* INPUT: none
*
* OUTPUT: pointer to the DLB configuration table
*
* RETURN:
* returns a pointer to the DLB configuration table
*/
struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
#ifdef CONFIG_ARMADA_39X
return &ddr3_dlb_config_table_a0[0];
#else
if (sys_env_device_rev_get() == MV_88F68XX_A0_ID)
return &ddr3_dlb_config_table_a0[0];
else
return &ddr3_dlb_config_table[0];
#endif
}
/*
* sys_env_get_cs_ena_from_reg
*
* DESCRIPTION: Get bit mask of enabled CS
*
* INPUT: None
*
* OUTPUT: None
*
* RETURN:
* Bit mask of enabled CS, 1 if only CS0 enabled,
* 3 if both CS0 and CS1 enabled
*/
u32 sys_env_get_cs_ena_from_reg(void)
{
return reg_read(REG_DDR3_RANK_CTRL_ADDR) &
REG_DDR3_RANK_CTRL_CS_ENA_MASK;
}
static void ddr3_restore_and_set_final_windows(u32 *win)
{
u32 win_ctrl_reg, num_of_win_regs;
u32 cs_ena = sys_env_get_cs_ena_from_reg();
u32 ui;
win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
num_of_win_regs = 16;
/* Return XBAR windows 4-7 or 16-19 init configuration */
for (ui = 0; ui < num_of_win_regs; ui++)
reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);
printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
ddr_type);
#if defined DYNAMIC_CS_SIZE_CONFIG
if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
u32 reg, cs;
reg = 0x1fffffe1;
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs)) {
reg |= (cs << 2);
break;
}
}
/* Open fast path Window to - 0.5G */
reg_write(REG_FASTPATH_WIN_0_CTRL_ADDR, reg);
#endif
}
static int ddr3_save_and_set_training_windows(u32 *win)
{
u32 cs_ena;
u32 reg, tmp_count, cs, ui;
u32 win_ctrl_reg, win_base_reg, win_remap_reg;
u32 num_of_win_regs, win_jump_index;
win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
win_jump_index = 0x10;
num_of_win_regs = 16;
struct hws_topology_map *tm = ddr3_get_topology_map();
#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
/*
* Disable L2 filtering during DDR training
* (when Cross Bar window is open)
*/
reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif
cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;
/* Close XBAR Window 19 - Not needed */
/* {0x000200e8} - Open Mbus Window - 2G */
reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);
/* Save XBAR Windows 4-19 init configurations */
for (ui = 0; ui < num_of_win_regs; ui++)
win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);
/* Open XBAR Windows 4-7 or 16-19 for other CS */
reg = 0;
tmp_count = 0;
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs)) {
switch (cs) {
case 0:
reg = 0x0e00;
break;
case 1:
reg = 0x0d00;
break;
case 2:
reg = 0x0b00;
break;
case 3:
reg = 0x0700;
break;
}
reg |= (1 << 0);
reg |= (SDRAM_CS_SIZE & 0xffff0000);
reg_write(win_ctrl_reg + win_jump_index * tmp_count,
reg);
reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
0xffff0000);
reg_write(win_base_reg + win_jump_index * tmp_count,
reg);
if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
reg_write(win_remap_reg +
win_jump_index * tmp_count, 0);
tmp_count++;
}
}
return MV_OK;
}
/*
* Name: ddr3_init - Main DDR3 Init function
* Desc: This routine initializes the DDR3 MC and runs HW training.
* Args: None.
* Notes:
* Returns: MV_OK on success, error code otherwise.
*/
int ddr3_init(void)
{
u32 reg = 0;
u32 soc_num;
int status;
u32 win[16];
/* SoC/Board special initializations */
/* Get version from internal library */
ddr3_print_version();
/* Add sub_version string */
DEBUG_INIT_C("", SUB_VERSION, 1);
/* Switching CPU to MRVL ID */
soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
SAR1_CPU_CORE_OFFSET;
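/*
* Note: the cases below fall through intentionally, so that e.g. a
* quad-core value (0x3) sets the Marvell ID for cores 3 and 2 and then
* also for cores 1 and 0.
*/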
switch (soc_num) {
case 0x3:
reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
case 0x1:
reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
case 0x0:
reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
default:
break;
}
/*
* Set the DRAM reset mask if a GPIO indication of wakeup from suspend
* was detected, i.e. the DRAM contents will not be overwritten / reset
* when waking from suspend
*/
if (sys_env_suspend_wakeup_check() ==
SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
reg_bit_set(REG_SDRAM_INIT_CTRL_ADDR,
1 << REG_SDRAM_INIT_RESET_MASK_OFFS);
}
/*
* Stage 0 - Set board configuration
*/
/* Check if DRAM is already initialized */
if (reg_read(REG_BOOTROM_ROUTINE_ADDR) &
(1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS)) {
printf("%s Training Sequence - 2nd boot - Skip\n", ddr_type);
return MV_OK;
}
/*
* Stage 1 - Dunit Setup
*/
/* Fix read ready phases for all SOC in reg 0x15c8 */
reg = reg_read(REG_TRAINING_DEBUG_3_ADDR);
reg &= ~(REG_TRAINING_DEBUG_3_MASK);
reg |= 0x4; /* Phase 0 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << REG_TRAINING_DEBUG_3_OFFS);
reg |= (0x4 << (1 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 1 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (3 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (3 * REG_TRAINING_DEBUG_3_OFFS)); /* Phase 3 */
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (4 * REG_TRAINING_DEBUG_3_OFFS));
reg &= ~(REG_TRAINING_DEBUG_3_MASK << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg |= (0x6 << (5 * REG_TRAINING_DEBUG_3_OFFS));
reg_write(REG_TRAINING_DEBUG_3_ADDR, reg);
/*
* Axi_bresp_mode[8] = Compliant,
* Axi_addr_decode_cntrl[11] = Internal,
* Axi_data_bus_width[0] = 128bit
*/
/* 0x14a8 - AXI Control Register */
reg_write(REG_DRAM_AXI_CTRL_ADDR, 0);
/*
* Stage 2 - Training Values Setup
*/
/* Set X-BAR windows for the training sequence */
ddr3_save_and_set_training_windows(win);
#ifdef SUPPORT_STATIC_DUNIT_CONFIG
/*
* Load static controller configuration (in case dynamic/generic init
* is not enabled)
*/
if (generic_init_controller == 0) {
ddr3_tip_init_specific_reg_config(0,
ddr_modes
[ddr3_get_static_ddr_mode
()].regs);
}
#endif
/* Load topology for New Training IP */
status = ddr3_load_topology_map();
if (MV_OK != status) {
printf("%s Training Sequence topology load - FAILED\n",
ddr_type);
return status;
}
/* Tune training algorithm parameters */
status = ddr3_hws_tune_training_params(0);
if (MV_OK != status)
return status;
/* Set log level for training lib */
ddr3_hws_set_log_level(DEBUG_BLOCK_ALL, DEBUG_LEVEL_ERROR);
/* Start New Training IP */
status = ddr3_hws_hw_training();
if (MV_OK != status) {
printf("%s Training Sequence - FAILED\n", ddr_type);
return status;
}
/*
* Stage 3 - Finish
*/
/* Restore and set windows */
ddr3_restore_and_set_final_windows(win);
/* Update DRAM init indication in bootROM register */
reg = reg_read(REG_BOOTROM_ROUTINE_ADDR);
reg_write(REG_BOOTROM_ROUTINE_ADDR,
reg | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));
/* DLB config */
ddr3_new_tip_dlb_config();
#if defined(ECC_SUPPORT)
if (ddr3_if_ecc_enabled())
ddr3_new_tip_ecc_scrub();
#endif
printf("%s Training Sequence - Ended Successfully\n", ddr_type);
return MV_OK;
}
/*
* Name: ddr3_get_cpu_freq
* Desc: read S@R and return CPU frequency
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_cpu_freq(void)
{
return ddr3_tip_get_init_freq();
}
/*
* Name: ddr3_get_fab_opt
* Desc: return the fabric option (always 0 - no fabric on this device)
* Args:
* Notes:
* Returns: required value
*/
u32 ddr3_get_fab_opt(void)
{
return 0; /* No fabric */
}
/*
* Name: ddr3_get_static_mc_value - read a value from a static MC register
* Desc: Read the register at reg_addr and extract up to two fields, each
*       shifted right by its offset and masked; the two fields are OR-ed
*       together.
* Args: reg_addr, offset1, mask1, offset2, mask2
* Notes:
* Returns: The extracted field value.
*/
u32 ddr3_get_static_mc_value(u32 reg_addr, u32 offset1, u32 mask1,
u32 offset2, u32 mask2)
{
u32 reg, temp;
reg = reg_read(reg_addr);
temp = (reg >> offset1) & mask1;
if (mask2)
temp |= (reg >> offset2) & mask2;
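/*
* Worked example (hypothetical values): with reg = 0x00ab00cd,
* offset1 = 0, mask1 = 0xff, offset2 = 16, mask2 = 0xff, the two
* extracted fields are 0xcd and 0xab, so the result is 0xcd | 0xab = 0xef.
*/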
return temp;
}
/*
* Name: ddr3_get_static_ddr_mode - select the static DDR mode entry
* Desc: Return the index in ddr_modes[] matching the current CPU
*       frequency, fabric option and board revision. Used when the
*       controller is initialized from static parameters instead of the
*       HW training procedure (the user must provide a compatible header
*       file with the register data).
* Args: None.
* Notes:
* Returns: Index into ddr_modes[] (0 if no match is found).
*/
u32 ddr3_get_static_ddr_mode(void)
{
u32 chip_board_rev, i;
u32 size;
/* Valid for A380 only; MSYS uses dynamic controller configuration */
#ifdef CONFIG_CUSTOMER_BOARD_SUPPORT
/*
* Customer boards select DDR mode according to
* board ID & Sample@Reset
*/
chip_board_rev = mv_board_id_get();
#else
/* Marvell boards select DDR mode according to Sample@Reset only */
chip_board_rev = MARVELL_BOARD;
#endif
size = ARRAY_SIZE(ddr_modes);
for (i = 0; i < size; i++) {
if ((ddr3_get_cpu_freq() == ddr_modes[i].cpu_freq) &&
(ddr3_get_fab_opt() == ddr_modes[i].fab_freq) &&
(chip_board_rev == ddr_modes[i].chip_board_rev))
return i;
}
DEBUG_INIT_S("\n*** Error: ddr3_get_static_ddr_mode: No match for requested DDR mode. ***\n\n");
return 0;
}
/******************************************************************************
* Name: ddr3_get_cs_num_from_reg
* Desc:
* Args:
* Notes:
* Returns:
*/
u32 ddr3_get_cs_num_from_reg(void)
{
u32 cs_ena = sys_env_get_cs_ena_from_reg();
u32 cs_count = 0;
u32 cs;
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs))
cs_count++;
}
return cs_count;
}
/*
* Name: ddr3_load_topology_map
* Desc:
* Args:
* Notes:
* Returns:
*/
int ddr3_load_topology_map(void)
{
struct hws_topology_map *tm = ddr3_get_topology_map();
#if defined(MV_DDR_TOPOLOGY_UPDATE_FROM_TWSI)
/* Update topology data */
if (MV_OK != ddr3_update_topology_map(tm)) {
DEBUG_INIT_FULL_S("Failed update of DDR3 Topology map\n");
}
#endif
return MV_OK;
}
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps)
{
u32 tmp, hclk = 200;
switch (freq_mode) {
case 4:
tmp = 1; /* DDR_400; */
hclk = 200;
break;
case 0x8:
tmp = 1; /* DDR_666; */
hclk = 333;
break;
case 0xc:
tmp = 1; /* DDR_800; */
hclk = 400;
break;
default:
/* Unknown freq_mode: report zero frequency and period */
*ddr_freq = 0;
*hclk_ps = 0;
return;
}
*ddr_freq = tmp; /* DDR freq define */
*hclk_ps = 1000000 / hclk; /* values are 1/HCLK in ps */
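/*
* Example: freq_mode 0xc selects hclk = 400 MHz, so
* *hclk_ps = 1000000 / 400 = 2500 ps (a 2.5 ns clock period).
*/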
return;
}
void ddr3_new_tip_dlb_config(void)
{
u32 reg, i = 0;
struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();
/* Write the configuration (the table is terminated by a zero reg_addr) */
while (config_table_ptr[i].reg_addr != 0) {
reg_write(config_table_ptr[i].reg_addr,
config_table_ptr[i].reg_data);
i++;
}
/* Enable DLB */
reg = reg_read(REG_STATIC_DRAM_DLB_CONTROL);
reg |= DLB_ENABLE | DLB_WRITE_COALESING | DLB_AXI_PREFETCH_EN |
DLB_MBUS_PREFETCH_EN | PREFETCH_N_LN_SZ_TR;
reg_write(REG_STATIC_DRAM_DLB_CONTROL, reg);
}
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
u32 reg, cs;
u32 mem_total_size = 0;
u32 cs_mem_size = 0;
u32 mem_total_size_c, cs_mem_size_c;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
u32 physical_mem_size;
u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
struct hws_topology_map *tm = ddr3_get_topology_map();
#endif
/* Open fast path windows */
for (cs = 0; cs < MAX_CS; cs++) {
if (cs_ena & (1 << cs)) {
/* get CS size */
if (ddr3_calc_mem_cs_size(cs, &cs_mem_size) != MV_OK)
return MV_FAIL;
#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
/*
* If the number of address pins doesn't allow using the max
* memory size defined in the topology, the memory size is
* limited by DEVICE_MAX_DRAM_ADDRESS_SIZE
*/
physical_mem_size = mem_size
[tm->interface_params[0].memory_size];
if (ddr3_get_device_width(cs) == 16) {
/*
* A 16-bit memory device can address twice as much -
* the least significant pin is not needed
*/
max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
}
if (physical_mem_size > max_mem_size) {
cs_mem_size = max_mem_size *
(ddr3_get_bus_width() /
ddr3_get_device_width(cs));
printf("Updated Physical Mem size is from 0x%x to %x\n",
physical_mem_size,
DEVICE_MAX_DRAM_ADDRESS_SIZE);
}
#endif
/* set fast path window control for the cs */
reg = 0xffffe1;
reg |= (cs << 2);
reg |= (cs_mem_size - 1) & 0xffff0000;
/* Open fast path window */
reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);
/* Set fast path window base address for the cs */
reg = ((cs_mem_size) * cs) & 0xffff0000;
/* Set base address */
reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);
/*
* Since the memory size may be bigger than 4G, the sum may not
* fit in a 32-bit word, so to estimate the result divide
* mem_total_size and cs_mem_size by 0x10000 (equivalent to >> 16)
*/
mem_total_size_c = mem_total_size >> 16;
cs_mem_size_c = cs_mem_size >> 16;
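/*
* Example: two 2 GiB chip selects give 0x8000 + 0x8000 = 0x10000
* in these scaled units, which no longer fits the 32-bit total,
* so the total is clamped below.
*/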
/* if the total still fits in 32 bits, accumulate the value */
if (mem_total_size_c + cs_mem_size_c < 0x10000)
mem_total_size += cs_mem_size;
else /* put max possible size */
mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
}
}
/* Set L2 filtering to Max Memory size */
reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);
return MV_OK;
}
u32 ddr3_get_bus_width(void)
{
u32 bus_width;
bus_width = (reg_read(REG_SDRAM_CONFIG_ADDR) & 0x8000) >>
REG_SDRAM_CONFIG_WIDTH_OFFS;
return (bus_width == 0) ? 16 : 32;
}
u32 ddr3_get_device_width(u32 cs)
{
u32 device_width;
device_width = (reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR) &
(0x3 << (REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs))) >>
(REG_SDRAM_ADDRESS_CTRL_STRUCT_OFFS * cs);
return (device_width == 0) ? 8 : 16;
}
float ddr3_get_device_size(u32 cs)
{
u32 device_size_low, device_size_high, device_size;
u32 data, cs_low_offset, cs_high_offset;
cs_low_offset = REG_SDRAM_ADDRESS_SIZE_OFFS + cs * 4;
cs_high_offset = REG_SDRAM_ADDRESS_SIZE_OFFS +
REG_SDRAM_ADDRESS_SIZE_HIGH_OFFS + cs;
data = reg_read(REG_SDRAM_ADDRESS_CTRL_ADDR);
device_size_low = (data >> cs_low_offset) & 0x3;
device_size_high = (data >> cs_high_offset) & 0x1;
device_size = device_size_low | (device_size_high << 2);
switch (device_size) {
case 0:
return 2;
case 2:
return 0.5;
case 3:
return 1;
case 4:
return 4;
case 5:
return 8;
case 1:
default:
DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
/*
* A small value will give a wrong memory size in
* ddr3_calc_mem_cs_size
*/
return 0.01;
}
}
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size)
{
float cs_mem_size;
/* Calculate in GiB */
cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
ddr3_get_device_size(cs)) / 8;
/*
* Multiply by the controller bus width multiplier, 2x for 64-bit
* (the SoC controller may be 32 or 64 bit, so bit 15 in reg 0x1400,
* which tells whether the whole bus or only half of it is used,
* has a different meaning)
*/
cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;
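/*
* Worked example (assuming DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER is 1):
* a 32-bit bus built from 16-bit devices with a density value of 4
* gives (32 / 16) * 4 / 8 = 1, so *cs_size is set to 1 GiB below.
*/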
if (cs_mem_size == 0.125) {
*cs_size = 128 << 20;
} else if (cs_mem_size == 0.25) {
*cs_size = 256 << 20;
} else if (cs_mem_size == 0.5) {
*cs_size = 512 << 20;
} else if (cs_mem_size == 1) {
*cs_size = 1 << 30;
} else if (cs_mem_size == 2) {
*cs_size = 2 << 30;
} else {
DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
return MV_BAD_VALUE;
}
return MV_OK;
}
#if defined(MV_DDR_TOPOLOGY_UPDATE_FROM_TWSI)
/*
* Name: ddr3_update_topology_map
* Desc:
* Args:
* Notes: Update the topology map according to Sample-at-Reset (SatR) values
* Returns:
*/
static int ddr3_update_topology_map(struct hws_topology_map *tm)
{
struct topology_update_info topology_update_info;
topology_update_info.update_width = 0;
topology_update_info.update_ecc = 0;
topology_update_info.update_ecc_pup3_mode = 0;
sys_env_get_topology_update_info(&topology_update_info);
if (topology_update_info.update_width) {
tm->bus_act_mask &=
~(TOPOLOGY_UPDATE_WIDTH_32BIT_MASK);
if (topology_update_info.width == TOPOLOGY_UPDATE_WIDTH_16BIT)
tm->bus_act_mask =
TOPOLOGY_UPDATE_WIDTH_16BIT_MASK;
else
tm->bus_act_mask =
TOPOLOGY_UPDATE_WIDTH_32BIT_MASK;
}
if (topology_update_info.update_ecc) {
if (topology_update_info.ecc == TOPOLOGY_UPDATE_ECC_OFF) {
tm->bus_act_mask &=
~(1 << topology_update_info.ecc_pup_mode_offset);
} else {
tm->bus_act_mask |=
topology_update_info.
ecc << topology_update_info.ecc_pup_mode_offset;
}
}
return MV_OK;
}
#endif
/*
* Name: ddr3_hws_tune_training_params
* Desc:
* Args:
* Notes: Tune internal training params
* Returns:
*/
static int ddr3_hws_tune_training_params(u8 dev_num)
{
struct tune_train_params params;
int status;
/* NOTE: do not remove any field initialization */
params.ck_delay = TUNE_TRAINING_PARAMS_CK_DELAY;
params.ck_delay_16 = TUNE_TRAINING_PARAMS_CK_DELAY_16;
params.p_finger = TUNE_TRAINING_PARAMS_PFINGER;
params.n_finger = TUNE_TRAINING_PARAMS_NFINGER;
params.phy_reg3_val = TUNE_TRAINING_PARAMS_PHYREG3VAL;
status = ddr3_tip_tune_training_params(dev_num, &params);
if (MV_OK != status) {
printf("%s Training Sequence - FAILED\n", ddr_type);
return status;
}
return MV_OK;
}

View File

@ -0,0 +1,395 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_INIT_H
#define _DDR3_INIT_H
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_mc_static.h"
#include "ddr3_a38x_topology.h"
#endif
#include "ddr3_hws_hw_training.h"
#include "ddr3_hws_sil_training.h"
#include "ddr3_logging_def.h"
#include "ddr3_training_hw_algo.h"
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_centralization.h"
#include "ddr3_training_ip_engine.h"
#include "ddr3_training_ip_flow.h"
#include "ddr3_training_ip_pbs.h"
#include "ddr3_training_ip_prv_if.h"
#include "ddr3_training_ip_static.h"
#include "ddr3_training_leveling.h"
#include "xor.h"
/*
* MV_DEBUG_INIT needs to be defined, otherwise the output of the
* DDR3 training code is incomplete and misleading
*/
#define MV_DEBUG_INIT
#define BIT(x) (1 << (x))
#ifdef MV_DEBUG_INIT
#define DEBUG_INIT_S(s) puts(s)
#define DEBUG_INIT_D(d, l) printf("%x", d)
#define DEBUG_INIT_D_10(d, l) printf("%d", d)
#else
#define DEBUG_INIT_S(s)
#define DEBUG_INIT_D(d, l)
#define DEBUG_INIT_D_10(d, l)
#endif
#ifdef MV_DEBUG_INIT_FULL
#define DEBUG_INIT_FULL_S(s) puts(s)
#define DEBUG_INIT_FULL_D(d, l) printf("%x", d)
#define DEBUG_INIT_FULL_D_10(d, l) printf("%d", d)
#define DEBUG_WR_REG(reg, val) \
{ DEBUG_INIT_S("Write Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#define DEBUG_RD_REG(reg, val) \
{ DEBUG_INIT_S("Read Reg: 0x"); DEBUG_INIT_D((reg), 8); \
DEBUG_INIT_S("= "); DEBUG_INIT_D((val), 8); DEBUG_INIT_S("\n"); }
#else
#define DEBUG_INIT_FULL_S(s)
#define DEBUG_INIT_FULL_D(d, l)
#define DEBUG_INIT_FULL_D_10(d, l)
#define DEBUG_WR_REG(reg, val)
#define DEBUG_RD_REG(reg, val)
#endif
#define DEBUG_INIT_FULL_C(s, d, l) \
{ DEBUG_INIT_FULL_S(s); \
DEBUG_INIT_FULL_D(d, l); \
DEBUG_INIT_FULL_S("\n"); }
#define DEBUG_INIT_C(s, d, l) \
{ DEBUG_INIT_S(s); DEBUG_INIT_D(d, l); DEBUG_INIT_S("\n"); }
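/*
* Usage example (as used in the DDR3 init code):
*   DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
* prints the string, then cs in hex, then a newline; the length argument
* is not used by the printf-based DEBUG_INIT_D() above.
*/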
/*
* Debug (Enable/Disable modules) and Error report
*/
#ifdef BASIC_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS_RESULTS
#endif
#ifdef FULL_DEBUG
#define MV_DEBUG_WL
#define MV_DEBUG_RL
#define MV_DEBUG_DQS
#define MV_DEBUG_PBS
#define MV_DEBUG_DFS
#define MV_DEBUG_MAIN_FULL
#define MV_DEBUG_DFS_FULL
#define MV_DEBUG_DQS_FULL
#define MV_DEBUG_RL_FULL
#define MV_DEBUG_WL_FULL
#endif
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#include "ddr3_a38x_topology.h"
#endif
/* The following is a list of Marvell status codes */
#define MV_ERROR (-1)
#define MV_OK (0x00) /* Operation succeeded */
#define MV_FAIL (0x01) /* Operation failed */
#define MV_BAD_VALUE (0x02) /* Illegal value (general) */
#define MV_OUT_OF_RANGE (0x03) /* The value is out of range */
#define MV_BAD_PARAM (0x04) /* Illegal parameter in function called */
#define MV_BAD_PTR (0x05) /* Illegal pointer value */
#define MV_BAD_SIZE (0x06) /* Illegal size */
#define MV_BAD_STATE (0x07) /* Illegal state of state machine */
#define MV_SET_ERROR (0x08) /* Set operation failed */
#define MV_GET_ERROR (0x09) /* Get operation failed */
#define MV_CREATE_ERROR (0x0a) /* Fail while creating an item */
#define MV_NOT_FOUND (0x0b) /* Item not found */
#define MV_NO_MORE (0x0c) /* No more items found */
#define MV_NO_SUCH (0x0d) /* No such item */
#define MV_TIMEOUT (0x0e) /* Time Out */
#define MV_NO_CHANGE (0x0f) /* Parameter(s) already set to this value */
#define MV_NOT_SUPPORTED (0x10) /* This request is not supported */
#define MV_NOT_IMPLEMENTED (0x11) /* Request supported but not implemented*/
#define MV_NOT_INITIALIZED (0x12) /* The item is not initialized */
#define MV_NO_RESOURCE (0x13) /* Resource not available (memory ...) */
#define MV_FULL (0x14) /* Item is full (Queue or table etc...) */
#define MV_EMPTY (0x15) /* Item is empty (Queue or table etc...) */
#define MV_INIT_ERROR (0x16) /* Error occurred during INIT process */
#define MV_HW_ERROR (0x17) /* Hardware error */
#define MV_TX_ERROR (0x18) /* Transmit operation did not succeed */
#define MV_RX_ERROR (0x19) /* Receive operation did not succeed */
#define MV_NOT_READY (0x1a) /* The other side is not ready yet */
#define MV_ALREADY_EXIST (0x1b) /* Tried to create existing item */
#define MV_OUT_OF_CPU_MEM (0x1c) /* Cpu memory allocation failed. */
#define MV_NOT_STARTED (0x1d) /* Not started yet */
#define MV_BUSY (0x1e) /* Item is busy. */
#define MV_TERMINATE (0x1f) /* Item terminated its work. */
#define MV_NOT_ALIGNED (0x20) /* Wrong alignment */
#define MV_NOT_ALLOWED (0x21) /* Operation NOT allowed */
#define MV_WRITE_PROTECT (0x22) /* Write protected */
#define MV_INVALID (int)(-1)
/* For checking function return values */
#define CHECK_STATUS(orig_func) \
{ \
int status; \
status = orig_func; \
if (MV_OK != status) \
return status; \
}
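/*
* Usage sketch (illustrative call): CHECK_STATUS() wraps a call and
* propagates any non-MV_OK result to the caller, e.g.:
*   CHECK_STATUS(ddr3_tip_reg_write(dev_num, reg_addr, data));
*/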
enum log_level {
MV_LOG_LEVEL_0,
MV_LOG_LEVEL_1,
MV_LOG_LEVEL_2,
MV_LOG_LEVEL_3
};
/* Globals */
extern u8 debug_training;
extern u8 is_reg_dump;
extern u8 generic_init_controller;
extern u32 freq_val[];
extern u32 is_pll_old;
extern struct cl_val_per_freq cas_latency_table[];
extern struct pattern_info pattern_table[];
extern struct cl_val_per_freq cas_write_latency_table[];
extern u8 debug_training;
extern u8 debug_centralization, debug_training_ip, debug_training_bist,
debug_pbs, debug_training_static, debug_leveling;
extern u32 pipe_multicast_mask;
extern struct hws_tip_config_func_db config_func_info[];
extern u8 cs_mask_reg[];
extern u8 twr_mask_table[];
extern u8 cl_mask_table[];
extern u8 cwl_mask_table[];
extern u16 rfc_table[];
extern u32 speed_bin_table_t_rc[];
extern u32 speed_bin_table_t_rcd_t_rp[];
extern u32 ck_delay, ck_delay_16;
extern u32 g_zpri_data;
extern u32 g_znri_data;
extern u32 g_zpri_ctrl;
extern u32 g_znri_ctrl;
extern u32 g_zpodt_data;
extern u32 g_znodt_data;
extern u32 g_zpodt_ctrl;
extern u32 g_znodt_ctrl;
extern u32 g_dic;
extern u32 g_odt_config;
extern u32 g_rtt_nom;
extern u8 debug_training_access;
extern u8 debug_training_a38x;
extern u32 first_active_if;
extern enum hws_ddr_freq init_freq;
extern u32 delay_enable, ck_delay, ck_delay_16, ca_delay;
extern u32 mask_tune_func;
extern u32 rl_version;
extern int rl_mid_freq_wa;
extern u8 calibration_update_control; /* 2 external only, 1 is internal only */
extern enum hws_ddr_freq medium_freq;
extern u32 ck_delay, ck_delay_16;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern u32 first_active_if;
extern u32 mask_tune_func;
extern u32 freq_val[];
extern enum hws_ddr_freq init_freq;
extern enum hws_ddr_freq low_freq;
extern enum hws_ddr_freq medium_freq;
extern u8 generic_init_controller;
extern enum auto_tune_stage training_stage;
extern u32 is_pll_before_init;
extern u32 is_adll_calib_before_init;
extern u32 is_dfs_in_init;
extern int wl_debug_delay;
extern u32 silicon_delay[HWS_MAX_DEVICE_NUM];
extern u32 p_finger;
extern u32 n_finger;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u32 start_pattern, end_pattern;
extern u32 phy_reg0_val;
extern u32 phy_reg1_val;
extern u32 phy_reg2_val;
extern u32 phy_reg3_val;
extern enum hws_pattern sweep_pattern;
extern enum hws_pattern pbs_pattern;
extern u8 is_rzq6;
extern u32 znri_data_phy_val;
extern u32 zpri_data_phy_val;
extern u32 znri_ctrl_phy_val;
extern u32 zpri_ctrl_phy_val;
extern u8 debug_training_access;
extern u32 finger_test, p_finger_start, p_finger_end, n_finger_start,
n_finger_end, p_finger_step, n_finger_step;
extern u32 mode2_t;
extern u32 xsb_validate_type;
extern u32 xsb_validation_base_address;
extern u32 odt_additional;
extern u32 debug_mode;
extern u32 delay_enable;
extern u32 ca_delay;
extern u32 debug_dunit;
extern u32 clamp_tbl[];
extern u32 freq_mask[HWS_MAX_DEVICE_NUM][DDR_FREQ_LIMIT];
extern u32 start_pattern, end_pattern;
extern u32 maxt_poll_tries;
extern u32 is_bist_reset_bit;
extern u8 debug_training_bist;
extern u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
extern u32 debug_mode;
extern u32 effective_cs;
extern int ddr3_tip_centr_skip_min_win_check;
extern u32 *dq_map_table;
extern enum auto_tune_stage training_stage;
extern u8 debug_centralization;
extern u32 delay_enable;
extern u32 start_pattern, end_pattern;
extern u32 freq_val[DDR_FREQ_LIMIT];
extern u8 debug_training_hw_alg;
extern enum auto_tune_stage training_stage;
extern u8 debug_training_ip;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 effective_cs;
extern u8 debug_leveling;
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 rl_version;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 start_xsb_offset;
extern u32 debug_mode;
extern u32 odt_config;
extern u32 effective_cs;
extern u32 phy_reg1_val;
extern u8 debug_pbs;
extern u32 effective_cs;
extern u16 mask_results_dq_reg_map[];
extern enum hws_ddr_freq medium_freq;
extern u32 freq_val[];
extern enum hws_result training_result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM];
extern enum auto_tune_stage training_stage;
extern u32 debug_mode;
extern u32 *dq_map_table;
extern u32 vref;
extern struct cl_val_per_freq cas_latency_table[];
extern u32 target_freq;
extern struct hws_tip_config_func_db config_func_info[HWS_MAX_DEVICE_NUM];
extern u32 clamp_tbl[];
extern u32 init_freq;
/* list of allowed frequencies, in the order of enum hws_ddr_freq */
extern u32 freq_val[];
extern u8 debug_training_static;
extern u32 first_active_if;
/* Prototypes */
int ddr3_tip_enable_init_sequence(u32 dev_num);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int ddr3_hws_hw_training(void);
int ddr3_silicon_pre_init(void);
int ddr3_silicon_post_init(void);
int ddr3_post_run_alg(void);
int ddr3_if_ecc_enabled(void);
void ddr3_new_tip_ecc_scrub(void);
void ddr3_print_version(void);
void ddr3_new_tip_dlb_config(void);
struct hws_topology_map *ddr3_get_topology_map(void);
int ddr3_if_ecc_enabled(void);
int ddr3_tip_reg_write(u32 dev_num, u32 reg_addr, u32 data);
int ddr3_tip_reg_read(u32 dev_num, u32 reg_addr, u32 *data, u32 reg_mask);
int ddr3_silicon_get_ddr_target_freq(u32 *ddr_freq);
int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info
*freq_config_info);
int ddr3_a38x_update_topology_map(u32 dev_num,
struct hws_topology_map *topology_map);
int ddr3_tip_a38x_get_init_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_get_medium_freq(int dev_num, enum hws_ddr_freq *freq);
int ddr3_tip_a38x_if_read(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask);
int ddr3_tip_a38x_if_write(u8 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data, u32 mask);
int ddr3_tip_a38x_get_device_info(u8 dev_num,
struct ddr3_device_info *info_ptr);
int ddr3_tip_init_a38x(u32 dev_num, u32 board_id);
int print_adll(u32 dev_num, u32 adll[MAX_INTERFACE_NUM * MAX_BUS_NUM]);
int ddr3_tip_restore_dunit_regs(u32 dev_num);
void print_topology(struct hws_topology_map *topology_db);
u32 mv_board_id_get(void);
int ddr3_load_topology_map(void);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
u32 ddr3_tip_get_init_freq(void);
void ddr3_hws_set_log_level(enum ddr_lib_debug_block block, u8 level);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
void get_target_freq(u32 freq_mode, u32 *ddr_freq, u32 *hclk_ps);
int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena);
void ddr3_fast_path_static_cs_size_config(u32 cs_ena);
u32 ddr3_get_device_width(u32 cs);
u32 mv_board_id_index_get(u32 board_id);
u32 mv_board_id_get(void);
u32 ddr3_get_bus_width(void);
void ddr3_set_log_level(u32 n_log_level);
int ddr3_calc_mem_cs_size(u32 cs, u32 *cs_size);
int hws_ddr3_cs_base_adr_calc(u32 if_id, u32 cs, u32 *cs_base_addr);
int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode);
int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode);
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
struct trip_delay_element *table_ptr,
int is_wl, u32 *round_trip_delay_arr);
u32 hws_ddr3_tip_max_cs_get(void);
/*
* Accessor functions for the registers
*/
static inline void reg_write(u32 addr, u32 val)
{
writel(val, INTER_REGS_BASE + addr);
}
static inline u32 reg_read(u32 addr)
{
return readl(INTER_REGS_BASE + addr);
}
static inline void reg_bit_set(u32 addr, u32 mask)
{
setbits_le32(INTER_REGS_BASE + addr, mask);
}
static inline void reg_bit_clr(u32 addr, u32 mask)
{
clrbits_le32(INTER_REGS_BASE + addr, mask);
}
#endif /* _DDR3_INIT_H */

View File

@ -0,0 +1,101 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_LOGGING_CONFIG_H
#define _DDR3_LOGGING_CONFIG_H
#ifdef SILENT_LIB
#define DEBUG_TRAINING_BIST_ENGINE(level, s)
#define DEBUG_TRAINING_IP(level, s)
#define DEBUG_CENTRALIZATION_ENGINE(level, s)
#define DEBUG_TRAINING_HW_ALG(level, s)
#define DEBUG_TRAINING_IP_ENGINE(level, s)
#define DEBUG_LEVELING(level, s)
#define DEBUG_PBS_ENGINE(level, s)
#define DEBUG_TRAINING_STATIC_IP(level, s)
#define DEBUG_TRAINING_ACCESS(level, s)
#else
#ifdef LIB_FUNCTIONAL_DEBUG_ONLY
#define DEBUG_TRAINING_BIST_ENGINE(level, s)
#define DEBUG_TRAINING_IP_ENGINE(level, s)
#define DEBUG_TRAINING_IP(level, s) \
if (level >= debug_training) \
printf s
#define DEBUG_CENTRALIZATION_ENGINE(level, s) \
if (level >= debug_centralization) \
printf s
#define DEBUG_TRAINING_HW_ALG(level, s) \
if (level >= debug_training_hw_alg) \
printf s
#define DEBUG_LEVELING(level, s) \
if (level >= debug_leveling) \
printf s
#define DEBUG_PBS_ENGINE(level, s) \
if (level >= debug_pbs) \
printf s
#define DEBUG_TRAINING_STATIC_IP(level, s) \
if (level >= debug_training_static) \
printf s
#define DEBUG_TRAINING_ACCESS(level, s) \
if (level >= debug_training_access) \
printf s
#else
#define DEBUG_TRAINING_BIST_ENGINE(level, s) \
if (level >= debug_training_bist) \
printf s
#define DEBUG_TRAINING_IP_ENGINE(level, s) \
if (level >= debug_training_ip) \
printf s
#define DEBUG_TRAINING_IP(level, s) \
if (level >= debug_training) \
printf s
#define DEBUG_CENTRALIZATION_ENGINE(level, s) \
if (level >= debug_centralization) \
printf s
#define DEBUG_TRAINING_HW_ALG(level, s) \
if (level >= debug_training_hw_alg) \
printf s
#define DEBUG_LEVELING(level, s) \
if (level >= debug_leveling) \
printf s
#define DEBUG_PBS_ENGINE(level, s) \
if (level >= debug_pbs) \
printf s
#define DEBUG_TRAINING_STATIC_IP(level, s) \
if (level >= debug_training_static) \
printf s
#define DEBUG_TRAINING_ACCESS(level, s) \
if (level >= debug_training_access) \
printf s
#endif
#endif
/* Logging defines */
#define DEBUG_LEVEL_TRACE 1
#define DEBUG_LEVEL_INFO 2
#define DEBUG_LEVEL_ERROR 3
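/*
* Usage note: the DEBUG_* macros above expand to "printf s", so the
* message must be passed as a parenthesized argument list, e.g.
* (illustrative): DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
*                 ("if %d: training failed\n", if_id));
*/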
enum ddr_lib_debug_block {
DEBUG_BLOCK_STATIC,
DEBUG_BLOCK_TRAINING_MAIN,
DEBUG_BLOCK_LEVELING,
DEBUG_BLOCK_CENTRALIZATION,
DEBUG_BLOCK_PBS,
DEBUG_BLOCK_IP,
DEBUG_BLOCK_BIST,
DEBUG_BLOCK_ALG,
DEBUG_BLOCK_DEVICE,
DEBUG_BLOCK_ACCESS,
DEBUG_STAGES_REG_DUMP,
/* All excluding IP and REG_DUMP, which should be enabled separately */
DEBUG_BLOCK_ALL
};
int ddr3_tip_print_log(u32 dev_num, u32 mem_addr);
int ddr3_tip_print_stability_log(u32 dev_num);
#endif /* _DDR3_LOGGING_CONFIG_H */

View File

@ -0,0 +1,924 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef __DDR3_PATTERNS_64_H
#define __DDR3_PATTERNS_64_H
/*
* Pattern declarations
*/
u32 wl_sup_pattern[LEN_WL_SUP_PATTERN] __aligned(32) = {
0x04030201, 0x08070605, 0x0c0b0a09, 0x100f0e0d,
0x14131211, 0x18171615, 0x1c1b1a19, 0x201f1e1d,
0x24232221, 0x28272625, 0x2c2b2a29, 0x302f2e2d,
0x34333231, 0x38373635, 0x3c3b3a39, 0x403f3e3d,
0x44434241, 0x48474645, 0x4c4b4a49, 0x504f4e4d,
0x54535251, 0x58575655, 0x5c5b5a59, 0x605f5e5d,
0x64636261, 0x68676665, 0x6c6b6a69, 0x706f6e6d,
0x74737271, 0x78777675, 0x7c7b7a79, 0x807f7e7d
};
u32 pbs_pattern_32b[2][LEN_PBS_PATTERN] __aligned(32) = {
{
0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555,
0xaaaaaaaa, 0x55555555, 0xaaaaaaaa, 0x55555555
},
{
0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa,
0x55555555, 0xaaaaaaaa, 0x55555555, 0xaaaaaaaa
}
};
u32 pbs_pattern_64b[2][LEN_PBS_PATTERN] __aligned(32) = {
{
0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555,
0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555
},
{
0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa,
0x55555555, 0x55555555, 0xaaaaaaaa, 0xaaaaaaaa
}
};
u32 rl_pattern[LEN_STD_PATTERN] __aligned(32) = {
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x01010101, 0x01010101, 0x01010101, 0x01010101
};
u32 killer_pattern_32b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = {
{
0x01010101, 0x00000000, 0x01010101, 0xffffffff,
0x01010101, 0x00000000, 0x01010101, 0xffffffff,
0xfefefefe, 0xfefefefe, 0x01010101, 0xfefefefe,
0xfefefefe, 0xfefefefe, 0x01010101, 0xfefefefe,
0x01010101, 0xfefefefe, 0x01010101, 0x01010101,
0x01010101, 0xfefefefe, 0x01010101, 0x01010101,
0xfefefefe, 0x01010101, 0xfefefefe, 0x00000000,
0xfefefefe, 0x01010101, 0xfefefefe, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x01010101,
0xffffffff, 0x00000000, 0xffffffff, 0x01010101,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xfefefefe,
0x00000000, 0x00000000, 0x00000000, 0xfefefefe,
0xfefefefe, 0xffffffff, 0x00000000, 0x00000000,
0xfefefefe, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xfefefefe, 0x00000000, 0xfefefefe, 0x00000000,
0xfefefefe, 0x00000000, 0xfefefefe, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x01010101,
0x00000000, 0xffffffff, 0xffffffff, 0x01010101,
0xffffffff, 0xffffffff, 0x01010101, 0x00000000,
0xffffffff, 0xffffffff, 0x01010101, 0x00000000,
0x01010101, 0xffffffff, 0xfefefefe, 0xfefefefe,
0x01010101, 0xffffffff, 0xfefefefe, 0xfefefefe
},
{
0x02020202, 0x00000000, 0x02020202, 0xffffffff,
0x02020202, 0x00000000, 0x02020202, 0xffffffff,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0xfdfdfdfd,
0x02020202, 0xfdfdfdfd, 0x02020202, 0x02020202,
0x02020202, 0xfdfdfdfd, 0x02020202, 0x02020202,
0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, 0x00000000,
0xfdfdfdfd, 0x02020202, 0xfdfdfdfd, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x02020202,
0xffffffff, 0x00000000, 0xffffffff, 0x02020202,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xfdfdfdfd,
0x00000000, 0x00000000, 0x00000000, 0xfdfdfdfd,
0xfdfdfdfd, 0xffffffff, 0x00000000, 0x00000000,
0xfdfdfdfd, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xfdfdfdfd, 0x00000000, 0xfdfdfdfd, 0x00000000,
0xfdfdfdfd, 0x00000000, 0xfdfdfdfd, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x02020202,
0x00000000, 0xffffffff, 0xffffffff, 0x02020202,
0xffffffff, 0xffffffff, 0x02020202, 0x00000000,
0xffffffff, 0xffffffff, 0x02020202, 0x00000000,
0x02020202, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd,
0x02020202, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd
},
{
0x04040404, 0x00000000, 0x04040404, 0xffffffff,
0x04040404, 0x00000000, 0x04040404, 0xffffffff,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0xfbfbfbfb,
0x04040404, 0xfbfbfbfb, 0x04040404, 0x04040404,
0x04040404, 0xfbfbfbfb, 0x04040404, 0x04040404,
0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, 0x00000000,
0xfbfbfbfb, 0x04040404, 0xfbfbfbfb, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x04040404,
0xffffffff, 0x00000000, 0xffffffff, 0x04040404,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xfbfbfbfb,
0x00000000, 0x00000000, 0x00000000, 0xfbfbfbfb,
0xfbfbfbfb, 0xffffffff, 0x00000000, 0x00000000,
0xfbfbfbfb, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xfbfbfbfb, 0x00000000, 0xfbfbfbfb, 0x00000000,
0xfbfbfbfb, 0x00000000, 0xfbfbfbfb, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x04040404,
0x00000000, 0xffffffff, 0xffffffff, 0x04040404,
0xffffffff, 0xffffffff, 0x04040404, 0x00000000,
0xffffffff, 0xffffffff, 0x04040404, 0x00000000,
0x04040404, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb,
0x04040404, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb
},
{
0x08080808, 0x00000000, 0x08080808, 0xffffffff,
0x08080808, 0x00000000, 0x08080808, 0xffffffff,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0xf7f7f7f7,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0xf7f7f7f7,
0x08080808, 0xf7f7f7f7, 0x08080808, 0x08080808,
0x08080808, 0xf7f7f7f7, 0x08080808, 0x08080808,
0xf7f7f7f7, 0x08080808, 0xf7f7f7f7, 0x00000000,
0xf7f7f7f7, 0x08080808, 0xf7f7f7f7, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x08080808,
0xffffffff, 0x00000000, 0xffffffff, 0x08080808,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xf7f7f7f7,
0x00000000, 0x00000000, 0x00000000, 0xf7f7f7f7,
0xf7f7f7f7, 0xffffffff, 0x00000000, 0x00000000,
0xf7f7f7f7, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xf7f7f7f7, 0x00000000, 0xf7f7f7f7, 0x00000000,
0xf7f7f7f7, 0x00000000, 0xf7f7f7f7, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x08080808,
0x00000000, 0xffffffff, 0xffffffff, 0x08080808,
0xffffffff, 0xffffffff, 0x08080808, 0x00000000,
0xffffffff, 0xffffffff, 0x08080808, 0x00000000,
0x08080808, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7,
0x08080808, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7
},
{
0x10101010, 0x00000000, 0x10101010, 0xffffffff,
0x10101010, 0x00000000, 0x10101010, 0xffffffff,
0xefefefef, 0xefefefef, 0x10101010, 0xefefefef,
0xefefefef, 0xefefefef, 0x10101010, 0xefefefef,
0x10101010, 0xefefefef, 0x10101010, 0x10101010,
0x10101010, 0xefefefef, 0x10101010, 0x10101010,
0xefefefef, 0x10101010, 0xefefefef, 0x00000000,
0xefefefef, 0x10101010, 0xefefefef, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x10101010,
0xffffffff, 0x00000000, 0xffffffff, 0x10101010,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xefefefef,
0x00000000, 0x00000000, 0x00000000, 0xefefefef,
0xefefefef, 0xffffffff, 0x00000000, 0x00000000,
0xefefefef, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xefefefef, 0x00000000, 0xefefefef, 0x00000000,
0xefefefef, 0x00000000, 0xefefefef, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x10101010,
0x00000000, 0xffffffff, 0xffffffff, 0x10101010,
0xffffffff, 0xffffffff, 0x10101010, 0x00000000,
0xffffffff, 0xffffffff, 0x10101010, 0x00000000,
0x10101010, 0xffffffff, 0xefefefef, 0xefefefef,
0x10101010, 0xffffffff, 0xefefefef, 0xefefefef
},
{
0x20202020, 0x00000000, 0x20202020, 0xffffffff,
0x20202020, 0x00000000, 0x20202020, 0xffffffff,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0xdfdfdfdf,
0x20202020, 0xdfdfdfdf, 0x20202020, 0x20202020,
0x20202020, 0xdfdfdfdf, 0x20202020, 0x20202020,
0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, 0x00000000,
0xdfdfdfdf, 0x20202020, 0xdfdfdfdf, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x20202020,
0xffffffff, 0x00000000, 0xffffffff, 0x20202020,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xdfdfdfdf,
0x00000000, 0x00000000, 0x00000000, 0xdfdfdfdf,
0xdfdfdfdf, 0xffffffff, 0x00000000, 0x00000000,
0xdfdfdfdf, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xdfdfdfdf, 0x00000000, 0xdfdfdfdf, 0x00000000,
0xdfdfdfdf, 0x00000000, 0xdfdfdfdf, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x20202020,
0x00000000, 0xffffffff, 0xffffffff, 0x20202020,
0xffffffff, 0xffffffff, 0x20202020, 0x00000000,
0xffffffff, 0xffffffff, 0x20202020, 0x00000000,
0x20202020, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf,
0x20202020, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf
},
{
0x40404040, 0x00000000, 0x40404040, 0xffffffff,
0x40404040, 0x00000000, 0x40404040, 0xffffffff,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0xbfbfbfbf,
0x40404040, 0xbfbfbfbf, 0x40404040, 0x40404040,
0x40404040, 0xbfbfbfbf, 0x40404040, 0x40404040,
0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, 0x00000000,
0xbfbfbfbf, 0x40404040, 0xbfbfbfbf, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x40404040,
0xffffffff, 0x00000000, 0xffffffff, 0x40404040,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0xbfbfbfbf,
0x00000000, 0x00000000, 0x00000000, 0xbfbfbfbf,
0xbfbfbfbf, 0xffffffff, 0x00000000, 0x00000000,
0xbfbfbfbf, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0xbfbfbfbf, 0x00000000, 0xbfbfbfbf, 0x00000000,
0xbfbfbfbf, 0x00000000, 0xbfbfbfbf, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x40404040,
0x00000000, 0xffffffff, 0xffffffff, 0x40404040,
0xffffffff, 0xffffffff, 0x40404040, 0x00000000,
0xffffffff, 0xffffffff, 0x40404040, 0x00000000,
0x40404040, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf,
0x40404040, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf
},
{
0x80808080, 0x00000000, 0x80808080, 0xffffffff,
0x80808080, 0x00000000, 0x80808080, 0xffffffff,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x7f7f7f7f,
0x80808080, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x80808080, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, 0x00000000,
0x7f7f7f7f, 0x80808080, 0x7f7f7f7f, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x00000000,
0xffffffff, 0x00000000, 0xffffffff, 0x80808080,
0xffffffff, 0x00000000, 0xffffffff, 0x80808080,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x7f7f7f7f,
0x00000000, 0x00000000, 0x00000000, 0x7f7f7f7f,
0x7f7f7f7f, 0xffffffff, 0x00000000, 0x00000000,
0x7f7f7f7f, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x00000000,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x00000000, 0xffffffff, 0x00000000, 0xffffffff,
0x7f7f7f7f, 0x00000000, 0x7f7f7f7f, 0x00000000,
0x7f7f7f7f, 0x00000000, 0x7f7f7f7f, 0x00000000,
0x00000000, 0xffffffff, 0xffffffff, 0x80808080,
0x00000000, 0xffffffff, 0xffffffff, 0x80808080,
0xffffffff, 0xffffffff, 0x80808080, 0x00000000,
0xffffffff, 0xffffffff, 0x80808080, 0x00000000,
0x80808080, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f,
0x80808080, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f
}
};
u32 killer_pattern_64b[DQ_NUM][LEN_KILLER_PATTERN] __aligned(32) = {
{
0x01010101, 0x01010101, 0x00000000, 0x00000000,
0x01010101, 0x01010101, 0xffffffff, 0xffffffff,
0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe,
0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe,
0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe,
0x01010101, 0x01010101, 0x01010101, 0x01010101,
0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101,
0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x01010101, 0x01010101,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xfefefefe, 0xfefefefe,
0xfefefefe, 0xfefefefe, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000,
0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x01010101, 0x01010101,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x01010101, 0x01010101, 0x00000000, 0x00000000,
0x01010101, 0x01010101, 0xffffffff, 0xffffffff,
0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe
},
{
0x02020202, 0x02020202, 0x00000000, 0x00000000,
0x02020202, 0x02020202, 0xffffffff, 0xffffffff,
0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd,
0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd,
0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd,
0x02020202, 0x02020202, 0x02020202, 0x02020202,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202,
0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x02020202, 0x02020202,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xfdfdfdfd, 0xfdfdfdfd,
0xfdfdfdfd, 0xfdfdfdfd, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000,
0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x02020202, 0x02020202,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x02020202, 0x02020202, 0x00000000, 0x00000000,
0x02020202, 0x02020202, 0xffffffff, 0xffffffff,
0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd
},
{
0x04040404, 0x04040404, 0x00000000, 0x00000000,
0x04040404, 0x04040404, 0xffffffff, 0xffffffff,
0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb,
0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb,
0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb,
0x04040404, 0x04040404, 0x04040404, 0x04040404,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404,
0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x04040404, 0x04040404,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xfbfbfbfb, 0xfbfbfbfb,
0xfbfbfbfb, 0xfbfbfbfb, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000,
0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x04040404, 0x04040404,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x04040404, 0x04040404, 0x00000000, 0x00000000,
0x04040404, 0x04040404, 0xffffffff, 0xffffffff,
0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb
},
{
0x08080808, 0x08080808, 0x00000000, 0x00000000,
0x08080808, 0x08080808, 0xffffffff, 0xffffffff,
0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7,
0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7,
0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7,
0x08080808, 0x08080808, 0x08080808, 0x08080808,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808,
0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x08080808, 0x08080808,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xf7f7f7f7, 0xf7f7f7f7,
0xf7f7f7f7, 0xf7f7f7f7, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000,
0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x08080808, 0x08080808,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x08080808, 0x08080808, 0x00000000, 0x00000000,
0x08080808, 0x08080808, 0xffffffff, 0xffffffff,
0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7
},
{
0x10101010, 0x10101010, 0x00000000, 0x00000000,
0x10101010, 0x10101010, 0xffffffff, 0xffffffff,
0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef,
0x10101010, 0x10101010, 0xefefefef, 0xefefefef,
0x10101010, 0x10101010, 0xefefefef, 0xefefefef,
0x10101010, 0x10101010, 0x10101010, 0x10101010,
0xefefefef, 0xefefefef, 0x10101010, 0x10101010,
0xefefefef, 0xefefefef, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x10101010, 0x10101010,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xefefefef, 0xefefefef,
0xefefefef, 0xefefefef, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xefefefef, 0xefefefef, 0x00000000, 0x00000000,
0xefefefef, 0xefefefef, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x10101010, 0x10101010,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x10101010, 0x10101010, 0x00000000, 0x00000000,
0x10101010, 0x10101010, 0xffffffff, 0xffffffff,
0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef
},
{
0x20202020, 0x20202020, 0x00000000, 0x00000000,
0x20202020, 0x20202020, 0xffffffff, 0xffffffff,
0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf,
0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf,
0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf,
0x20202020, 0x20202020, 0x20202020, 0x20202020,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020,
0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x20202020, 0x20202020,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xdfdfdfdf, 0xdfdfdfdf,
0xdfdfdfdf, 0xdfdfdfdf, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000,
0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x20202020, 0x20202020,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x20202020, 0x20202020, 0x00000000, 0x00000000,
0x20202020, 0x20202020, 0xffffffff, 0xffffffff,
0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf
},
{
0x40404040, 0x40404040, 0x00000000, 0x00000000,
0x40404040, 0x40404040, 0xffffffff, 0xffffffff,
0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf,
0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf,
0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf,
0x40404040, 0x40404040, 0x40404040, 0x40404040,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040,
0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x40404040, 0x40404040,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xbfbfbfbf, 0xbfbfbfbf,
0xbfbfbfbf, 0xbfbfbfbf, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000,
0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x40404040, 0x40404040,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x40404040, 0x40404040, 0x00000000, 0x00000000,
0x40404040, 0x40404040, 0xffffffff, 0xffffffff,
0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf
},
{
0x80808080, 0x80808080, 0x00000000, 0x00000000,
0x80808080, 0x80808080, 0xffffffff, 0xffffffff,
0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f,
0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f,
0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f,
0x80808080, 0x80808080, 0x80808080, 0x80808080,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x80808080, 0x80808080,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x7f7f7f7f, 0x7f7f7f7f,
0x7f7f7f7f, 0x7f7f7f7f, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000,
0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x80808080, 0x80808080,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x80808080, 0x80808080, 0x00000000, 0x00000000,
0x80808080, 0x80808080, 0xffffffff, 0xffffffff,
0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f
}
};
u32 special_pattern[DQ_NUM][LEN_SPECIAL_PATTERN] __aligned(32) = {
{
0x00000000, 0x00000000, 0x01010101, 0x01010101,
0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe,
0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101,
0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101,
0xfefefefe, 0xfefefefe, 0x01010101, 0x01010101,
0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe,
0x01010101, 0x01010101, 0xfefefefe, 0xfefefefe,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x01010101, 0x01010101, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xfefefefe, 0xfefefefe, 0xfefefefe, 0xfefefefe,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe,
0x00000000, 0x00000000, 0xfefefefe, 0xfefefefe,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x01010101, 0x01010101, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x01010101, 0x01010101,
0x00000000, 0x00000000, 0x01010101, 0x01010101,
0xffffffff, 0xffffffff, 0xfefefefe, 0xfefefefe,
0xfefefefe, 0xfefefefe, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x02020202, 0x02020202,
0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202,
0xfdfdfdfd, 0xfdfdfdfd, 0x02020202, 0x02020202,
0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd,
0x02020202, 0x02020202, 0xfdfdfdfd, 0xfdfdfdfd,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x02020202, 0x02020202, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd, 0xfdfdfdfd,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd,
0x00000000, 0x00000000, 0xfdfdfdfd, 0xfdfdfdfd,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x02020202, 0x02020202, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x02020202, 0x02020202,
0x00000000, 0x00000000, 0x02020202, 0x02020202,
0xffffffff, 0xffffffff, 0xfdfdfdfd, 0xfdfdfdfd,
0xfdfdfdfd, 0xfdfdfdfd, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x04040404, 0x04040404,
0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404,
0xfbfbfbfb, 0xfbfbfbfb, 0x04040404, 0x04040404,
0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb,
0x04040404, 0x04040404, 0xfbfbfbfb, 0xfbfbfbfb,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x04040404, 0x04040404, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb, 0xfbfbfbfb,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb,
0x00000000, 0x00000000, 0xfbfbfbfb, 0xfbfbfbfb,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x04040404, 0x04040404, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x04040404, 0x04040404,
0x00000000, 0x00000000, 0x04040404, 0x04040404,
0xffffffff, 0xffffffff, 0xfbfbfbfb, 0xfbfbfbfb,
0xfbfbfbfb, 0xfbfbfbfb, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x08080808, 0x08080808,
0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808,
0xf7f7f7f7, 0xf7f7f7f7, 0x08080808, 0x08080808,
0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7,
0x08080808, 0x08080808, 0xf7f7f7f7, 0xf7f7f7f7,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x08080808, 0x08080808, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7, 0xf7f7f7f7,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7,
0x00000000, 0x00000000, 0xf7f7f7f7, 0xf7f7f7f7,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x08080808, 0x08080808, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x08080808, 0x08080808,
0x00000000, 0x00000000, 0x08080808, 0x08080808,
0xffffffff, 0xffffffff, 0xf7f7f7f7, 0xf7f7f7f7,
0xf7f7f7f7, 0xf7f7f7f7, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x10101010, 0x10101010,
0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef,
0xefefefef, 0xefefefef, 0x10101010, 0x10101010,
0xefefefef, 0xefefefef, 0x10101010, 0x10101010,
0xefefefef, 0xefefefef, 0x10101010, 0x10101010,
0x10101010, 0x10101010, 0xefefefef, 0xefefefef,
0x10101010, 0x10101010, 0xefefefef, 0xefefefef,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x10101010, 0x10101010, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xefefefef, 0xefefefef, 0xefefefef, 0xefefefef,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef,
0x00000000, 0x00000000, 0xefefefef, 0xefefefef,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x10101010, 0x10101010, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x10101010, 0x10101010,
0x00000000, 0x00000000, 0x10101010, 0x10101010,
0xffffffff, 0xffffffff, 0xefefefef, 0xefefefef,
0xefefefef, 0xefefefef, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x20202020, 0x20202020,
0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020,
0xdfdfdfdf, 0xdfdfdfdf, 0x20202020, 0x20202020,
0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf,
0x20202020, 0x20202020, 0xdfdfdfdf, 0xdfdfdfdf,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x20202020, 0x20202020, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf, 0xdfdfdfdf,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf,
0x00000000, 0x00000000, 0xdfdfdfdf, 0xdfdfdfdf,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x20202020, 0x20202020, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x20202020, 0x20202020,
0x00000000, 0x00000000, 0x20202020, 0x20202020,
0xffffffff, 0xffffffff, 0xdfdfdfdf, 0xdfdfdfdf,
0xdfdfdfdf, 0xdfdfdfdf, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x40404040, 0x40404040,
0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040,
0xbfbfbfbf, 0xbfbfbfbf, 0x40404040, 0x40404040,
0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf,
0x40404040, 0x40404040, 0xbfbfbfbf, 0xbfbfbfbf,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x40404040, 0x40404040, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf, 0xbfbfbfbf,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf,
0x00000000, 0x00000000, 0xbfbfbfbf, 0xbfbfbfbf,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x40404040, 0x40404040, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x40404040, 0x40404040,
0x00000000, 0x00000000, 0x40404040, 0x40404040,
0xffffffff, 0xffffffff, 0xbfbfbfbf, 0xbfbfbfbf,
0xbfbfbfbf, 0xbfbfbfbf, 0x00000000, 0x00000000
},
{
0x00000000, 0x00000000, 0x80808080, 0x80808080,
0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x7f7f7f7f, 0x7f7f7f7f, 0x80808080, 0x80808080,
0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f,
0x80808080, 0x80808080, 0x7f7f7f7f, 0x7f7f7f7f,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0x80808080, 0x80808080, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f, 0x7f7f7f7f,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0x00000000, 0x00000000, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f,
0x00000000, 0x00000000, 0x7f7f7f7f, 0x7f7f7f7f,
0x00000000, 0x00000000, 0x00000000, 0x00000000,
0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff,
0x80808080, 0x80808080, 0xffffffff, 0xffffffff,
0xffffffff, 0xffffffff, 0x80808080, 0x80808080,
0x00000000, 0x00000000, 0x80808080, 0x80808080,
0xffffffff, 0xffffffff, 0x7f7f7f7f, 0x7f7f7f7f,
0x7f7f7f7f, 0x7f7f7f7f, 0x00000000, 0x00000000
}
};
/* Fabric ratios table */
u32 fabric_ratio[FAB_OPT] = {
0x04010204,
0x04020202,
0x08020306,
0x08020303,
0x04020303,
0x04020204,
0x04010202,
0x08030606,
0x08030505,
0x04020306,
0x0804050a,
0x04030606,
0x04020404,
0x04030306,
0x04020505,
0x08020505,
0x04010303,
0x08050a0a,
0x04030408,
0x04010102,
0x08030306
};
u32 pbs_dq_mapping[PUP_NUM_64BIT + 1][DQ_NUM] = {
{3, 2, 5, 7, 1, 0, 6, 4},
{2, 3, 6, 7, 1, 0, 4, 5},
{1, 3, 5, 6, 0, 2, 4, 7},
{0, 2, 4, 7, 1, 3, 5, 6},
{3, 0, 4, 6, 1, 2, 5, 7},
{0, 3, 5, 7, 1, 2, 4, 6},
{2, 3, 5, 7, 1, 0, 4, 6},
{0, 2, 5, 4, 1, 3, 6, 7},
{2, 3, 4, 7, 0, 1, 5, 6}
};
#endif /* __DDR3_PATTERNS_64_H */

View File

@ -0,0 +1,76 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TOPOLOGY_DEF_H
#define _DDR3_TOPOLOGY_DEF_H
/* TOPOLOGY */
enum hws_speed_bin {
SPEED_BIN_DDR_800D,
SPEED_BIN_DDR_800E,
SPEED_BIN_DDR_1066E,
SPEED_BIN_DDR_1066F,
SPEED_BIN_DDR_1066G,
SPEED_BIN_DDR_1333F,
SPEED_BIN_DDR_1333G,
SPEED_BIN_DDR_1333H,
SPEED_BIN_DDR_1333J,
SPEED_BIN_DDR_1600G,
SPEED_BIN_DDR_1600H,
SPEED_BIN_DDR_1600J,
SPEED_BIN_DDR_1600K,
SPEED_BIN_DDR_1866J,
SPEED_BIN_DDR_1866K,
SPEED_BIN_DDR_1866L,
SPEED_BIN_DDR_1866M,
SPEED_BIN_DDR_2133K,
SPEED_BIN_DDR_2133L,
SPEED_BIN_DDR_2133M,
SPEED_BIN_DDR_2133N,
SPEED_BIN_DDR_1333H_EXT,
SPEED_BIN_DDR_1600K_EXT,
SPEED_BIN_DDR_1866M_EXT
};
enum hws_ddr_freq {
DDR_FREQ_LOW_FREQ,
DDR_FREQ_400,
DDR_FREQ_533,
DDR_FREQ_667,
DDR_FREQ_800,
DDR_FREQ_933,
DDR_FREQ_1066,
DDR_FREQ_311,
DDR_FREQ_333,
DDR_FREQ_467,
DDR_FREQ_850,
DDR_FREQ_600,
DDR_FREQ_300,
DDR_FREQ_900,
DDR_FREQ_360,
DDR_FREQ_1000,
DDR_FREQ_LIMIT
};
enum speed_bin_table_elements {
SPEED_BIN_TRCD,
SPEED_BIN_TRP,
SPEED_BIN_TRAS,
SPEED_BIN_TRC,
SPEED_BIN_TRRD1K,
SPEED_BIN_TRRD2K,
SPEED_BIN_TPD,
SPEED_BIN_TFAW1K,
SPEED_BIN_TFAW2K,
SPEED_BIN_TWTR,
SPEED_BIN_TRTP,
SPEED_BIN_TWR,
SPEED_BIN_TMOD
};
#endif /* _DDR3_TOPOLOGY_DEF_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,289 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
static u32 bist_offset = 32;
enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0;
static int ddr3_tip_bist_operation(u32 dev_num,
enum hws_access_type access_type,
u32 if_id,
enum hws_bist_operation oper_type);
/*
* BIST activate
*/
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
enum hws_access_type access_type, u32 if_num,
enum hws_dir direction,
enum hws_stress_jump addr_stress_jump,
enum hws_pattern_duration duration,
enum hws_bist_operation oper_type,
u32 offset, u32 cs_num, u32 pattern_addr_length)
{
u32 tx_burst_size;
u32 delay_between_burst;
u32 rd_mode, val;
u32 poll_cnt = 0, max_poll = 1000, i, start_if, end_if;
struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
/* ODPG Write enable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0x1, 0x1));
/* ODPG Read enable/disable from BIST */
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG,
(direction == OPER_READ) ?
0x2 : 0, 0x2));
CHECK_STATUS(ddr3_tip_load_pattern_to_odpg(dev_num, access_type, if_num,
pattern, offset));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_BUF_SIZE_REG,
pattern_addr_length, MASK_ALL_BITS));
tx_burst_size = (direction == OPER_WRITE) ?
pattern_table[pattern].tx_burst_size : 0;
delay_between_burst = (direction == OPER_WRITE) ? 2 : 0;
rd_mode = (direction == OPER_WRITE) ? 1 : 0;
CHECK_STATUS(ddr3_tip_configure_odpg
(dev_num, access_type, if_num, direction,
pattern_table[pattern].num_of_phases_tx, tx_burst_size,
pattern_table[pattern].num_of_phases_rx,
delay_between_burst,
rd_mode, cs_num, addr_stress_jump, duration));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_PATTERN_ADDR_OFFSET_REG,
offset, MASK_ALL_BITS));
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_STOP));
} else {
CHECK_STATUS(ddr3_tip_bist_operation(dev_num, access_type,
if_num, BIST_START));
if (duration != DURATION_CONT) {
/*
* This delay is a WA, because polling gives "done"
* even when the ODPG did not finish its task
*/
if (access_type == ACCESS_TYPE_MULTICAST) {
start_if = 0;
end_if = MAX_INTERFACE_NUM - 1;
} else {
start_if = if_num;
end_if = if_num;
}
for (i = start_if; i <= end_if; i++) {
VALIDATE_ACTIVE(tm->
if_act_mask, i);
for (poll_cnt = 0; poll_cnt < max_poll;
poll_cnt++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
ACCESS_TYPE_UNICAST,
if_num, ODPG_BIST_DONE,
read_data,
MASK_ALL_BITS));
val = read_data[i];
if ((val & 0x1) == 0x0) {
/*
* In SOC type devices this bit
* is self clear so, if it was
* cleared all good
*/
break;
}
}
if (poll_cnt >= max_poll) {
DEBUG_TRAINING_BIST_ENGINE
(DEBUG_LEVEL_ERROR,
("Bist poll failure 2\n"));
CHECK_STATUS(ddr3_tip_if_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
return MV_FAIL;
}
}
CHECK_STATUS(ddr3_tip_bist_operation
(dev_num, access_type, if_num, BIST_STOP));
}
}
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_num,
ODPG_DATA_CONTROL_REG, 0,
MASK_ALL_BITS));
return MV_OK;
}
/*
* BIST read result
*/
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
struct bist_result *pst_bist_result)
{
int ret;
u32 read_data[MAX_INTERFACE_NUM];
struct hws_topology_map *tm = ddr3_get_topology_map();
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
return MV_NOT_SUPPORTED;
DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
("ddr3_tip_bist_read_result if_id %d\n",
if_id));
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_HI_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_high = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_FAILED_DATA_LOW_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_fail_low = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_LAST_FAIL_ADDR_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_last_fail_addr = read_data[if_id];
ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
ODPG_BIST_DATA_ERROR_COUNTER_REG, read_data,
MASK_ALL_BITS);
if (ret != MV_OK)
return ret;
pst_bist_result->bist_error_cnt = read_data[if_id];
return MV_OK;
}
/*
* BIST flow - Activate & read result
*/
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 cs_num)
{
int ret;
u32 i = 0;
u32 win_base;
struct bist_result st_bist_result;
struct hws_topology_map *tm = ddr3_get_topology_map();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
VALIDATE_ACTIVE(tm->if_act_mask, i);
hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
ret = ddr3_tip_bist_activate(dev_num, pattern,
ACCESS_TYPE_UNICAST,
i, OPER_WRITE, STRESS_NONE,
DURATION_SINGLE, BIST_START,
bist_offset + win_base,
cs_num, 15);
if (ret != MV_OK) {
printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
return ret;
}
ret = ddr3_tip_bist_activate(dev_num, pattern,
ACCESS_TYPE_UNICAST,
i, OPER_READ, STRESS_NONE,
DURATION_SINGLE, BIST_START,
bist_offset + win_base,
cs_num, 15);
if (ret != MV_OK) {
printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
return ret;
}
ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result);
if (ret != MV_OK) {
printf("ddr3_tip_bist_read_result failed\n");
return ret;
}
result[i] = st_bist_result.bist_error_cnt;
}
return MV_OK;
}
/*
* Set BIST Operation
*/
static int ddr3_tip_bist_operation(u32 dev_num,
enum hws_access_type access_type,
u32 if_id, enum hws_bist_operation oper_type)
{
if (oper_type == BIST_STOP) {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1 << 8, 1 << 8));
} else {
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODPG_BIST_DONE, 1, 1));
}
return MV_OK;
}
/*
* Print BIST result
*/
void ddr3_tip_print_bist_res(void)
{
u32 dev_num = 0;
u32 i;
struct bist_result st_bist_result[MAX_INTERFACE_NUM];
int res;
struct hws_topology_map *tm = ddr3_get_topology_map();
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) == 0)
continue;
res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
if (res != MV_OK) {
DEBUG_TRAINING_BIST_ENGINE(
DEBUG_LEVEL_ERROR,
("ddr3_tip_bist_read_result failed\n"));
return;
}
}
DEBUG_TRAINING_BIST_ENGINE(
DEBUG_LEVEL_INFO,
("interface | error_cnt | fail_low | fail_high | fail_addr\n"));
for (i = 0; i < MAX_INTERFACE_NUM; i++) {
if (IS_ACTIVE(tm->if_act_mask, i) ==
0)
continue;
DEBUG_TRAINING_BIST_ENGINE(
DEBUG_LEVEL_INFO,
("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n",
i, st_bist_result[i].bist_error_cnt,
st_bist_result[i].bist_fail_low,
st_bist_result[i].bist_fail_high,
st_bist_result[i].bist_last_fail_addr));
}
}

View File

@ -0,0 +1,714 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VALIDATE_WIN_LENGTH(e1, e2, maxsize) \
(((e2) + 1 > (e1) + (u8)MIN_WINDOW_SIZE) && \
((e2) + 1 < (e1) + (u8)maxsize))
#define IS_WINDOW_OUT_BOUNDARY(e1, e2, maxsize) \
(((e1) == 0 && (e2) != 0) || \
((e1) != (maxsize - 1) && (e2) == (maxsize - 1)))
#define CENTRAL_TX 0
#define CENTRAL_RX 1
#define NUM_OF_CENTRAL_TYPES 2
u32 start_pattern = PATTERN_KILLER_DQ0, end_pattern = PATTERN_KILLER_DQ7;
u32 start_if = 0, end_if = (MAX_INTERFACE_NUM - 1);
u8 bus_end_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 bus_start_window[NUM_OF_CENTRAL_TYPES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 centralization_state[MAX_INTERFACE_NUM][MAX_BUS_NUM];
static u8 ddr3_tip_special_rx_run_once_flag;
static int ddr3_tip_centralization(u32 dev_num, u32 mode);
/*
* Centralization RX Flow
*/
int ddr3_tip_centralization_rx(u32 dev_num)
{
CHECK_STATUS(ddr3_tip_special_rx(dev_num));
CHECK_STATUS(ddr3_tip_centralization(dev_num, CENTRAL_RX));
return MV_OK;
}
/*
* Centralization TX Flow
*/
int ddr3_tip_centralization_tx(u32 dev_num)
{
CHECK_STATUS(ddr3_tip_centralization(dev_num, CENTRAL_TX));
return MV_OK;
}
/*
* Centralization flow: find the valid ADLL tap window per pup over the
* killer patterns and program its centre into the PHY
*/
static int ddr3_tip_centralization(u32 dev_num, u32 mode)
{
enum hws_training_ip_stat training_result[MAX_INTERFACE_NUM];
u32 if_id, pattern_id, bit_id;
u8 bus_id;
u8 cur_start_win[BUS_WIDTH_IN_BITS];
u8 centralization_result[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS];
u8 cur_end_win[BUS_WIDTH_IN_BITS];
u8 current_window[BUS_WIDTH_IN_BITS];
u8 opt_window, waste_window, start_window_skew, end_window_skew;
u8 final_pup_window[MAX_INTERFACE_NUM][BUS_WIDTH_IN_BITS];
struct hws_topology_map *tm = ddr3_get_topology_map();
enum hws_training_result result_type = RESULT_PER_BIT;
enum hws_dir direction;
u32 *result[HWS_SEARCH_DIR_LIMIT];
u32 reg_phy_off, reg;
u8 max_win_size;
int lock_success = 1;
u8 cur_end_win_min, cur_start_win_max;
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
int is_if_fail = 0;
enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
u32 pup_win_length = 0;
enum hws_search_dir search_dir_id;
u8 cons_tap = (mode == CENTRAL_TX) ? (64) : (0);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
}
if (mode == CENTRAL_TX) {
max_win_size = MAX_WINDOW_SIZE_TX;
reg_phy_off = WRITE_CENTRALIZATION_PHY_REG + (effective_cs * 4);
direction = OPER_WRITE;
} else {
max_win_size = MAX_WINDOW_SIZE_RX;
reg_phy_off = READ_CENTRALIZATION_PHY_REG + (effective_cs * 4);
direction = OPER_READ;
}
/* DB initialization */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id < tm->num_of_bus_per_interface; bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
centralization_state[if_id][bus_id] = 0;
bus_end_window[mode][if_id][bus_id] =
(max_win_size - 1) + cons_tap;
bus_start_window[mode][if_id][bus_id] = 0;
centralization_result[if_id][bus_id] = 0;
}
}
/* start flow */
for (pattern_id = start_pattern; pattern_id <= end_pattern;
pattern_id++) {
ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, result_type,
HWS_CONTROL_ELEMENT_ADLL,
PARAM_NOT_CARE, direction,
tm->
if_act_mask, 0x0,
max_win_size - 1,
max_win_size - 1,
pattern_id, EDGE_FPF, CS_SINGLE,
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0;
bus_id <= tm->num_of_bus_per_interface - 1;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
search_dir_id++) {
CHECK_STATUS
(ddr3_tip_read_training_result
(dev_num, if_id,
ACCESS_TYPE_UNICAST, bus_id,
ALL_BITS_PER_PUP,
search_dir_id,
direction, result_type,
TRAINING_LOAD_OPERATION_UNLOAD,
CS_SINGLE,
&result[search_dir_id],
1, 0, 0));
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("%s pat %d IF %d pup %d Regs: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
((mode ==
CENTRAL_TX) ? "TX" : "RX"),
pattern_id, if_id, bus_id,
result[search_dir_id][0],
result[search_dir_id][1],
result[search_dir_id][2],
result[search_dir_id][3],
result[search_dir_id][4],
result[search_dir_id][5],
result[search_dir_id][6],
result[search_dir_id][7]));
}
for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS;
bit_id++) {
/* check if this code is valid for 2 edge, probably not :( */
cur_start_win[bit_id] =
GET_TAP_RESULT(result
[HWS_LOW2HIGH]
[bit_id],
EDGE_1);
cur_end_win[bit_id] =
GET_TAP_RESULT(result
[HWS_HIGH2LOW]
[bit_id],
EDGE_1);
/* window length */
current_window[bit_id] =
cur_end_win[bit_id] -
cur_start_win[bit_id] + 1;
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_TRACE,
("cs %x patern %d IF %d pup %d cur_start_win %d cur_end_win %d current_window %d\n",
effective_cs, pattern_id,
if_id, bus_id,
cur_start_win[bit_id],
cur_end_win[bit_id],
current_window[bit_id]));
}
if ((ddr3_tip_is_pup_lock
(result[HWS_LOW2HIGH], result_type)) &&
(ddr3_tip_is_pup_lock
(result[HWS_HIGH2LOW], result_type))) {
/* read result success */
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("Pup locked, pat %d IF %d pup %d\n",
pattern_id, if_id, bus_id));
} else {
/* read result failure */
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("fail Lock, pat %d IF %d pup %d\n",
pattern_id, if_id, bus_id));
if (centralization_state[if_id][bus_id]
== 1) {
/* continue with next pup */
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_TRACE,
("continue to next pup %d %d\n",
if_id, bus_id));
continue;
}
for (bit_id = 0;
bit_id < BUS_WIDTH_IN_BITS;
bit_id++) {
/*
* the next check is relevant
* only when using search
* machine 2 edges
*/
if (cur_start_win[bit_id] > 0 &&
cur_end_win[bit_id] == 0) {
cur_end_win
[bit_id] =
max_win_size - 1;
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_TRACE,
("fail, IF %d pup %d bit %d fail #1\n",
if_id, bus_id,
bit_id));
/* the next bit */
continue;
} else {
centralization_state
[if_id][bus_id] = 1;
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_TRACE,
("fail, IF %d pup %d bit %d fail #2\n",
if_id, bus_id,
bit_id));
}
}
if (centralization_state[if_id][bus_id]
== 1) {
/* going to next pup */
continue;
}
} /*bit */
opt_window =
ddr3_tip_get_buf_min(current_window);
/* final pup window length */
final_pup_window[if_id][bus_id] =
ddr3_tip_get_buf_min(cur_end_win) -
ddr3_tip_get_buf_max(cur_start_win) +
1;
waste_window =
opt_window -
final_pup_window[if_id][bus_id];
start_window_skew =
ddr3_tip_get_buf_max(cur_start_win) -
ddr3_tip_get_buf_min(
cur_start_win);
end_window_skew =
ddr3_tip_get_buf_max(
cur_end_win) -
ddr3_tip_get_buf_min(
cur_end_win);
/* min/max updated with pattern change */
cur_end_win_min =
ddr3_tip_get_buf_min(
cur_end_win);
cur_start_win_max =
ddr3_tip_get_buf_max(
cur_start_win);
bus_end_window[mode][if_id][bus_id] =
GET_MIN(bus_end_window[mode][if_id]
[bus_id],
cur_end_win_min);
bus_start_window[mode][if_id][bus_id] =
GET_MAX(bus_start_window[mode][if_id]
[bus_id],
cur_start_win_max);
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("pat %d IF %d pup %d opt_win %d final_win %d waste_win %d st_win_skew %d end_win_skew %d cur_st_win_max %d cur_end_win_min %d bus_st_win %d bus_end_win %d\n",
pattern_id, if_id, bus_id, opt_window,
final_pup_window[if_id][bus_id],
waste_window, start_window_skew,
end_window_skew,
cur_start_win_max,
cur_end_win_min,
bus_start_window[mode][if_id][bus_id],
bus_end_window[mode][if_id][bus_id]));
/* check if window is valid */
if (ddr3_tip_centr_skip_min_win_check == 0) {
if ((VALIDATE_WIN_LENGTH
(bus_start_window[mode][if_id]
[bus_id],
bus_end_window[mode][if_id]
[bus_id],
max_win_size) == 1) ||
(IS_WINDOW_OUT_BOUNDARY
(bus_start_window[mode][if_id]
[bus_id],
bus_end_window[mode][if_id]
[bus_id],
max_win_size) == 1)) {
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("win valid, pat %d IF %d pup %d\n",
pattern_id, if_id,
bus_id));
/* window is valid */
} else {
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("fail win, pat %d IF %d pup %d bus_st_win %d bus_end_win %d\n",
pattern_id, if_id, bus_id,
bus_start_window[mode]
[if_id][bus_id],
bus_end_window[mode]
[if_id][bus_id]));
centralization_state[if_id]
[bus_id] = 1;
if (debug_mode == 0)
return MV_FAIL;
}
} /* ddr3_tip_centr_skip_min_win_check */
} /* pup */
} /* interface */
} /* pattern */
for (if_id = start_if; if_id <= end_if; if_id++) {
if (IS_ACTIVE(tm->if_act_mask, if_id) == 0)
continue;
is_if_fail = 0;
flow_result[if_id] = TEST_SUCCESS;
for (bus_id = 0;
bus_id <= (tm->num_of_bus_per_interface - 1); bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
/* continue only if lock */
if (centralization_state[if_id][bus_id] != 1) {
if (ddr3_tip_centr_skip_min_win_check == 0) {
if ((bus_end_window
[mode][if_id][bus_id] ==
(max_win_size - 1)) &&
((bus_end_window
[mode][if_id][bus_id] -
bus_start_window[mode][if_id]
[bus_id]) < MIN_WINDOW_SIZE) &&
((bus_end_window[mode][if_id]
[bus_id] - bus_start_window
[mode][if_id][bus_id]) > 2)) {
/* prevent false lock */
/* TBD change to enum */
centralization_state
[if_id][bus_id] = 2;
}
if ((bus_end_window[mode][if_id][bus_id]
== 0) &&
((bus_end_window[mode][if_id]
[bus_id] -
bus_start_window[mode][if_id]
[bus_id]) < MIN_WINDOW_SIZE) &&
((bus_end_window[mode][if_id]
[bus_id] -
bus_start_window[mode][if_id]
[bus_id]) > 2))
/*prevent false lock */
centralization_state[if_id]
[bus_id] = 3;
}
if ((bus_end_window[mode][if_id][bus_id] >
(max_win_size - 1)) && direction ==
OPER_WRITE) {
DEBUG_CENTRALIZATION_ENGINE
(DEBUG_LEVEL_INFO,
("Tx special pattern\n"));
cons_tap = 64;
}
}
/* check states */
if (centralization_state[if_id][bus_id] == 3) {
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("SSW - TBD IF %d pup %d\n",
if_id, bus_id));
lock_success = 1;
} else if (centralization_state[if_id][bus_id] == 2) {
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("SEW - TBD IF %d pup %d\n",
if_id, bus_id));
lock_success = 1;
} else if (centralization_state[if_id][bus_id] == 0) {
lock_success = 1;
} else {
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_ERROR,
("fail, IF %d pup %d\n",
if_id, bus_id));
lock_success = 0;
}
if (lock_success == 1) {
centralization_result[if_id][bus_id] =
(bus_end_window[mode][if_id][bus_id] +
bus_start_window[mode][if_id][bus_id])
/ 2 - cons_tap;
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_TRACE,
(" bus_id %d Res= %d\n", bus_id,
centralization_result[if_id][bus_id]));
/* copy results to registers */
pup_win_length =
bus_end_window[mode][if_id][bus_id] -
bus_start_window[mode][if_id][bus_id] +
1;
ddr3_tip_bus_read(dev_num, if_id,
ACCESS_TYPE_UNICAST, bus_id,
DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
effective_cs, &reg);
reg = (reg & (~0x1f <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET))))
| pup_win_length <<
((mode == CENTRAL_TX) ?
(RESULT_DB_PHY_REG_TX_OFFSET) :
(RESULT_DB_PHY_REG_RX_OFFSET));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
bus_id, DDR_PHY_DATA,
RESULT_DB_PHY_REG_ADDR +
effective_cs, reg));
/* offset per CS is calculated earlier */
CHECK_STATUS(
ddr3_tip_bus_write(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST,
bus_id,
DDR_PHY_DATA,
reg_phy_off,
centralization_result
[if_id]
[bus_id]));
} else {
is_if_fail = 1;
}
}
if (is_if_fail == 1)
flow_result[if_id] = TEST_FAILED;
}
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
return is_if_fail;
}
/*
* Special RX Flow
*/
int ddr3_tip_special_rx(u32 dev_num)
{
enum hws_training_ip_stat training_result[MAX_INTERFACE_NUM];
u32 if_id, pup_id, pattern_id, bit_id;
u8 cur_start_win[BUS_WIDTH_IN_BITS];
u8 cur_end_win[BUS_WIDTH_IN_BITS];
enum hws_training_result result_type = RESULT_PER_BIT;
enum hws_dir direction;
enum hws_search_dir search_dir_id;
u32 *result[HWS_SEARCH_DIR_LIMIT];
u32 max_win_size;
u8 cur_end_win_min, cur_start_win_max;
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u32 temp = 0;
int pad_num = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
if (ddr3_tip_special_rx_run_once_flag != 0)
return MV_OK;
ddr3_tip_special_rx_run_once_flag = 1;
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
cs_enable_reg_val,
MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, CS_ENABLE_REG,
(1 << 3), (1 << 3)));
}
max_win_size = MAX_WINDOW_SIZE_RX;
direction = OPER_READ;
pattern_id = PATTERN_VREF;
/* start flow */
ddr3_tip_ip_training_wrapper(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, result_type,
HWS_CONTROL_ELEMENT_ADLL,
PARAM_NOT_CARE, direction,
tm->if_act_mask, 0x0,
max_win_size - 1, max_win_size - 1,
pattern_id, EDGE_FPF, CS_SINGLE,
PARAM_NOT_CARE, training_result);
for (if_id = start_if; if_id <= end_if; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup_id = 0;
pup_id <= tm->num_of_bus_per_interface; pup_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup_id);
for (search_dir_id = HWS_LOW2HIGH;
search_dir_id <= HWS_HIGH2LOW;
search_dir_id++) {
CHECK_STATUS(ddr3_tip_read_training_result
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
ALL_BITS_PER_PUP, search_dir_id,
direction, result_type,
TRAINING_LOAD_OPERATION_UNLOAD,
CS_SINGLE, &result[search_dir_id],
1, 0, 0));
DEBUG_CENTRALIZATION_ENGINE(DEBUG_LEVEL_INFO,
("Special: pat %d IF %d pup %d Regs: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
pattern_id, if_id,
pup_id,
result
[search_dir_id][0],
result
[search_dir_id][1],
result
[search_dir_id][2],
result
[search_dir_id][3],
result
[search_dir_id][4],
result
[search_dir_id][5],
result
[search_dir_id][6],
result
[search_dir_id]
[7]));
}
for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS; bit_id++) {
/*
* check if this code is valid for 2 edge,
* probably not :(
*/
cur_start_win[bit_id] =
GET_TAP_RESULT(result[HWS_LOW2HIGH]
[bit_id], EDGE_1);
cur_end_win[bit_id] =
GET_TAP_RESULT(result[HWS_HIGH2LOW]
[bit_id], EDGE_1);
}
if (!((ddr3_tip_is_pup_lock
(result[HWS_LOW2HIGH], result_type)) &&
(ddr3_tip_is_pup_lock
(result[HWS_HIGH2LOW], result_type)))) {
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_ERROR,
("Special: Pup lock fail, pat %d IF %d pup %d\n",
pattern_id, if_id, pup_id));
return MV_FAIL;
}
cur_end_win_min =
ddr3_tip_get_buf_min(cur_end_win);
cur_start_win_max =
ddr3_tip_get_buf_max(cur_start_win);
if (cur_start_win_max <= 1) { /* Align left */
for (bit_id = 0; bit_id < BUS_WIDTH_IN_BITS;
bit_id++) {
pad_num =
dq_map_table[bit_id +
pup_id *
BUS_WIDTH_IN_BITS +
if_id *
BUS_WIDTH_IN_BITS *
tm->
num_of_bus_per_interface];
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
&temp));
temp = (temp + 0xa > 31) ?
(31) : (temp + 0xa);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + pad_num,
temp));
}
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("Special: PBS:: I/F# %d , Bus# %d fix align to the Left\n",
if_id, pup_id));
}
if (cur_end_win_min > 30) { /* Align right */
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 4,
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 4, temp));
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_id,
DDR_PHY_DATA, PBS_RX_PHY_REG + 5,
&temp));
temp += 0xa;
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup_id, DDR_PHY_DATA,
PBS_RX_PHY_REG + 5, temp));
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("Special: PBS:: I/F# %d , Bus# %d fix align to the right\n",
if_id, pup_id));
}
vref_window_size[if_id][pup_id] =
cur_end_win_min -
cur_start_win_max + 1;
DEBUG_CENTRALIZATION_ENGINE(
DEBUG_LEVEL_INFO,
("Special: Winsize I/F# %d , Bus# %d is %d\n",
if_id, pup_id, vref_window_size
[if_id][pup_id]));
} /* pup */
} /* end of interface */
return MV_OK;
}
/*
* Print Centralization Result
*/
int ddr3_tip_print_centralization_result(u32 dev_num)
{
u32 if_id = 0, bus_id = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
dev_num = dev_num;
printf("Centralization Results\n");
printf("I/F0 Result[0 - success 1-fail 2 - state_2 3 - state_3] ...\n");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_id = 0; bus_id < tm->num_of_bus_per_interface;
bus_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_id);
printf("%d ,\n", centralization_state[if_id][bus_id]);
}
}
return MV_OK;
}

View File

@ -0,0 +1,652 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* List of allowed frequencies, listed in the order of enum hws_ddr_freq */
u32 freq_val[DDR_FREQ_LIMIT] = {
0, /*DDR_FREQ_LOW_FREQ */
400, /*DDR_FREQ_400, */
533, /*DDR_FREQ_533, */
666, /*DDR_FREQ_667, */
800, /*DDR_FREQ_800, */
933, /*DDR_FREQ_933, */
1066, /*DDR_FREQ_1066, */
311, /*DDR_FREQ_311, */
333, /*DDR_FREQ_333, */
467, /*DDR_FREQ_467, */
850, /*DDR_FREQ_850, */
600, /*DDR_FREQ_600 */
300, /*DDR_FREQ_300 */
900, /*DDR_FREQ_900 */
360, /*DDR_FREQ_360 */
1000 /*DDR_FREQ_1000 */
};
/* Table for CL values per frequency for each speed bin index */
struct cl_val_per_freq cas_latency_table[] = {
/*
* Each row holds one CL value per frequency, in enum hws_ddr_freq order:
* low freq (100), 400, 533, 667, 800, 933, 1066, 311, 333, 467, 850, 600,
* 300, 900, 360, 1000 MHz
*/
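/*
 * For example, the DDR3-1600K row below reads CL = 8 at 533 MHz (column 2)
 * and CL = 11 at 800 MHz (column 4).
 */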
/* DDR3-800D */
{ {6, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} },
/* DDR3-800E */
{ {6, 6, 0, 0, 0, 0, 0, 6, 6, 0, 0, 0, 6, 0, 6, 0} },
/* DDR3-1066E */
{ {6, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 0, 5, 0, 5, 0} },
/* DDR3-1066F */
{ {6, 6, 7, 0, 0, 0, 0, 6, 6, 7, 0, 0, 6, 0, 6, 0} },
/* DDR3-1066G */
{ {6, 6, 8, 0, 0, 0, 0, 6, 6, 8, 0, 0, 6, 0, 6, 0} },
/* DDR3-1333F* */
{ {6, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1333G */
{ {6, 5, 7, 8, 0, 0, 0, 5, 5, 7, 0, 8, 5, 0, 5, 0} },
/* DDR3-1333H */
{ {6, 6, 8, 9, 0, 0, 0, 6, 6, 8, 0, 9, 6, 0, 6, 0} },
/* DDR3-1333J* */
{ {6, 6, 8, 10, 0, 0, 0, 6, 6, 8, 0, 10, 6, 0, 6, 0} },
/* DDR3-1600G* */
{ {6, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600H */
{ {6, 5, 6, 8, 9, 0, 0, 5, 5, 6, 0, 8, 5, 0, 5, 0} },
/* DDR3-1600J */
{ {6, 5, 7, 9, 10, 0, 0, 5, 5, 7, 0, 9, 5, 0, 5, 0} },
/* DDR3-1600K */
{ {6, 6, 8, 10, 11, 0, 0, 6, 6, 8, 0, 10, 6, 0, 6, 0 } },
/* DDR3-1866J* */
{ {6, 5, 6, 8, 9, 11, 0, 5, 5, 6, 11, 8, 5, 0, 5, 0} },
/* DDR3-1866K */
{ {6, 5, 7, 8, 10, 11, 0, 5, 5, 7, 11, 8, 5, 11, 5, 11} },
/* DDR3-1866L */
{ {6, 6, 7, 9, 11, 12, 0, 6, 6, 7, 12, 9, 6, 12, 6, 12} },
/* DDR3-1866M* */
{ {6, 6, 8, 10, 11, 13, 0, 6, 6, 8, 13, 10, 6, 13, 6, 13} },
/* DDR3-2133K* */
{ {6, 5, 6, 7, 9, 10, 11, 5, 5, 6, 10, 7, 5, 11, 5, 11} },
/* DDR3-2133L */
{ {6, 5, 6, 8, 9, 11, 12, 5, 5, 6, 11, 8, 5, 12, 5, 12} },
/* DDR3-2133M */
{ {6, 5, 7, 9, 10, 12, 13, 5, 5, 7, 12, 9, 5, 13, 5, 13} },
/* DDR3-2133N* */
{ {6, 6, 7, 9, 11, 13, 14, 6, 6, 7, 13, 9, 6, 14, 6, 14} },
/* DDR3-1333H-ext */
{ {6, 6, 7, 9, 0, 0, 0, 6, 6, 7, 0, 9, 6, 0, 6, 0} },
/* DDR3-1600K-ext */
{ {6, 6, 7, 9, 11, 0, 0, 6, 6, 7, 0, 9, 6, 0, 6, 0} },
/* DDR3-1866M-ext */
{ {6, 6, 7, 9, 11, 13, 0, 6, 6, 7, 13, 9, 6, 13, 6, 13} },
};
/* Table for CWL values per speedbin index */
struct cl_val_per_freq cas_write_latency_table[] = {
/*
* Each row holds one CWL value per frequency, in enum hws_ddr_freq order:
* low freq (100), 400, 533, 667, 800, 933, 1066, 311, 333, 467, 850, 600,
* 300, 900, 360, 1000 MHz
*/
/* DDR3-800D */
{ {5, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} },
/* DDR3-800E */
{ {5, 5, 0, 0, 0, 0, 0, 5, 5, 0, 0, 0, 5, 0, 5, 0} },
/* DDR3-1066E */
{ {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1066F */
{ {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1066G */
{ {5, 5, 6, 0, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1333F* */
{ {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1333G */
{ {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1333H */
{ {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1333J* */
{ {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600G* */
{ {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600H */
{ {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600J */
{ {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600K */
{ {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1866J* */
{ {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 0, 5, 0} },
/* DDR3-1866K */
{ {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 0, 5, 0} },
/* DDR3-1866L */
{ {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} },
/* DDR3-1866M* */
{ {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} },
/* DDR3-2133K* */
{ {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} },
/* DDR3-2133L */
{ {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} },
/* DDR3-2133M */
{ {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} },
/* DDR3-2133N* */
{ {5, 5, 6, 7, 8, 9, 10, 5, 5, 6, 9, 7, 5, 9, 5, 10} },
/* DDR3-1333H-ext */
{ {5, 5, 6, 7, 0, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1600K-ext */
{ {5, 5, 6, 7, 8, 0, 0, 5, 5, 6, 0, 7, 5, 0, 5, 0} },
/* DDR3-1866M-ext */
{ {5, 5, 6, 7, 8, 9, 0, 5, 5, 6, 9, 7, 5, 9, 5, 9} },
};
u8 twr_mask_table[] = {
10,
10,
10,
10,
10,
1, /*5 */
2, /*6 */
3, /*7 */
10,
10,
5, /*10 */
10,
6, /*12 */
10,
7, /*14 */
10,
0 /*16 */
};
u8 cl_mask_table[] = {
0,
0,
0,
0,
0,
0x2,
0x4,
0x6,
0x8,
0xa,
0xc,
0xe,
0x1,
0x3,
0x5,
0x5
};
u8 cwl_mask_table[] = {
0,
0,
0,
0,
0,
0,
0x1,
0x2,
0x3,
0x4,
0x5,
0x6,
0x7,
0x8,
0x9,
0x9
};
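/*
 * Note: the three tables above appear to map a tWR / CL / CWL value in
 * clocks (used as the array index) to the bit encoding programmed into the
 * DDR3 mode registers; the value 10 in twr_mask_table presumably marks
 * unsupported tWR settings.
 */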
/* RFC values (in ns) */
u16 rfc_table[] = {
90, /* 512M */
110, /* 1G */
160, /* 2G */
260, /* 4G */
350 /* 8G */
};
u32 speed_bin_table_t_rc[] = {
50000,
52500,
48750,
50625,
52500,
46500,
48000,
49500,
51000,
45000,
46250,
47500,
48750,
44700,
45770,
46840,
47910,
43285,
44220,
45155,
46900
};
u32 speed_bin_table_t_rcd_t_rp[] = {
12500,
15000,
11250,
13125,
15000,
10500,
12000,
13500,
15000,
10000,
11250,
12500,
13750,
10700,
11770,
12840,
13910,
10285,
11022,
12155,
13090,
};
enum {
PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR = 0,
PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM
};
static u8 pattern_killer_pattern_table_map[KILLER_PATTERN_LENGTH * 2][2] = {
/*Aggressor / Victim */
{1, 0},
{0, 0},
{1, 0},
{1, 1},
{0, 1},
{0, 1},
{1, 0},
{0, 1},
{1, 0},
{0, 1},
{1, 0},
{1, 0},
{0, 1},
{1, 0},
{0, 1},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{1, 0},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{0, 0},
{0, 0},
{0, 0},
{0, 1},
{0, 1},
{1, 1},
{0, 0},
{0, 0},
{1, 1},
{1, 1},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{1, 1},
{0, 0},
{0, 0},
{1, 1},
{0, 0},
{1, 1},
{0, 1},
{0, 0},
{0, 1},
{0, 1},
{0, 0},
{1, 1},
{1, 1},
{1, 0},
{1, 0},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
{1, 1},
{1, 1}
};
static u8 pattern_vref_pattern_table_map[] = {
/* 1 means 0xffffffff, 0 is 0x0 */
0xb8,
0x52,
0x55,
0x8a,
0x33,
0xa6,
0x6d,
0xfe
};
/* Return speed Bin value for selected index and t* element */
u32 speed_bin_table(u8 index, enum speed_bin_table_elements element)
{
u32 result = 0;
switch (element) {
case SPEED_BIN_TRCD:
case SPEED_BIN_TRP:
result = speed_bin_table_t_rcd_t_rp[index];
break;
case SPEED_BIN_TRAS:
if (index < 6)
result = 37500;
else if (index < 10)
result = 36000;
else if (index < 14)
result = 35000;
else if (index < 18)
result = 34000;
else
result = 33000;
break;
case SPEED_BIN_TRC:
result = speed_bin_table_t_rc[index];
break;
case SPEED_BIN_TRRD1K:
if (index < 3)
result = 10000;
else if (index < 6)
result = 7005;
else if (index < 14)
result = 6000;
else
result = 5000;
break;
case SPEED_BIN_TRRD2K:
if (index < 6)
result = 10000;
else if (index < 14)
result = 7005;
else
result = 6000;
break;
case SPEED_BIN_TPD:
if (index < 3)
result = 7500;
else if (index < 10)
result = 5625;
else
result = 5000;
break;
case SPEED_BIN_TFAW1K:
if (index < 3)
result = 40000;
else if (index < 6)
result = 37500;
else if (index < 14)
result = 30000;
else if (index < 18)
result = 27000;
else
result = 25000;
break;
case SPEED_BIN_TFAW2K:
if (index < 6)
result = 50000;
else if (index < 10)
result = 45000;
else if (index < 14)
result = 40000;
else
result = 35000;
break;
case SPEED_BIN_TWTR:
result = 7500;
break;
case SPEED_BIN_TRTP:
result = 7500;
break;
case SPEED_BIN_TWR:
result = 15000;
break;
case SPEED_BIN_TMOD:
result = 15000;
break;
default:
break;
}
return result;
}
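/*
 * Build one byte in which bit 'dqs' follows the aggressor column of
 * pattern_killer_pattern_table_map[index] and all other bits follow the
 * victim column, then replicate that byte across the 32-bit word. The
 * 16-bit variant below does the same with two consecutive map entries.
 */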
static inline u32 pattern_table_get_killer_word(u8 dqs, u8 index)
{
u8 i, byte = 0;
u8 role;
for (i = 0; i < 8; i++) {
role = (i == dqs) ?
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte |= pattern_killer_pattern_table_map[index][role] << i;
}
return byte | (byte << 8) | (byte << 16) | (byte << 24);
}
static inline u32 pattern_table_get_killer_word16(u8 dqs, u8 index)
{
u8 i, byte0 = 0, byte1 = 0;
u8 role;
for (i = 0; i < 8; i++) {
role = (i == dqs) ?
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte0 |= pattern_killer_pattern_table_map[index * 2][role] << i;
}
for (i = 0; i < 8; i++) {
role = (i == dqs) ?
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_AGGRESSOR) :
(PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM);
byte1 |= pattern_killer_pattern_table_map
[index * 2 + 1][role] << i;
}
return byte0 | (byte0 << 8) | (byte1 << 16) | (byte1 << 24);
}
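/*
 * Return an all-zeros or all-ones word, toggling every (sso + 1) indices;
 * used for the PATTERN_FULL_SSO* patterns.
 */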
static inline u32 pattern_table_get_sso_word(u8 sso, u8 index)
{
u8 step = sso + 1;
if (0 == ((index / step) & 1))
return 0x0;
else
return 0xffffffff;
}
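/*
 * Expand bit (index % 8) of pattern_vref_pattern_table_map[index / 8]
 * into an all-zeros or all-ones 32-bit word.
 */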
static inline u32 pattern_table_get_vref_word(u8 index)
{
if (0 == ((pattern_vref_pattern_table_map[index / 8] >>
(index % 8)) & 1))
return 0x0;
else
return 0xffffffff;
}
static inline u32 pattern_table_get_vref_word16(u8 index)
{
if (0 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] &&
0 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1])
return 0x00000000;
else if (1 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] &&
0 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1])
return 0xffff0000;
else if (0 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2] &&
1 == pattern_killer_pattern_table_map
[PATTERN_KILLER_PATTERN_TABLE_MAP_ROLE_VICTIM][index * 2 + 1])
return 0x0000ffff;
else
return 0xffffffff;
}
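/*
 * The expression below yields a byte that fills with ones from the LSB as
 * index grows (index 0-2 -> 0x00, 3-5 -> 0x01, 6-8 -> 0x03, 9-11 -> 0x07,
 * ..., 24 and above -> 0xff); that byte is replicated into all four bytes
 * of the returned word.
 */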
static inline u32 pattern_table_get_static_pbs_word(u8 index)
{
u16 temp;
temp = ((0x00ff << (index / 3)) & 0xff00) >> 8;
return temp | (temp << 8) | (temp << 16) | (temp << 24);
}
inline u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index)
{
u32 pattern;
struct hws_topology_map *tm = ddr3_get_topology_map();
if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 0) {
/* 32bit patterns */
switch (type) {
case PATTERN_PBS1:
case PATTERN_PBS2:
if (index == 0 || index == 2 || index == 5 ||
index == 7)
pattern = PATTERN_55;
else
pattern = PATTERN_AA;
break;
case PATTERN_PBS3:
if (0 == (index & 1))
pattern = PATTERN_55;
else
pattern = PATTERN_AA;
break;
case PATTERN_RL:
if (index < 6)
pattern = PATTERN_00;
else
pattern = PATTERN_80;
break;
case PATTERN_STATIC_PBS:
pattern = pattern_table_get_static_pbs_word(index);
break;
case PATTERN_KILLER_DQ0:
case PATTERN_KILLER_DQ1:
case PATTERN_KILLER_DQ2:
case PATTERN_KILLER_DQ3:
case PATTERN_KILLER_DQ4:
case PATTERN_KILLER_DQ5:
case PATTERN_KILLER_DQ6:
case PATTERN_KILLER_DQ7:
pattern = pattern_table_get_killer_word(
(u8)(type - PATTERN_KILLER_DQ0), index);
break;
case PATTERN_RL2:
if (index < 6)
pattern = PATTERN_00;
else
pattern = PATTERN_01;
break;
case PATTERN_TEST:
if (index > 1 && index < 6)
pattern = PATTERN_20;
else
pattern = PATTERN_00;
break;
case PATTERN_FULL_SSO0:
case PATTERN_FULL_SSO1:
case PATTERN_FULL_SSO2:
case PATTERN_FULL_SSO3:
pattern = pattern_table_get_sso_word(
(u8)(type - PATTERN_FULL_SSO0), index);
break;
case PATTERN_VREF:
pattern = pattern_table_get_vref_word(index);
break;
default:
pattern = 0;
break;
}
} else {
/* 16bit patterns */
switch (type) {
case PATTERN_PBS1:
case PATTERN_PBS2:
case PATTERN_PBS3:
pattern = PATTERN_55AA;
break;
case PATTERN_RL:
if (index < 3)
pattern = PATTERN_00;
else
pattern = PATTERN_80;
break;
case PATTERN_STATIC_PBS:
pattern = PATTERN_00FF;
break;
case PATTERN_KILLER_DQ0:
case PATTERN_KILLER_DQ1:
case PATTERN_KILLER_DQ2:
case PATTERN_KILLER_DQ3:
case PATTERN_KILLER_DQ4:
case PATTERN_KILLER_DQ5:
case PATTERN_KILLER_DQ6:
case PATTERN_KILLER_DQ7:
pattern = pattern_table_get_killer_word16(
(u8)(type - PATTERN_KILLER_DQ0), index);
break;
case PATTERN_RL2:
if (index < 3)
pattern = PATTERN_00;
else
pattern = PATTERN_01;
break;
case PATTERN_TEST:
pattern = PATTERN_0080;
break;
case PATTERN_FULL_SSO0:
pattern = 0x0000ffff;
break;
case PATTERN_FULL_SSO1:
case PATTERN_FULL_SSO2:
case PATTERN_FULL_SSO3:
pattern = pattern_table_get_sso_word(
(u8)(type - PATTERN_FULL_SSO1), index);
break;
case PATTERN_VREF:
pattern = pattern_table_get_vref_word16(index);
break;
default:
pattern = 0;
break;
}
}
return pattern;
}

View File

@ -0,0 +1,686 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define VREF_INITIAL_STEP 3
#define VREF_SECOND_STEP 1
#define VREF_MAX_INDEX 7
#define MAX_VALUE (1024 - 1)
#define MIN_VALUE (-MAX_VALUE)
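/*
 * Extract the 4-bit read-sample delay of chip-select 'cs' from the packed
 * READ_DATA_SAMPLE_DELAY register value; rd_sample_mask[] below holds the
 * per-CS bit offsets.
 */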
#define GET_RD_SAMPLE_DELAY(data, cs) ((data >> rd_sample_mask[cs]) & 0xf)
u32 ck_delay = (u32)-1, ck_delay_16 = (u32)-1;
u32 ca_delay;
int ddr3_tip_centr_skip_min_win_check = 0;
u8 current_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 last_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 current_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u16 last_valid_window[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 lim_vref[MAX_BUS_NUM][MAX_INTERFACE_NUM];
u8 interface_state[MAX_INTERFACE_NUM];
u8 vref_window_size[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 vref_window_size_th = 12;
static u8 pup_st[MAX_BUS_NUM][MAX_INTERFACE_NUM];
static u32 rd_sample_mask[] = {
0,
8,
16,
24
};
#define VREF_STEP_1 0
#define VREF_STEP_2 1
#define VREF_CONVERGE 2
/*
* ODT additional timing: derive the min/max read-sample delay over all
* chip-selects and program the ODT switching window in ODT_TIMING_LOW
* accordingly
*/
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id)
{
u32 cs_num = 0, max_read_sample = 0, min_read_sample = 0;
u32 data_read[MAX_INTERFACE_NUM] = { 0 };
u32 read_sample[MAX_CS_NUM];
u32 val;
u32 pup_index;
int max_phase = MIN_VALUE, current_phase;
enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
struct hws_topology_map *tm = ddr3_get_topology_map();
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
DUNIT_ODT_CONTROL_REG,
0 << 8, 0x3 << 8));
CHECK_STATUS(ddr3_tip_if_read(dev_num, access_type, if_id,
READ_DATA_SAMPLE_DELAY,
data_read, MASK_ALL_BITS));
val = data_read[if_id];
for (cs_num = 0; cs_num < MAX_CS_NUM; cs_num++) {
read_sample[cs_num] = GET_RD_SAMPLE_DELAY(val, cs_num);
/* find maximum of read_samples */
if (read_sample[cs_num] >= max_read_sample) {
if (read_sample[cs_num] == max_read_sample)
max_phase = MIN_VALUE;
else
max_read_sample = read_sample[cs_num];
for (pup_index = 0;
pup_index < tm->num_of_bus_per_interface;
pup_index++) {
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup_index,
DDR_PHY_DATA,
RL_PHY_REG + CS_REG_VALUE(cs_num),
&val));
current_phase = ((int)val & 0xe0) >> 6;
if (current_phase >= max_phase)
max_phase = current_phase;
}
}
/* find minimum */
if (read_sample[cs_num] < min_read_sample)
min_read_sample = read_sample[cs_num];
}
min_read_sample = min_read_sample - 1;
max_read_sample = max_read_sample + 4 + (max_phase + 1) / 2 + 1;
if (min_read_sample >= 0xf)
min_read_sample = 0xf;
if (max_read_sample >= 0x1f)
max_read_sample = 0x1f;
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
((min_read_sample - 1) << 12),
0xf << 12));
CHECK_STATUS(ddr3_tip_if_write(dev_num, access_type, if_id,
ODT_TIMING_LOW,
(max_read_sample << 16),
0x1f << 16));
return MV_OK;
}
int get_valid_win_rx(u32 dev_num, u32 if_id, u8 res[4])
{
u32 reg_pup = RESULT_DB_PHY_REG_ADDR;
u32 reg_data;
u32 cs_num;
int i;
cs_num = 0;
/* TBD */
reg_pup += cs_num;
for (i = 0; i < 4; i++) {
CHECK_STATUS(ddr3_tip_bus_read(dev_num, if_id,
ACCESS_TYPE_UNICAST, i,
DDR_PHY_DATA, reg_pup,
&reg_data));
res[i] = (reg_data >> RESULT_DB_PHY_REG_RX_OFFSET) & 0x1f;
}
return 0;
}
/*
* This algorithm deals with the vertical optimum from Voltage point of view
* of the sample signal.
* Voltage sample point can improve the Eye / window size of the bit and the
* pup.
* The problem is that it is tune for all DQ the same so there isn't any
* PBS like code.
* It is more like centralization.
* But because we don't have The training SM support we do it a bit more
* smart search to save time.
*/
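/*
 * Rough flow of the search below: per pup, sweep the Vref index up in
 * steps of VREF_INITIAL_STEP until VREF_MAX_INDEX (VREF_STEP_1), then
 * sweep back in steps of VREF_SECOND_STEP (VREF_STEP_2), keeping the Vref
 * that gives the widest RX valid window as measured by the repeated
 * centralization runs.
 */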
int ddr3_tip_vref(u32 dev_num)
{
/*
* The Vref register has a non-linear order. Need to check what it will be
* in future projects.
*/
u32 vref_map[8] = {
1, 2, 3, 4, 5, 6, 7, 0
};
/* State and parameter definitions */
u32 initial_step = VREF_INITIAL_STEP;
/* needs to be assigned with minus ????? */
u32 second_step = VREF_SECOND_STEP;
u32 algo_run_flag = 0, currrent_vref = 0;
u32 while_count = 0;
u32 pup = 0, if_id = 0, num_pup = 0, rep = 0;
u32 val = 0;
u32 reg_addr = 0xa8;
u32 copy_start_pattern, copy_end_pattern;
enum hws_result *flow_result = ddr3_tip_get_result_ptr(training_stage);
u8 res[4];
struct hws_topology_map *tm = ddr3_get_topology_map();
CHECK_STATUS(ddr3_tip_special_rx(dev_num));
/* save start/end pattern */
copy_start_pattern = start_pattern;
copy_end_pattern = end_pattern;
/* set vref as centralization pattern */
start_pattern = PATTERN_VREF;
end_pattern = PATTERN_VREF;
/* init params */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
current_vref[pup][if_id] = 0;
last_vref[pup][if_id] = 0;
lim_vref[pup][if_id] = 0;
current_valid_window[pup][if_id] = 0;
last_valid_window[pup][if_id] = 0;
if (vref_window_size[if_id][pup] >
vref_window_size_th) {
pup_st[pup][if_id] = VREF_CONVERGE;
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_INFO,
("VREF config, IF[ %d ]pup[ %d ] - Vref tune not requered (%d)\n",
if_id, pup, __LINE__));
} else {
pup_st[pup][if_id] = VREF_STEP_1;
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr, &val));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST,
pup, DDR_PHY_DATA, reg_addr,
(val & (~0xf)) | vref_map[0]));
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_INFO,
("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
if_id, pup,
(val & (~0xf)) | vref_map[0],
__LINE__));
}
}
interface_state[if_id] = 0;
}
/* TODO: Set number of active interfaces */
num_pup = tm->num_of_bus_per_interface * MAX_INTERFACE_NUM;
while ((algo_run_flag <= num_pup) & (while_count < 10)) {
while_count++;
for (rep = 1; rep < 4; rep++) {
ddr3_tip_centr_skip_min_win_check = 1;
ddr3_tip_centralization_rx(dev_num);
ddr3_tip_centr_skip_min_win_check = 0;
/* Read Valid window results only for non converge pups */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
if (interface_state[if_id] != 4) {
get_valid_win_rx(dev_num, if_id, res);
for (pup = 0;
pup < tm->num_of_bus_per_interface;
pup++) {
VALIDATE_ACTIVE
(tm->bus_act_mask, pup);
if (pup_st[pup]
[if_id] ==
VREF_CONVERGE)
continue;
current_valid_window[pup]
[if_id] =
(current_valid_window[pup]
[if_id] * (rep - 1) +
1000 * res[pup]) / rep;
}
}
}
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_TRACE,
("current_valid_window: IF[ %d ] - ", if_id));
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("%d ",
current_valid_window
[pup][if_id]));
}
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE, ("\n"));
}
/* Compare results and respond as function of state */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] STATE #%d (%d)\n",
if_id, pup,
pup_st[pup]
[if_id], __LINE__));
if (pup_st[pup][if_id] == VREF_CONVERGE)
continue;
DEBUG_TRAINING_HW_ALG(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] CHECK progress - Current %d Last %d, limit VREF %d (%d)\n",
if_id, pup,
current_valid_window[pup]
[if_id],
last_valid_window[pup]
[if_id], lim_vref[pup]
[if_id], __LINE__));
/*
* The -1 is for solution resolution +/- 1 tap
* of ADLL
*/
if (current_valid_window[pup][if_id] + 200 >=
(last_valid_window[pup][if_id])) {
if (pup_st[pup][if_id] == VREF_STEP_1) {
/*
* We stay in the same state and
* step just update the window
* size (take the max) and Vref
*/
if (current_vref[pup]
[if_id] == VREF_MAX_INDEX) {
/*
* If we step to the end
* and didn't converge
* to some particular
* better Vref value
* define the pup as
* converge and step
* back to nominal
* Vref.
*/
pup_st[pup]
[if_id] =
VREF_CONVERGE;
algo_run_flag++;
interface_state
[if_id]++;
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
if_id, pup,
current_vref[pup]
[if_id],
__LINE__));
} else {
/* continue to update the Vref index */
current_vref[pup]
[if_id] =
((current_vref[pup]
[if_id] +
initial_step) >
VREF_MAX_INDEX) ?
VREF_MAX_INDEX
: (current_vref[pup]
[if_id] +
initial_step);
if (current_vref[pup]
[if_id] ==
VREF_MAX_INDEX) {
pup_st[pup]
[if_id]
=
VREF_STEP_2;
}
lim_vref[pup]
[if_id] =
last_vref[pup]
[if_id] =
current_vref[pup]
[if_id];
}
last_valid_window[pup]
[if_id] =
GET_MAX(current_valid_window
[pup][if_id],
last_valid_window
[pup]
[if_id]);
/* update the Vref for next stage */
currrent_vref =
current_vref[pup]
[if_id];
CHECK_STATUS
(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
&val));
CHECK_STATUS
(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(val & (~0xf)) |
vref_map[currrent_vref]));
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
if_id, pup,
(val & (~0xf)) |
vref_map[currrent_vref],
__LINE__));
} else if (pup_st[pup][if_id]
== VREF_STEP_2) {
/*
* We keep on search back with
* the same step size.
*/
last_valid_window[pup]
[if_id] =
GET_MAX(current_valid_window
[pup][if_id],
last_valid_window
[pup]
[if_id]);
last_vref[pup][if_id] =
current_vref[pup]
[if_id];
/* we finish all search space */
if ((current_vref[pup]
[if_id] - second_step) == lim_vref[pup][if_id]) {
/*
* If we step to the end
* and didn't converge
* to some particular
* better Vref value
* define the pup as
* converge and step
* back to nominal
* Vref.
*/
pup_st[pup]
[if_id] =
VREF_CONVERGE;
algo_run_flag++;
interface_state
[if_id]++;
current_vref[pup]
[if_id] =
(current_vref[pup]
[if_id] -
second_step);
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
if_id, pup,
current_vref[pup]
[if_id],
__LINE__));
} else
/* we finish all search space */
if (current_vref[pup]
[if_id] ==
lim_vref[pup]
[if_id]) {
/*
* If we step to the end
* and didn't converge
* to some particular
* better Vref value
* define the pup as
* converge and step
* back to nominal
* Vref.
*/
pup_st[pup]
[if_id] =
VREF_CONVERGE;
algo_run_flag++;
interface_state
[if_id]++;
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
if_id, pup,
current_vref[pup]
[if_id],
__LINE__));
} else {
current_vref[pup]
[if_id] =
current_vref[pup]
[if_id] -
second_step;
}
/* Update the Vref for next stage */
currrent_vref =
current_vref[pup]
[if_id];
CHECK_STATUS
(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
&val));
CHECK_STATUS
(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(val & (~0xf)) |
vref_map[currrent_vref]));
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
if_id, pup,
(val & (~0xf)) |
vref_map[currrent_vref],
__LINE__));
}
} else {
/* we change state and change step */
if (pup_st[pup][if_id] == VREF_STEP_1) {
pup_st[pup][if_id] =
VREF_STEP_2;
lim_vref[pup][if_id] =
current_vref[pup]
[if_id] - initial_step;
last_valid_window[pup]
[if_id] =
current_valid_window[pup]
[if_id];
last_vref[pup][if_id] =
current_vref[pup]
[if_id];
current_vref[pup][if_id] =
last_vref[pup][if_id] -
second_step;
/* Update the Vref for next stage */
CHECK_STATUS
(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
&val));
CHECK_STATUS
(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(val & (~0xf)) |
vref_map[current_vref[pup]
[if_id]]));
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
if_id, pup,
(val & (~0xf)) |
vref_map[current_vref[pup]
[if_id]],
__LINE__));
} else if (pup_st[pup][if_id] == VREF_STEP_2) {
/*
* The last search was the max
* point set value and exit
*/
CHECK_STATUS
(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
&val));
CHECK_STATUS
(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(val & (~0xf)) |
vref_map[last_vref[pup]
[if_id]]));
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("VREF config, IF[ %d ]pup[ %d ] - Vref = %X (%d)\n",
if_id, pup,
(val & (~0xf)) |
vref_map[last_vref[pup]
[if_id]],
__LINE__));
pup_st[pup][if_id] =
VREF_CONVERGE;
algo_run_flag++;
interface_state[if_id]++;
DEBUG_TRAINING_HW_ALG
(DEBUG_LEVEL_TRACE,
("I/F[ %d ], pup[ %d ] VREF_CONVERGE - Vref = %X (%d)\n",
if_id, pup,
current_vref[pup]
[if_id], __LINE__));
}
}
}
}
}
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0;
pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr, &val));
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_INFO,
("FINAL values: I/F[ %d ], pup[ %d ] - Vref = %X (%d)\n",
if_id, pup, val, __LINE__));
}
}
flow_result[if_id] = TEST_SUCCESS;
/* restore start/end pattern */
start_pattern = copy_start_pattern;
end_pattern = copy_end_pattern;
return 0;
}
/*
* CK/CA Delay
*/
int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap)
{
u32 if_id = 0;
u32 ck_num_adll_tap = 0, ca_num_adll_tap = 0, data = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
/*
* ck_delay_table delays the clock signal only
* (to overcome timing issues between CK & command/address signals).
*/
/*
* ca_delay delays the entire command & address signals
* (including the clock signal, to overcome DGL error on the clock versus
* the DQS).
*/
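/*
* The delays are converted from ps to ADLL taps and packed into PHY
* control register 0x0: CK taps in bits [5:0], CA taps in bits [15:10].
*/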
/* Calc ADLL Tap */
if ((ck_delay == -1) || (ck_delay_16 == -1)) {
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_ERROR,
("ERROR: One of ck_delay values not initialized!!!\n"));
}
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* Calc delay ps in ADLL tap */
if (tm->interface_params[if_id].bus_width ==
BUS_WIDTH_16)
ck_num_adll_tap = ck_delay_16 / adll_tap;
else
ck_num_adll_tap = ck_delay / adll_tap;
ca_num_adll_tap = ca_delay / adll_tap;
data = (ck_num_adll_tap & 0x3f) +
((ca_num_adll_tap & 0x3f) << 10);
/*
* Set the ADLL number to the CK ADLL for Interfaces for
* all Pup
*/
DEBUG_TRAINING_HW_ALG(
DEBUG_LEVEL_TRACE,
("ck_num_adll_tap %d ca_num_adll_tap %d adll_tap %d\n",
ck_num_adll_tap, ca_num_adll_tap, adll_tap));
CHECK_STATUS(ddr3_tip_bus_write(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, DDR_PHY_CONTROL,
0x0, data));
}
return MV_OK;
}

View File

@ -0,0 +1,14 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_HW_ALGO_H_
#define _DDR3_TRAINING_HW_ALGO_H_
int ddr3_tip_vref(u32 dev_num);
int ddr3_tip_write_additional_odt_setting(u32 dev_num, u32 if_id);
int ddr3_tip_cmd_addr_init_delay(u32 dev_num, u32 adll_tap);
#endif /* _DDR3_TRAINING_HW_ALGO_H_ */

View File

@ -0,0 +1,180 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_H_
#define _DDR3_TRAINING_IP_H_
#include "ddr3_training_ip_def.h"
#include "ddr_topology_def.h"
#include "ddr_training_ip_db.h"
#define DDR3_TIP_VERSION_STRING "DDR3 Training Sequence - Ver TIP-1.29."
#define MAX_CS_NUM 4
#define MAX_TOTAL_BUS_NUM (MAX_INTERFACE_NUM * MAX_BUS_NUM)
#define MAX_DQ_NUM 40
#define GET_MIN(arg1, arg2) ((arg1) < (arg2)) ? (arg1) : (arg2)
#define GET_MAX(arg1, arg2) ((arg1) < (arg2)) ? (arg2) : (arg1)
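/* note: GET_MIN/GET_MAX evaluate their arguments more than once */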
#define INIT_CONTROLLER_MASK_BIT 0x00000001
#define STATIC_LEVELING_MASK_BIT 0x00000002
#define SET_LOW_FREQ_MASK_BIT 0x00000004
#define LOAD_PATTERN_MASK_BIT 0x00000008
#define SET_MEDIUM_FREQ_MASK_BIT 0x00000010
#define WRITE_LEVELING_MASK_BIT 0x00000020
#define LOAD_PATTERN_2_MASK_BIT 0x00000040
#define READ_LEVELING_MASK_BIT 0x00000080
#define SW_READ_LEVELING_MASK_BIT 0x00000100
#define WRITE_LEVELING_SUPP_MASK_BIT 0x00000200
#define PBS_RX_MASK_BIT 0x00000400
#define PBS_TX_MASK_BIT 0x00000800
#define SET_TARGET_FREQ_MASK_BIT 0x00001000
#define ADJUST_DQS_MASK_BIT 0x00002000
#define WRITE_LEVELING_TF_MASK_BIT 0x00004000
#define LOAD_PATTERN_HIGH_MASK_BIT 0x00008000
#define READ_LEVELING_TF_MASK_BIT 0x00010000
#define WRITE_LEVELING_SUPP_TF_MASK_BIT 0x00020000
#define DM_PBS_TX_MASK_BIT 0x00040000
#define CENTRALIZATION_RX_MASK_BIT 0x00100000
#define CENTRALIZATION_TX_MASK_BIT 0x00200000
#define TX_EMPHASIS_MASK_BIT 0x00400000
#define PER_BIT_READ_LEVELING_TF_MASK_BIT 0x00800000
#define VREF_CALIBRATION_MASK_BIT 0x01000000
enum hws_result {
TEST_FAILED = 0,
TEST_SUCCESS = 1,
NO_TEST_DONE = 2
};
enum hws_training_result {
RESULT_PER_BIT,
RESULT_PER_BYTE
};
enum auto_tune_stage {
INIT_CONTROLLER,
STATIC_LEVELING,
SET_LOW_FREQ,
LOAD_PATTERN,
SET_MEDIUM_FREQ,
WRITE_LEVELING,
LOAD_PATTERN_2,
READ_LEVELING,
WRITE_LEVELING_SUPP,
PBS_RX,
PBS_TX,
SET_TARGET_FREQ,
ADJUST_DQS,
WRITE_LEVELING_TF,
READ_LEVELING_TF,
WRITE_LEVELING_SUPP_TF,
DM_PBS_TX,
VREF_CALIBRATION,
CENTRALIZATION_RX,
CENTRALIZATION_TX,
TX_EMPHASIS,
LOAD_PATTERN_HIGH,
PER_BIT_READ_LEVELING_TF,
MAX_STAGE_LIMIT
};
enum hws_access_type {
ACCESS_TYPE_UNICAST = 0,
ACCESS_TYPE_MULTICAST = 1
};
enum hws_algo_type {
ALGO_TYPE_DYNAMIC,
ALGO_TYPE_STATIC
};
struct init_cntr_param {
int is_ctrl64_bit;
int do_mrs_phy;
int init_phy;
int msys_init;
};
struct pattern_info {
u8 num_of_phases_tx;
u8 tx_burst_size;
u8 delay_between_bursts;
u8 num_of_phases_rx;
u32 start_addr;
u8 pattern_len;
};
/* CL value for each frequency */
struct cl_val_per_freq {
u8 cl_val[DDR_FREQ_LIMIT];
};
struct cs_element {
u8 cs_num;
u8 num_of_cs;
};
struct mode_info {
/* 32 bits representing MRS bits */
u32 reg_mr0[MAX_INTERFACE_NUM];
u32 reg_mr1[MAX_INTERFACE_NUM];
u32 reg_mr2[MAX_INTERFACE_NUM];
u32 reg_m_r3[MAX_INTERFACE_NUM];
/*
* Each element in array represent read_data_sample register delay for
* a specific interface.
* Each register, 4 bits[0+CS*8 to 4+CS*8] represent Number of DDR
* cycles from read command until data is ready to be fetched from
* the PHY, when accessing CS.
*/
u32 read_data_sample[MAX_INTERFACE_NUM];
/*
* Each element in array represent read_data_sample register delay for
* a specific interface.
* Each register, 4 bits[0+CS*8 to 4+CS*8] represent the total delay
* from read command until opening the read mask, when accessing CS.
* This field defines the delay in DDR cycles granularity.
*/
u32 read_data_ready[MAX_INTERFACE_NUM];
};
struct hws_tip_freq_config_info {
u8 is_supported;
u8 bw_per_freq;
u8 rate_per_freq;
};
struct hws_cs_config_info {
u32 cs_reg_value;
u32 cs_cbe_value;
};
struct dfx_access {
u8 pipe;
u8 client;
};
struct hws_xsb_info {
struct dfx_access *dfx_table;
};
int ddr3_tip_register_dq_table(u32 dev_num, u32 *table);
int hws_ddr3_tip_select_ddr_controller(u32 dev_num, int enable);
int hws_ddr3_tip_init_controller(u32 dev_num,
struct init_cntr_param *init_cntr_prm);
int hws_ddr3_tip_load_topology_map(u32 dev_num,
struct hws_topology_map *topology);
int hws_ddr3_tip_run_alg(u32 dev_num, enum hws_algo_type algo_type);
int hws_ddr3_tip_mode_read(u32 dev_num, struct mode_info *mode_info);
int hws_ddr3_tip_read_training_result(u32 dev_num,
enum hws_result result[MAX_STAGE_LIMIT][MAX_INTERFACE_NUM]);
int ddr3_tip_is_pup_lock(u32 *pup_buf, enum hws_training_result read_mode);
u8 ddr3_tip_get_buf_min(u8 *buf_ptr);
u8 ddr3_tip_get_buf_max(u8 *buf_ptr);
#endif /* _DDR3_TRAINING_IP_H_ */

View File

@ -0,0 +1,54 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_BIST_H_
#define _DDR3_TRAINING_IP_BIST_H_
#include "ddr3_training_ip.h"
enum hws_bist_operation {
BIST_STOP = 0,
BIST_START = 1
};
enum hws_stress_jump {
STRESS_NONE = 0,
STRESS_ENABLE = 1
};
enum hws_pattern_duration {
DURATION_SINGLE = 0,
DURATION_STOP_AT_FAIL = 1,
DURATION_ADDRESS = 2,
DURATION_CONT = 4
};
struct bist_result {
u32 bist_error_cnt;
u32 bist_fail_low;
u32 bist_fail_high;
u32 bist_last_fail_addr;
};
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
struct bist_result *pst_bist_result);
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
enum hws_access_type access_type,
u32 if_num, enum hws_dir direction,
enum hws_stress_jump addr_stress_jump,
enum hws_pattern_duration duration,
enum hws_bist_operation oper_type,
u32 offset, u32 cs_num, u32 pattern_addr_length);
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
u32 cs_num);
int ddr3_tip_run_sweep_test(int dev_num, u32 repeat_num, u32 direction,
u32 mode);
int ddr3_tip_print_regs(u32 dev_num);
int ddr3_tip_reg_dump(u32 dev_num);
int run_xsb_test(u32 dev_num, u32 mem_addr, u32 write_type, u32 read_type,
u32 burst_length);
#endif /* _DDR3_TRAINING_IP_BIST_H_ */

View File

@ -0,0 +1,15 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_CENTRALIZATION_H
#define _DDR3_TRAINING_IP_CENTRALIZATION_H
int ddr3_tip_centralization_tx(u32 dev_num);
int ddr3_tip_centralization_rx(u32 dev_num);
int ddr3_tip_print_centralization_result(u32 dev_num);
int ddr3_tip_special_rx(u32 dev_num);
#endif /* _DDR3_TRAINING_IP_CENTRALIZATION_H */

View File

@ -0,0 +1,34 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_DB_H_
#define _DDR3_TRAINING_IP_DB_H_
enum hws_pattern {
PATTERN_PBS1,
PATTERN_PBS2,
PATTERN_RL,
PATTERN_STATIC_PBS,
PATTERN_KILLER_DQ0,
PATTERN_KILLER_DQ1,
PATTERN_KILLER_DQ2,
PATTERN_KILLER_DQ3,
PATTERN_KILLER_DQ4,
PATTERN_KILLER_DQ5,
PATTERN_KILLER_DQ6,
PATTERN_KILLER_DQ7,
PATTERN_PBS3,
PATTERN_RL2,
PATTERN_TEST,
PATTERN_FULL_SSO0,
PATTERN_FULL_SSO1,
PATTERN_FULL_SSO2,
PATTERN_FULL_SSO3,
PATTERN_VREF,
PATTERN_LIMIT
};
#endif /* _DDR3_TRAINING_IP_DB_H_ */

View File

@ -0,0 +1,173 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_DEF_H
#define _DDR3_TRAINING_IP_DEF_H
#include "silicon_if.h"
#define PATTERN_55 0x55555555
#define PATTERN_AA 0xaaaaaaaa
#define PATTERN_80 0x80808080
#define PATTERN_20 0x20202020
#define PATTERN_01 0x01010101
#define PATTERN_FF 0xffffffff
#define PATTERN_00 0x00000000
/* 16bit bus width patterns */
#define PATTERN_55AA 0x5555aaaa
#define PATTERN_00FF 0x0000ffff
#define PATTERN_0080 0x00008080
#define INVALID_VALUE 0xffffffff
#define MAX_NUM_OF_DUNITS 32
/*
* length * 2 = pattern length in words; the low-address word comes
* first, then the high-address word
*/
#define TEST_PATTERN_LENGTH 4
#define KILLER_PATTERN_DQ_NUMBER 8
#define SSO_DQ_NUMBER 4
#define PATTERN_MAXIMUM_LENGTH 64
#define ADLL_TX_LENGTH 64
#define ADLL_RX_LENGTH 32
#define PARAM_NOT_CARE 0
#define READ_LEVELING_PHY_OFFSET 2
#define WRITE_LEVELING_PHY_OFFSET 0
#define MASK_ALL_BITS 0xffffffff
#define CS_BIT_MASK 0xf
/* DFX access */
#define BROADCAST_ID 28
#define MULTICAST_ID 29
#define XSB_BASE_ADDR 0x00004000
#define XSB_CTRL_0_REG 0x00000000
#define XSB_CTRL_1_REG 0x00000004
#define XSB_CMD_REG 0x00000008
#define XSB_ADDRESS_REG 0x0000000c
#define XSB_DATA_REG 0x00000010
#define PIPE_ENABLE_ADDR 0x000f8000
#define ENABLE_DDR_TUNING_ADDR 0x000f829c
#define CLIENT_BASE_ADDR 0x00002000
#define CLIENT_CTRL_REG 0x00000000
#define TARGET_INT 0x1801
#define TARGET_EXT 0x180e
#define BYTE_EN 0
#define CMD_READ 0
#define CMD_WRITE 1
#define INTERNAL_ACCESS_PORT 1
#define EXECUTING 1
#define ACCESS_EXT 1
#define CS2_EXIST_BIT 2
#define TRAINING_ID 0xf
#define EXT_TRAINING_ID 1
#define EXT_MODE 0x4
#define GET_RESULT_STATE(res) (res)
#define SET_RESULT_STATE(res, state) (res = state)
#define _1K 0x00000400
#define _4K 0x00001000
#define _8K 0x00002000
#define _16K 0x00004000
#define _32K 0x00008000
#define _64K 0x00010000
#define _128K 0x00020000
#define _256K 0x00040000
#define _512K 0x00080000
#define _1M 0x00100000
#define _2M 0x00200000
#define _4M 0x00400000
#define _8M 0x00800000
#define _16M 0x01000000
#define _32M 0x02000000
#define _64M 0x04000000
#define _128M 0x08000000
#define _256M 0x10000000
#define _512M 0x20000000
#define _1G 0x40000000
#define _2G 0x80000000
#define ADDR_SIZE_512MB 0x04000000
#define ADDR_SIZE_1GB 0x08000000
#define ADDR_SIZE_2GB 0x10000000
#define ADDR_SIZE_4GB 0x20000000
#define ADDR_SIZE_8GB 0x40000000
enum hws_edge_compare {
EDGE_PF,
EDGE_FP,
EDGE_FPF,
EDGE_PFP
};
enum hws_control_element {
HWS_CONTROL_ELEMENT_ADLL, /* per bit 1 edge */
HWS_CONTROL_ELEMENT_DQ_SKEW,
HWS_CONTROL_ELEMENT_DQS_SKEW
};
enum hws_search_dir {
HWS_LOW2HIGH,
HWS_HIGH2LOW,
HWS_SEARCH_DIR_LIMIT
};
enum hws_page_size {
PAGE_SIZE_1K,
PAGE_SIZE_2K
};
enum hws_operation {
OPERATION_READ = 0,
OPERATION_WRITE = 1
};
enum hws_training_ip_stat {
HWS_TRAINING_IP_STATUS_FAIL,
HWS_TRAINING_IP_STATUS_SUCCESS,
HWS_TRAINING_IP_STATUS_TIMEOUT
};
enum hws_ddr_cs {
CS_SINGLE,
CS_NON_SINGLE
};
enum hws_ddr_phy {
DDR_PHY_DATA = 0,
DDR_PHY_CONTROL = 1
};
enum hws_dir {
OPER_WRITE,
OPER_READ,
OPER_WRITE_AND_READ
};
enum hws_wl_supp {
PHASE_SHIFT,
CLOCK_SHIFT,
ALIGN_SHIFT
};
struct reg_data {
u32 reg_addr;
u32 reg_data;
u32 reg_mask;
};
#endif /* _DDR3_TRAINING_IP_DEF_H */

File diff suppressed because it is too large

View File

@ -0,0 +1,85 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_ENGINE_H_
#define _DDR3_TRAINING_IP_ENGINE_H_
#include "ddr3_training_ip_def.h"
#include "ddr3_training_ip_flow.h"
#define EDGE_1 0
#define EDGE_2 1
#define ALL_PUP_TRAINING 0xe
#define PUP_RESULT_EDGE_1_MASK 0xff
#define PUP_RESULT_EDGE_2_MASK (0xff << 8)
#define PUP_LOCK_RESULT_BIT 25
#define GET_TAP_RESULT(reg, edge) \
(((edge) == EDGE_1) ? ((reg) & PUP_RESULT_EDGE_1_MASK) : \
(((reg) & PUP_RESULT_EDGE_2_MASK) >> 8));
#define GET_LOCK_RESULT(reg) \
(((reg) & (1<<PUP_LOCK_RESULT_BIT)) >> PUP_LOCK_RESULT_BIT)
#define EDGE_FAILURE 128
#define ALL_BITS_PER_PUP 128
#define MIN_WINDOW_SIZE 6
#define MAX_WINDOW_SIZE_RX 32
#define MAX_WINDOW_SIZE_TX 64
int ddr3_tip_training_ip_test(u32 dev_num, enum hws_training_result result_type,
enum hws_search_dir search_dir,
enum hws_dir direction,
enum hws_edge_compare edge,
u32 init_val1, u32 init_val2,
u32 num_of_iterations, u32 start_pattern,
u32 end_pattern);
int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern pattern);
int ddr3_tip_load_pattern_to_mem_by_cpu(u32 dev_num, enum hws_pattern pattern,
u32 offset);
int ddr3_tip_load_all_pattern_to_mem(u32 dev_num);
int ddr3_tip_read_training_result(u32 dev_num, u32 if_id,
enum hws_access_type pup_access_type,
u32 pup_num, u32 bit_num,
enum hws_search_dir search,
enum hws_dir direction,
enum hws_training_result result_type,
enum hws_training_load_op operation,
u32 cs_num_type, u32 **load_res,
int is_read_from_db, u8 cons_tap,
int is_check_result_validity);
int ddr3_tip_ip_training(u32 dev_num, enum hws_access_type access_type,
u32 interface_num,
enum hws_access_type pup_access_type,
u32 pup_num, enum hws_training_result result_type,
enum hws_control_element control_element,
enum hws_search_dir search_dir, enum hws_dir direction,
u32 interface_mask, u32 init_value, u32 num_iter,
enum hws_pattern pattern,
enum hws_edge_compare edge_comp,
enum hws_ddr_cs cs_type, u32 cs_num,
enum hws_training_ip_stat *train_status);
int ddr3_tip_ip_training_wrapper(u32 dev_num, enum hws_access_type access_type,
u32 if_id,
enum hws_access_type pup_access_type,
u32 pup_num,
enum hws_training_result result_type,
enum hws_control_element control_element,
enum hws_search_dir search_dir,
enum hws_dir direction,
u32 interface_mask, u32 init_value1,
u32 init_value2, u32 num_iter,
enum hws_pattern pattern,
enum hws_edge_compare edge_comp,
enum hws_ddr_cs train_cs_type, u32 cs_num,
enum hws_training_ip_stat *train_status);
int is_odpg_access_done(u32 dev_num, u32 if_id);
void ddr3_tip_print_bist_res(void);
struct pattern_info *ddr3_tip_get_pattern_table(void);
u16 *ddr3_tip_get_mask_results_dq_reg(void);
u16 *ddr3_tip_get_mask_results_pup_reg_map(void);
#endif /* _DDR3_TRAINING_IP_ENGINE_H_ */

View File

@ -0,0 +1,349 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_FLOW_H_
#define _DDR3_TRAINING_IP_FLOW_H_
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_pbs.h"
#define MRS0_CMD 0x3
#define MRS1_CMD 0x4
#define MRS2_CMD 0x8
#define MRS3_CMD 0x9
/*
* Definitions of INTERFACE registers
*/
#define READ_BUFFER_SELECT 0x14a4
/*
* Definitions of PHY registers
*/
#define KILLER_PATTERN_LENGTH 32
#define EXT_ACCESS_BURST_LENGTH 8
#define IS_ACTIVE(if_mask , if_id) \
((if_mask) & (1 << (if_id)))
#define VALIDATE_ACTIVE(mask, id) \
{ \
if (IS_ACTIVE(mask, id) == 0) \
continue; \
}
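/* note: VALIDATE_ACTIVE expands to a 'continue', so use it only inside a loop */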
#define GET_TOPOLOGY_NUM_OF_BUSES() \
(ddr3_get_topology_map()->num_of_bus_per_interface)
#define DDR3_IS_ECC_PUP3_MODE(if_mask) \
(((if_mask) == 0xb) ? 1 : 0)
#define DDR3_IS_ECC_PUP4_MODE(if_mask) \
(((((if_mask) & 0x10) == 0)) ? 0 : 1)
#define DDR3_IS_16BIT_DRAM_MODE(mask) \
(((((mask) & 0x4) == 0)) ? 1 : 0)
#define MEGA 1000000
#define BUS_WIDTH_IN_BITS 8
/*
* DFX address Space
* Table 2: DFX address space
* Address Bits Value Description
* [31 : 20] 0x? DFX base address, based on the PCIe mapping
* [19 : 15] 0...Number_of_client-1 Client Index inside pipe.
* See also Table 1 Multi_cast = 29 Broadcast = 28
* [14 : 13] 2'b01 Access to Client Internal Register
* [12 : 0] Client Internal Register offset See related Client Registers
* [14 : 13] 2'b00 Access to Ram Wrappers Internal Register
* [12 : 6] 0 Number_of_rams-1 Ram Index inside Client
* [5 : 0] Ram Wrapper Internal Register offset See related Ram Wrappers
* Registers
*/
/* nsec */
#define TREFI_LOW 7800
#define TREFI_HIGH 3900
#define TR2R_VALUE_REG 0x180
#define TR2R_MASK_REG 0x180
#define TRFC_MASK_REG 0x7f
#define TR2W_MASK_REG 0x600
#define TW2W_HIGH_VALUE_REG 0x1800
#define TW2W_HIGH_MASK_REG 0xf800
#define TRFC_HIGH_VALUE_REG 0x20000
#define TRFC_HIGH_MASK_REG 0x70000
#define TR2R_HIGH_VALUE_REG 0x0
#define TR2R_HIGH_MASK_REG 0x380000
#define TMOD_VALUE_REG 0x16000000
#define TMOD_MASK_REG 0x1e000000
#define T_VALUE_REG 0x40000000
#define T_MASK_REG 0xc0000000
#define AUTO_ZQC_TIMING 15384
#define WRITE_XBAR_PORT1 0xc03f8077
#define READ_XBAR_PORT1 0xc03f8073
#define DISABLE_DDR_TUNING_DATA 0x02294285
#define ENABLE_DDR_TUNING_DATA 0x12294285
#define ODPG_TRAINING_STATUS_REG 0x18488
#define ODPG_TRAINING_TRIGGER_REG 0x1030
#define ODPG_STATUS_DONE_REG 0x16fc
#define ODPG_ENABLE_REG 0x186d4
#define ODPG_ENABLE_OFFS 0
#define ODPG_DISABLE_OFFS 8
#define ODPG_TRAINING_CONTROL_REG 0x1034
#define ODPG_OBJ1_OPCODE_REG 0x103c
#define ODPG_OBJ1_ITER_CNT_REG 0x10b4
#define CALIB_OBJ_PRFA_REG 0x10c4
#define ODPG_WRITE_LEVELING_DONE_CNTR_REG 0x10f8
#define ODPG_WRITE_READ_MODE_ENABLE_REG 0x10fc
#define TRAINING_OPCODE_1_REG 0x10b4
#define SDRAM_CONFIGURATION_REG 0x1400
#define DDR_CONTROL_LOW_REG 0x1404
#define SDRAM_TIMING_LOW_REG 0x1408
#define SDRAM_TIMING_HIGH_REG 0x140c
#define SDRAM_ACCESS_CONTROL_REG 0x1410
#define SDRAM_OPEN_PAGE_CONTROL_REG 0x1414
#define SDRAM_OPERATION_REG 0x1418
#define DUNIT_CONTROL_HIGH_REG 0x1424
#define ODT_TIMING_LOW 0x1428
#define DDR_TIMING_REG 0x142c
#define ODT_TIMING_HI_REG 0x147c
#define SDRAM_INIT_CONTROL_REG 0x1480
#define SDRAM_ODT_CONTROL_HIGH_REG 0x1498
#define DUNIT_ODT_CONTROL_REG 0x149c
#define READ_BUFFER_SELECT_REG 0x14a4
#define DUNIT_MMASK_REG 0x14b0
#define CALIB_MACHINE_CTRL_REG 0x14cc
#define DRAM_DLL_TIMING_REG 0x14e0
#define DRAM_ZQ_INIT_TIMIMG_REG 0x14e4
#define DRAM_ZQ_TIMING_REG 0x14e8
#define DFS_REG 0x1528
#define READ_DATA_SAMPLE_DELAY 0x1538
#define READ_DATA_READY_DELAY 0x153c
#define TRAINING_REG 0x15b0
#define TRAINING_SW_1_REG 0x15b4
#define TRAINING_SW_2_REG 0x15b8
#define TRAINING_PATTERN_BASE_ADDRESS_REG 0x15bc
#define TRAINING_DBG_1_REG 0x15c0
#define TRAINING_DBG_2_REG 0x15c4
#define TRAINING_DBG_3_REG 0x15c8
#define RANK_CTRL_REG 0x15e0
#define TIMING_REG 0x15e4
#define DRAM_PHY_CONFIGURATION 0x15ec
#define MR0_REG 0x15d0
#define MR1_REG 0x15d4
#define MR2_REG 0x15d8
#define MR3_REG 0x15dc
#define TIMING_REG 0x15e4
#define ODPG_CTRL_CONTROL_REG 0x1600
#define ODPG_DATA_CONTROL_REG 0x1630
#define ODPG_PATTERN_ADDR_OFFSET_REG 0x1638
#define ODPG_DATA_BUF_SIZE_REG 0x163c
#define PHY_LOCK_STATUS_REG 0x1674
#define PHY_REG_FILE_ACCESS 0x16a0
#define TRAINING_WRITE_LEVELING_REG 0x16ac
#define ODPG_PATTERN_ADDR_REG 0x16b0
#define ODPG_PATTERN_DATA_HI_REG 0x16b4
#define ODPG_PATTERN_DATA_LOW_REG 0x16b8
#define ODPG_BIST_LAST_FAIL_ADDR_REG 0x16bc
#define ODPG_BIST_DATA_ERROR_COUNTER_REG 0x16c0
#define ODPG_BIST_FAILED_DATA_HI_REG 0x16c4
#define ODPG_BIST_FAILED_DATA_LOW_REG 0x16c8
#define ODPG_WRITE_DATA_ERROR_REG 0x16cc
#define CS_ENABLE_REG 0x16d8
#define WR_LEVELING_DQS_PATTERN_REG 0x16dc
#define ODPG_BIST_DONE 0x186d4
#define ODPG_BIST_DONE_BIT_OFFS 0
#define ODPG_BIST_DONE_BIT_VALUE 0
#define RESULT_CONTROL_BYTE_PUP_0_REG 0x1830
#define RESULT_CONTROL_BYTE_PUP_1_REG 0x1834
#define RESULT_CONTROL_BYTE_PUP_2_REG 0x1838
#define RESULT_CONTROL_BYTE_PUP_3_REG 0x183c
#define RESULT_CONTROL_BYTE_PUP_4_REG 0x18b0
#define RESULT_CONTROL_PUP_0_BIT_0_REG 0x18b4
#define RESULT_CONTROL_PUP_0_BIT_1_REG 0x18b8
#define RESULT_CONTROL_PUP_0_BIT_2_REG 0x18bc
#define RESULT_CONTROL_PUP_0_BIT_3_REG 0x18c0
#define RESULT_CONTROL_PUP_0_BIT_4_REG 0x18c4
#define RESULT_CONTROL_PUP_0_BIT_5_REG 0x18c8
#define RESULT_CONTROL_PUP_0_BIT_6_REG 0x18cc
#define RESULT_CONTROL_PUP_0_BIT_7_REG 0x18f0
#define RESULT_CONTROL_PUP_1_BIT_0_REG 0x18f4
#define RESULT_CONTROL_PUP_1_BIT_1_REG 0x18f8
#define RESULT_CONTROL_PUP_1_BIT_2_REG 0x18fc
#define RESULT_CONTROL_PUP_1_BIT_3_REG 0x1930
#define RESULT_CONTROL_PUP_1_BIT_4_REG 0x1934
#define RESULT_CONTROL_PUP_1_BIT_5_REG 0x1938
#define RESULT_CONTROL_PUP_1_BIT_6_REG 0x193c
#define RESULT_CONTROL_PUP_1_BIT_7_REG 0x19b0
#define RESULT_CONTROL_PUP_2_BIT_0_REG 0x19b4
#define RESULT_CONTROL_PUP_2_BIT_1_REG 0x19b8
#define RESULT_CONTROL_PUP_2_BIT_2_REG 0x19bc
#define RESULT_CONTROL_PUP_2_BIT_3_REG 0x19c0
#define RESULT_CONTROL_PUP_2_BIT_4_REG 0x19c4
#define RESULT_CONTROL_PUP_2_BIT_5_REG 0x19c8
#define RESULT_CONTROL_PUP_2_BIT_6_REG 0x19cc
#define RESULT_CONTROL_PUP_2_BIT_7_REG 0x19f0
#define RESULT_CONTROL_PUP_3_BIT_0_REG 0x19f4
#define RESULT_CONTROL_PUP_3_BIT_1_REG 0x19f8
#define RESULT_CONTROL_PUP_3_BIT_2_REG 0x19fc
#define RESULT_CONTROL_PUP_3_BIT_3_REG 0x1a30
#define RESULT_CONTROL_PUP_3_BIT_4_REG 0x1a34
#define RESULT_CONTROL_PUP_3_BIT_5_REG 0x1a38
#define RESULT_CONTROL_PUP_3_BIT_6_REG 0x1a3c
#define RESULT_CONTROL_PUP_3_BIT_7_REG 0x1ab0
#define RESULT_CONTROL_PUP_4_BIT_0_REG 0x1ab4
#define RESULT_CONTROL_PUP_4_BIT_1_REG 0x1ab8
#define RESULT_CONTROL_PUP_4_BIT_2_REG 0x1abc
#define RESULT_CONTROL_PUP_4_BIT_3_REG 0x1ac0
#define RESULT_CONTROL_PUP_4_BIT_4_REG 0x1ac4
#define RESULT_CONTROL_PUP_4_BIT_5_REG 0x1ac8
#define RESULT_CONTROL_PUP_4_BIT_6_REG 0x1acc
#define RESULT_CONTROL_PUP_4_BIT_7_REG 0x1af0
#define WL_PHY_REG 0x0
#define WRITE_CENTRALIZATION_PHY_REG 0x1
#define RL_PHY_REG 0x2
#define READ_CENTRALIZATION_PHY_REG 0x3
#define PBS_RX_PHY_REG 0x50
#define PBS_TX_PHY_REG 0x10
#define PHY_CONTROL_PHY_REG 0x90
#define BW_PHY_REG 0x92
#define RATE_PHY_REG 0x94
#define CMOS_CONFIG_PHY_REG 0xa2
#define PAD_ZRI_CALIB_PHY_REG 0xa4
#define PAD_ODT_CALIB_PHY_REG 0xa6
#define PAD_CONFIG_PHY_REG 0xa8
#define PAD_PRE_DISABLE_PHY_REG 0xa9
#define TEST_ADLL_REG 0xbf
#define CSN_IOB_VREF_REG(cs) (0xdb + (cs * 12))
#define CSN_IO_BASE_VREF_REG(cs) (0xd0 + (cs * 12))
#define RESULT_DB_PHY_REG_ADDR 0xc0
#define RESULT_DB_PHY_REG_RX_OFFSET 5
#define RESULT_DB_PHY_REG_TX_OFFSET 0
/* TBD - for NP5 use only CS 0 */
#define PHY_WRITE_DELAY(cs) WL_PHY_REG
/*( ( _cs_ == 0 ) ? 0x0 : 0x4 )*/
/* TBD - for NP5 use only CS 0 */
#define PHY_READ_DELAY(cs) RL_PHY_REG
#define DDR0_ADDR_1 0xf8258
#define DDR0_ADDR_2 0xf8254
#define DDR1_ADDR_1 0xf8270
#define DDR1_ADDR_2 0xf8270
#define DDR2_ADDR_1 0xf825c
#define DDR2_ADDR_2 0xf825c
#define DDR3_ADDR_1 0xf8264
#define DDR3_ADDR_2 0xf8260
#define DDR4_ADDR_1 0xf8274
#define DDR4_ADDR_2 0xf8274
#define GENERAL_PURPOSE_RESERVED0_REG 0x182e0
#define GET_BLOCK_ID_MAX_FREQ(dev_num, block_id) 800000
#define CS0_RD_LVL_REF_DLY_OFFS 0
#define CS0_RD_LVL_REF_DLY_LEN 0
#define CS0_RD_LVL_PH_SEL_OFFS 0
#define CS0_RD_LVL_PH_SEL_LEN 0
#define CS_REGISTER_ADDR_OFFSET 4
#define CALIBRATED_OBJECTS_REG_ADDR_OFFSET 0x10
#define MAX_POLLING_ITERATIONS 100000
#define PHASE_REG_OFFSET 32
#define NUM_BYTES_IN_BURST 31
#define NUM_OF_CS 4
#define CS_REG_VALUE(cs_num) (cs_mask_reg[cs_num])
#define ADLL_LENGTH 32
struct write_supp_result {
enum hws_wl_supp stage;
int is_pup_fail;
};
struct page_element {
enum hws_page_size page_size_8bit;
/* page size in 8 bits bus width */
enum hws_page_size page_size_16bit;
/* page size in 16 bits bus width */
u32 ui_page_mask;
/* Mask used in register */
};
int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id,
enum hws_ddr_freq frequency,
u32 *round_trip_delay_arr);
int ddr3_tip_read_leveling_static_config(u32 dev_num, u32 if_id,
enum hws_ddr_freq frequency,
u32 *total_round_trip_delay_arr);
int ddr3_tip_if_write(u32 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 data_value, u32 mask);
int ddr3_tip_if_polling(u32 dev_num, enum hws_access_type access_type,
u32 if_id, u32 exp_value, u32 mask, u32 offset,
u32 poll_tries);
int ddr3_tip_if_read(u32 dev_num, enum hws_access_type interface_access,
u32 if_id, u32 reg_addr, u32 *data, u32 mask);
int ddr3_tip_bus_read_modify_write(u32 dev_num,
enum hws_access_type access_type,
u32 if_id, u32 phy_id,
enum hws_ddr_phy phy_type,
u32 reg_addr, u32 data_value, u32 reg_mask);
int ddr3_tip_bus_read(u32 dev_num, u32 if_id, enum hws_access_type phy_access,
u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr,
u32 *data);
int ddr3_tip_bus_write(u32 dev_num, enum hws_access_type e_interface_access,
u32 if_id, enum hws_access_type e_phy_access, u32 phy_id,
enum hws_ddr_phy e_phy_type, u32 reg_addr,
u32 data_value);
int ddr3_tip_freq_set(u32 dev_num, enum hws_access_type e_access, u32 if_id,
enum hws_ddr_freq memory_freq);
int ddr3_tip_adjust_dqs(u32 dev_num);
int ddr3_tip_init_controller(u32 dev_num);
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *addr);
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
u32 num_of_bursts, u32 *addr);
int ddr3_tip_dynamic_read_leveling(u32 dev_num, u32 ui_freq);
int ddr3_tip_legacy_dynamic_read_leveling(u32 dev_num);
int ddr3_tip_dynamic_per_bit_read_leveling(u32 dev_num, u32 ui_freq);
int ddr3_tip_legacy_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling(u32 dev_num);
int ddr3_tip_dynamic_write_leveling_supp(u32 dev_num);
int ddr3_tip_static_init_controller(u32 dev_num);
int ddr3_tip_configure_phy(u32 dev_num);
int ddr3_tip_load_pattern_to_odpg(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum hws_pattern pattern,
u32 load_addr);
int ddr3_tip_load_pattern_to_mem(u32 dev_num, enum hws_pattern e_pattern);
int ddr3_tip_configure_odpg(u32 dev_num, enum hws_access_type access_type,
u32 if_id, enum hws_dir direction, u32 tx_phases,
u32 tx_burst_size, u32 rx_phases,
u32 delay_between_burst, u32 rd_mode, u32 cs_num,
u32 addr_stress_jump, u32 single_pattern);
int ddr3_tip_set_atr(u32 dev_num, u32 flag_id, u32 value);
int ddr3_tip_write_mrs_cmd(u32 dev_num, u32 *cs_mask_arr, u32 cmd, u32 data,
u32 mask);
int ddr3_tip_write_cs_result(u32 dev_num, u32 offset);
int ddr3_tip_get_first_active_if(u8 dev_num, u32 interface_mask, u32 *if_id);
int ddr3_tip_reset_fifo_ptr(u32 dev_num);
int read_pup_value(int pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int read_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr, u32 mask);
int write_adll_value(u32 pup_values[MAX_INTERFACE_NUM * MAX_BUS_NUM],
int reg_addr);
int ddr3_tip_tune_training_params(u32 dev_num,
struct tune_train_params *params);
#endif /* _DDR3_TRAINING_IP_FLOW_H_ */

View File

@ -0,0 +1,41 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_PBS_H_
#define _DDR3_TRAINING_IP_PBS_H_
enum {
EBA_CONFIG,
EEBA_CONFIG,
SBA_CONFIG
};
enum hws_training_load_op {
TRAINING_LOAD_OPERATION_UNLOAD,
TRAINING_LOAD_OPERATION_LOAD
};
enum hws_edge {
TRAINING_EDGE_1,
TRAINING_EDGE_2
};
enum hws_edge_search {
TRAINING_EDGE_MAX,
TRAINING_EDGE_MIN
};
enum pbs_dir {
PBS_TX_MODE = 0,
PBS_RX_MODE,
NUM_OF_PBS_MODES
};
int ddr3_tip_pbs_rx(u32 dev_num);
int ddr3_tip_print_all_pbs_result(u32 dev_num);
int ddr3_tip_pbs_tx(u32 dev_num);
#endif /* _DDR3_TRAINING_IP_PBS_H_ */

View File

@ -0,0 +1,107 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_PRV_IF_H
#define _DDR3_TRAINING_IP_PRV_IF_H
#include "ddr3_training_ip.h"
#include "ddr3_training_ip_flow.h"
#include "ddr3_training_ip_bist.h"
enum hws_static_config_type {
WRITE_LEVELING_STATIC,
READ_LEVELING_STATIC
};
struct ddr3_device_info {
u32 device_id;
u32 ck_delay;
};
typedef int (*HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR)(u8 dev_num, int enable);
typedef int (*HWS_TIP_DUNIT_REG_READ_FUNC_PTR)(
u8 dev_num, enum hws_access_type interface_access, u32 if_id,
u32 offset, u32 *data, u32 mask);
typedef int (*HWS_TIP_DUNIT_REG_WRITE_FUNC_PTR)(
u8 dev_num, enum hws_access_type interface_access, u32 if_id,
u32 offset, u32 data, u32 mask);
typedef int (*HWS_TIP_GET_FREQ_CONFIG_INFO)(
u8 dev_num, enum hws_ddr_freq freq,
struct hws_tip_freq_config_info *freq_config_info);
typedef int (*HWS_TIP_GET_DEVICE_INFO)(
u8 dev_num, struct ddr3_device_info *info_ptr);
typedef int (*HWS_GET_CS_CONFIG_FUNC_PTR)(
u8 dev_num, u32 cs_mask, struct hws_cs_config_info *cs_info);
typedef int (*HWS_SET_FREQ_DIVIDER_FUNC_PTR)(
u8 dev_num, u32 if_id, enum hws_ddr_freq freq);
typedef int (*HWS_GET_INIT_FREQ)(u8 dev_num, enum hws_ddr_freq *freq);
typedef int (*HWS_TRAINING_IP_IF_WRITE_FUNC_PTR)(
u32 dev_num, enum hws_access_type access_type, u32 dunit_id,
u32 reg_addr, u32 data, u32 mask);
typedef int (*HWS_TRAINING_IP_IF_READ_FUNC_PTR)(
u32 dev_num, enum hws_access_type access_type, u32 dunit_id,
u32 reg_addr, u32 *data, u32 mask);
typedef int (*HWS_TRAINING_IP_BUS_WRITE_FUNC_PTR)(
u32 dev_num, enum hws_access_type dunit_access_type, u32 if_id,
enum hws_access_type phy_access_type, u32 phy_id,
enum hws_ddr_phy phy_type, u32 reg_addr, u32 data);
typedef int (*HWS_TRAINING_IP_BUS_READ_FUNC_PTR)(
u32 dev_num, u32 if_id, enum hws_access_type phy_access_type,
u32 phy_id, enum hws_ddr_phy phy_type, u32 reg_addr, u32 *data);
typedef int (*HWS_TRAINING_IP_ALGO_RUN_FUNC_PTR)(
u32 dev_num, enum hws_algo_type algo_type);
typedef int (*HWS_TRAINING_IP_SET_FREQ_FUNC_PTR)(
u32 dev_num, enum hws_access_type access_type, u32 if_id,
enum hws_ddr_freq frequency);
typedef int (*HWS_TRAINING_IP_INIT_CONTROLLER_FUNC_PTR)(
u32 dev_num, struct init_cntr_param *init_cntr_prm);
typedef int (*HWS_TRAINING_IP_PBS_RX_FUNC_PTR)(u32 dev_num);
typedef int (*HWS_TRAINING_IP_PBS_TX_FUNC_PTR)(u32 dev_num);
typedef int (*HWS_TRAINING_IP_SELECT_CONTROLLER_FUNC_PTR)(
u32 dev_num, int enable);
typedef int (*HWS_TRAINING_IP_TOPOLOGY_MAP_LOAD_FUNC_PTR)(
u32 dev_num, struct hws_topology_map *topology_map);
typedef int (*HWS_TRAINING_IP_STATIC_CONFIG_FUNC_PTR)(
u32 dev_num, enum hws_ddr_freq frequency,
enum hws_static_config_type static_config_type, u32 if_id);
typedef int (*HWS_TRAINING_IP_EXTERNAL_READ_PTR)(
u32 dev_num, u32 if_id, u32 ddr_addr, u32 num_bursts, u32 *data);
typedef int (*HWS_TRAINING_IP_EXTERNAL_WRITE_PTR)(
u32 dev_num, u32 if_id, u32 ddr_addr, u32 num_bursts, u32 *data);
typedef int (*HWS_TRAINING_IP_BIST_ACTIVATE)(
u32 dev_num, enum hws_pattern pattern, enum hws_access_type access_type,
u32 if_num, enum hws_dir direction,
enum hws_stress_jump addr_stress_jump,
enum hws_pattern_duration duration,
enum hws_bist_operation oper_type, u32 offset, u32 cs_num,
u32 pattern_addr_length);
typedef int (*HWS_TRAINING_IP_BIST_READ_RESULT)(
u32 dev_num, u32 if_id, struct bist_result *pst_bist_result);
typedef int (*HWS_TRAINING_IP_LOAD_TOPOLOGY)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_READ_LEVELING)(u32 dev_num, u32 config_num);
typedef int (*HWS_TRAINING_IP_WRITE_LEVELING)(u32 dev_num, u32 config_num);
typedef u32 (*HWS_TRAINING_IP_GET_TEMP)(u8 dev_num);
struct hws_tip_config_func_db {
HWS_TIP_DUNIT_MUX_SELECT_FUNC_PTR tip_dunit_mux_select_func;
HWS_TIP_DUNIT_REG_READ_FUNC_PTR tip_dunit_read_func;
HWS_TIP_DUNIT_REG_WRITE_FUNC_PTR tip_dunit_write_func;
HWS_TIP_GET_FREQ_CONFIG_INFO tip_get_freq_config_info_func;
HWS_TIP_GET_DEVICE_INFO tip_get_device_info_func;
HWS_SET_FREQ_DIVIDER_FUNC_PTR tip_set_freq_divider_func;
HWS_GET_CS_CONFIG_FUNC_PTR tip_get_cs_config_info;
HWS_TRAINING_IP_GET_TEMP tip_get_temperature;
};
int ddr3_tip_init_config_func(u32 dev_num,
struct hws_tip_config_func_db *config_func);
int ddr3_tip_register_xsb_info(u32 dev_num,
struct hws_xsb_info *xsb_info_table);
enum hws_result *ddr3_tip_get_result_ptr(u32 stage);
int ddr3_set_freq_config_info(struct hws_tip_freq_config_info *table);
int print_device_info(u8 dev_num);
#endif /* _DDR3_TRAINING_IP_PRV_IF_H */

View File

@ -0,0 +1,31 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_IP_STATIC_H_
#define _DDR3_TRAINING_IP_STATIC_H_
#include "ddr3_training_ip_def.h"
#include "ddr3_training_ip.h"
struct trip_delay_element {
u32 dqs_delay; /* DQS delay (m_sec) */
u32 ck_delay; /* CK Delay (m_sec) */
};
struct hws_tip_static_config_info {
u32 silicon_delay;
struct trip_delay_element *package_trace_arr;
struct trip_delay_element *board_trace_arr;
};
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq);
int ddr3_tip_init_static_config_db(
u32 dev_num, struct hws_tip_static_config_info *static_config_info);
int ddr3_tip_init_specific_reg_config(u32 dev_num,
struct reg_data *reg_config_arr);
int ddr3_tip_static_phy_init_controller(u32 dev_num);
#endif /* _DDR3_TRAINING_IP_STATIC_H_ */

File diff suppressed because it is too large

View File

@ -0,0 +1,17 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR3_TRAINING_LEVELING_H_
#define _DDR3_TRAINING_LEVELING_H_
#define MAX_DQ_READ_LEVELING_DELAY 15
int ddr3_tip_print_wl_supp_result(u32 dev_num);
int ddr3_tip_calc_cs_mask(u32 dev_num, u32 if_id, u32 effective_cs,
u32 *cs_mask);
u32 hws_ddr3_tip_max_cs_get(void);
#endif /* _DDR3_TRAINING_LEVELING_H_ */

View File

@ -0,0 +1,995 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#define TYPICAL_PBS_VALUE 12
u32 nominal_adll[MAX_INTERFACE_NUM * MAX_BUS_NUM];
enum hws_training_ip_stat train_status[MAX_INTERFACE_NUM];
u8 result_mat[MAX_INTERFACE_NUM][MAX_BUS_NUM][BUS_WIDTH_IN_BITS];
u8 result_mat_rx_dqs[MAX_INTERFACE_NUM][MAX_BUS_NUM][MAX_CS_NUM];
/* 4-EEWA, 3-EWA, 2-SWA, 1-Fail, 0-Pass */
u8 result_all_bit[MAX_BUS_NUM * BUS_WIDTH_IN_BITS * MAX_INTERFACE_NUM];
u8 max_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_pbs_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 max_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 min_adll_per_pup[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u32 pbsdelay_per_pup[NUM_OF_PBS_MODES][MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 adll_shift_lock[MAX_INTERFACE_NUM][MAX_BUS_NUM];
u8 adll_shift_val[MAX_INTERFACE_NUM][MAX_BUS_NUM];
enum hws_pattern pbs_pattern = PATTERN_VREF;
static u8 pup_state[MAX_INTERFACE_NUM][MAX_BUS_NUM];
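/* pup_state: 3 = initial EBA search, 4 = moved to EEBA, 2 = moved to SBA, 1 = training failed */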
/*
* Name: ddr3_tip_pbs
* Desc: Per Bit Skew (PBS) training
* Args: dev_num - device number
*       pbs_mode - PBS_RX_MODE or PBS_TX_MODE
* Notes:
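*        Shift the ADLL per pup until training fails (EBA, with an EEBA
*        retry and an SBA fallback for pups that did not lock), then run
*        a per-bit DQ skew search, normalize each bit against the
*        per-pup minimum and write the offsets to the PBS PHY registers.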
* Returns: OK if success, other error code if fail.
*/
int ddr3_tip_pbs(u32 dev_num, enum pbs_dir pbs_mode)
{
u32 res0[MAX_INTERFACE_NUM];
int adll_tap = MEGA / freq_val[medium_freq] / 64;
int pad_num = 0;
enum hws_search_dir search_dir =
(pbs_mode == PBS_RX_MODE) ? HWS_HIGH2LOW : HWS_LOW2HIGH;
enum hws_dir dir = (pbs_mode == PBS_RX_MODE) ? OPER_READ : OPER_WRITE;
int iterations = (pbs_mode == PBS_RX_MODE) ? 31 : 63;
u32 res_valid_mask = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
int init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
enum hws_edge_compare search_edge = EDGE_FP;
u32 pup = 0, bit = 0, if_id = 0, all_lock = 0, cs_num = 0;
int reg_addr = 0;
u32 validation_val = 0;
u32 cs_enable_reg_val[MAX_INTERFACE_NUM];
u16 *mask_results_dq_reg_map = ddr3_tip_get_mask_results_dq_reg();
u8 temp = 0;
struct hws_topology_map *tm = ddr3_get_topology_map();
/* save current cs enable reg val */
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* save current cs enable reg val */
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val, MASK_ALL_BITS));
/* enable single cs */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, (1 << 3), (1 << 3)));
}
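/* save the nominal (centralization) ADLL values for the current CS before shifting */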
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET)) :
(WRITE_CENTRALIZATION_PHY_REG +
(effective_cs * CS_REGISTER_ADDR_OFFSET));
read_adll_value(nominal_adll, reg_addr, MASK_ALL_BITS);
/* stage 1 shift ADLL */
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, RESULT_PER_BIT,
HWS_CONTROL_ELEMENT_ADLL, search_dir, dir,
tm->if_act_mask, init_val, iterations,
pbs_pattern, search_edge, CS_SINGLE, cs_num,
train_status);
validation_val = (pbs_mode == PBS_RX_MODE) ? 0x1f : 0;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
min_adll_per_pup[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
pup_state[if_id][pup] = 0x3;
adll_shift_lock[if_id][pup] = 1;
max_adll_per_pup[if_id][pup] = 0x0;
}
}
/* EBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
mask_results_dq_reg_map[
bit + pup * BUS_WIDTH_IN_BITS],
res0, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup,
res0[if_id]));
if (pup_state[if_id][pup] != 3)
continue;
/* if not in EBA state then move to next pup */
if ((res0[if_id] & 0x2000000) == 0) {
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("-- Fail Training IP\n"));
/* training machine failed */
pup_state[if_id][pup] = 1;
adll_shift_lock[if_id][pup] = 0;
continue;
} else if ((res0[if_id] & res_valid_mask) ==
validation_val) {
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("-- FAIL EBA %d %d %d %d\n",
if_id, bit, pup,
res0[if_id]));
pup_state[if_id][pup] = 4;
/* this pup move to EEBA */
adll_shift_lock[if_id][pup] = 0;
continue;
} else {
/*
* The search ended in Pass; we need
* a Fail
*/
res0[if_id] =
(pbs_mode == PBS_RX_MODE) ?
((res0[if_id] &
res_valid_mask) + 1) :
((res0[if_id] &
res_valid_mask) - 1);
max_adll_per_pup[if_id][pup] =
(max_adll_per_pup[if_id][pup] <
res0[if_id]) ?
(u8)res0[if_id] :
max_adll_per_pup[if_id][pup];
min_adll_per_pup[if_id][pup] =
(res0[if_id] >
min_adll_per_pup[if_id][pup]) ?
min_adll_per_pup[if_id][pup] :
(u8)
res0[if_id];
/*
* vs the Rx we are searching for the
* smallest value of DQ shift so all
* Bus would fail
*/
adll_shift_val[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ?
max_adll_per_pup[if_id][pup] :
min_adll_per_pup[if_id][pup];
}
}
}
}
/* EEBA */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
if (pup_state[if_id][pup] != 4)
continue;
/*
* if pup state is different from EEBA then move to
* next pup
*/
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x54 + effective_cs * 0x10) :
(0x14 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA,
reg_addr, 0x1f));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x55 + effective_cs * 0x10) :
(0x15 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA,
reg_addr, 0x1f));
/* initialize the Edge2 Max. */
adll_shift_val[if_id][pup] = 0;
min_adll_per_pup[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ? 0x1f : 0x3f;
max_adll_per_pup[if_id][pup] = 0x0;
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, RESULT_PER_BIT,
HWS_CONTROL_ELEMENT_ADLL,
search_dir, dir,
tm->if_act_mask, init_val,
iterations, pbs_pattern,
search_edge, CS_SINGLE, cs_num,
train_status);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("ADLL shift results:\n"));
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
mask_results_dq_reg_map[
bit + pup *
BUS_WIDTH_IN_BITS],
res0, MASK_ALL_BITS));
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup,
res0[if_id]));
if ((res0[if_id] & 0x2000000) == 0) {
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
(" -- EEBA Fail\n"));
bit = BUS_WIDTH_IN_BITS;
/* exit bit loop */
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("-- EEBA Fail Training IP\n"));
/*
* training machine failed but passed
* before in EBA, so maybe the DQS
* shift changed the environment.
*/
pup_state[if_id][pup] = 2;
adll_shift_lock[if_id][pup] = 0;
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x54 + effective_cs * 0x10) :
(0x14 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
0x0));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x55 + effective_cs * 0x10) :
(0x15 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
0x0));
continue;
} else if ((res0[if_id] & res_valid_mask) ==
validation_val) {
/* exit bit loop */
bit = BUS_WIDTH_IN_BITS;
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("-- FAIL EEBA\n"));
/* this pup move to SBA */
pup_state[if_id][pup] = 2;
adll_shift_lock[if_id][pup] = 0;
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x54 + effective_cs * 0x10) :
(0x14 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
0x0));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x55 + effective_cs * 0x10) :
(0x15 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num,
ACCESS_TYPE_UNICAST,
if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
0x0));
continue;
} else {
adll_shift_lock[if_id][pup] = 1;
/*
* The search ended in Pass; we need
* a Fail
*/
res0[if_id] =
(pbs_mode == PBS_RX_MODE) ?
((res0[if_id] &
res_valid_mask) + 1) :
((res0[if_id] &
res_valid_mask) - 1);
max_adll_per_pup[if_id][pup] =
(max_adll_per_pup[if_id][pup] <
res0[if_id]) ?
(u8)res0[if_id] :
max_adll_per_pup[if_id][pup];
min_adll_per_pup[if_id][pup] =
(res0[if_id] >
min_adll_per_pup[if_id][pup]) ?
min_adll_per_pup[if_id][pup] :
(u8)res0[if_id];
/*
* vs the Rx we are searching for the
* smallest value of DQ shift so all Bus
* would fail
*/
adll_shift_val[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ?
max_adll_per_pup[if_id][pup] :
min_adll_per_pup[if_id][pup];
}
}
}
}
/* Print Stage result */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, ADLL Shift for EBA: pup[%d] Lock status = %d Lock Val = %d,%d\n",
if_id, pup,
adll_shift_lock[if_id][pup],
max_adll_per_pup[if_id][pup],
min_adll_per_pup[if_id][pup]));
}
}
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("Update ADLL Shift of all pups:\n"));
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1)
continue;
/* if pup not locked continue to next pup */
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x3 + effective_cs * 4) :
(0x1 + effective_cs * 4);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA,
reg_addr, adll_shift_val[if_id][pup]));
DEBUG_PBS_ENGINE(DEBUG_LEVEL_TRACE,
("FP I/F %d, Pup[%d] = %d\n", if_id,
pup, adll_shift_val[if_id][pup]));
}
}
/* PBS EEBA&EBA */
/* Start the Per Bit Skew search */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
max_pbs_per_pup[if_id][pup] = 0x0;
min_pbs_per_pup[if_id][pup] = 0x1f;
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
/* reset result for PBS */
result_all_bit[bit + pup * BUS_WIDTH_IN_BITS +
if_id * MAX_BUS_NUM *
BUS_WIDTH_IN_BITS] = 0;
}
}
}
iterations = 31;
search_dir = HWS_LOW2HIGH;
/* note: init_val would normally be (search_dir == HWS_LOW2HIGH) ? 0 : iterations */
init_val = 0;
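/* per-bit DQ skew search: 31 taps, searching low-to-high, per-bit results */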
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
RESULT_PER_BIT, HWS_CONTROL_ELEMENT_DQ_SKEW,
search_dir, dir, tm->if_act_mask, init_val,
iterations, pbs_pattern, search_edge,
CS_SINGLE, cs_num, train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] != 1) {
/* if pup not lock continue to next pup */
continue;
}
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
mask_results_dq_reg_map[
bit +
pup * BUS_WIDTH_IN_BITS],
res0, MASK_ALL_BITS));
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("Per Bit Skew search, FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup,
res0[if_id]));
if ((res0[if_id] & 0x2000000) == 0) {
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("--EBA PBS Fail - Training IP machine\n"));
/* exit the bit loop */
bit = BUS_WIDTH_IN_BITS;
/*
* ADLL is no longer locked; a new
* search is needed
*/
adll_shift_lock[if_id][pup] = 0;
/* Move to SBA */
pup_state[if_id][pup] = 2;
max_pbs_per_pup[if_id][pup] = 0x0;
min_pbs_per_pup[if_id][pup] = 0x1f;
continue;
} else {
temp = (u8)(res0[if_id] &
res_valid_mask);
max_pbs_per_pup[if_id][pup] =
(temp >
max_pbs_per_pup[if_id][pup]) ?
temp :
max_pbs_per_pup[if_id][pup];
min_pbs_per_pup[if_id][pup] =
(temp <
min_pbs_per_pup[if_id][pup]) ?
temp :
min_pbs_per_pup[if_id][pup];
result_all_bit[bit +
pup * BUS_WIDTH_IN_BITS +
if_id * MAX_BUS_NUM *
BUS_WIDTH_IN_BITS] =
temp;
}
}
}
}
/* Check all Pup lock */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
all_lock = all_lock * adll_shift_lock[if_id][pup];
}
}
/* Only if not all Pups Lock */
if (all_lock == 0) {
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("##########ADLL shift for SBA###########\n"));
/* ADLL shift for SBA */
search_dir = (pbs_mode == PBS_RX_MODE) ? HWS_LOW2HIGH :
HWS_HIGH2LOW;
init_val = (search_dir == HWS_LOW2HIGH) ? 0 : iterations;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
if (adll_shift_lock[if_id][pup] == 1) {
/* if pup is locked, continue to next pup */
continue;
}
/* init the variables, although they were initialized before */
adll_shift_lock[if_id][pup] = 0;
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x54 + effective_cs * 0x10) :
(0x14 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr, 0));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x55 + effective_cs * 0x10) :
(0x15 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr, 0));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x5f + effective_cs * 0x10) :
(0x1f + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr, 0));
/* initialize the Edge2 Max. */
adll_shift_val[if_id][pup] = 0;
min_adll_per_pup[if_id][pup] = 0x1f;
max_adll_per_pup[if_id][pup] = 0x0;
ddr3_tip_ip_training(dev_num,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
RESULT_PER_BIT,
HWS_CONTROL_ELEMENT_ADLL,
search_dir, dir,
tm->if_act_mask,
init_val, iterations,
pbs_pattern,
search_edge, CS_SINGLE,
cs_num, train_status);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
mask_results_dq_reg_map
[bit +
pup *
BUS_WIDTH_IN_BITS],
res0, MASK_ALL_BITS));
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
("FP I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup, res0[if_id]));
if ((res0[if_id] & 0x2000000) == 0) {
/* exit the bit loop */
bit = BUS_WIDTH_IN_BITS;
/* Fail SBA --> Fail PBS */
pup_state[if_id][pup] = 1;
DEBUG_PBS_ENGINE
(DEBUG_LEVEL_INFO,
(" SBA Fail\n"));
continue;
} else {
/*
* increment; the pup is considered locked
* only when all 8 bits lock.
*/
adll_shift_lock[if_id][pup]++;
/*
* The search ended in Pass;
* we need a Fail
*/
res0[if_id] =
(pbs_mode == PBS_RX_MODE) ?
((res0[if_id] & res_valid_mask) + 1) :
((res0[if_id] & res_valid_mask) - 1);
max_adll_per_pup[if_id][pup] =
(max_adll_per_pup[if_id]
[pup] < res0[if_id]) ?
(u8)res0[if_id] :
max_adll_per_pup[if_id][pup];
min_adll_per_pup[if_id][pup] =
(res0[if_id] >
min_adll_per_pup[if_id]
[pup]) ?
min_adll_per_pup[if_id][pup] :
(u8)res0[if_id];
/*
* vs the Rx we are searching for
* the smallest value of DQ shift
* so all Bus would fail
*/
adll_shift_val[if_id][pup] =
(pbs_mode == PBS_RX_MODE) ?
max_adll_per_pup[if_id][pup] :
min_adll_per_pup[if_id][pup];
}
}
/* 1 is lock */
adll_shift_lock[if_id][pup] =
(adll_shift_lock[if_id][pup] == 8) ?
1 : 0;
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x3 + effective_cs * 4) :
(0x1 + effective_cs * 4);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
adll_shift_val[if_id][pup]));
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
("adll_shift_lock[%x][%x] = %x\n",
if_id, pup,
adll_shift_lock[if_id][pup]));
}
}
/* End ADLL Shift for SBA */
/* Start the Per Bit Skew search */
/* The ADLL shift finished with a Pass */
search_edge = (pbs_mode == PBS_RX_MODE) ? EDGE_PF : EDGE_FP;
search_dir = (pbs_mode == PBS_RX_MODE) ?
HWS_LOW2HIGH : HWS_HIGH2LOW;
iterations = 0x1f;
/* - The initial value is different in Rx and Tx mode */
init_val = (pbs_mode == PBS_RX_MODE) ? 0 : iterations;
ddr3_tip_ip_training(dev_num, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE, RESULT_PER_BIT,
HWS_CONTROL_ELEMENT_DQ_SKEW,
search_dir, dir, tm->if_act_mask,
init_val, iterations, pbs_pattern,
search_edge, CS_SINGLE, cs_num,
train_status);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
CHECK_STATUS(ddr3_tip_if_read
(dev_num,
ACCESS_TYPE_MULTICAST,
PARAM_NOT_CARE,
mask_results_dq_reg_map
[bit +
pup *
BUS_WIDTH_IN_BITS],
res0, MASK_ALL_BITS));
if (pup_state[if_id][pup] != 2) {
/*
* if pup is not SBA continue
* to next pup
*/
bit = BUS_WIDTH_IN_BITS;
continue;
}
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
("Per Bit Skew search, PF I/F %d, bit:%d, pup:%d res0 0x%x\n",
if_id, bit, pup, res0[if_id]));
if ((res0[if_id] & 0x2000000) == 0) {
DEBUG_PBS_ENGINE
(DEBUG_LEVEL_INFO,
("SBA Fail\n"));
max_pbs_per_pup[if_id][pup] =
0x1f;
result_all_bit[
bit + pup *
BUS_WIDTH_IN_BITS +
if_id * MAX_BUS_NUM *
BUS_WIDTH_IN_BITS] =
0x1f;
} else {
temp = (u8)(res0[if_id] &
res_valid_mask);
max_pbs_per_pup[if_id][pup] =
(temp >
max_pbs_per_pup[if_id]
[pup]) ? temp :
max_pbs_per_pup
[if_id][pup];
min_pbs_per_pup[if_id][pup] =
(temp <
min_pbs_per_pup[if_id]
[pup]) ? temp :
min_pbs_per_pup
[if_id][pup];
result_all_bit[
bit + pup *
BUS_WIDTH_IN_BITS +
if_id * MAX_BUS_NUM *
BUS_WIDTH_IN_BITS] =
temp;
adll_shift_lock[if_id][pup] = 1;
}
}
}
}
/* Check all Pup state */
all_lock = 1;
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
/*
* DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
* ("pup_state[%d][%d] = %d\n",if_id,pup,pup_state
* [if_id][pup]));
*/
}
}
/* END OF SBA */
/* Normalize the per-bit results against the per-pup minimum */
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1;
if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/* if pup not lock continue to next pup */
if (adll_shift_lock[if_id][pup] != 1) {
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_ERROR,
("PBS failed for IF #%d\n",
if_id));
training_result[training_stage][if_id]
= TEST_FAILED;
result_mat[if_id][pup][bit] = 0;
max_pbs_per_pup[if_id][pup] = 0;
min_pbs_per_pup[if_id][pup] = 0;
} else {
training_result[
training_stage][if_id] =
(training_result[training_stage]
[if_id] == TEST_FAILED) ?
TEST_FAILED : TEST_SUCCESS;
result_mat[if_id][pup][bit] =
result_all_bit[
bit + pup *
BUS_WIDTH_IN_BITS +
if_id * MAX_BUS_NUM *
BUS_WIDTH_IN_BITS] -
min_pbs_per_pup[if_id][pup];
}
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
("The abs min_pbs[%d][%d] = %d\n",
if_id, pup,
min_pbs_per_pup[if_id][pup]));
}
}
}
/* Clean all results */
ddr3_tip_clean_pbs_result(dev_num, pbs_mode);
/* DQ PBS register update with the final result */
for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup < tm->num_of_bus_per_interface; pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
("Final Results: if_id %d, pup %d, Pup State: %d\n",
if_id, pup, pup_state[if_id][pup]));
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
if (dq_map_table == NULL) {
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_ERROR,
("dq_map_table not initialized\n"));
return MV_FAIL;
}
pad_num = dq_map_table[
bit + pup * BUS_WIDTH_IN_BITS +
if_id * BUS_WIDTH_IN_BITS *
tm->num_of_bus_per_interface];
DEBUG_PBS_ENGINE(DEBUG_LEVEL_INFO,
("result_mat: %d ",
result_mat[if_id][pup]
[bit]));
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr + pad_num,
result_mat[if_id][pup][bit]));
}
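/*
* The per-pup PBS step size in psec is estimated as the ADLL span
* (in taps) times the ADLL tap delay, divided by the PBS span (in
* steps); TYPICAL_PBS_VALUE is used when the PBS span is zero.
*/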
pbsdelay_per_pup[pbs_mode][if_id][pup] =
(max_pbs_per_pup[if_id][pup] ==
min_pbs_per_pup[if_id][pup]) ?
TYPICAL_PBS_VALUE :
((max_adll_per_pup[if_id][pup] -
min_adll_per_pup[if_id][pup]) * adll_tap /
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]));
/* when in TX mode, the previously stored RX results are also written */
if (pbs_mode == PBS_TX_MODE) {
/* Write TX results */
reg_addr = (0x14 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) /
2));
reg_addr = (0x15 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) /
2));
/* Write previously stored RX results */
reg_addr = (0x54 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
result_mat_rx_dqs[if_id][pup]
[effective_cs]));
reg_addr = (0x55 + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr,
result_mat_rx_dqs[if_id][pup]
[effective_cs]));
} else {
/*
* RX results may affect the RL results correctness,
* so just store the results; they will be written
* in the TX stage
*/
result_mat_rx_dqs[if_id][pup][effective_cs] =
(max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) / 2;
}
DEBUG_PBS_ENGINE(
DEBUG_LEVEL_INFO,
(", PBS tap=%d [psec] ==> skew observed = %d\n",
pbsdelay_per_pup[pbs_mode][if_id][pup],
((max_pbs_per_pup[if_id][pup] -
min_pbs_per_pup[if_id][pup]) *
pbsdelay_per_pup[pbs_mode][if_id][pup])));
}
}
/* Write back to the phy the default values */
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(READ_CENTRALIZATION_PHY_REG + effective_cs * 4) :
(WRITE_CENTRALIZATION_PHY_REG + effective_cs * 4);
write_adll_value(nominal_adll, reg_addr);
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
reg_addr = (pbs_mode == PBS_RX_MODE) ?
(0x5a + effective_cs * 0x10) :
(0x1a + effective_cs * 0x10);
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
ACCESS_TYPE_UNICAST, pup, DDR_PHY_DATA, reg_addr,
0));
/* restore cs enable value */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
CS_ENABLE_REG, cs_enable_reg_val[if_id],
MASK_ALL_BITS));
}
/* exit test mode */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ODPG_WRITE_READ_MODE_ENABLE_REG, 0xffff, MASK_ALL_BITS));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/*
* meaning that no VW exists at all (no lock at
* the EBA ADLL shift at EBS)
*/
if (pup_state[if_id][pup] == 1)
return MV_FAIL;
}
return MV_OK;
}
/*
* Name: ddr3_tip_pbs_rx.
* Desc: PBS RX
* Args: TBD
* Notes:
* Returns: OK if success, other error code if fail.
*/
int ddr3_tip_pbs_rx(u32 uidev_num)
{
return ddr3_tip_pbs(uidev_num, PBS_RX_MODE);
}
/*
* Name: ddr3_tip_pbs_tx.
* Desc: PBS TX
* Args: TBD
* Notes:
* Returns: OK if success, other error code if fail.
*/
int ddr3_tip_pbs_tx(u32 uidev_num)
{
return ddr3_tip_pbs(uidev_num, PBS_TX_MODE);
}
#ifndef EXCLUDE_SWITCH_DEBUG
/*
* Print PBS Result
*/
int ddr3_tip_print_all_pbs_result(u32 dev_num)
{
u32 curr_cs;
u32 max_cs = hws_ddr3_tip_max_cs_get();
for (curr_cs = 0; curr_cs < max_cs; curr_cs++) {
ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_RX_MODE);
ddr3_tip_print_pbs_result(dev_num, curr_cs, PBS_TX_MODE);
}
return MV_OK;
}
/*
* Print PBS Result
*/
int ddr3_tip_print_pbs_result(u32 dev_num, u32 cs_num, enum pbs_dir pbs_mode)
{
u32 data_value = 0, bit = 0, if_id = 0, pup = 0;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + cs_num * 0x10) :
(PBS_TX_PHY_REG + cs_num * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
printf("CS%d, %s ,PBS\n", cs_num,
(pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (bit = 0; bit < BUS_WIDTH_IN_BITS; bit++) {
printf("%s, DQ", (pbs_mode == PBS_RX_MODE) ? "Rx" : "Tx");
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
printf("%d ,PBS,,, ", bit);
for (pup = 0; pup <= tm->num_of_bus_per_interface;
pup++) {
VALIDATE_ACTIVE(tm->bus_act_mask, pup);
CHECK_STATUS(ddr3_tip_bus_read
(dev_num, if_id,
ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr + bit,
&data_value));
printf("%d , ", data_value);
}
}
printf("\n");
}
printf("\n");
return MV_OK;
}
#endif
/*
* Clean PBS Result
*/
int ddr3_tip_clean_pbs_result(u32 dev_num, enum pbs_dir pbs_mode)
{
u32 if_id, pup, bit;
u32 reg_addr = (pbs_mode == PBS_RX_MODE) ?
(PBS_RX_PHY_REG + effective_cs * 0x10) :
(PBS_TX_PHY_REG + effective_cs * 0x10);
struct hws_topology_map *tm = ddr3_get_topology_map();
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (pup = 0; pup <= tm->num_of_bus_per_interface; pup++) {
for (bit = 0; bit <= BUS_WIDTH_IN_BITS + 3; bit++) {
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, ACCESS_TYPE_UNICAST, pup,
DDR_PHY_DATA, reg_addr + bit, 0));
}
}
}
return MV_OK;
}

View File

@ -0,0 +1,538 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
/* Design Guidelines parameters */
u32 g_zpri_data = 123; /* controller data - P drive strength */
u32 g_znri_data = 123; /* controller data - N drive strength */
u32 g_zpri_ctrl = 74; /* controller C/A - P drive strength */
u32 g_znri_ctrl = 74; /* controller C/A - N drive strength */
u32 g_zpodt_data = 45; /* controller data - P ODT */
u32 g_znodt_data = 45; /* controller data - N ODT */
u32 g_zpodt_ctrl = 45; /* controller data - P ODT */
u32 g_znodt_ctrl = 45; /* controller data - N ODT */
u32 g_odt_config = 0x120012;
u32 g_rtt_nom = 0x44;
u32 g_dic = 0x2;
#ifdef STATIC_ALGO_SUPPORT
#define PARAM_NOT_CARE 0
#define MAX_STATIC_SEQ 48
u32 silicon_delay[HWS_MAX_DEVICE_NUM];
struct hws_tip_static_config_info static_config[HWS_MAX_DEVICE_NUM];
static reg_data *static_init_controller_config[HWS_MAX_DEVICE_NUM];
/* debug delay in write leveling */
int wl_debug_delay = 0;
/* pup register #3 for functional board */
int function_reg_value = 8;
u32 silicon;
u32 read_ready_delay_phase_offset[] = { 4, 4, 4, 4, 6, 6, 6, 6 };
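/*
* The table below is indexed by the CS bitmask of a bus; the first
* column (the CS number) is only meaningful when a single CS bit is
* set, the second column is the number of CS bits set (see the use in
* ddr3_tip_read_leveling_static_config()).
*/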
static struct cs_element chip_select_map[] = {
/* CS Value (single only) Num_CS */
{0, 0},
{0, 1},
{1, 1},
{0, 2},
{2, 1},
{0, 2},
{0, 2},
{0, 3},
{3, 1},
{0, 2},
{0, 2},
{0, 3},
{0, 2},
{0, 3},
{0, 3},
{0, 4}
};
/*
* Register static init controller DB
*/
int ddr3_tip_init_specific_reg_config(u32 dev_num, reg_data *reg_config_arr)
{
static_init_controller_config[dev_num] = reg_config_arr;
return MV_OK;
}
/*
* Register static info DB
*/
int ddr3_tip_init_static_config_db(
u32 dev_num, struct hws_tip_static_config_info *static_config_info)
{
static_config[dev_num].board_trace_arr =
static_config_info->board_trace_arr;
static_config[dev_num].package_trace_arr =
static_config_info->package_trace_arr;
silicon_delay[dev_num] = static_config_info->silicon_delay;
return MV_OK;
}
/*
* Static round trip flow - Calculates the total round trip delay.
*/
int ddr3_tip_static_round_trip_arr_build(u32 dev_num,
struct trip_delay_element *table_ptr,
int is_wl, u32 *round_trip_delay_arr)
{
u32 bus_index, global_bus;
u32 if_id;
u32 bus_per_interface;
int sign;
u32 temp;
u32 board_trace;
struct trip_delay_element *pkg_delay_ptr;
struct hws_topology_map *tm = ddr3_get_topology_map();
/*
* In WL we calculate the difference between Clock and DQS; in RL we
* sum the round trip of Clock and DQS
*/
sign = (is_wl) ? -1 : 1;
bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (bus_index = 0; bus_index < bus_per_interface;
bus_index++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
global_bus = (if_id * bus_per_interface) + bus_index;
/* calculate total trip delay (package and board) */
board_trace = (table_ptr[global_bus].dqs_delay * sign) +
table_ptr[global_bus].ck_delay;
temp = (board_trace * 163) / 1000;
/* Convert the length to delay in psec units (~163 ps per 1000 length units) */
pkg_delay_ptr =
static_config[dev_num].package_trace_arr;
round_trip_delay_arr[global_bus] = temp +
(int)(pkg_delay_ptr[global_bus].dqs_delay *
sign) +
(int)pkg_delay_ptr[global_bus].ck_delay +
(int)((is_wl == 1) ? wl_debug_delay :
(int)silicon_delay[dev_num]);
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("Round Trip Build round_trip_delay_arr[0x%x]: 0x%x temp 0x%x\n",
global_bus, round_trip_delay_arr[global_bus],
temp));
}
}
return MV_OK;
}
/*
* Write leveling for static flow - calculating the round trip delay of the
* DQS signal.
*/
int ddr3_tip_write_leveling_static_config(u32 dev_num, u32 if_id,
enum hws_ddr_freq frequency,
u32 *round_trip_delay_arr)
{
u32 bus_index; /* index to the bus loop */
u32 bus_start_index;
u32 bus_per_interface;
u32 phase = 0;
u32 adll = 0, adll_cen, adll_inv, adll_final;
u32 adll_period = MEGA / freq_val[frequency] / 64;
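/*
* e.g. assuming freq_val[] is in MHz: at 800 MHz the clock period is
* 10^6 / 800 = 1250 ps and one ADLL tap is 1250 / 64 ~= 19 ps
*/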
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("ddr3_tip_write_leveling_static_config\n"));
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("dev_num 0x%x IF 0x%x freq %d (adll_period 0x%x)\n",
dev_num, if_id, frequency, adll_period));
bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
bus_start_index = if_id * bus_per_interface;
for (bus_index = bus_start_index;
bus_index < (bus_start_index + bus_per_interface); bus_index++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
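/*
* Decompose the round-trip delay into 'phase' units of 32 ADLL taps
* (half a clock) plus an ADLL tap count, then add a 16-tap centering
* offset; adll_inv marks a wrap past 31 taps and adll_final folds the
* value back into the 5-bit ADLL range.
*/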
phase = round_trip_delay_arr[bus_index] / (32 * adll_period);
adll = (round_trip_delay_arr[bus_index] -
(phase * 32 * adll_period)) / adll_period;
adll = (adll > 31) ? 31 : adll;
adll_cen = 16 + adll;
adll_inv = adll_cen / 32;
adll_final = adll_cen - (adll_inv * 32);
adll_final = (adll_final > 31) ? 31 : adll_final;
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("\t%d - phase 0x%x adll 0x%x\n",
bus_index, phase, adll));
/*
* Writing to all 4 phy of Interface number,
* bits 0-4 - ADLL, bits 6-8 - phase
*/
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
(bus_index % 4), DDR_PHY_DATA,
PHY_WRITE_DELAY(cs),
((phase << 6) + (adll & 0x1f)), 0x1df));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
ACCESS_TYPE_UNICAST, (bus_index % 4),
DDR_PHY_DATA, WRITE_CENTRALIZATION_PHY_REG,
((adll_inv & 0x1) << 5) + adll_final));
}
return MV_OK;
}
/*
* Read leveling for static flow
*/
int ddr3_tip_read_leveling_static_config(u32 dev_num,
u32 if_id,
enum hws_ddr_freq frequency,
u32 *total_round_trip_delay_arr)
{
u32 cs, data0, data1, data3 = 0;
u32 bus_index; /* index to the bus loop */
u32 bus_start_index;
u32 phase0, phase1, max_phase;
u32 adll0, adll1;
u32 cl_value;
u32 min_delay;
u32 sdr_period = MEGA / freq_val[frequency];
u32 ddr_period = MEGA / freq_val[frequency] / 2;
u32 adll_period = MEGA / freq_val[frequency] / 64;
enum hws_speed_bin speed_bin_index;
u32 rd_sample_dly[MAX_CS_NUM] = { 0 };
u32 rd_ready_del[MAX_CS_NUM] = { 0 };
u32 bus_per_interface = GET_TOPOLOGY_NUM_OF_BUSES();
struct hws_topology_map *tm = ddr3_get_topology_map();
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("ddr3_tip_read_leveling_static_config\n"));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("dev_num 0x%x ifc 0x%x freq %d\n", dev_num,
if_id, frequency));
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("Sdr_period 0x%x Ddr_period 0x%x adll_period 0x%x\n",
sdr_period, ddr_period, adll_period));
if (tm->interface_params[first_active_if].memory_freq ==
frequency) {
cl_value = tm->interface_params[first_active_if].cas_l;
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("cl_value 0x%x\n", cl_value));
} else {
speed_bin_index = tm->interface_params[if_id].speed_bin_index;
cl_value = cas_latency_table[speed_bin_index].cl_val[frequency];
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("cl_value 0x%x speed_bin_index %d\n",
cl_value, speed_bin_index));
}
bus_start_index = if_id * bus_per_interface;
for (bus_index = bus_start_index;
bus_index < (bus_start_index + bus_per_interface);
bus_index += 2) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
cs = chip_select_map[
tm->interface_params[if_id].as_bus_params[
(bus_index % 4)].cs_bitmask].cs_num;
/* read sample delay calculation */
min_delay = (total_round_trip_delay_arr[bus_index] <
total_round_trip_delay_arr[bus_index + 1]) ?
total_round_trip_delay_arr[bus_index] :
total_round_trip_delay_arr[bus_index + 1];
/* round down to an even number of SDR clock cycles */
rd_sample_dly[cs] = 2 * (min_delay / (sdr_period * 2));
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("\t%d - min_delay 0x%x cs 0x%x rd_sample_dly[cs] 0x%x\n",
bus_index, min_delay, cs, rd_sample_dly[cs]));
/* phase calculation */
phase0 = (total_round_trip_delay_arr[bus_index] -
(sdr_period * rd_sample_dly[cs])) / (ddr_period);
phase1 = (total_round_trip_delay_arr[bus_index + 1] -
(sdr_period * rd_sample_dly[cs])) / (ddr_period);
max_phase = (phase0 > phase1) ? phase0 : phase1;
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("\tphase0 0x%x phase1 0x%x max_phase 0x%x\n",
phase0, phase1, max_phase));
/* ADLL calculation */
adll0 = (u32)((total_round_trip_delay_arr[bus_index] -
(sdr_period * rd_sample_dly[cs]) -
(ddr_period * phase0)) / adll_period);
adll0 = (adll0 > 31) ? 31 : adll0;
adll1 = (u32)((total_round_trip_delay_arr[bus_index + 1] -
(sdr_period * rd_sample_dly[cs]) -
(ddr_period * phase1)) / adll_period);
adll1 = (adll1 > 31) ? 31 : adll1;
/* The Read delay closes the Read FIFO */
rd_ready_del[cs] = rd_sample_dly[cs] +
read_ready_delay_phase_offset[max_phase];
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_TRACE,
("\tadll0 0x%x adll1 0x%x rd_ready_del[cs] 0x%x\n",
adll0, adll1, rd_ready_del[cs]));
/*
* Write to the phy of Interface (bits 0-4 - ADLL,
* bits 6-8 - phase)
*/
data0 = ((phase0 << 6) + (adll0 & 0x1f));
data1 = ((phase1 << 6) + (adll1 & 0x1f));
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
(bus_index % 4), DDR_PHY_DATA, PHY_READ_DELAY(cs),
data0, 0x1df));
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
((bus_index + 1) % 4), DDR_PHY_DATA,
PHY_READ_DELAY(cs), data1, 0x1df));
}
for (bus_index = 0; bus_index < bus_per_interface; bus_index++) {
VALIDATE_ACTIVE(tm->bus_act_mask, bus_index);
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
bus_index, DDR_PHY_DATA, 0x3, data3, 0x1f));
}
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
READ_DATA_SAMPLE_DELAY,
(rd_sample_dly[0] + cl_value) + (rd_sample_dly[1] << 8),
MASK_ALL_BITS));
/* Read_ready_del0 bit 0-4 , CS bits 8-12 */
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_UNICAST, if_id,
READ_DATA_READY_DELAY,
rd_ready_del[0] + (rd_ready_del[1] << 8) + cl_value,
MASK_ALL_BITS));
return MV_OK;
}
/*
* DDR3 Static flow
*/
int ddr3_tip_run_static_alg(u32 dev_num, enum hws_ddr_freq freq)
{
u32 if_id = 0;
struct trip_delay_element *table_ptr;
u32 wl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
u32 rl_total_round_trip_delay_arr[MAX_TOTAL_BUS_NUM];
struct init_cntr_param init_cntr_prm;
int ret;
struct hws_topology_map *tm = ddr3_get_topology_map();
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("ddr3_tip_run_static_alg"));
init_cntr_prm.do_mrs_phy = 1;
init_cntr_prm.is_ctrl64_bit = 0;
init_cntr_prm.init_phy = 1;
ret = hws_ddr3_tip_init_controller(dev_num, &init_cntr_prm);
if (ret != MV_OK) {
DEBUG_TRAINING_STATIC_IP(
DEBUG_LEVEL_ERROR,
("hws_ddr3_tip_init_controller failure\n"));
}
/* calculate the round trip delay for Write Leveling */
table_ptr = static_config[dev_num].board_trace_arr;
CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
(dev_num, table_ptr, 1,
wl_total_round_trip_delay_arr));
/* calculate the round trip delay for Read Leveling */
CHECK_STATUS(ddr3_tip_static_round_trip_arr_build
(dev_num, table_ptr, 0,
rl_total_round_trip_delay_arr));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/* check if the interface is enabled */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
/*
* Static frequency is defined according to init-frequency
* (not target)
*/
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Static IF %d freq %d\n",
if_id, freq));
CHECK_STATUS(ddr3_tip_write_leveling_static_config
(dev_num, if_id, freq,
wl_total_round_trip_delay_arr));
CHECK_STATUS(ddr3_tip_read_leveling_static_config
(dev_num, if_id, freq,
rl_total_round_trip_delay_arr));
}
return MV_OK;
}
/*
* Init controller for static flow
*/
int ddr3_tip_static_init_controller(u32 dev_num)
{
u32 index_cnt = 0;
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("ddr3_tip_static_init_controller\n"));
while (static_init_controller_config[dev_num][index_cnt].reg_addr !=
0) {
CHECK_STATUS(ddr3_tip_if_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
static_init_controller_config[dev_num][index_cnt].
reg_addr,
static_init_controller_config[dev_num][index_cnt].
reg_data,
static_init_controller_config[dev_num][index_cnt].
reg_mask));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Init_controller index_cnt %d\n",
index_cnt));
index_cnt++;
}
return MV_OK;
}
int ddr3_tip_static_phy_init_controller(u32 dev_num)
{
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Phy Init Controller 2\n"));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa4,
0x3dfe));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Phy Init Controller 3\n"));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa6,
0xcb2));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Phy Init Controller 4\n"));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa9,
0));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Static Receiver Calibration\n"));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xd0,
0x1f));
DEBUG_TRAINING_STATIC_IP(DEBUG_LEVEL_TRACE,
("Static V-REF Calibration\n"));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0xa8,
0x434));
return MV_OK;
}
#endif
/*
* Configure phy (called by static init controller) for static flow
*/
int ddr3_tip_configure_phy(u32 dev_num)
{
u32 if_id, phy_id;
struct hws_topology_map *tm = ddr3_get_topology_map();
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
PAD_ZRI_CALIB_PHY_REG,
((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
PAD_ZRI_CALIB_PHY_REG,
((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
PAD_ODT_CALIB_PHY_REG,
((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
PAD_ODT_CALIB_PHY_REG,
((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
PAD_PRE_DISABLE_PHY_REG, 0));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
CMOS_CONFIG_PHY_REG, 0));
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
CMOS_CONFIG_PHY_REG, 0));
for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
/* check if the interface is enabled */
VALIDATE_ACTIVE(tm->if_act_mask, if_id);
for (phy_id = 0;
phy_id < tm->num_of_bus_per_interface;
phy_id++) {
VALIDATE_ACTIVE(tm->bus_act_mask, phy_id);
/* Vref & clamp */
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, phy_id, DDR_PHY_DATA,
PAD_CONFIG_PHY_REG,
((clamp_tbl[if_id] << 4) | vref),
((0x7 << 4) | 0x7)));
/* clamp not relevant for control */
CHECK_STATUS(ddr3_tip_bus_read_modify_write
(dev_num, ACCESS_TYPE_UNICAST,
if_id, phy_id, DDR_PHY_CONTROL,
PAD_CONFIG_PHY_REG, 0x4, 0x7));
}
}
CHECK_STATUS(ddr3_tip_bus_write
(dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA, 0x90,
0x6002));
return MV_OK;
}

View File

@ -0,0 +1,112 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR_TOPOLOGY_DEF_H
#define _DDR_TOPOLOGY_DEF_H
#include "ddr3_training_ip_def.h"
#include "ddr3_topology_def.h"
#if defined(CONFIG_ARMADA_38X)
#include "ddr3_a38x.h"
#endif
/* bus width in bits */
enum hws_bus_width {
BUS_WIDTH_4,
BUS_WIDTH_8,
BUS_WIDTH_16,
BUS_WIDTH_32
};
enum hws_temperature {
HWS_TEMP_LOW,
HWS_TEMP_NORMAL,
HWS_TEMP_HIGH
};
enum hws_mem_size {
MEM_512M,
MEM_1G,
MEM_2G,
MEM_4G,
MEM_8G,
MEM_SIZE_LAST
};
struct bus_params {
/* Chip Select (CS) bitmask (bit 0 - CS0, bit 1 - CS1, ...) */
u8 cs_bitmask;
/*
* mirror enable/disable
* (bit 0 - CS0 mirroring, bit 1 - CS1 mirroring, ...)
*/
int mirror_enable_bitmask;
/* DQS Swap (polarity) - true if enabled */
int is_dqs_swap;
/* CK swap (polarity) - true if enabled */
int is_ck_swap;
};
struct if_params {
/* bus configuration */
struct bus_params as_bus_params[MAX_BUS_NUM];
/* Speed Bin Table */
enum hws_speed_bin speed_bin_index;
/* bus width of memory */
enum hws_bus_width bus_width;
/* Bus memory size (MBit) */
enum hws_mem_size memory_size;
/* The DDR frequency for each interface */
enum hws_ddr_freq memory_freq;
/*
* delay CAS Write Latency
* - 0 for using default value (jedec suggested)
*/
u8 cas_wl;
/*
* delay CAS Latency
* - 0 for using default value (jedec suggested)
*/
u8 cas_l;
/* operation temperature */
enum hws_temperature interface_temp;
};
struct hws_topology_map {
/* Number of interfaces (default is 12) */
u8 if_act_mask;
/* Controller configuration per interface */
struct if_params interface_params[MAX_INTERFACE_NUM];
/* BUS per interface (default is 4) */
u8 num_of_bus_per_interface;
/* Bit mask for active buses */
u8 bus_act_mask;
};
/* DDR3 training global configuration parameters */
struct tune_train_params {
u32 ck_delay;
u32 ck_delay_16;
u32 p_finger;
u32 n_finger;
u32 phy_reg3_val;
};
#endif /* _DDR_TOPOLOGY_DEF_H */

View File

@ -0,0 +1,16 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _DDR_TRAINING_IP_DB_H_
#define _DDR_TRAINING_IP_DB_H_
#include "ddr_topology_def.h"
#include "ddr3_training_ip_db.h"
u32 speed_bin_table(u8 index, enum speed_bin_table_elements element);
u32 pattern_table_get_word(u32 dev_num, enum hws_pattern type, u8 index);
#endif /* _DDR_TRAINING_IP_DB_H_ */

View File

@ -0,0 +1,17 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef __silicon_if_H
#define __silicon_if_H
/* max number of devices supported by driver */
#ifdef CO_CPU_RUN
#define HWS_MAX_DEVICE_NUM (1)
#else
#define HWS_MAX_DEVICE_NUM (16)
#endif
#endif /* __silicon_if_H */

View File

@ -0,0 +1,356 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#include <common.h>
#include <i2c.h>
#include <spl.h>
#include <asm/io.h>
#include <asm/arch/cpu.h>
#include <asm/arch/soc.h>
#include "ddr3_init.h"
#include "xor_regs.h"
/* defines */
#ifdef MV_DEBUG
#define DB(x) x
#else
#define DB(x)
#endif
static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS];
static u32 ui_xor_regs_mask_backup[MAX_CS];
void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, u32 cs_size, u32 base_delta)
{
u32 reg, ui, base, cs_count;
ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
for (ui = 0; ui < MAX_CS; ui++)
ui_xor_regs_base_backup[ui] =
reg_read(XOR_BASE_ADDR_REG(0, ui));
for (ui = 0; ui < MAX_CS; ui++)
ui_xor_regs_mask_backup[ui] =
reg_read(XOR_SIZE_MASK_REG(0, ui));
reg = 0;
for (ui = 0; ui < (num_of_cs); ui++) {
/* Enable Window x for each CS */
reg |= (0x1 << (ui));
/* Set Window x access to read/write for each CS */
reg |= (0x3 << ((ui * 2) + 16));
}
reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);
cs_count = 0;
for (ui = 0; ui < num_of_cs; ui++) {
if (cs_ena & (1 << ui)) {
/*
* window x - Base - 0x00000000,
* Attribute 0x0e - DRAM
*/
base = cs_size * ui + base_delta;
switch (ui) {
case 0:
base |= 0xe00;
break;
case 1:
base |= 0xd00;
break;
case 2:
base |= 0xb00;
break;
case 3:
base |= 0x700;
break;
}
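/*
* bits 8-15 of the base register hold the window attribute:
* 0x0e/0x0d/0x0b/0x07 select DRAM CS0..CS3
*/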
reg_write(XOR_BASE_ADDR_REG(0, cs_count), base);
/* window x - Size */
reg_write(XOR_SIZE_MASK_REG(0, cs_count), 0x7fff0000);
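/* size mask 0x7fff0000 gives roughly a 2 GiB window at 64 KiB granularity */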
cs_count++;
}
}
mv_xor_hal_init(1);
return;
}
void mv_sys_xor_finish(void)
{
u32 ui;
reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
for (ui = 0; ui < MAX_CS; ui++)
reg_write(XOR_BASE_ADDR_REG(0, ui),
ui_xor_regs_base_backup[ui]);
for (ui = 0; ui < MAX_CS; ui++)
reg_write(XOR_SIZE_MASK_REG(0, ui),
ui_xor_regs_mask_backup[ui]);
reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
}
/*
* mv_xor_hal_init - Initialize XOR engine
*
* DESCRIPTION:
* This function initializes the XOR unit.
* INPUT:
* xor_chan_num - number of XOR channels to initialize.
*
* OUTPUT:
* None.
*
* RETURN:
* None.
*/
void mv_xor_hal_init(u32 xor_chan_num)
{
u32 i;
/* Abort any XOR activity & set default configuration */
for (i = 0; i < xor_chan_num; i++) {
mv_xor_command_set(i, MV_STOP);
mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
(4 << XEXCR_DST_BURST_LIMIT_OFFS) |
(4 << XEXCR_SRC_BURST_LIMIT_OFFS));
}
}
/*
* mv_xor_ctrl_set - Set XOR channel control registers
*
* DESCRIPTION:
* Set the configuration register of the given XOR channel.
*
* INPUT:
* chan - the channel number
* xor_ctrl - the control register value to set
*
* OUTPUT:
* None.
*
* RETURN:
* MV_OK.
* NOTE:
* This function does not modify the Operation_mode field of control register.
*/
int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
{
u32 old_value;
/* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
XEXCR_OPERATION_MODE_MASK;
xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
xor_ctrl |= old_value;
reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);
return MV_OK;
}
int mv_xor_mem_init(u32 chan, u32 start_ptr, u32 block_size,
u32 init_val_high, u32 init_val_low)
{
u32 temp;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN)
return MV_BAD_PARAM;
if (MV_ACTIVE == mv_xor_state_get(chan))
return MV_BUSY;
if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
(block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
return MV_BAD_PARAM;
/* set the operation mode to Memory Init */
temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
temp &= ~XEXCR_OPERATION_MODE_MASK;
temp |= XEXCR_OPERATION_MODE_MEM_INIT;
reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
/*
* update the start_ptr field in XOR Engine [0..1] Destination Pointer
* Register
*/
reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);
/*
* update the Block_size field in the XOR Engine[0..1] Block Size
* Registers
*/
reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
block_size);
/*
* update the field Init_val_l in the XOR Engine Initial Value Register
* Low (XEIVRL)
*/
reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);
/*
* update the field Init_val_h in the XOR Engine Initial Value Register
* High (XEIVRH)
*/
reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);
/* start transfer */
reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XESTART_MASK);
return MV_OK;
}
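/*
* Typical usage (see ddr3_new_tip_ecc_scrub() below): set up the
* address decode windows with mv_sys_xor_init(), start the fill with
* mv_xor_mem_init(), then poll mv_xor_state_get() until it returns
* MV_IDLE.
*/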
/*
* mv_xor_state_get - Get XOR channel state.
*
* DESCRIPTION:
* XOR channel activity state can be active, idle, paused.
* This function returns the channel activity state.
*
* INPUT:
* chan - the channel number
*
* OUTPUT:
* None.
*
* RETURN:
* MV_IDLE - If the engine is idle.
* MV_ACTIVE - If the engine is busy.
* MV_PAUSED - If the engine is paused.
* MV_UNDEFINED_STATE - If the engine state is undefined or there is no
* such engine
*/
enum mv_state mv_xor_state_get(u32 chan)
{
u32 state;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN) {
DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
return MV_UNDEFINED_STATE;
}
/* read the current state */
state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
state &= XEXACTR_XESTATUS_MASK;
/* return the state */
switch (state) {
case XEXACTR_XESTATUS_IDLE:
return MV_IDLE;
case XEXACTR_XESTATUS_ACTIVE:
return MV_ACTIVE;
case XEXACTR_XESTATUS_PAUSED:
return MV_PAUSED;
}
return MV_UNDEFINED_STATE;
}
/*
* mv_xor_command_set - Set command of XOR channel
*
* DESCRIPTION:
* XOR channel can be started, idle, paused and restarted.
* Paused can be set only if channel is active.
* Start can be set only if channel is idle or paused.
* Restart can be set only if channel is paused.
* Stop can be set only if channel is active.
*
* INPUT:
* chan - The channel number
* command - The command type (start, stop, restart, pause)
*
* OUTPUT:
* None.
*
* RETURN:
* MV_OK on success, MV_BAD_PARAM on erroneous parameter, MV_ERROR on
* undefined XOR engine mode
*/
int mv_xor_command_set(u32 chan, enum mv_command command)
{
enum mv_state state;
/* Parameter checking */
if (chan >= MV_XOR_MAX_CHAN) {
DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
return MV_BAD_PARAM;
}
/* get the current state */
state = mv_xor_state_get(chan);
if ((command == MV_START) && (state == MV_IDLE)) {
/* command is start and current state is idle */
reg_bit_set(XOR_ACTIVATION_REG
(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XESTART_MASK);
return MV_OK;
} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
/* command is stop and current state is active */
reg_bit_set(XOR_ACTIVATION_REG
(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XESTOP_MASK);
return MV_OK;
} else if (((enum mv_state)command == MV_PAUSED) &&
(state == MV_ACTIVE)) {
/* command is paused and current state is active */
reg_bit_set(XOR_ACTIVATION_REG
(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XEPAUSE_MASK);
return MV_OK;
} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
/* command is restart and current state is paused */
reg_bit_set(XOR_ACTIVATION_REG
(XOR_UNIT(chan), XOR_CHAN(chan)),
XEXACTR_XERESTART_MASK);
return MV_OK;
} else if ((command == MV_STOP) && (state == MV_IDLE)) {
/* command is stop and current state is already idle */
return MV_OK;
}
/* illegal command */
DB(printf("%s: ERR. Illegal command\n", __func__));
return MV_BAD_PARAM;
}
void ddr3_new_tip_ecc_scrub(void)
{
u32 cs_c, max_cs;
u32 cs_ena = 0;
printf("DDR3 Training Sequence - Start scrubbing\n");
max_cs = hws_ddr3_tip_max_cs_get();
for (cs_c = 0; cs_c < max_cs; cs_c++)
cs_ena |= 1 << cs_c;
mv_sys_xor_init(max_cs, cs_ena, 0x80000000, 0);
mv_xor_mem_init(0, 0x00000000, 0x80000000, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
mv_xor_mem_init(0, 0x80000000, 0x40000000, 0xdeadbeef, 0xdeadbeef);
/* wait for previous transfer completion */
while (mv_xor_state_get(0) != MV_IDLE)
;
/* Return XOR State */
mv_sys_xor_finish();
printf("DDR3 Training Sequence - End scrubbing\n");
}

View File

@ -0,0 +1,92 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _XOR_H
#define _XOR_H
#define SRAM_BASE 0x40000000
#include "ddr3_hws_hw_training_def.h"
#define MV_XOR_MAX_UNIT 2 /* XOR unit == XOR engine */
#define MV_XOR_MAX_CHAN 4 /* total channels for all units */
#define MV_XOR_MAX_CHAN_PER_UNIT 2 /* channels for units */
#define MV_IS_POWER_OF_2(num) (((num) != 0) && (((num) & ((num) - 1)) == 0))
/*
* This structure describes address space window. Window base can be
* 64 bit, window size up to 4GB
*/
struct addr_win {
u32 base_low; /* 32bit base low */
u32 base_high; /* 32bit base high */
u32 size; /* 32bit size */
};
/* This structure describes SoC units address decode window */
struct unit_win_info {
struct addr_win addr_win; /* An address window */
int enable; /* Address decode window is enabled/disabled */
u8 attrib; /* chip select attributes */
u8 target_id; /* Target Id of this MV_TARGET */
};
/*
* This enumerator describes the type of functionality the XOR channel
* can have while using the same data structures.
*/
enum xor_type {
MV_XOR, /* XOR channel functions as XOR accelerator */
MV_DMA, /* XOR channel functions as IDMA channel */
MV_CRC32 /* XOR channel functions as CRC 32 calculator */
};
enum mv_state {
MV_IDLE,
MV_ACTIVE,
MV_PAUSED,
MV_UNDEFINED_STATE
};
/*
* This enumerator describes the set of commands that can be applied on
* an engine (e.g. IDMA, XOR). Applying a command depends on the current
* status (see the mv_state enumerator)
*
* Start can be applied only when status is IDLE
* Stop can be applied only when status is IDLE, ACTIVE or PAUSED
* Pause can be applied only when status is ACTIVE
* Restart can be applied only when status is PAUSED
*/
enum mv_command {
MV_START, /* Start */
MV_STOP, /* Stop */
MV_PAUSE, /* Pause */
MV_RESTART /* Restart */
};
enum xor_override_target {
SRC_ADDR0, /* Source Address #0 Control */
SRC_ADDR1, /* Source Address #1 Control */
SRC_ADDR2, /* Source Address #2 Control */
SRC_ADDR3, /* Source Address #3 Control */
SRC_ADDR4, /* Source Address #4 Control */
SRC_ADDR5, /* Source Address #5 Control */
SRC_ADDR6, /* Source Address #6 Control */
SRC_ADDR7, /* Source Address #7 Control */
XOR_DST_ADDR, /* Destination Address Control */
XOR_NEXT_DESC /* Next Descriptor Address Control */
};
enum mv_state mv_xor_state_get(u32 chan);
void mv_xor_hal_init(u32 xor_chan_num);
int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl);
int mv_xor_command_set(u32 chan, enum mv_command command);
int mv_xor_override_set(u32 chan, enum xor_override_target target, u32 win_num,
int enable);
#endif

View File

@ -0,0 +1,236 @@
/*
* Copyright (C) Marvell International Ltd. and its affiliates
*
* SPDX-License-Identifier: GPL-2.0
*/
#ifndef _XOR_REGS_h
#define _XOR_REGS_h
/*
* For controllers that have two XOR units, chans 2 & 3 will be
* mapped to channels 0 & 1 of unit 1
*/
#define XOR_UNIT(chan) ((chan) >> 1)
#define XOR_CHAN(chan) ((chan) & 1)
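/* e.g. chan 2 -> unit 1 / channel 0, chan 3 -> unit 1 / channel 1 */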
#define MV_XOR_REGS_OFFSET(unit) (0x60900)
#define MV_XOR_REGS_BASE(unit) (MV_XOR_REGS_OFFSET(unit))
/* XOR Engine Control Register Map */
#define XOR_CHANNEL_ARBITER_REG(unit) (MV_XOR_REGS_BASE(unit))
#define XOR_CONFIG_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x10 + ((chan) * 4)))
#define XOR_ACTIVATION_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x20 + ((chan) * 4)))
/* XOR Engine Interrupt Register Map */
#define XOR_CAUSE_REG(unit) (MV_XOR_REGS_BASE(unit)+(0x30))
#define XOR_MASK_REG(unit) (MV_XOR_REGS_BASE(unit)+(0x40))
#define XOR_ERROR_CAUSE_REG(unit) (MV_XOR_REGS_BASE(unit)+(0x50))
#define XOR_ERROR_ADDR_REG(unit) (MV_XOR_REGS_BASE(unit)+(0x60))
/* XOR Engine Descriptor Register Map */
#define XOR_NEXT_DESC_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x200 + ((chan) * 4)))
#define XOR_CURR_DESC_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x210 + ((chan) * 4)))
#define XOR_BYTE_COUNT_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x220 + ((chan) * 4)))
/* XOR Engine ECC/Mem_init Register Map */
#define XOR_DST_PTR_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x2b0 + ((chan) * 4)))
#define XOR_BLOCK_SIZE_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x2c0 + ((chan) * 4)))
#define XOR_TIMER_MODE_CTRL_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x2d0))
#define XOR_TIMER_MODE_INIT_VAL_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x2d4))
#define XOR_TIMER_MODE_CURR_VAL_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x2d8))
#define XOR_INIT_VAL_LOW_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x2e0))
#define XOR_INIT_VAL_HIGH_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x2e4))
/* XOR Engine Debug Register Map */
#define XOR_DEBUG_REG(unit) (MV_XOR_REGS_BASE(unit) + (0x70))
/* XOR register fields */
/* XOR Engine Channel Arbiter Register */
#define XECAR_SLICE_OFFS(slice_num) (slice_num)
#define XECAR_SLICE_MASK(slice_num) (1 << (XECAR_SLICE_OFFS(slice_num)))
/* XOR Engine [0..1] Configuration Registers */
#define XEXCR_OPERATION_MODE_OFFS (0)
#define XEXCR_OPERATION_MODE_MASK (7 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_OPERATION_MODE_XOR (0 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_OPERATION_MODE_CRC (1 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_OPERATION_MODE_DMA (2 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_OPERATION_MODE_ECC (3 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_OPERATION_MODE_MEM_INIT (4 << XEXCR_OPERATION_MODE_OFFS)
#define XEXCR_SRC_BURST_LIMIT_OFFS (4)
#define XEXCR_SRC_BURST_LIMIT_MASK (7 << XEXCR_SRC_BURST_LIMIT_OFFS)
#define XEXCR_DST_BURST_LIMIT_OFFS (8)
#define XEXCR_DST_BURST_LIMIT_MASK (7 << XEXCR_DST_BURST_LIMIT_OFFS)
#define XEXCR_DRD_RES_SWP_OFFS (12)
#define XEXCR_DRD_RES_SWP_MASK (1 << XEXCR_DRD_RES_SWP_OFFS)
#define XEXCR_DWR_REQ_SWP_OFFS (13)
#define XEXCR_DWR_REQ_SWP_MASK (1 << XEXCR_DWR_REQ_SWP_OFFS)
#define XEXCR_DES_SWP_OFFS (14)
#define XEXCR_DES_SWP_MASK (1 << XEXCR_DES_SWP_OFFS)
#define XEXCR_REG_ACC_PROTECT_OFFS (15)
#define XEXCR_REG_ACC_PROTECT_MASK (1 << XEXCR_REG_ACC_PROTECT_OFFS)
/* XOR Engine [0..1] Activation Registers */
#define XEXACTR_XESTART_OFFS (0)
#define XEXACTR_XESTART_MASK (1 << XEXACTR_XESTART_OFFS)
#define XEXACTR_XESTOP_OFFS (1)
#define XEXACTR_XESTOP_MASK (1 << XEXACTR_XESTOP_OFFS)
#define XEXACTR_XEPAUSE_OFFS (2)
#define XEXACTR_XEPAUSE_MASK (1 << XEXACTR_XEPAUSE_OFFS)
#define XEXACTR_XERESTART_OFFS (3)
#define XEXACTR_XERESTART_MASK (1 << XEXACTR_XERESTART_OFFS)
#define XEXACTR_XESTATUS_OFFS (4)
#define XEXACTR_XESTATUS_MASK (3 << XEXACTR_XESTATUS_OFFS)
#define XEXACTR_XESTATUS_IDLE (0 << XEXACTR_XESTATUS_OFFS)
#define XEXACTR_XESTATUS_ACTIVE (1 << XEXACTR_XESTATUS_OFFS)
#define XEXACTR_XESTATUS_PAUSED (2 << XEXACTR_XESTATUS_OFFS)
/* XOR Engine Interrupt Cause Register (XEICR) */
#define XEICR_CHAN_OFFS 16
#define XEICR_CAUSE_OFFS(chan) (chan * XEICR_CHAN_OFFS)
#define XEICR_CAUSE_MASK(chan, cause) (1 << (cause + XEICR_CAUSE_OFFS(chan)))
#define XEICR_COMP_MASK_ALL 0x000f000f
#define XEICR_COMP_MASK(chan) (0x000f << XEICR_CAUSE_OFFS(chan))
#define XEICR_ERR_MASK 0x03800380
/* XOR Engine Error Cause Register (XEECR) */
#define XEECR_ERR_TYPE_OFFS 0
#define XEECR_ERR_TYPE_MASK (0x1f << XEECR_ERR_TYPE_OFFS)
/* XOR Engine Error Address Register (XEEAR) */
#define XEEAR_ERR_ADDR_OFFS (0)
#define XEEAR_ERR_ADDR_MASK (0xffffffff << XEEAR_ERR_ADDR_OFFS)
/* XOR Engine [0..1] Next Descriptor Pointer Register */
#define XEXNDPR_NEXT_DESC_PTR_OFFS (0)
#define XEXNDPR_NEXT_DESC_PTR_MASK (0xffffffff << \
XEXNDPR_NEXT_DESC_PTR_OFFS)
/* XOR Engine [0..1] Current Descriptor Pointer Register */
#define XEXCDPR_CURRENT_DESC_PTR_OFFS (0)
#define XEXCDPR_CURRENT_DESC_PTR_MASK (0xffffffff << \
XEXCDPR_CURRENT_DESC_PTR_OFFS)
/* XOR Engine [0..1] Byte Count Register */
#define XEXBCR_BYTE_CNT_OFFS (0)
#define XEXBCR_BYTE_CNT_MASK (0xffffffff << XEXBCR_BYTE_CNT_OFFS)
/* XOR Engine [0..1] Destination Pointer Register */
#define XEXDPR_DST_PTR_OFFS (0)
#define XEXDPR_DST_PTR_MASK (0xffffffff << XEXDPR_DST_PTR_OFFS)
#define XEXDPR_DST_PTR_XOR_MASK (0x3f)
#define XEXDPR_DST_PTR_DMA_MASK (0x1f)
#define XEXDPR_DST_PTR_CRC_MASK (0x1f)
/* XOR Engine[0..1] Block Size Registers */
#define XEXBSR_BLOCK_SIZE_OFFS (0)
#define XEXBSR_BLOCK_SIZE_MASK (0xffffffff << XEXBSR_BLOCK_SIZE_OFFS)
#define XEXBSR_BLOCK_SIZE_MIN_VALUE (128)
#define XEXBSR_BLOCK_SIZE_MAX_VALUE (0xffffffff)
/* XOR Engine Timer Mode Control Register (XETMCR) */
#define XETMCR_TIMER_EN_OFFS (0)
#define XETMCR_TIMER_EN_MASK (1 << XETMCR_TIMER_EN_OFFS)
#define XETMCR_TIMER_EN_ENABLE (1 << XETMCR_TIMER_EN_OFFS)
#define XETMCR_TIMER_EN_DISABLE (0 << XETMCR_TIMER_EN_OFFS)
#define XETMCR_SECTION_SIZE_CTRL_OFFS (8)
#define XETMCR_SECTION_SIZE_CTRL_MASK (0x1f << XETMCR_SECTION_SIZE_CTRL_OFFS)
#define XETMCR_SECTION_SIZE_MIN_VALUE (7)
#define XETMCR_SECTION_SIZE_MAX_VALUE (31)
/* XOR Engine Timer Mode Initial Value Register (XETMIVR) */
#define XETMIVR_TIMER_INIT_VAL_OFFS (0)
#define XETMIVR_TIMER_INIT_VAL_MASK (0xffffffff << \
XETMIVR_TIMER_INIT_VAL_OFFS)
/* XOR Engine Timer Mode Current Value Register (XETMCVR) */
#define XETMCVR_TIMER_CRNT_VAL_OFFS (0)
#define XETMCVR_TIMER_CRNT_VAL_MASK (0xffffffff << \
XETMCVR_TIMER_CRNT_VAL_OFFS)
/* XOR Engine Initial Value Register Low (XEIVRL) */
#define XEIVRL_INIT_VAL_L_OFFS (0)
#define XEIVRL_INIT_VAL_L_MASK (0xffffffff << XEIVRL_INIT_VAL_L_OFFS)
/* XOR Engine Initial Value Register High (XEIVRH) */
#define XEIVRH_INIT_VAL_H_OFFS (0)
#define XEIVRH_INIT_VAL_H_MASK (0xffffffff << XEIVRH_INIT_VAL_H_OFFS)
/* XOR Engine Debug Register (XEDBR) */
#define XEDBR_PARITY_ERR_INSR_OFFS (0)
#define XEDBR_PARITY_ERR_INSR_MASK (1 << XEDBR_PARITY_ERR_INSR_OFFS)
#define XEDBR_XBAR_ERR_INSR_OFFS (1)
#define XEDBR_XBAR_ERR_INSR_MASK (1 << XEDBR_XBAR_ERR_INSR_OFFS)
/* XOR Engine address decode registers. */
/* Maximum address decode windows */
#define XOR_MAX_ADDR_DEC_WIN 8
/* Maximum address arbiter windows */
#define XOR_MAX_REMAP_WIN 4
/* XOR Engine Address Decoding Register Map */
#define XOR_WINDOW_CTRL_REG(unit, chan) (MV_XOR_REGS_BASE(unit) + \
(0x240 + ((chan) * 4)))
#define XOR_BASE_ADDR_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \
(0x250 + ((win_num) * 4)))
#define XOR_SIZE_MASK_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \
(0x270 + ((win_num) * 4)))
#define XOR_HIGH_ADDR_REMAP_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \
(0x290 + ((win_num) * 4)))
#define XOR_ADDR_OVRD_REG(unit, win_num) (MV_XOR_REGS_BASE(unit) + \
(0x2a0 + ((win_num) * 4)))
/* XOR Engine [0..1] Window Control Registers */
#define XEXWCR_WIN_EN_OFFS(win_num) (win_num)
#define XEXWCR_WIN_EN_MASK(win_num) (1 << (XEXWCR_WIN_EN_OFFS(win_num)))
#define XEXWCR_WIN_EN_ENABLE(win_num) (1 << (XEXWCR_WIN_EN_OFFS(win_num)))
#define XEXWCR_WIN_EN_DISABLE(win_num) (0 << (XEXWCR_WIN_EN_OFFS(win_num)))
#define XEXWCR_WIN_ACC_OFFS(win_num) ((2 * win_num) + 16)
#define XEXWCR_WIN_ACC_MASK(win_num) (3 << (XEXWCR_WIN_ACC_OFFS(win_num)))
#define XEXWCR_WIN_ACC_NO_ACC(win_num) (0 << (XEXWCR_WIN_ACC_OFFS(win_num)))
#define XEXWCR_WIN_ACC_RO(win_num) (1 << (XEXWCR_WIN_ACC_OFFS(win_num)))
#define XEXWCR_WIN_ACC_RW(win_num) (3 << (XEXWCR_WIN_ACC_OFFS(win_num)))
/* XOR Engine Base Address Registers (XEBARx) */
#define XEBARX_TARGET_OFFS (0)
#define XEBARX_TARGET_MASK (0xf << XEBARX_TARGET_OFFS)
#define XEBARX_ATTR_OFFS (8)
#define XEBARX_ATTR_MASK (0xff << XEBARX_ATTR_OFFS)
#define XEBARX_BASE_OFFS (16)
#define XEBARX_BASE_MASK (0xffff << XEBARX_BASE_OFFS)
/* XOR Engine Size Mask Registers (XESMRx) */
#define XESMRX_SIZE_MASK_OFFS (16)
#define XESMRX_SIZE_MASK_MASK (0xffff << XESMRX_SIZE_MASK_OFFS)
#define XOR_WIN_SIZE_ALIGN _64K
/* XOR Engine High Address Remap Register (XEHARRx1) */
#define XEHARRX_REMAP_OFFS (0)
#define XEHARRX_REMAP_MASK (0xffffffff << XEHARRX_REMAP_OFFS)
#define XOR_OVERRIDE_CTRL_REG(chan) (MV_XOR_REGS_BASE(XOR_UNIT(chan)) + \
(0x2a0 + ((XOR_CHAN(chan)) * 4)))
/* XOR Engine [0..1] Address Override Control Register */
#define XEXAOCR_OVR_EN_OFFS(target) (3 * target)
#define XEXAOCR_OVR_EN_MASK(target) (1 << (XEXAOCR_OVR_EN_OFFS(target)))
#define XEXAOCR_OVR_PTR_OFFS(target) ((3 * target) + 1)
#define XEXAOCR_OVR_PTR_MASK(target) (3 << (XEXAOCR_OVR_PTR_OFFS(target)))
#define XEXAOCR_OVR_BAR(win_num, target) (win_num << \
(XEXAOCR_OVR_PTR_OFFS(target)))
/* Maximum address override windows */
#define XOR_MAX_OVERRIDE_WIN 4
#endif /* _XOR_REGS_h */

View File

@ -11,6 +11,7 @@
* High Level Configuration Options (easy to change)
*/
#define CONFIG_ARMADA_XP /* SOC Family Name */
#define CONFIG_ARMADA_38X
#define CONFIG_DB_88F6820_GP /* Board target name for DDR training */
#define CONFIG_SYS_L2_PL310
@ -108,6 +109,40 @@
"fdt_high=0x10000000\0" \
"initrd_high=0x10000000\0"
/* SPL */
/* Defines for SPL */
#define CONFIG_SPL_FRAMEWORK
#define CONFIG_SPL_SIZE (140 << 10)
#define CONFIG_SPL_TEXT_BASE 0x40000030
#define CONFIG_SPL_MAX_SIZE (CONFIG_SPL_SIZE - 0x0030)
#define CONFIG_SPL_BSS_START_ADDR (0x40000000 + CONFIG_SPL_SIZE)
#define CONFIG_SPL_BSS_MAX_SIZE (16 << 10)
#define CONFIG_SYS_SPL_MALLOC_START (CONFIG_SPL_BSS_START_ADDR + \
CONFIG_SPL_BSS_MAX_SIZE)
#define CONFIG_SYS_SPL_MALLOC_SIZE (16 << 10)
#define CONFIG_SPL_STACK (0x40000000 + ((192 - 16) << 10))
#define CONFIG_SPL_BOOTROM_SAVE (CONFIG_SPL_STACK + 4)
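/*
* Resulting SRAM layout (derived from the values above):
* 0x40000030 - SPL text/data (up to ~140 KiB)
* 0x40023000 - SPL BSS (16 KiB)
* 0x40027000 - early malloc area (16 KiB)
* 0x4002c000 - initial stack top, BootROM return value saved just above it
*/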
#define CONFIG_SPL_LIBCOMMON_SUPPORT
#define CONFIG_SPL_LIBGENERIC_SUPPORT
#define CONFIG_SPL_SERIAL_SUPPORT
#define CONFIG_SPL_I2C_SUPPORT
/* SPL related SPI defines */
#define CONFIG_SPL_SPI_SUPPORT
#define CONFIG_SPL_SPI_FLASH_SUPPORT
#define CONFIG_SPL_SPI_LOAD
#define CONFIG_SPL_SPI_BUS 0
#define CONFIG_SPL_SPI_CS 0
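/* the main U-Boot image is read from offset 0x20000 (128 KiB) in SPI flash */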
#define CONFIG_SYS_SPI_U_BOOT_OFFS 0x20000
/* Enable DDR support in SPL (DDR3 training from Marvell bin_hdr) */
#define CONFIG_SYS_MVEBU_DDR_A38X
#define CONFIG_DDR3
/*
* mv-common.h should be defined after CMD configs since it uses them
* to enable certain macros

View File

@ -27,6 +27,7 @@
#define CONFIG_CMD_DHCP
#define CONFIG_CMD_ENV
#define CONFIG_CMD_I2C
#define CONFIG_CMD_IDE
#define CONFIG_CMD_PING
#define CONFIG_CMD_SF
#define CONFIG_CMD_SPI
@ -60,6 +61,34 @@
#define CONFIG_SYS_CONSOLE_INFO_QUIET /* don't print console @ startup */
#define CONFIG_SYS_ALT_MEMTEST
/* SATA support */
#ifdef CONFIG_CMD_IDE
#define __io
#define CONFIG_IDE_PREINIT
#define CONFIG_MVSATA_IDE
/* Needs byte-swapping for ATA data register */
#define CONFIG_IDE_SWAP_IO
#define CONFIG_SYS_ATA_REG_OFFSET 0x0100 /* Offset for register access */
#define CONFIG_SYS_ATA_DATA_OFFSET 0x0100 /* Offset for data I/O */
#define CONFIG_SYS_ATA_ALT_OFFSET 0x0100
/* Each 8-bit ATA register is aligned to a 4-bytes address */
#define CONFIG_SYS_ATA_STRIDE 4
/* CONFIG_CMD_IDE requires some #defines for ATA registers */
#define CONFIG_SYS_IDE_MAXBUS 2
#define CONFIG_SYS_IDE_MAXDEVICE CONFIG_SYS_IDE_MAXBUS
/* ATA registers base is at SATA controller base */
#define CONFIG_SYS_ATA_BASE_ADDR MVEBU_AXP_SATA_BASE
#define CONFIG_SYS_ATA_IDE0_OFFSET 0x2000
#define CONFIG_SYS_ATA_IDE1_OFFSET 0x4000
#define CONFIG_DOS_PARTITION
#endif /* CONFIG_CMD_IDE */
/*
* mv-common.h should be defined after CMD configs since it uses them
* to enable certain macros
@ -109,7 +138,7 @@
#define CONFIG_SYS_SPI_U_BOOT_OFFS 0x20000
/* Enable DDR support in SPL (DDR3 training from Marvell bin_hdr) */
#define CONFIG_SYS_MVEBU_DDR
#define CONFIG_SYS_MVEBU_DDR_AXP
#define CONFIG_SPD_EEPROM 0x4e
#endif /* _CONFIG_DB_MV7846MP_GP_H */

View File

@ -108,7 +108,7 @@
#define CONFIG_SYS_SPI_U_BOOT_OFFS 0x20000
/* Enable DDR support in SPL (DDR3 training from Marvell bin_hdr) */
#define CONFIG_SYS_MVEBU_DDR
#define CONFIG_SYS_MVEBU_DDR_AXP
#define CONFIG_DDR_FIXED_SIZE (1 << 20) /* 1GiB */
#endif /* _CONFIG_DB_MV7846MP_GP_H */

View File

@ -60,7 +60,8 @@ libs-$(CONFIG_SPL_I2C_SUPPORT) += drivers/i2c/
libs-$(CONFIG_SPL_GPIO_SUPPORT) += drivers/gpio/
libs-$(CONFIG_SPL_MMC_SUPPORT) += drivers/mmc/
libs-$(CONFIG_SPL_MPC8XXX_INIT_DDR_SUPPORT) += drivers/ddr/fsl/
libs-$(CONFIG_SYS_MVEBU_DDR) += drivers/ddr/mvebu/
libs-$(CONFIG_SYS_MVEBU_DDR_A38X) += drivers/ddr/marvell/a38x/
libs-$(CONFIG_SYS_MVEBU_DDR_AXP) += drivers/ddr/marvell/axp/
libs-$(CONFIG_SPL_SERIAL_SUPPORT) += drivers/serial/
libs-$(CONFIG_SPL_SPI_FLASH_SUPPORT) += drivers/mtd/spi/
libs-$(CONFIG_SPL_SPI_SUPPORT) += drivers/spi/