// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2020 NXP
 * Layerscape PCIe EP driver
 */

#include <common.h>
#include <asm/arch/fsl_serdes.h>
#include <dm.h>
#include <dm/devres.h>
#include <errno.h>
#include <pci_ep.h>
#include <asm/io.h>
#include <linux/sizes.h>
#include <linux/log2.h>
#include "pcie_layerscape.h"

DECLARE_GLOBAL_DATA_PTR;

static void ls_pcie_ep_enable_cfg(struct ls_pcie_ep *pcie_ep)
{
	struct ls_pcie *pcie = pcie_ep->pcie;
	u32 config;

	config = ctrl_readl(pcie, PCIE_PF_CONFIG);
	config |= PCIE_CONFIG_READY;
	ctrl_writel(pcie, config, PCIE_PF_CONFIG);
}

static int ls_ep_set_bar(struct udevice *dev, uint fn, struct pci_bar *ep_bar)
{
	struct ls_pcie_ep *pcie_ep = dev_get_priv(dev);
	struct ls_pcie *pcie = pcie_ep->pcie;
	dma_addr_t bar_phys = ep_bar->phys_addr;
	enum pci_barno bar = ep_bar->barno;
	u32 reg = PCI_BASE_ADDRESS_0 + (4 * bar);
	int flags = ep_bar->flags;
	int type, idx;
	u64 size;

	idx = bar;
	/* BAR size is 2^(aperture + 11) */
	size = max_t(size_t, ep_bar->size, FSL_PCIE_EP_MIN_APERTURE);

	if (!(flags & PCI_BASE_ADDRESS_SPACE))
		type = PCIE_ATU_TYPE_MEM;
	else
		type = PCIE_ATU_TYPE_IO;

	ls_pcie_atu_inbound_set(pcie, fn, 0, type, idx, bar, bar_phys);

	dbi_writel(pcie, lower_32_bits(size - 1), reg + PCIE_NO_SRIOV_BAR_BASE);
	dbi_writel(pcie, flags, reg);

	if (flags & PCI_BASE_ADDRESS_MEM_TYPE_64) {
		dbi_writel(pcie, upper_32_bits(size - 1),
			   reg + 4 + PCIE_NO_SRIOV_BAR_BASE);
		dbi_writel(pcie, 0, reg + 4);
	}

	return 0;
}

static struct pci_ep_ops ls_pcie_ep_ops = {
	.set_bar = ls_ep_set_bar,
};
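
/*
 * Usage sketch (illustrative only, not part of this driver): through the
 * generic U-Boot pci_ep uclass, .set_bar above is expected to be reached
 * roughly as follows; the BAR number, size and address are placeholders.
 *
 *	struct pci_bar bar = {
 *		.phys_addr = 0x80000000,		// placeholder address
 *		.size = SZ_64K,
 *		.barno = BAR_0,
 *		.flags = PCI_BASE_ADDRESS_MEM_TYPE_64,
 *	};
 *	int ret = pci_ep_set_bar(ep_dev, 0, &bar);	// function 0
 */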

static void ls_pcie_ep_setup_atu(struct ls_pcie_ep *pcie_ep, u32 pf)
{
	struct ls_pcie *pcie = pcie_ep->pcie;
	u32 vf_flag = 0;
	u64 phys = 0;

	phys = CONFIG_SYS_PCI_EP_MEMORY_BASE + pf * SZ_64M;

	phys = ALIGN(phys, PCIE_BAR0_SIZE);
	/* ATU 0 : INBOUND : map BAR0 */
	ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
				0 + pf * BAR_NUM, 0, phys);
	/* ATU 1 : INBOUND : map BAR1 */
	phys = ALIGN(phys + PCIE_BAR0_SIZE, PCIE_BAR1_SIZE);
	ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
				1 + pf * BAR_NUM, 1, phys);
	/* ATU 2 : INBOUND : map BAR2 */
	phys = ALIGN(phys + PCIE_BAR1_SIZE, PCIE_BAR2_SIZE);
	ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
				2 + pf * BAR_NUM, 2, phys);
	/* ATU 3 : INBOUND : map BAR4 */
	phys = ALIGN(phys + PCIE_BAR2_SIZE, PCIE_BAR4_SIZE);
	ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
				3 + pf * BAR_NUM, 4, phys);

	if (pcie_ep->sriov_flag) {
		vf_flag = 1;
		/* ATU 4 : INBOUND : map VF BAR0 */
		phys = ALIGN(phys + PCIE_BAR4_SIZE, PCIE_BAR0_SIZE);
		ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
					4 + pf * BAR_NUM, 0, phys);
		/* ATU 5 : INBOUND : map VF BAR1 */
		phys = ALIGN(phys + PCIE_BAR0_SIZE * PCIE_VF_NUM,
			     PCIE_BAR1_SIZE);
		ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
					5 + pf * BAR_NUM, 1, phys);
		/* ATU 6 : INBOUND : map VF BAR2 */
		phys = ALIGN(phys + PCIE_BAR1_SIZE * PCIE_VF_NUM,
			     PCIE_BAR2_SIZE);
		ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
					6 + pf * BAR_NUM, 2, phys);
		/* ATU 7 : INBOUND : map VF BAR4 */
		phys = ALIGN(phys + PCIE_BAR2_SIZE * PCIE_VF_NUM,
			     PCIE_BAR4_SIZE);
		ls_pcie_atu_inbound_set(pcie, pf, vf_flag, PCIE_ATU_TYPE_MEM,
					7 + pf * BAR_NUM, 4, phys);
	}

	/* ATU: OUTBOUND : map MEM */
	ls_pcie_atu_outbound_set(pcie, pf, PCIE_ATU_TYPE_MEM,
				 (u64)pcie_ep->addr_res.start +
				 pf * CONFIG_SYS_PCI_MEMORY_SIZE,
				 0, CONFIG_SYS_PCI_MEMORY_SIZE);
}
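
/*
 * Resulting inbound layout per PF (a sketch of what the function above sets
 * up, not an authoritative map): windows 0..3 back PF BAR0/BAR1/BAR2/BAR4
 * with buffers carved consecutively from CONFIG_SYS_PCI_EP_MEMORY_BASE +
 * pf * 64M, each start aligned up to the next BAR's size. With SR-IOV
 * present, windows 4..7 back the VF BARs, and each VF BAR region is spaced
 * by PCIE_VF_NUM times the corresponding single-BAR size.
 */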

/* BAR0 and BAR1 are 32bit, BAR2 and BAR4 are 64bit */
static void ls_pcie_ep_setup_bar(void *bar_base, int bar, u32 size)
{
	u32 mask;

	/* The least inbound window is 4KiB */
	if (size < SZ_4K)
		mask = 0;
	else
		mask = size - 1;

	switch (bar) {
	case 0:
		writel(mask, bar_base + PCI_BASE_ADDRESS_0);
		break;
	case 1:
		writel(mask, bar_base + PCI_BASE_ADDRESS_1);
		break;
	case 2:
		writel(mask, bar_base + PCI_BASE_ADDRESS_2);
		writel(0, bar_base + PCI_BASE_ADDRESS_3);
		break;
	case 4:
		writel(mask, bar_base + PCI_BASE_ADDRESS_4);
		writel(0, bar_base + PCI_BASE_ADDRESS_5);
		break;
	default:
		break;
	}
}
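
/*
 * Worked example (assuming the usual size-minus-one BAR mask convention the
 * code above follows): a 64 KiB aperture is programmed as
 * mask = SZ_64K - 1 = 0xffff, while any request below the 4 KiB minimum is
 * written as mask = 0.
 */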

static void ls_pcie_ep_setup_bars(void *bar_base)
{
	/* BAR0 - 32bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* BAR1 - 32bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* BAR2 - 64bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* BAR4 - 64bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

static void ls_pcie_ep_setup_vf_bars(void *bar_base)
{
	/* VF BAR0 MASK register at offset 0x19c */
	bar_base += PCIE_SRIOV_VFBAR0 - PCI_BASE_ADDRESS_0;

	/* VF-BAR0 - 32bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 0, PCIE_BAR0_SIZE);
	/* VF-BAR1 - 32bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 1, PCIE_BAR1_SIZE);
	/* VF-BAR2 - 64bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 2, PCIE_BAR2_SIZE);
	/* VF-BAR4 - 64bit - MEM */
	ls_pcie_ep_setup_bar(bar_base, 4, PCIE_BAR4_SIZE);
}

static void ls_pcie_setup_ep(struct ls_pcie_ep *pcie_ep)
{
	u32 sriov;
	u32 pf, vf;
	void *bar_base = NULL;
	struct ls_pcie *pcie = pcie_ep->pcie;

	sriov = readl(pcie->dbi + PCIE_SRIOV);
	if (PCI_EXT_CAP_ID(sriov) == PCI_EXT_CAP_ID_SRIOV) {
		pcie_ep->sriov_flag = 1;
		for (pf = 0; pf < PCIE_PF_NUM; pf++) {
			/*
			 * The VF_BARn_REG register's Prefetchable and Type bit
			 * fields are overwritten by a write to VF's BAR Mask
			 * register. Before writing to the VF_BARn_MASK_REG
			 * register, write 0b to the PCIE_MISC_CONTROL_1_OFF
			 * register.
			 */
			writel(0, pcie->dbi + PCIE_MISC_CONTROL_1_OFF);

			bar_base = pcie->dbi +
				   PCIE_MASK_OFFSET(pcie_ep->cfg2_flag, pf,
						    pcie_ep->pf1_offset);

			if (pcie_ep->cfg2_flag) {
				ctrl_writel(pcie,
					    PCIE_LCTRL0_VAL(pf, 0),
					    PCIE_PF_VF_CTRL);
				ls_pcie_ep_setup_bars(bar_base);

				for (vf = 1; vf <= PCIE_VF_NUM; vf++) {
					ctrl_writel(pcie,
						    PCIE_LCTRL0_VAL(pf, vf),
						    PCIE_PF_VF_CTRL);
					ls_pcie_ep_setup_vf_bars(bar_base);
				}
			} else {
				ls_pcie_ep_setup_bars(bar_base);
				ls_pcie_ep_setup_vf_bars(bar_base);
			}

			ls_pcie_ep_setup_atu(pcie_ep, pf);
		}

		if (pcie_ep->cfg2_flag)	/* Disable CFG2 */
			ctrl_writel(pcie, 0, PCIE_PF_VF_CTRL);
	} else {
		ls_pcie_ep_setup_bars(pcie->dbi + PCIE_NO_SRIOV_BAR_BASE);
		ls_pcie_ep_setup_atu(pcie_ep, 0);
	}

	ls_pcie_dump_atu(pcie, PCIE_ATU_REGION_NUM_SRIOV,
			 PCIE_ATU_REGION_INBOUND);

	ls_pcie_ep_enable_cfg(pcie_ep);
}
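
/*
 * Note on the function above: on SR-IOV capable controllers the loop
 * programs BAR masks and inbound/outbound windows once per physical
 * function; LS2080A/LS2085A parts (cfg2_flag set in probe) additionally
 * select each PF/VF through PCIE_PF_VF_CTRL before the BAR mask registers
 * are written, and CFG2 is disabled again once all functions are done.
 */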

static int ls_pcie_ep_probe(struct udevice *dev)
{
	struct ls_pcie_ep *pcie_ep = dev_get_priv(dev);
	struct ls_pcie *pcie;
	u16 link_sta;
	int ret;
	u32 svr;

	pcie = devm_kmalloc(dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie_ep->pcie = pcie;

	pcie->dbi = (void __iomem *)devfdt_get_addr_index(dev, 0);
	if (!pcie->dbi)
		return -ENOMEM;

	pcie->ctrl = (void __iomem *)devfdt_get_addr_index(dev, 1);
	if (!pcie->ctrl)
		return -ENOMEM;

	ret = fdt_get_named_resource(gd->fdt_blob, dev_of_offset(dev),
				     "reg", "reg-names",
				     "addr_space", &pcie_ep->addr_res);
	if (ret) {
		printf("%s: resource \"addr_space\" not found\n", dev->name);
		return ret;
	}

	pcie->idx = ((unsigned long)pcie->dbi - PCIE_SYS_BASE_ADDR) /
		    PCIE_CCSR_SIZE;

	pcie->big_endian = fdtdec_get_bool(gd->fdt_blob, dev_of_offset(dev),
					   "big-endian");

	svr = SVR_SOC_VER(get_svr());

	if (svr == SVR_LX2160A || svr == SVR_LX2162A ||
	    svr == SVR_LX2120A || svr == SVR_LX2080A ||
	    svr == SVR_LX2122A || svr == SVR_LX2082A)
		pcie_ep->pf1_offset = LX2160_PCIE_PF1_OFFSET;
	else
		pcie_ep->pf1_offset = LS_PCIE_PF1_OFFSET;

	if (svr == SVR_LS2080A || svr == SVR_LS2085A)
		pcie_ep->cfg2_flag = 1;
	else
		pcie_ep->cfg2_flag = 0;

	pcie->mode = readb(pcie->dbi + PCI_HEADER_TYPE) & 0x7f;
	if (pcie->mode != PCI_HEADER_TYPE_NORMAL)
		return 0;

	pcie_ep->max_functions = fdtdec_get_int(gd->fdt_blob,
						dev_of_offset(dev),
						"max-functions", 1);
	pcie_ep->num_ib_wins = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "num-ib-windows", 8);
	pcie_ep->num_ob_wins = fdtdec_get_int(gd->fdt_blob, dev_of_offset(dev),
					      "num-ob-windows", 8);

	printf("PCIe%u: %s %s", PCIE_SRDS_PRTCL(pcie->idx), dev->name,
	       "Endpoint");
	ls_pcie_setup_ep(pcie_ep);

	if (!ls_pcie_link_up(pcie)) {
		/* Let the user know there's no PCIe link */
		printf(": no link\n");
		return 0;
	}

	/* Print the negotiated PCIe link width and speed */
	link_sta = readw(pcie->dbi + PCIE_LINK_STA);
	printf(": x%d gen%d\n", (link_sta & PCIE_LINK_WIDTH_MASK) >> 4,
	       link_sta & PCIE_LINK_SPEED_MASK);

	return 0;
}

static int ls_pcie_ep_remove(struct udevice *dev)
{
	return 0;
}

const struct udevice_id ls_pcie_ep_ids[] = {
	{ .compatible = "fsl,ls-pcie-ep" },
	{ }
};
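
/*
 * Illustrative device tree node (a sketch only; addresses, sizes and the
 * reg layout are placeholders and must match the SoC dtsi). Only
 * "addr_space" is looked up by name above; dbi and ctrl are taken by index.
 *
 *	pcie_ep@3400000 {
 *		compatible = "fsl,ls-pcie-ep";
 *		reg = <0x00 0x03400000 0x0 0x00100000	// dbi (index 0)
 *		       0x00 0x03480000 0x0 0x00100000	// ctrl (index 1)
 *		       0x80 0x00000000 0x8 0x00000000>;	// addr_space
 *		reg-names = "regs", "ctrl", "addr_space";
 *		big-endian;
 *		max-functions = <2>;
 *		num-ib-windows = <8>;
 *		num-ob-windows = <8>;
 *	};
 */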

U_BOOT_DRIVER(pci_layerscape_ep) = {
	.name = "pci_layerscape_ep",
	.id = UCLASS_PCI_EP,
	.of_match = ls_pcie_ep_ids,
	.ops = &ls_pcie_ep_ops,
	.probe = ls_pcie_ep_probe,
	.remove = ls_pcie_ep_remove,
	.priv_auto_alloc_size = sizeof(struct ls_pcie_ep),
};