net: Add NIC controller driver for OcteonTX
Adds support for Network Interface controllers found on OcteonTX SoC platforms.

Signed-off-by: Suneel Garapati <sgarapati@marvell.com>
Signed-off-by: Stefan Roese <sr@denx.de>
Cc: Joe Hershberger <joe.hershberger@ni.com>

commit 0008e9a69d (parent 05c7606ac9)
14 changed files with 6656 additions and 0 deletions
drivers/net/Kconfig
@@ -407,6 +407,20 @@ config MT7628_ETH
 	  The MediaTek MT7628 ethernet interface is used on MT7628 and
 	  MT7688 based boards.
 
+config NET_OCTEONTX
+	bool "OcteonTX Ethernet support"
+	depends on ARCH_OCTEONTX
+	depends on PCI_SRIOV
+	help
+	  You must select Y to enable network device support for
+	  OcteonTX SoCs. If unsure, say n
+config OCTEONTX_SMI
+	bool "OcteonTX SMI Device support"
+	depends on ARCH_OCTEONTX || ARCH_OCTEONTX2
+	help
+	  You must select Y to enable SMI controller support for
+	  OcteonTX or OcteonTX2 SoCs. If unsure, say n
+
 config PCH_GBE
 	bool "Intel Platform Controller Hub EG20T GMAC driver"
 	depends on DM_ETH && DM_PCI
drivers/net/Makefile
@@ -65,6 +65,8 @@ obj-$(CONFIG_RENESAS_RAVB) += ravb.o
 obj-$(CONFIG_SMC91111) += smc91111.o
 obj-$(CONFIG_SMC911X) += smc911x.o
 obj-$(CONFIG_TSEC_ENET) += tsec.o fsl_mdio.o
+obj-$(CONFIG_NET_OCTEONTX) += octeontx/
+obj-$(CONFIG_OCTEONTX_SMI) += octeontx/smi.o
 obj-$(CONFIG_FMAN_ENET) += fsl_mdio.o
 obj-$(CONFIG_ULI526X) += uli526x.o
 obj-$(CONFIG_VSC7385_ENET) += vsc7385.o
drivers/net/octeontx/Makefile (new file, 7 lines)
@@ -0,0 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (C) 2018 Marvell International Ltd.
#

obj-$(CONFIG_NET_OCTEONTX) += bgx.o nic_main.o nicvf_queues.o nicvf_main.o \
			      xcv.o
drivers/net/octeontx/bgx.c (new file, 1565 lines)
File diff suppressed because it is too large
drivers/net/octeontx/bgx.h (new file, 259 lines)
@@ -0,0 +1,259 @@
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright (C) 2018 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef BGX_H
|
||||
#define BGX_H
|
||||
|
||||
#include <asm/arch/board.h>
|
||||
|
||||
/* PCI device IDs */
|
||||
#define PCI_DEVICE_ID_OCTEONTX_BGX 0xA026
|
||||
#define PCI_DEVICE_ID_OCTEONTX_RGX 0xA054
|
||||
|
||||
#define MAX_LMAC_PER_BGX 4
|
||||
#define MAX_BGX_CHANS_PER_LMAC 16
|
||||
#define MAX_DMAC_PER_LMAC 8
|
||||
#define MAX_FRAME_SIZE 9216
|
||||
|
||||
#define MAX_DMAC_PER_LMAC_TNS_BYPASS_MODE 2
|
||||
|
||||
#define MAX_LMAC (MAX_BGX_PER_NODE * MAX_LMAC_PER_BGX)
|
||||
|
||||
#define NODE_ID_MASK 0x300000000000
|
||||
#define NODE_ID(x) (((x) & NODE_ID_MASK) >> 44)
|
||||
|
||||
/* Registers */
|
||||
#define GSERX_CFG(x) (0x87E090000080ull + (x) * 0x1000000ull)
|
||||
#define GSERX_SCRATCH(x) (0x87E090000020ull + (x) * 0x1000000ull)
|
||||
#define GSERX_PHY_CTL(x) (0x87E090000000ull + (x) * 0x1000000ull)
|
||||
#define GSERX_CFG_BGX BIT(2)
|
||||
#define GSER_RX_EIE_DETSTS(x) (0x87E090000150ull + (x) * 0x1000000ull)
|
||||
#define GSER_CDRLOCK (8)
|
||||
#define GSER_BR_RXX_CTL(x, y) (0x87E090000400ull + (x) * 0x1000000ull + \
|
||||
(y) * 0x80)
|
||||
#define GSER_BR_RXX_CTL_RXT_SWM BIT(2)
|
||||
#define GSER_BR_RXX_EER(x, y) (0x87E090000418ull + (x) * 0x1000000ull + \
|
||||
(y) * 0x80)
|
||||
#define GSER_BR_RXX_EER_RXT_ESV BIT(14)
|
||||
#define GSER_BR_RXX_EER_RXT_EER BIT(15)
|
||||
#define EER_RXT_ESV (14)
|
||||
|
||||
#define BGX_CMRX_CFG 0x00
|
||||
#define CMR_PKT_TX_EN BIT_ULL(13)
|
||||
#define CMR_PKT_RX_EN BIT_ULL(14)
|
||||
#define CMR_EN BIT_ULL(15)
|
||||
#define BGX_CMR_GLOBAL_CFG 0x08
|
||||
#define CMR_GLOBAL_CFG_FCS_STRIP BIT_ULL(6)
|
||||
#define BGX_CMRX_RX_ID_MAP 0x60
|
||||
#define BGX_CMRX_RX_STAT0 0x70
|
||||
#define BGX_CMRX_RX_STAT1 0x78
|
||||
#define BGX_CMRX_RX_STAT2 0x80
|
||||
#define BGX_CMRX_RX_STAT3 0x88
|
||||
#define BGX_CMRX_RX_STAT4 0x90
|
||||
#define BGX_CMRX_RX_STAT5 0x98
|
||||
#define BGX_CMRX_RX_STAT6 0xA0
|
||||
#define BGX_CMRX_RX_STAT7 0xA8
|
||||
#define BGX_CMRX_RX_STAT8 0xB0
|
||||
#define BGX_CMRX_RX_STAT9 0xB8
|
||||
#define BGX_CMRX_RX_STAT10 0xC0
|
||||
#define BGX_CMRX_RX_BP_DROP 0xC8
|
||||
#define BGX_CMRX_RX_DMAC_CTL 0x0E8
|
||||
#define BGX_CMR_RX_DMACX_CAM 0x200
|
||||
#define RX_DMACX_CAM_EN BIT_ULL(48)
|
||||
#define RX_DMACX_CAM_LMACID(x) ((x) << 49)
|
||||
#define RX_DMAC_COUNT 32
|
||||
#define BGX_CMR_RX_STREERING 0x300
|
||||
#define RX_TRAFFIC_STEER_RULE_COUNT 8
|
||||
#define BGX_CMR_CHAN_MSK_AND 0x450
|
||||
#define BGX_CMR_BIST_STATUS 0x460
|
||||
#define BGX_CMR_RX_LMACS 0x468
|
||||
#define BGX_CMRX_TX_STAT0 0x600
|
||||
#define BGX_CMRX_TX_STAT1 0x608
|
||||
#define BGX_CMRX_TX_STAT2 0x610
|
||||
#define BGX_CMRX_TX_STAT3 0x618
|
||||
#define BGX_CMRX_TX_STAT4 0x620
|
||||
#define BGX_CMRX_TX_STAT5 0x628
|
||||
#define BGX_CMRX_TX_STAT6 0x630
|
||||
#define BGX_CMRX_TX_STAT7 0x638
|
||||
#define BGX_CMRX_TX_STAT8 0x640
|
||||
#define BGX_CMRX_TX_STAT9 0x648
|
||||
#define BGX_CMRX_TX_STAT10 0x650
|
||||
#define BGX_CMRX_TX_STAT11 0x658
|
||||
#define BGX_CMRX_TX_STAT12 0x660
|
||||
#define BGX_CMRX_TX_STAT13 0x668
|
||||
#define BGX_CMRX_TX_STAT14 0x670
|
||||
#define BGX_CMRX_TX_STAT15 0x678
|
||||
#define BGX_CMRX_TX_STAT16 0x680
|
||||
#define BGX_CMRX_TX_STAT17 0x688
|
||||
#define BGX_CMR_TX_LMACS 0x1000
|
||||
|
||||
#define BGX_SPUX_CONTROL1 0x10000
|
||||
#define SPU_CTL_LOW_POWER BIT_ULL(11)
|
||||
#define SPU_CTL_LOOPBACK BIT_ULL(14)
|
||||
#define SPU_CTL_RESET BIT_ULL(15)
|
||||
#define BGX_SPUX_STATUS1 0x10008
|
||||
#define SPU_STATUS1_RCV_LNK BIT_ULL(2)
|
||||
#define BGX_SPUX_STATUS2 0x10020
|
||||
#define SPU_STATUS2_RCVFLT BIT_ULL(10)
|
||||
#define BGX_SPUX_BX_STATUS 0x10028
|
||||
#define SPU_BX_STATUS_RX_ALIGN BIT_ULL(12)
|
||||
#define BGX_SPUX_BR_STATUS1 0x10030
|
||||
#define SPU_BR_STATUS_BLK_LOCK BIT_ULL(0)
|
||||
#define SPU_BR_STATUS_RCV_LNK BIT_ULL(12)
|
||||
#define BGX_SPUX_BR_PMD_CRTL 0x10068
|
||||
#define SPU_PMD_CRTL_TRAIN_EN BIT_ULL(1)
|
||||
#define BGX_SPUX_BR_PMD_LP_CUP 0x10078
|
||||
#define BGX_SPUX_BR_PMD_LD_CUP 0x10088
|
||||
#define BGX_SPUX_BR_PMD_LD_REP 0x10090
|
||||
#define BGX_SPUX_FEC_CONTROL 0x100A0
|
||||
#define SPU_FEC_CTL_FEC_EN BIT_ULL(0)
|
||||
#define SPU_FEC_CTL_ERR_EN BIT_ULL(1)
|
||||
#define BGX_SPUX_AN_CONTROL 0x100C8
|
||||
#define SPU_AN_CTL_AN_EN BIT_ULL(12)
|
||||
#define SPU_AN_CTL_XNP_EN BIT_ULL(13)
|
||||
#define SPU_AN_CTL_AN_RESTART BIT_ULL(15)
|
||||
#define BGX_SPUX_AN_STATUS 0x100D0
|
||||
#define SPU_AN_STS_AN_COMPLETE BIT_ULL(5)
|
||||
#define BGX_SPUX_AN_ADV 0x100D8
|
||||
#define BGX_SPUX_MISC_CONTROL 0x10218
|
||||
#define SPU_MISC_CTL_INTLV_RDISP BIT_ULL(10)
|
||||
#define SPU_MISC_CTL_RX_DIS BIT_ULL(12)
|
||||
#define BGX_SPUX_INT 0x10220 /* +(0..3) << 20 */
|
||||
#define BGX_SPUX_INT_W1S 0x10228
|
||||
#define BGX_SPUX_INT_ENA_W1C 0x10230
|
||||
#define BGX_SPUX_INT_ENA_W1S 0x10238
|
||||
#define BGX_SPU_DBG_CONTROL 0x10300
|
||||
#define SPU_DBG_CTL_AN_ARB_LINK_CHK_EN BIT_ULL(18)
|
||||
#define SPU_DBG_CTL_AN_NONCE_MCT_DIS BIT_ULL(29)
|
||||
|
||||
#define BGX_SMUX_RX_INT 0x20000
|
||||
#define BGX_SMUX_RX_JABBER 0x20030
|
||||
#define BGX_SMUX_RX_CTL 0x20048
|
||||
#define SMU_RX_CTL_STATUS (3ull << 0)
|
||||
#define BGX_SMUX_TX_APPEND 0x20100
|
||||
#define SMU_TX_APPEND_FCS_D BIT_ULL(2)
|
||||
#define BGX_SMUX_TX_MIN_PKT 0x20118
|
||||
#define BGX_SMUX_TX_INT 0x20140
|
||||
#define BGX_SMUX_TX_CTL 0x20178
|
||||
#define SMU_TX_CTL_DIC_EN BIT_ULL(0)
|
||||
#define SMU_TX_CTL_UNI_EN BIT_ULL(1)
|
||||
#define SMU_TX_CTL_LNK_STATUS (3ull << 4)
|
||||
#define BGX_SMUX_TX_THRESH 0x20180
|
||||
#define BGX_SMUX_CTL 0x20200
|
||||
#define SMU_CTL_RX_IDLE BIT_ULL(0)
|
||||
#define SMU_CTL_TX_IDLE BIT_ULL(1)
|
||||
|
||||
#define BGX_GMP_PCS_MRX_CTL 0x30000
|
||||
#define PCS_MRX_CTL_RST_AN BIT_ULL(9)
|
||||
#define PCS_MRX_CTL_PWR_DN BIT_ULL(11)
|
||||
#define PCS_MRX_CTL_AN_EN BIT_ULL(12)
|
||||
#define PCS_MRX_CTL_LOOPBACK1 BIT_ULL(14)
|
||||
#define PCS_MRX_CTL_RESET BIT_ULL(15)
|
||||
#define BGX_GMP_PCS_MRX_STATUS 0x30008
|
||||
#define PCS_MRX_STATUS_AN_CPT BIT_ULL(5)
|
||||
#define BGX_GMP_PCS_ANX_AN_RESULTS 0x30020
|
||||
#define BGX_GMP_PCS_SGM_AN_ADV 0x30068
|
||||
#define BGX_GMP_PCS_MISCX_CTL 0x30078
|
||||
#define PCS_MISCX_CTL_DISP_EN BIT_ULL(13)
|
||||
#define PCS_MISC_CTL_GMX_ENO BIT_ULL(11)
|
||||
#define PCS_MISC_CTL_SAMP_PT_MASK 0x7Full
|
||||
#define PCS_MISC_CTL_MODE BIT_ULL(8)
|
||||
#define BGX_GMP_GMI_PRTX_CFG 0x38020
|
||||
#define GMI_PORT_CFG_SPEED BIT_ULL(1)
|
||||
#define GMI_PORT_CFG_DUPLEX BIT_ULL(2)
|
||||
#define GMI_PORT_CFG_SLOT_TIME BIT_ULL(3)
|
||||
#define GMI_PORT_CFG_SPEED_MSB BIT_ULL(8)
|
||||
#define BGX_GMP_GMI_RXX_JABBER 0x38038
|
||||
#define BGX_GMP_GMI_TXX_THRESH 0x38210
|
||||
#define BGX_GMP_GMI_TXX_APPEND 0x38218
|
||||
#define BGX_GMP_GMI_TXX_SLOT 0x38220
|
||||
#define BGX_GMP_GMI_TXX_BURST 0x38228
|
||||
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
|
||||
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
|
||||
|
||||
#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
|
||||
#define BGX_MSIX_VEC_0_29_CTL 0x400008
|
||||
#define BGX_MSIX_PBA_0 0x4F0000
|
||||
|
||||
/* MSI-X interrupts */
|
||||
#define BGX_MSIX_VECTORS 30
|
||||
#define BGX_LMAC_VEC_OFFSET 7
|
||||
#define BGX_MSIX_VEC_SHIFT 4
|
||||
|
||||
#define CMRX_INT 0
|
||||
#define SPUX_INT 1
|
||||
#define SMUX_RX_INT 2
|
||||
#define SMUX_TX_INT 3
|
||||
#define GMPX_PCS_INT 4
|
||||
#define GMPX_GMI_RX_INT 5
|
||||
#define GMPX_GMI_TX_INT 6
|
||||
#define CMR_MEM_INT 28
|
||||
#define SPU_MEM_INT 29
|
||||
|
||||
#define LMAC_INTR_LINK_UP BIT(0)
|
||||
#define LMAC_INTR_LINK_DOWN BIT(1)
|
||||
|
||||
/* RX_DMAC_CTL configuration*/
|
||||
enum MCAST_MODE {
|
||||
MCAST_MODE_REJECT,
|
||||
MCAST_MODE_ACCEPT,
|
||||
MCAST_MODE_CAM_FILTER,
|
||||
RSVD
|
||||
};
|
||||
|
||||
#define BCAST_ACCEPT 1
|
||||
#define CAM_ACCEPT 1
|
||||
|
||||
int octeontx_bgx_initialize(unsigned int bgx_idx, unsigned int node);
void bgx_add_dmac_addr(u64 dmac, int node, int bgx_idx, int lmac);
void bgx_get_count(int node, int *bgx_count);
int bgx_get_lmac_count(int node, int bgx);
void bgx_print_stats(int bgx_idx, int lmac);
void xcv_init_hw(void);
void xcv_setup_link(bool link_up, int link_speed);

#undef LINK_INTR_ENABLE

enum qlm_mode {
	QLM_MODE_SGMII,		/* SGMII, each lane independent */
	QLM_MODE_XAUI,		/* 1 XAUI or DXAUI, 4 lanes */
	QLM_MODE_RXAUI,		/* 2 RXAUI, 2 lanes each */
	QLM_MODE_XFI,		/* 4 XFI, 1 lane each */
	QLM_MODE_XLAUI,		/* 1 XLAUI, 4 lanes each */
	QLM_MODE_10G_KR,	/* 4 10GBASE-KR, 1 lane each */
	QLM_MODE_40G_KR4,	/* 1 40GBASE-KR4, 4 lanes each */
	QLM_MODE_QSGMII,	/* 4 QSGMII, each lane independent */
	QLM_MODE_RGMII,		/* 1 RGX */
};

struct phy_info {
	int mdio_bus;
	int phy_addr;
	bool autoneg_dis;
};

struct bgx_board_info {
	struct phy_info phy_info[MAX_LMAC_PER_BGX];
	bool lmac_reg[MAX_LMAC_PER_BGX];
	bool lmac_enable[MAX_LMAC_PER_BGX];
};

enum LMAC_TYPE {
	BGX_MODE_SGMII = 0,	/* 1 lane, 1.250 Gbaud */
	BGX_MODE_XAUI = 1,	/* 4 lanes, 3.125 Gbaud */
	BGX_MODE_DXAUI = 1,	/* 4 lanes, 6.250 Gbaud */
	BGX_MODE_RXAUI = 2,	/* 2 lanes, 6.250 Gbaud */
	BGX_MODE_XFI = 3,	/* 1 lane, 10.3125 Gbaud */
	BGX_MODE_XLAUI = 4,	/* 4 lanes, 10.3125 Gbaud */
	BGX_MODE_10G_KR = 3,	/* 1 lane, 10.3125 Gbaud */
	BGX_MODE_40G_KR = 4,	/* 4 lanes, 10.3125 Gbaud */
	BGX_MODE_RGMII = 5,
	BGX_MODE_QSGMII = 6,
	BGX_MODE_INVALID = 7,
};

int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr);

#endif /* BGX_H */
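To make the interface exported above more concrete, here is a minimal sketch of how platform code might bring up the BGX instances on a node using only the prototypes declared in this header. The helper name, the use of node 0, the printing and the error handling are illustrative assumptions, not part of this commit; MAX_BGX_PER_NODE is assumed to come from the platform header that bgx.h already includes.

#include <common.h>
#include "bgx.h"

/* Illustrative sketch: initialize each BGX detected on node 0 and
 * report how many LMACs it exposes.
 */
static int board_bgx_bringup(void)
{
	int bgx_map = 0, bgx, lmacs, ret;

	/* bgx_get_count() is used by the PF code as a bitmap of present BGX blocks */
	bgx_get_count(0, &bgx_map);

	for (bgx = 0; bgx < MAX_BGX_PER_NODE; bgx++) {
		if (!(bgx_map & (1 << bgx)))
			continue;

		ret = octeontx_bgx_initialize(bgx, 0);
		if (ret)
			return ret;

		lmacs = bgx_get_lmac_count(0, bgx);
		printf("BGX%d: %d LMAC(s)\n", bgx, lmacs);
	}

	return 0;
}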
drivers/net/octeontx/nic.h (new file, 508 lines)
@@ -0,0 +1,508 @@
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright (C) 2018 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef NIC_H
|
||||
#define NIC_H
|
||||
|
||||
#include <linux/netdevice.h>
|
||||
#include "bgx.h"
|
||||
|
||||
#define PCI_DEVICE_ID_CAVIUM_NICVF_1 0x0011
|
||||
|
||||
/* Subsystem device IDs */
|
||||
#define PCI_SUBSYS_DEVID_88XX_NIC_PF 0xA11E
|
||||
#define PCI_SUBSYS_DEVID_81XX_NIC_PF 0xA21E
|
||||
#define PCI_SUBSYS_DEVID_83XX_NIC_PF 0xA31E
|
||||
|
||||
#define PCI_SUBSYS_DEVID_88XX_PASS1_NIC_VF 0xA11E
|
||||
#define PCI_SUBSYS_DEVID_88XX_NIC_VF 0xA134
|
||||
#define PCI_SUBSYS_DEVID_81XX_NIC_VF 0xA234
|
||||
#define PCI_SUBSYS_DEVID_83XX_NIC_VF 0xA334
|
||||
|
||||
#define NIC_INTF_COUNT 2 /* Interfaces btw VNIC and TNS/BGX */
|
||||
#define NIC_CHANS_PER_INF 128
|
||||
#define NIC_MAX_CHANS (NIC_INTF_COUNT * NIC_CHANS_PER_INF)
|
||||
|
||||
/* PCI BAR nos */
|
||||
#define PCI_CFG_REG_BAR_NUM 0
|
||||
#define PCI_MSIX_REG_BAR_NUM 4
|
||||
|
||||
/* NIC SRIOV VF count */
|
||||
#define MAX_NUM_VFS_SUPPORTED 128
|
||||
#define DEFAULT_NUM_VF_ENABLED 8
|
||||
|
||||
#define NIC_TNS_BYPASS_MODE 0
|
||||
#define NIC_TNS_MODE 1
|
||||
|
||||
/* NIC priv flags */
|
||||
#define NIC_SRIOV_ENABLED BIT(0)
|
||||
#define NIC_TNS_ENABLED BIT(1)
|
||||
|
||||
/* VNIC HW optimization features */
|
||||
#define VNIC_RX_CSUM_OFFLOAD_SUPPORT
|
||||
#undef VNIC_TX_CSUM_OFFLOAD_SUPPORT
|
||||
#undef VNIC_SG_SUPPORT
|
||||
#undef VNIC_TSO_SUPPORT
|
||||
#undef VNIC_LRO_SUPPORT
|
||||
#undef VNIC_RSS_SUPPORT
|
||||
|
||||
/* TSO not supported in Thunder pass1 */
|
||||
#ifdef VNIC_TSO_SUPPORT
|
||||
#define VNIC_SW_TSO_SUPPORT
|
||||
#undef VNIC_HW_TSO_SUPPORT
|
||||
#endif
|
||||
|
||||
/* ETHTOOL enable or disable, undef this to disable */
|
||||
#define NICVF_ETHTOOL_ENABLE
|
||||
|
||||
/* Min/Max packet size */
|
||||
#define NIC_HW_MIN_FRS 64
|
||||
#define NIC_HW_MAX_FRS 9200 /* 9216 max packet including FCS */
|
||||
|
||||
/* Max pkinds */
|
||||
#define NIC_MAX_PKIND 16
|
||||
|
||||
/* Max when CPI_ALG is IP diffserv */
|
||||
#define NIC_MAX_CPI_PER_LMAC 64
|
||||
|
||||
/* NIC VF Interrupts */
|
||||
#define NICVF_INTR_CQ 0
|
||||
#define NICVF_INTR_SQ 1
|
||||
#define NICVF_INTR_RBDR 2
|
||||
#define NICVF_INTR_PKT_DROP 3
|
||||
#define NICVF_INTR_TCP_TIMER 4
|
||||
#define NICVF_INTR_MBOX 5
|
||||
#define NICVF_INTR_QS_ERR 6
|
||||
|
||||
#define NICVF_INTR_CQ_SHIFT 0
|
||||
#define NICVF_INTR_SQ_SHIFT 8
|
||||
#define NICVF_INTR_RBDR_SHIFT 16
|
||||
#define NICVF_INTR_PKT_DROP_SHIFT 20
|
||||
#define NICVF_INTR_TCP_TIMER_SHIFT 21
|
||||
#define NICVF_INTR_MBOX_SHIFT 22
|
||||
#define NICVF_INTR_QS_ERR_SHIFT 23
|
||||
|
||||
#define NICVF_INTR_CQ_MASK (0xFF << NICVF_INTR_CQ_SHIFT)
|
||||
#define NICVF_INTR_SQ_MASK (0xFF << NICVF_INTR_SQ_SHIFT)
|
||||
#define NICVF_INTR_RBDR_MASK (0x03 << NICVF_INTR_RBDR_SHIFT)
|
||||
#define NICVF_INTR_PKT_DROP_MASK BIT(NICVF_INTR_PKT_DROP_SHIFT)
|
||||
#define NICVF_INTR_TCP_TIMER_MASK BIT(NICVF_INTR_TCP_TIMER_SHIFT)
|
||||
#define NICVF_INTR_MBOX_MASK BIT(NICVF_INTR_MBOX_SHIFT)
|
||||
#define NICVF_INTR_QS_ERR_MASK BIT(NICVF_INTR_QS_ERR_SHIFT)
|
||||
|
||||
/* MSI-X interrupts */
|
||||
#define NIC_PF_MSIX_VECTORS 10
|
||||
#define NIC_VF_MSIX_VECTORS 20
|
||||
|
||||
#define NIC_PF_INTR_ID_ECC0_SBE 0
|
||||
#define NIC_PF_INTR_ID_ECC0_DBE 1
|
||||
#define NIC_PF_INTR_ID_ECC1_SBE 2
|
||||
#define NIC_PF_INTR_ID_ECC1_DBE 3
|
||||
#define NIC_PF_INTR_ID_ECC2_SBE 4
|
||||
#define NIC_PF_INTR_ID_ECC2_DBE 5
|
||||
#define NIC_PF_INTR_ID_ECC3_SBE 6
|
||||
#define NIC_PF_INTR_ID_ECC3_DBE 7
|
||||
#define NIC_PF_INTR_ID_MBOX0 8
|
||||
#define NIC_PF_INTR_ID_MBOX1 9
|
||||
|
||||
/* Global timer for CQ timer thresh interrupts
 * Calculated for an SCLK of 700 MHz;
 * the value written should be 1/16th of what is expected.
 *
 * 1 tick per ms
 */
|
||||
#define NICPF_CLK_PER_INT_TICK 43750
|
||||
|
||||
struct nicvf_cq_poll {
|
||||
u8 cq_idx; /* Completion queue index */
|
||||
};
|
||||
|
||||
#define NIC_MAX_RSS_HASH_BITS 8
|
||||
#define NIC_MAX_RSS_IDR_TBL_SIZE BIT(NIC_MAX_RSS_HASH_BITS)
|
||||
#define RSS_HASH_KEY_SIZE 5 /* 320 bit key */
|
||||
|
||||
#ifdef VNIC_RSS_SUPPORT
|
||||
struct nicvf_rss_info {
|
||||
bool enable;
|
||||
#define RSS_L2_EXTENDED_HASH_ENA BIT(0)
|
||||
#define RSS_IP_HASH_ENA BIT(1)
|
||||
#define RSS_TCP_HASH_ENA BIT(2)
|
||||
#define RSS_TCP_SYN_DIS BIT(3)
|
||||
#define RSS_UDP_HASH_ENA BIT(4)
|
||||
#define RSS_L4_EXTENDED_HASH_ENA BIT(5)
|
||||
#define RSS_ROCE_ENA BIT(6)
|
||||
#define RSS_L3_BI_DIRECTION_ENA BIT(7)
|
||||
#define RSS_L4_BI_DIRECTION_ENA BIT(8)
|
||||
u64 cfg;
|
||||
u8 hash_bits;
|
||||
u16 rss_size;
|
||||
u8 ind_tbl[NIC_MAX_RSS_IDR_TBL_SIZE];
|
||||
u64 key[RSS_HASH_KEY_SIZE];
|
||||
};
|
||||
#endif
|
||||
|
||||
enum rx_stats_reg_offset {
|
||||
RX_OCTS = 0x0,
|
||||
RX_UCAST = 0x1,
|
||||
RX_BCAST = 0x2,
|
||||
RX_MCAST = 0x3,
|
||||
RX_RED = 0x4,
|
||||
RX_RED_OCTS = 0x5,
|
||||
RX_ORUN = 0x6,
|
||||
RX_ORUN_OCTS = 0x7,
|
||||
RX_FCS = 0x8,
|
||||
RX_L2ERR = 0x9,
|
||||
RX_DRP_BCAST = 0xa,
|
||||
RX_DRP_MCAST = 0xb,
|
||||
RX_DRP_L3BCAST = 0xc,
|
||||
RX_DRP_L3MCAST = 0xd,
|
||||
RX_STATS_ENUM_LAST,
|
||||
};
|
||||
|
||||
enum tx_stats_reg_offset {
|
||||
TX_OCTS = 0x0,
|
||||
TX_UCAST = 0x1,
|
||||
TX_BCAST = 0x2,
|
||||
TX_MCAST = 0x3,
|
||||
TX_DROP = 0x4,
|
||||
TX_STATS_ENUM_LAST,
|
||||
};
|
||||
|
||||
struct nicvf_hw_stats {
|
||||
u64 rx_bytes_ok;
|
||||
u64 rx_ucast_frames_ok;
|
||||
u64 rx_bcast_frames_ok;
|
||||
u64 rx_mcast_frames_ok;
|
||||
u64 rx_fcs_errors;
|
||||
u64 rx_l2_errors;
|
||||
u64 rx_drop_red;
|
||||
u64 rx_drop_red_bytes;
|
||||
u64 rx_drop_overrun;
|
||||
u64 rx_drop_overrun_bytes;
|
||||
u64 rx_drop_bcast;
|
||||
u64 rx_drop_mcast;
|
||||
u64 rx_drop_l3_bcast;
|
||||
u64 rx_drop_l3_mcast;
|
||||
u64 tx_bytes_ok;
|
||||
u64 tx_ucast_frames_ok;
|
||||
u64 tx_bcast_frames_ok;
|
||||
u64 tx_mcast_frames_ok;
|
||||
u64 tx_drops;
|
||||
};
|
||||
|
||||
struct nicvf_drv_stats {
|
||||
/* Rx */
|
||||
u64 rx_frames_ok;
|
||||
u64 rx_frames_64;
|
||||
u64 rx_frames_127;
|
||||
u64 rx_frames_255;
|
||||
u64 rx_frames_511;
|
||||
u64 rx_frames_1023;
|
||||
u64 rx_frames_1518;
|
||||
u64 rx_frames_jumbo;
|
||||
u64 rx_drops;
|
||||
/* Tx */
|
||||
u64 tx_frames_ok;
|
||||
u64 tx_drops;
|
||||
u64 tx_busy;
|
||||
u64 tx_tso;
|
||||
};
|
||||
|
||||
struct hw_info {
|
||||
u8 bgx_cnt;
|
||||
u8 chans_per_lmac;
|
||||
u8 chans_per_bgx; /* Rx/Tx chans */
|
||||
u8 chans_per_rgx;
|
||||
u8 chans_per_lbk;
|
||||
u16 cpi_cnt;
|
||||
u16 rssi_cnt;
|
||||
u16 rss_ind_tbl_size;
|
||||
u16 tl4_cnt;
|
||||
u16 tl3_cnt;
|
||||
u8 tl2_cnt;
|
||||
u8 tl1_cnt;
|
||||
bool tl1_per_bgx; /* TL1 per BGX or per LMAC */
|
||||
u8 model_id;
|
||||
};
|
||||
|
||||
struct nicvf {
|
||||
struct udevice *dev;
|
||||
u8 vf_id;
|
||||
bool sqs_mode:1;
|
||||
bool loopback_supported:1;
|
||||
u8 tns_mode;
|
||||
u8 node;
|
||||
u16 mtu;
|
||||
struct queue_set *qs;
|
||||
#define MAX_SQS_PER_VF_SINGLE_NODE 5
|
||||
#define MAX_SQS_PER_VF 11
|
||||
u8 num_qs;
|
||||
void *addnl_qs;
|
||||
u16 vf_mtu;
|
||||
void __iomem *reg_base;
|
||||
#define MAX_QUEUES_PER_QSET 8
|
||||
struct nicvf_cq_poll *napi[8];
|
||||
|
||||
u8 cpi_alg;
|
||||
|
||||
struct nicvf_hw_stats stats;
|
||||
struct nicvf_drv_stats drv_stats;
|
||||
|
||||
struct nicpf *nicpf;
|
||||
|
||||
/* VF <-> PF mailbox communication */
|
||||
bool pf_acked;
|
||||
bool pf_nacked;
|
||||
bool set_mac_pending;
|
||||
|
||||
bool link_up;
|
||||
u8 duplex;
|
||||
u32 speed;
|
||||
u8 rev_id;
|
||||
u8 rx_queues;
|
||||
u8 tx_queues;
|
||||
|
||||
bool open;
|
||||
bool rb_alloc_fail;
|
||||
void *rcv_buf;
|
||||
bool hw_tso;
|
||||
};
|
||||
|
||||
static inline int node_id(void *addr)
|
||||
{
|
||||
return ((uintptr_t)addr >> 44) & 0x3;
|
||||
}
|
||||
|
||||
struct nicpf {
|
||||
struct udevice *udev;
|
||||
struct hw_info *hw;
|
||||
u8 node;
|
||||
unsigned int flags;
|
||||
u16 total_vf_cnt; /* Total num of VF supported */
|
||||
u16 num_vf_en; /* No of VF enabled */
|
||||
void __iomem *reg_base; /* Register start address */
|
||||
u16 rss_ind_tbl_size;
|
||||
u8 num_sqs_en; /* Secondary qsets enabled */
|
||||
u64 nicvf[MAX_NUM_VFS_SUPPORTED];
|
||||
u8 vf_sqs[MAX_NUM_VFS_SUPPORTED][MAX_SQS_PER_VF];
|
||||
u8 pqs_vf[MAX_NUM_VFS_SUPPORTED];
|
||||
bool sqs_used[MAX_NUM_VFS_SUPPORTED];
|
||||
struct pkind_cfg pkind;
|
||||
u8 bgx_cnt;
|
||||
u8 rev_id;
|
||||
#define NIC_SET_VF_LMAC_MAP(bgx, lmac) ((((bgx) & 0xF) << 4) | ((lmac) & 0xF))
|
||||
#define NIC_GET_BGX_FROM_VF_LMAC_MAP(map) (((map) >> 4) & 0xF)
|
||||
#define NIC_GET_LMAC_FROM_VF_LMAC_MAP(map) ((map) & 0xF)
|
||||
u8 vf_lmac_map[MAX_LMAC];
|
||||
u16 cpi_base[MAX_NUM_VFS_SUPPORTED];
|
||||
u64 mac[MAX_NUM_VFS_SUPPORTED];
|
||||
bool mbx_lock[MAX_NUM_VFS_SUPPORTED];
|
||||
u8 link[MAX_LMAC];
|
||||
u8 duplex[MAX_LMAC];
|
||||
u32 speed[MAX_LMAC];
|
||||
bool vf_enabled[MAX_NUM_VFS_SUPPORTED];
|
||||
u16 rssi_base[MAX_NUM_VFS_SUPPORTED];
|
||||
u8 lmac_cnt;
|
||||
};
|
||||
|
||||
/* PF <--> VF Mailbox communication
|
||||
* Eight 64bit registers are shared between PF and VF.
|
||||
* Separate set for each VF.
|
||||
* Writing '1' into last register mbx7 means end of message.
|
||||
*/
|
||||
|
||||
/* PF <--> VF mailbox communication */
|
||||
#define NIC_PF_VF_MAILBOX_SIZE 2
|
||||
#define NIC_PF_VF_MBX_TIMEOUT 2000 /* ms */
|
||||
|
||||
/* Mailbox message types */
|
||||
#define NIC_MBOX_MSG_READY 0x01 /* Is PF ready to rcv msgs */
|
||||
#define NIC_MBOX_MSG_ACK 0x02 /* ACK the message received */
|
||||
#define NIC_MBOX_MSG_NACK 0x03 /* NACK the message received */
|
||||
#define NIC_MBOX_MSG_QS_CFG 0x04 /* Configure Qset */
|
||||
#define NIC_MBOX_MSG_RQ_CFG 0x05 /* Configure receive queue */
|
||||
#define NIC_MBOX_MSG_SQ_CFG 0x06 /* Configure Send queue */
|
||||
#define NIC_MBOX_MSG_RQ_DROP_CFG 0x07 /* Configure receive queue */
|
||||
#define NIC_MBOX_MSG_SET_MAC 0x08 /* Add MAC ID to DMAC filter */
|
||||
#define NIC_MBOX_MSG_SET_MAX_FRS 0x09 /* Set max frame size */
|
||||
#define NIC_MBOX_MSG_CPI_CFG 0x0A /* Config CPI, RSSI */
|
||||
#define NIC_MBOX_MSG_RSS_SIZE 0x0B /* Get RSS indir_tbl size */
|
||||
#define NIC_MBOX_MSG_RSS_CFG 0x0C /* Config RSS table */
|
||||
#define NIC_MBOX_MSG_RSS_CFG_CONT 0x0D /* RSS config continuation */
|
||||
#define NIC_MBOX_MSG_RQ_BP_CFG 0x0E /* RQ backpressure config */
|
||||
#define NIC_MBOX_MSG_RQ_SW_SYNC 0x0F /* Flush inflight pkts to RQ */
|
||||
#define NIC_MBOX_MSG_BGX_STATS 0x10 /* Get stats from BGX */
|
||||
#define NIC_MBOX_MSG_BGX_LINK_CHANGE 0x11 /* BGX:LMAC link status */
|
||||
#define NIC_MBOX_MSG_ALLOC_SQS 0x12 /* Allocate secondary Qset */
|
||||
#define NIC_MBOX_MSG_NICVF_PTR 0x13 /* Send nicvf ptr to PF */
|
||||
#define NIC_MBOX_MSG_PNICVF_PTR 0x14 /* Get primary qset nicvf ptr */
|
||||
#define NIC_MBOX_MSG_SNICVF_PTR 0x15 /* Send sqet nicvf ptr to PVF */
|
||||
#define NIC_MBOX_MSG_LOOPBACK 0x16 /* Set interface in loopback */
|
||||
#define NIC_MBOX_MSG_CFG_DONE 0xF0 /* VF configuration done */
|
||||
#define NIC_MBOX_MSG_SHUTDOWN 0xF1 /* VF is being shutdown */
|
||||
|
||||
struct nic_cfg_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 node_id;
|
||||
bool tns_mode:1;
|
||||
bool sqs_mode:1;
|
||||
bool loopback_supported:1;
|
||||
u8 mac_addr[6];
|
||||
};
|
||||
|
||||
/* Qset configuration */
|
||||
struct qs_cfg_msg {
|
||||
u8 msg;
|
||||
u8 num;
|
||||
u8 sqs_count;
|
||||
u64 cfg;
|
||||
};
|
||||
|
||||
/* Receive queue configuration */
|
||||
struct rq_cfg_msg {
|
||||
u8 msg;
|
||||
u8 qs_num;
|
||||
u8 rq_num;
|
||||
u64 cfg;
|
||||
};
|
||||
|
||||
/* Send queue configuration */
|
||||
struct sq_cfg_msg {
|
||||
u8 msg;
|
||||
u8 qs_num;
|
||||
u8 sq_num;
|
||||
bool sqs_mode;
|
||||
u64 cfg;
|
||||
};
|
||||
|
||||
/* Set VF's MAC address */
|
||||
struct set_mac_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 mac_addr[6];
|
||||
};
|
||||
|
||||
/* Set Maximum frame size */
|
||||
struct set_frs_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u16 max_frs;
|
||||
};
|
||||
|
||||
/* Set CPI algorithm type */
|
||||
struct cpi_cfg_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 rq_cnt;
|
||||
u8 cpi_alg;
|
||||
};
|
||||
|
||||
/* Get RSS table size */
|
||||
struct rss_sz_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u16 ind_tbl_size;
|
||||
};
|
||||
|
||||
/* Set RSS configuration */
|
||||
struct rss_cfg_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 hash_bits;
|
||||
u8 tbl_len;
|
||||
u8 tbl_offset;
|
||||
#define RSS_IND_TBL_LEN_PER_MBX_MSG 8
|
||||
u8 ind_tbl[RSS_IND_TBL_LEN_PER_MBX_MSG];
|
||||
};
|
||||
|
||||
struct bgx_stats_msg {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 rx;
|
||||
u8 idx;
|
||||
u64 stats;
|
||||
};
|
||||
|
||||
/* Physical interface link status */
|
||||
struct bgx_link_status {
|
||||
u8 msg;
|
||||
u8 link_up;
|
||||
u8 duplex;
|
||||
u32 speed;
|
||||
};
|
||||
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
/* Get Extra Qset IDs */
|
||||
struct sqs_alloc {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
u8 qs_count;
|
||||
};
|
||||
|
||||
struct nicvf_ptr {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
bool sqs_mode;
|
||||
u8 sqs_id;
|
||||
u64 nicvf;
|
||||
};
|
||||
#endif
|
||||
|
||||
/* Set interface in loopback mode */
|
||||
struct set_loopback {
|
||||
u8 msg;
|
||||
u8 vf_id;
|
||||
bool enable;
|
||||
};
|
||||
|
||||
/* 128 bit shared memory between PF and each VF */
|
||||
union nic_mbx {
|
||||
struct { u8 msg; } msg;
|
||||
struct nic_cfg_msg nic_cfg;
|
||||
struct qs_cfg_msg qs;
|
||||
struct rq_cfg_msg rq;
|
||||
struct sq_cfg_msg sq;
|
||||
struct set_mac_msg mac;
|
||||
struct set_frs_msg frs;
|
||||
struct cpi_cfg_msg cpi_cfg;
|
||||
struct rss_sz_msg rss_size;
|
||||
struct rss_cfg_msg rss_cfg;
|
||||
struct bgx_stats_msg bgx_stats;
|
||||
struct bgx_link_status link_status;
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
struct sqs_alloc sqs_alloc;
|
||||
struct nicvf_ptr nicvf;
|
||||
#endif
|
||||
struct set_loopback lbk;
|
||||
};
|
||||
|
||||
int nicvf_set_real_num_queues(struct udevice *dev,
			      int tx_queues, int rx_queues);
int nicvf_open(struct udevice *dev);
void nicvf_stop(struct udevice *dev);
int nicvf_send_msg_to_pf(struct nicvf *vf, union nic_mbx *mbx);
void nicvf_update_stats(struct nicvf *nic);

void nic_handle_mbx_intr(struct nicpf *nic, int vf);

int bgx_poll_for_link(int node, int bgx_idx, int lmacid);
const u8 *bgx_get_lmac_mac(int node, int bgx_idx, int lmacid);
void bgx_set_lmac_mac(int node, int bgx_idx, int lmacid, const u8 *mac);
void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable);
void bgx_lmac_internal_loopback(int node, int bgx_idx,
				int lmac_idx, bool enable);

static inline bool pass1_silicon(unsigned int revision, int model_id)
{
	return ((revision < 8) && (model_id == 0x88));
}

static inline bool pass2_silicon(unsigned int revision, int model_id)
{
	return ((revision >= 8) && (model_id == 0x88));
}

#endif /* NIC_H */
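As a usage illustration of the mailbox definitions above, the fragment below sketches how a VF could ask the PF to program its MAC address. It only uses types and prototypes declared in this header; the helper itself is hypothetical and not part of this commit.

#include <linux/string.h>
#include "nic.h"

/* Hypothetical helper: ask the PF to program this VF's MAC address.
 * nicvf_send_msg_to_pf() is expected to return once the PF has ACKed
 * or NACKed the request (NIC_PF_VF_MBX_TIMEOUT bounds the wait).
 */
static int nicvf_request_set_mac(struct nicvf *nic, const u8 *mac)
{
	union nic_mbx mbx = {};

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, mac, 6);

	return nicvf_send_msg_to_pf(nic, &mbx);
}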
drivers/net/octeontx/nic_main.c (new file, 778 lines)
@@ -0,0 +1,778 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2018 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#include <config.h>
|
||||
#include <net.h>
|
||||
#include <netdev.h>
|
||||
#include <malloc.h>
|
||||
#include <miiphy.h>
|
||||
#include <dm.h>
|
||||
#include <misc.h>
|
||||
#include <pci.h>
|
||||
#include <pci_ids.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#include "nic_reg.h"
|
||||
#include "nic.h"
|
||||
#include "q_struct.h"
|
||||
|
||||
unsigned long rounddown_pow_of_two(unsigned long n)
|
||||
{
|
||||
n |= n >> 1;
|
||||
n |= n >> 2;
|
||||
n |= n >> 4;
|
||||
n |= n >> 8;
|
||||
n |= n >> 16;
|
||||
n |= n >> 32;
|
||||
|
||||
return(n + 1);
|
||||
}
|
||||
|
||||
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg);
|
||||
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
|
||||
struct sq_cfg_msg *sq);
|
||||
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf);
|
||||
static int nic_rcv_queue_sw_sync(struct nicpf *nic);
|
||||
|
||||
/* Register read/write APIs */
|
||||
static void nic_reg_write(struct nicpf *nic, u64 offset, u64 val)
|
||||
{
|
||||
writeq(val, nic->reg_base + offset);
|
||||
}
|
||||
|
||||
static u64 nic_reg_read(struct nicpf *nic, u64 offset)
|
||||
{
|
||||
return readq(nic->reg_base + offset);
|
||||
}
|
||||
|
||||
static u64 nic_get_mbx_addr(int vf)
|
||||
{
|
||||
return NIC_PF_VF_0_127_MAILBOX_0_1 + (vf << NIC_VF_NUM_SHIFT);
|
||||
}
|
||||
|
||||
static void nic_send_msg_to_vf(struct nicpf *nic, int vf, union nic_mbx *mbx)
|
||||
{
|
||||
void __iomem *mbx_addr = (void *)(nic->reg_base + nic_get_mbx_addr(vf));
|
||||
u64 *msg = (u64 *)mbx;
|
||||
|
||||
/* In first revision HW, the mbox interrupt is triggered
 * when the PF writes to MBOX(1); in later revisions, when
 * the PF writes to MBOX(0)
 */
|
||||
if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
|
||||
/* see the comment for nic_reg_write()/nic_reg_read()
|
||||
* functions above
|
||||
*/
|
||||
writeq(msg[0], mbx_addr);
|
||||
writeq(msg[1], mbx_addr + 8);
|
||||
} else {
|
||||
writeq(msg[1], mbx_addr + 8);
|
||||
writeq(msg[0], mbx_addr);
|
||||
}
|
||||
}
|
||||
|
||||
static void nic_mbx_send_ready(struct nicpf *nic, int vf)
|
||||
{
|
||||
union nic_mbx mbx = {};
|
||||
int bgx_idx, lmac, timeout = 5, link = -1;
|
||||
const u8 *mac;
|
||||
|
||||
mbx.nic_cfg.msg = NIC_MBOX_MSG_READY;
|
||||
mbx.nic_cfg.vf_id = vf;
|
||||
|
||||
if (nic->flags & NIC_TNS_ENABLED)
|
||||
mbx.nic_cfg.tns_mode = NIC_TNS_MODE;
|
||||
else
|
||||
mbx.nic_cfg.tns_mode = NIC_TNS_BYPASS_MODE;
|
||||
|
||||
if (vf < nic->num_vf_en) {
|
||||
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
|
||||
mac = bgx_get_lmac_mac(nic->node, bgx_idx, lmac);
|
||||
if (mac)
|
||||
memcpy((u8 *)&mbx.nic_cfg.mac_addr, mac, 6);
|
||||
|
||||
while (timeout-- && (link <= 0)) {
|
||||
link = bgx_poll_for_link(nic->node, bgx_idx, lmac);
|
||||
debug("Link status: %d\n", link);
|
||||
if (link <= 0)
|
||||
mdelay(2000);
|
||||
}
|
||||
}
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
mbx.nic_cfg.sqs_mode = (vf >= nic->num_vf_en) ? true : false;
|
||||
#endif
|
||||
mbx.nic_cfg.node_id = nic->node;
|
||||
|
||||
mbx.nic_cfg.loopback_supported = vf < nic->num_vf_en;
|
||||
|
||||
nic_send_msg_to_vf(nic, vf, &mbx);
|
||||
}
|
||||
|
||||
/* ACKs VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
|
||||
static void nic_mbx_send_ack(struct nicpf *nic, int vf)
|
||||
{
|
||||
union nic_mbx mbx = {};
|
||||
|
||||
mbx.msg.msg = NIC_MBOX_MSG_ACK;
|
||||
nic_send_msg_to_vf(nic, vf, &mbx);
|
||||
}
|
||||
|
||||
/* NACKs VF's mailbox message to indicate that the PF is not able to
 * complete the requested action
 * @vf: VF to which the NACK is to be sent
 */
|
||||
static void nic_mbx_send_nack(struct nicpf *nic, int vf)
|
||||
{
|
||||
union nic_mbx mbx = {};
|
||||
|
||||
mbx.msg.msg = NIC_MBOX_MSG_NACK;
|
||||
nic_send_msg_to_vf(nic, vf, &mbx);
|
||||
}
|
||||
|
||||
static int nic_config_loopback(struct nicpf *nic, struct set_loopback *lbk)
|
||||
{
|
||||
int bgx_idx, lmac_idx;
|
||||
|
||||
if (lbk->vf_id > nic->num_vf_en)
|
||||
return -1;
|
||||
|
||||
bgx_idx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
|
||||
lmac_idx = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lbk->vf_id]);
|
||||
|
||||
bgx_lmac_internal_loopback(nic->node, bgx_idx, lmac_idx, lbk->enable);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Interrupt handler to handle mailbox messages from VFs */
|
||||
void nic_handle_mbx_intr(struct nicpf *nic, int vf)
|
||||
{
|
||||
union nic_mbx mbx = {};
|
||||
u64 *mbx_data;
|
||||
u64 mbx_addr;
|
||||
u64 reg_addr;
|
||||
u64 cfg;
|
||||
int bgx, lmac;
|
||||
int i;
|
||||
int ret = 0;
|
||||
|
||||
nic->mbx_lock[vf] = true;
|
||||
|
||||
mbx_addr = nic_get_mbx_addr(vf);
|
||||
mbx_data = (u64 *)&mbx;
|
||||
|
||||
for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
|
||||
*mbx_data = nic_reg_read(nic, mbx_addr);
|
||||
mbx_data++;
|
||||
mbx_addr += sizeof(u64);
|
||||
}
|
||||
|
||||
debug("%s: Mailbox msg %d from VF%d\n", __func__, mbx.msg.msg, vf);
|
||||
switch (mbx.msg.msg) {
|
||||
case NIC_MBOX_MSG_READY:
|
||||
nic_mbx_send_ready(nic, vf);
|
||||
if (vf < nic->num_vf_en) {
|
||||
nic->link[vf] = 0;
|
||||
nic->duplex[vf] = 0;
|
||||
nic->speed[vf] = 0;
|
||||
}
|
||||
ret = 1;
|
||||
break;
|
||||
case NIC_MBOX_MSG_QS_CFG:
|
||||
reg_addr = NIC_PF_QSET_0_127_CFG |
|
||||
(mbx.qs.num << NIC_QS_ID_SHIFT);
|
||||
cfg = mbx.qs.cfg;
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
/* Check if it's a secondary Qset */
|
||||
if (vf >= nic->num_vf_en) {
|
||||
cfg = cfg & (~0x7FULL);
|
||||
/* Assign this Qset to primary Qset's VF */
|
||||
cfg |= nic->pqs_vf[vf];
|
||||
}
|
||||
#endif
|
||||
nic_reg_write(nic, reg_addr, cfg);
|
||||
break;
|
||||
case NIC_MBOX_MSG_RQ_CFG:
|
||||
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_CFG |
|
||||
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
|
||||
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
|
||||
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
|
||||
/* Enable CQE_RX2_S extension in CQE_RX descriptor.
|
||||
* This gets appended by default on 81xx/83xx chips,
|
||||
* for consistency enabling the same on 88xx pass2
|
||||
* where this is introduced.
|
||||
*/
|
||||
if (pass2_silicon(nic->rev_id, nic->hw->model_id))
|
||||
nic_reg_write(nic, NIC_PF_RX_CFG, 0x01);
|
||||
break;
|
||||
case NIC_MBOX_MSG_RQ_BP_CFG:
|
||||
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_BP_CFG |
|
||||
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
|
||||
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
|
||||
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
|
||||
break;
|
||||
case NIC_MBOX_MSG_RQ_SW_SYNC:
|
||||
ret = nic_rcv_queue_sw_sync(nic);
|
||||
break;
|
||||
case NIC_MBOX_MSG_RQ_DROP_CFG:
|
||||
reg_addr = NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG |
|
||||
(mbx.rq.qs_num << NIC_QS_ID_SHIFT) |
|
||||
(mbx.rq.rq_num << NIC_Q_NUM_SHIFT);
|
||||
nic_reg_write(nic, reg_addr, mbx.rq.cfg);
|
||||
break;
|
||||
case NIC_MBOX_MSG_SQ_CFG:
|
||||
reg_addr = NIC_PF_QSET_0_127_SQ_0_7_CFG |
|
||||
(mbx.sq.qs_num << NIC_QS_ID_SHIFT) |
|
||||
(mbx.sq.sq_num << NIC_Q_NUM_SHIFT);
|
||||
nic_reg_write(nic, reg_addr, mbx.sq.cfg);
|
||||
nic_tx_channel_cfg(nic, mbx.qs.num,
|
||||
(struct sq_cfg_msg *)&mbx.sq);
|
||||
break;
|
||||
case NIC_MBOX_MSG_SET_MAC:
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
if (vf >= nic->num_vf_en)
|
||||
break;
|
||||
#endif
|
||||
lmac = mbx.mac.vf_id;
|
||||
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[lmac]);
|
||||
bgx_set_lmac_mac(nic->node, bgx, lmac, mbx.mac.mac_addr);
|
||||
break;
|
||||
case NIC_MBOX_MSG_SET_MAX_FRS:
|
||||
ret = nic_update_hw_frs(nic, mbx.frs.max_frs,
|
||||
mbx.frs.vf_id);
|
||||
break;
|
||||
case NIC_MBOX_MSG_CPI_CFG:
|
||||
nic_config_cpi(nic, &mbx.cpi_cfg);
|
||||
break;
|
||||
#ifdef VNIC_RSS_SUPPORT
|
||||
case NIC_MBOX_MSG_RSS_SIZE:
|
||||
nic_send_rss_size(nic, vf);
|
||||
goto unlock;
|
||||
case NIC_MBOX_MSG_RSS_CFG:
|
||||
case NIC_MBOX_MSG_RSS_CFG_CONT:
|
||||
nic_config_rss(nic, &mbx.rss_cfg);
|
||||
break;
|
||||
#endif
|
||||
case NIC_MBOX_MSG_CFG_DONE:
|
||||
/* Last message of VF config msg sequence */
|
||||
nic->vf_enabled[vf] = true;
|
||||
if (vf >= nic->lmac_cnt)
|
||||
goto unlock;
|
||||
|
||||
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
|
||||
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, true);
|
||||
goto unlock;
|
||||
case NIC_MBOX_MSG_SHUTDOWN:
|
||||
/* First msg in VF teardown sequence */
|
||||
nic->vf_enabled[vf] = false;
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
if (vf >= nic->num_vf_en)
|
||||
nic->sqs_used[vf - nic->num_vf_en] = false;
|
||||
nic->pqs_vf[vf] = 0;
|
||||
#endif
|
||||
if (vf >= nic->lmac_cnt)
|
||||
break;
|
||||
|
||||
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vf]);
|
||||
|
||||
bgx_lmac_rx_tx_enable(nic->node, bgx, lmac, false);
|
||||
break;
|
||||
#ifdef VNIC_MULTI_QSET_SUPPORT
|
||||
case NIC_MBOX_MSG_ALLOC_SQS:
|
||||
nic_alloc_sqs(nic, &mbx.sqs_alloc);
|
||||
goto unlock;
|
||||
case NIC_MBOX_MSG_NICVF_PTR:
|
||||
nic->nicvf[vf] = mbx.nicvf.nicvf;
|
||||
break;
|
||||
case NIC_MBOX_MSG_PNICVF_PTR:
|
||||
nic_send_pnicvf(nic, vf);
|
||||
goto unlock;
|
||||
case NIC_MBOX_MSG_SNICVF_PTR:
|
||||
nic_send_snicvf(nic, &mbx.nicvf);
|
||||
goto unlock;
|
||||
#endif
|
||||
case NIC_MBOX_MSG_LOOPBACK:
|
||||
ret = nic_config_loopback(nic, &mbx.lbk);
|
||||
break;
|
||||
default:
|
||||
printf("Invalid msg from VF%d, msg 0x%x\n", vf, mbx.msg.msg);
|
||||
break;
|
||||
}
|
||||
|
||||
if (!ret)
|
||||
nic_mbx_send_ack(nic, vf);
|
||||
else if (mbx.msg.msg != NIC_MBOX_MSG_READY)
|
||||
nic_mbx_send_nack(nic, vf);
|
||||
unlock:
|
||||
nic->mbx_lock[vf] = false;
|
||||
}
|
||||
|
||||
static int nic_rcv_queue_sw_sync(struct nicpf *nic)
|
||||
{
|
||||
int timeout = 20;
|
||||
|
||||
nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x01);
|
||||
while (timeout) {
|
||||
if (nic_reg_read(nic, NIC_PF_SW_SYNC_RX_DONE) & 0x1)
|
||||
break;
|
||||
udelay(2000);
|
||||
timeout--;
|
||||
}
|
||||
nic_reg_write(nic, NIC_PF_SW_SYNC_RX, 0x00);
|
||||
if (!timeout) {
|
||||
printf("Receive queue software sync failed\n");
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int nic_update_hw_frs(struct nicpf *nic, int new_frs, int vf)
|
||||
{
|
||||
u64 *pkind = (u64 *)&nic->pkind;
|
||||
|
||||
if (new_frs > NIC_HW_MAX_FRS || new_frs < NIC_HW_MIN_FRS) {
|
||||
printf("Invalid MTU setting from VF%d rejected,", vf);
|
||||
printf(" should be between %d and %d\n", NIC_HW_MIN_FRS,
|
||||
NIC_HW_MAX_FRS);
|
||||
return 1;
|
||||
}
|
||||
new_frs += ETH_HLEN;
|
||||
if (new_frs <= nic->pkind.maxlen)
|
||||
return 0;
|
||||
|
||||
nic->pkind.maxlen = new_frs;
|
||||
|
||||
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG, *pkind);
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Set minimum transmit packet size */
|
||||
static void nic_set_tx_pkt_pad(struct nicpf *nic, int size)
|
||||
{
|
||||
int lmac;
|
||||
u64 lmac_cfg;
|
||||
struct hw_info *hw = nic->hw;
|
||||
int max_lmac = nic->hw->bgx_cnt * MAX_LMAC_PER_BGX;
|
||||
|
||||
/* Max value that can be set is 60 */
|
||||
if (size > 52)
|
||||
size = 52;
|
||||
|
||||
/* CN81XX has RGX configured as a FAKE BGX; adjust max_lmac accordingly */
|
||||
if (hw->chans_per_rgx)
|
||||
max_lmac = ((nic->hw->bgx_cnt - 1) * MAX_LMAC_PER_BGX) + 1;
|
||||
|
||||
for (lmac = 0; lmac < max_lmac; lmac++) {
|
||||
lmac_cfg = nic_reg_read(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3));
|
||||
lmac_cfg &= ~(0xF << 2);
|
||||
lmac_cfg |= ((size / 4) << 2);
|
||||
nic_reg_write(nic, NIC_PF_LMAC_0_7_CFG | (lmac << 3), lmac_cfg);
|
||||
}
|
||||
}
|
||||
|
||||
/* Function to check number of LMACs present and set VF to LMAC mapping.
|
||||
* Mapping will be used while initializing channels.
|
||||
*/
|
||||
static void nic_set_lmac_vf_mapping(struct nicpf *nic)
|
||||
{
|
||||
int bgx, bgx_count, next_bgx_lmac = 0;
|
||||
int lmac, lmac_cnt = 0;
|
||||
u64 lmac_credit;
|
||||
|
||||
nic->num_vf_en = 0;
|
||||
if (nic->flags & NIC_TNS_ENABLED) {
|
||||
nic->num_vf_en = DEFAULT_NUM_VF_ENABLED;
|
||||
return;
|
||||
}
|
||||
|
||||
bgx_get_count(nic->node, &bgx_count);
|
||||
debug("bgx_count: %d\n", bgx_count);
|
||||
|
||||
for (bgx = 0; bgx < nic->hw->bgx_cnt; bgx++) {
|
||||
if (!(bgx_count & (1 << bgx)))
|
||||
continue;
|
||||
nic->bgx_cnt++;
|
||||
lmac_cnt = bgx_get_lmac_count(nic->node, bgx);
|
||||
debug("lmac_cnt: %d for BGX%d\n", lmac_cnt, bgx);
|
||||
for (lmac = 0; lmac < lmac_cnt; lmac++)
|
||||
nic->vf_lmac_map[next_bgx_lmac++] =
|
||||
NIC_SET_VF_LMAC_MAP(bgx, lmac);
|
||||
nic->num_vf_en += lmac_cnt;
|
||||
|
||||
/* Program LMAC credits */
|
||||
lmac_credit = (1ull << 1); /* channel credit enable */
|
||||
lmac_credit |= (0x1ff << 2);
|
||||
lmac_credit |= (((((48 * 1024) / lmac_cnt) -
|
||||
NIC_HW_MAX_FRS) / 16) << 12);
|
||||
lmac = bgx * MAX_LMAC_PER_BGX;
|
||||
for (; lmac < lmac_cnt + (bgx * MAX_LMAC_PER_BGX); lmac++)
|
||||
nic_reg_write(nic, NIC_PF_LMAC_0_7_CREDIT + (lmac * 8),
|
||||
lmac_credit);
|
||||
}
|
||||
}
|
||||
|
||||
static void nic_get_hw_info(struct nicpf *nic)
|
||||
{
|
||||
u16 sdevid;
|
||||
struct hw_info *hw = nic->hw;
|
||||
|
||||
dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
|
||||
|
||||
switch (sdevid) {
|
||||
case PCI_SUBSYS_DEVID_88XX_NIC_PF:
|
||||
hw->bgx_cnt = MAX_BGX_PER_NODE;
|
||||
hw->chans_per_lmac = 16;
|
||||
hw->chans_per_bgx = 128;
|
||||
hw->cpi_cnt = 2048;
|
||||
hw->rssi_cnt = 4096;
|
||||
hw->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
|
||||
hw->tl3_cnt = 256;
|
||||
hw->tl2_cnt = 64;
|
||||
hw->tl1_cnt = 2;
|
||||
hw->tl1_per_bgx = true;
|
||||
hw->model_id = 0x88;
|
||||
break;
|
||||
case PCI_SUBSYS_DEVID_81XX_NIC_PF:
|
||||
hw->bgx_cnt = MAX_BGX_PER_NODE;
|
||||
hw->chans_per_lmac = 8;
|
||||
hw->chans_per_bgx = 32;
|
||||
hw->chans_per_rgx = 8;
|
||||
hw->chans_per_lbk = 24;
|
||||
hw->cpi_cnt = 512;
|
||||
hw->rssi_cnt = 256;
|
||||
hw->rss_ind_tbl_size = 32; /* Max RSSI / Max interfaces */
|
||||
hw->tl3_cnt = 64;
|
||||
hw->tl2_cnt = 16;
|
||||
hw->tl1_cnt = 10;
|
||||
hw->tl1_per_bgx = false;
|
||||
hw->model_id = 0x81;
|
||||
break;
|
||||
case PCI_SUBSYS_DEVID_83XX_NIC_PF:
|
||||
hw->bgx_cnt = MAX_BGX_PER_NODE;
|
||||
hw->chans_per_lmac = 8;
|
||||
hw->chans_per_bgx = 32;
|
||||
hw->chans_per_lbk = 64;
|
||||
hw->cpi_cnt = 2048;
|
||||
hw->rssi_cnt = 1024;
|
||||
hw->rss_ind_tbl_size = 64; /* Max RSSI / Max interfaces */
|
||||
hw->tl3_cnt = 256;
|
||||
hw->tl2_cnt = 64;
|
||||
hw->tl1_cnt = 18;
|
||||
hw->tl1_per_bgx = false;
|
||||
hw->model_id = 0x83;
|
||||
break;
|
||||
}
|
||||
|
||||
hw->tl4_cnt = MAX_QUEUES_PER_QSET * pci_sriov_get_totalvfs(nic->udev);
|
||||
}
|
||||
|
||||
static void nic_init_hw(struct nicpf *nic)
|
||||
{
|
||||
int i;
|
||||
u64 reg;
|
||||
u64 *pkind = (u64 *)&nic->pkind;
|
||||
|
||||
/* Get HW capability info */
|
||||
nic_get_hw_info(nic);
|
||||
|
||||
/* Enable NIC HW block */
|
||||
nic_reg_write(nic, NIC_PF_CFG, 0x3);
|
||||
|
||||
/* Enable backpressure */
|
||||
nic_reg_write(nic, NIC_PF_BP_CFG, (1ULL << 6) | 0x03);
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG, (1ULL << 63) | 0x08);
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_BP_CFG + (1 << 8),
|
||||
(1ULL << 63) | 0x09);
|
||||
|
||||
for (i = 0; i < NIC_MAX_CHANS; i++)
|
||||
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (i << 3), 1);
|
||||
|
||||
if (nic->flags & NIC_TNS_ENABLED) {
|
||||
reg = NIC_TNS_MODE << 7;
|
||||
reg |= 0x06;
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
|
||||
reg &= ~0xFull;
|
||||
reg |= 0x07;
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
|
||||
} else {
|
||||
/* Disable TNS mode on both interfaces */
|
||||
reg = NIC_TNS_BYPASS_MODE << 7;
|
||||
reg |= 0x08; /* Block identifier */
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG, reg);
|
||||
reg &= ~0xFull;
|
||||
reg |= 0x09;
|
||||
nic_reg_write(nic, NIC_PF_INTF_0_1_SEND_CFG | (1 << 8), reg);
|
||||
}
|
||||
|
||||
/* PKIND configuration */
|
||||
nic->pkind.minlen = 0;
|
||||
nic->pkind.maxlen = NIC_HW_MAX_FRS + ETH_HLEN;
|
||||
nic->pkind.lenerr_en = 1;
|
||||
nic->pkind.rx_hdr = 0;
|
||||
nic->pkind.hdr_sl = 0;
|
||||
|
||||
for (i = 0; i < NIC_MAX_PKIND; i++)
|
||||
nic_reg_write(nic, NIC_PF_PKIND_0_15_CFG | (i << 3), *pkind);
|
||||
|
||||
nic_set_tx_pkt_pad(nic, NIC_HW_MIN_FRS);
|
||||
|
||||
/* Timer config */
|
||||
nic_reg_write(nic, NIC_PF_INTR_TIMER_CFG, NICPF_CLK_PER_INT_TICK);
|
||||
}
|
||||
|
||||
/* Channel parse index configuration */
|
||||
static void nic_config_cpi(struct nicpf *nic, struct cpi_cfg_msg *cfg)
|
||||
{
|
||||
struct hw_info *hw = nic->hw;
|
||||
u32 vnic, bgx, lmac, chan;
|
||||
u32 padd, cpi_count = 0;
|
||||
u64 cpi_base, cpi, rssi_base, rssi;
|
||||
u8 qset, rq_idx = 0;
|
||||
|
||||
vnic = cfg->vf_id;
|
||||
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[vnic]);
|
||||
|
||||
chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
|
||||
cpi_base = vnic * NIC_MAX_CPI_PER_LMAC;
|
||||
rssi_base = vnic * hw->rss_ind_tbl_size;
|
||||
|
||||
/* Rx channel configuration */
|
||||
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_BP_CFG | (chan << 3),
|
||||
(1ull << 63) | (vnic << 0));
|
||||
nic_reg_write(nic, NIC_PF_CHAN_0_255_RX_CFG | (chan << 3),
|
||||
((u64)cfg->cpi_alg << 62) | (cpi_base << 48));
|
||||
|
||||
if (cfg->cpi_alg == CPI_ALG_NONE)
|
||||
cpi_count = 1;
|
||||
else if (cfg->cpi_alg == CPI_ALG_VLAN) /* 3 bits of PCP */
|
||||
cpi_count = 8;
|
||||
else if (cfg->cpi_alg == CPI_ALG_VLAN16) /* 3 bits PCP + DEI */
|
||||
cpi_count = 16;
|
||||
else if (cfg->cpi_alg == CPI_ALG_DIFF) /* 6bits DSCP */
|
||||
cpi_count = NIC_MAX_CPI_PER_LMAC;
|
||||
|
||||
/* RSS Qset, Qidx mapping */
|
||||
qset = cfg->vf_id;
|
||||
rssi = rssi_base;
|
||||
for (; rssi < (rssi_base + cfg->rq_cnt); rssi++) {
|
||||
nic_reg_write(nic, NIC_PF_RSSI_0_4097_RQ | (rssi << 3),
|
||||
(qset << 3) | rq_idx);
|
||||
rq_idx++;
|
||||
}
|
||||
|
||||
rssi = 0;
|
||||
cpi = cpi_base;
|
||||
for (; cpi < (cpi_base + cpi_count); cpi++) {
|
||||
/* Determine port to channel adder */
|
||||
if (cfg->cpi_alg != CPI_ALG_DIFF)
|
||||
padd = cpi % cpi_count;
|
||||
else
|
||||
padd = cpi % 8; /* 3 bits CS out of 6bits DSCP */
|
||||
|
||||
/* Leave RSS_SIZE as '0' to disable RSS */
|
||||
if (pass1_silicon(nic->rev_id, nic->hw->model_id)) {
|
||||
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
|
||||
(vnic << 24) | (padd << 16) |
|
||||
(rssi_base + rssi));
|
||||
} else {
|
||||
/* Set MPI_ALG to '0' to disable MCAM parsing */
|
||||
nic_reg_write(nic, NIC_PF_CPI_0_2047_CFG | (cpi << 3),
|
||||
(padd << 16));
|
||||
/* MPI index is same as CPI if MPI_ALG is not enabled */
|
||||
nic_reg_write(nic, NIC_PF_MPI_0_2047_CFG | (cpi << 3),
|
||||
(vnic << 24) | (rssi_base + rssi));
|
||||
}
|
||||
|
||||
if ((rssi + 1) >= cfg->rq_cnt)
|
||||
continue;
|
||||
|
||||
if (cfg->cpi_alg == CPI_ALG_VLAN)
|
||||
rssi++;
|
||||
else if (cfg->cpi_alg == CPI_ALG_VLAN16)
|
||||
rssi = ((cpi - cpi_base) & 0xe) >> 1;
|
||||
else if (cfg->cpi_alg == CPI_ALG_DIFF)
|
||||
rssi = ((cpi - cpi_base) & 0x38) >> 3;
|
||||
}
|
||||
nic->cpi_base[cfg->vf_id] = cpi_base;
|
||||
nic->rssi_base[cfg->vf_id] = rssi_base;
|
||||
}
|
||||
|
||||
/* Transmit channel configuration (TL4 -> TL3 -> Chan)
|
||||
* VNIC0-SQ0 -> TL4(0) -> TL4A(0) -> TL3[0] -> BGX0/LMAC0/Chan0
|
||||
* VNIC1-SQ0 -> TL4(8) -> TL4A(2) -> TL3[2] -> BGX0/LMAC1/Chan0
|
||||
* VNIC2-SQ0 -> TL4(16) -> TL4A(4) -> TL3[4] -> BGX0/LMAC2/Chan0
|
||||
* VNIC3-SQ0 -> TL4(24) -> TL4A(6) -> TL3[6] -> BGX0/LMAC3/Chan0
|
||||
* VNIC4-SQ0 -> TL4(512) -> TL4A(128) -> TL3[128] -> BGX1/LMAC0/Chan0
|
||||
* VNIC5-SQ0 -> TL4(520) -> TL4A(130) -> TL3[130] -> BGX1/LMAC1/Chan0
|
||||
* VNIC6-SQ0 -> TL4(528) -> TL4A(132) -> TL3[132] -> BGX1/LMAC2/Chan0
|
||||
* VNIC7-SQ0 -> TL4(536) -> TL4A(134) -> TL3[134] -> BGX1/LMAC3/Chan0
|
||||
*/
|
||||
static void nic_tx_channel_cfg(struct nicpf *nic, u8 vnic,
|
||||
struct sq_cfg_msg *sq)
|
||||
{
|
||||
struct hw_info *hw = nic->hw;
|
||||
u32 bgx, lmac, chan;
|
||||
u32 tl2, tl3, tl4;
|
||||
u32 rr_quantum;
|
||||
u8 sq_idx = sq->sq_num;
|
||||
u8 pqs_vnic = vnic;
|
||||
int svf;
|
||||
u16 sdevid;
|
||||
|
||||
dm_pci_read_config16(nic->udev, PCI_SUBSYSTEM_ID, &sdevid);
|
||||
|
||||
bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
|
||||
lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(nic->vf_lmac_map[pqs_vnic]);
|
||||
|
||||
/* 24 bytes for FCS, IPG and preamble */
|
||||
rr_quantum = ((NIC_HW_MAX_FRS + 24) / 4);
|
||||
|
||||
/* For 88xx 0-511 TL4 transmits via BGX0 and
|
||||
* 512-1023 TL4s transmit via BGX1.
|
||||
*/
|
||||
if (hw->tl1_per_bgx) {
|
||||
tl4 = bgx * (hw->tl4_cnt / hw->bgx_cnt);
|
||||
if (!sq->sqs_mode) {
|
||||
tl4 += (lmac * MAX_QUEUES_PER_QSET);
|
||||
} else {
|
||||
for (svf = 0; svf < MAX_SQS_PER_VF_SINGLE_NODE; svf++) {
|
||||
if (nic->vf_sqs[pqs_vnic][svf] == vnic)
|
||||
break;
|
||||
}
|
||||
tl4 += (MAX_LMAC_PER_BGX * MAX_QUEUES_PER_QSET);
|
||||
tl4 += (lmac * MAX_QUEUES_PER_QSET *
|
||||
MAX_SQS_PER_VF_SINGLE_NODE);
|
||||
tl4 += (svf * MAX_QUEUES_PER_QSET);
|
||||
}
|
||||
} else {
|
||||
tl4 = (vnic * MAX_QUEUES_PER_QSET);
|
||||
}
|
||||
|
||||
tl4 += sq_idx;
|
||||
|
||||
tl3 = tl4 / (hw->tl4_cnt / hw->tl3_cnt);
|
||||
nic_reg_write(nic, NIC_PF_QSET_0_127_SQ_0_7_CFG2 |
|
||||
((u64)vnic << NIC_QS_ID_SHIFT) |
|
||||
((u32)sq_idx << NIC_Q_NUM_SHIFT), tl4);
|
||||
nic_reg_write(nic, NIC_PF_TL4_0_1023_CFG | (tl4 << 3),
|
||||
((u64)vnic << 27) | ((u32)sq_idx << 24) | rr_quantum);
|
||||
|
||||
nic_reg_write(nic, NIC_PF_TL3_0_255_CFG | (tl3 << 3), rr_quantum);
|
||||
|
||||
/* On 88xx 0-127 channels are for BGX0 and
|
||||
* 127-255 channels for BGX1.
|
||||
*
|
||||
* On 81xx/83xx TL3_CHAN reg should be configured with channel
|
||||
* within the LMAC, i.e. 0-7, and not the actual channel number as on 88xx
|
||||
*/
|
||||
chan = (lmac * hw->chans_per_lmac) + (bgx * hw->chans_per_bgx);
|
||||
if (hw->tl1_per_bgx)
|
||||
nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), chan);
|
||||
else
|
||||
nic_reg_write(nic, NIC_PF_TL3_0_255_CHAN | (tl3 << 3), 0);
|
||||
|
||||
/* Enable backpressure on the channel */
|
||||
nic_reg_write(nic, NIC_PF_CHAN_0_255_TX_CFG | (chan << 3), 1);
|
||||
|
||||
tl2 = tl3 >> 2;
|
||||
nic_reg_write(nic, NIC_PF_TL3A_0_63_CFG | (tl2 << 3), tl2);
|
||||
nic_reg_write(nic, NIC_PF_TL2_0_63_CFG | (tl2 << 3), rr_quantum);
|
||||
/* No priorities as of now */
|
||||
nic_reg_write(nic, NIC_PF_TL2_0_63_PRI | (tl2 << 3), 0x00);
|
||||
|
||||
/* Unlike 88xx, where TL2s 0-31 transmit to TL1 '0' and the rest to TL1 '1',
 * on 81xx/83xx the TL2 needs to be configured to transmit to one of the
 * possible LMACs.
 *
 * This register doesn't exist on 88xx.
 */
|
||||
if (!hw->tl1_per_bgx)
|
||||
nic_reg_write(nic, NIC_PF_TL2_LMAC | (tl2 << 3),
|
||||
lmac + (bgx * MAX_LMAC_PER_BGX));
|
||||
}
|
||||
|
||||
int nic_initialize(struct udevice *dev)
|
||||
{
|
||||
struct nicpf *nic = dev_get_priv(dev);
|
||||
|
||||
nic->udev = dev;
|
||||
nic->hw = calloc(1, sizeof(struct hw_info));
|
||||
if (!nic->hw)
|
||||
return -ENOMEM;
|
||||
|
||||
/* MAP PF's configuration registers */
|
||||
nic->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
|
||||
PCI_REGION_MEM);
|
||||
if (!nic->reg_base) {
|
||||
printf("Cannot map config register space, aborting\n");
|
||||
goto exit;
|
||||
}
|
||||
|
||||
nic->node = node_id(nic->reg_base);
|
||||
dm_pci_read_config8(dev, PCI_REVISION_ID, &nic->rev_id);
|
||||
|
||||
/* By default set NIC in TNS bypass mode */
|
||||
nic->flags &= ~NIC_TNS_ENABLED;
|
||||
|
||||
/* Initialize hardware */
|
||||
nic_init_hw(nic);
|
||||
|
||||
nic_set_lmac_vf_mapping(nic);
|
||||
|
||||
/* Set RSS TBL size for each VF */
|
||||
nic->rss_ind_tbl_size = NIC_MAX_RSS_IDR_TBL_SIZE;
|
||||
|
||||
nic->rss_ind_tbl_size = rounddown_pow_of_two(nic->rss_ind_tbl_size);
|
||||
|
||||
return 0;
|
||||
exit:
|
||||
free(nic->hw);
|
||||
return -ENODEV;
|
||||
}
|
||||
|
||||
int octeontx_nic_probe(struct udevice *dev)
{
	int ret = 0;
	struct nicpf *nicpf = dev_get_priv(dev);

	nicpf->udev = dev;
	ret = nic_initialize(dev);
	if (ret < 0) {
		printf("couldn't initialize NIC PF\n");
		return ret;
	}

	ret = pci_sriov_init(dev, nicpf->num_vf_en);
	if (ret < 0)
		printf("enabling SRIOV failed for num VFs %d\n",
		       nicpf->num_vf_en);

	return ret;
}

U_BOOT_DRIVER(octeontx_nic) = {
	.name = "octeontx_nic",
	.id = UCLASS_MISC,
	.probe = octeontx_nic_probe,
	.priv_auto_alloc_size = sizeof(struct nicpf),
};

static struct pci_device_id octeontx_nic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NIC) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_nic, octeontx_nic_supported);
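The PF code above leans heavily on the compact VF-to-LMAC map from nic.h, which packs the BGX index into the upper nibble and the LMAC index into the lower nibble of a single byte. A small worked example follows; the helper function and the chosen values are illustrative only, not part of this commit.

#include <common.h>
#include "nic.h"

/* Worked example of the vf_lmac_map encoding used by nic_main.c:
 * BGX1/LMAC2 packs to 0x12 and unpacks back to (1, 2).
 */
static void vf_lmac_map_example(void)
{
	u8 map = NIC_SET_VF_LMAC_MAP(1, 2);	/* ((1 & 0xF) << 4) | (2 & 0xF) == 0x12 */

	printf("bgx %d lmac %d\n",
	       NIC_GET_BGX_FROM_VF_LMAC_MAP(map),	/* (0x12 >> 4) & 0xF == 1 */
	       NIC_GET_LMAC_FROM_VF_LMAC_MAP(map));	/* 0x12 & 0xF == 2 */
}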
drivers/net/octeontx/nic_reg.h (new file, 250 lines)
@@ -0,0 +1,250 @@
/* SPDX-License-Identifier: GPL-2.0
|
||||
*
|
||||
* Copyright (C) 2018 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#ifndef NIC_REG_H
|
||||
#define NIC_REG_H
|
||||
|
||||
#define NIC_PF_REG_COUNT 29573
|
||||
#define NIC_VF_REG_COUNT 249
|
||||
|
||||
/* Physical function register offsets */
|
||||
#define NIC_PF_CFG (0x0000)
#define NIC_PF_STATUS (0x0010)

#define NIC_PF_INTR_TIMER_CFG (0x0030)
#define NIC_PF_BIST_STATUS (0x0040)
#define NIC_PF_SOFT_RESET (0x0050)

#define NIC_PF_TCP_TIMER (0x0060)
#define NIC_PF_BP_CFG (0x0080)
#define NIC_PF_RRM_CFG (0x0088)
#define NIC_PF_CQM_CF (0x00A0)
#define NIC_PF_CNM_CF (0x00A8)
#define NIC_PF_CNM_STATUS (0x00B0)
#define NIC_PF_CQ_AVG_CFG (0x00C0)
#define NIC_PF_RRM_AVG_CFG (0x00C8)

#define NIC_PF_INTF_0_1_SEND_CFG (0x0200)
#define NIC_PF_INTF_0_1_BP_CFG (0x0208)
#define NIC_PF_INTF_0_1_BP_DIS_0_1 (0x0210)
#define NIC_PF_INTF_0_1_BP_SW_0_1 (0x0220)
#define NIC_PF_RBDR_BP_STATE_0_3 (0x0240)

#define NIC_PF_MAILBOX_INT (0x0410)
#define NIC_PF_MAILBOX_INT_W1S (0x0430)
#define NIC_PF_MAILBOX_ENA_W1C (0x0450)
#define NIC_PF_MAILBOX_ENA_W1S (0x0470)

#define NIC_PF_RX_ETYPE_0_7 (0x0500)
#define NIC_PF_RX_CFG (0x05D0)
#define NIC_PF_PKIND_0_15_CFG (0x0600)

#define NIC_PF_ECC0_FLIP0 (0x1000)
#define NIC_PF_ECC1_FLIP0 (0x1008)
#define NIC_PF_ECC2_FLIP0 (0x1010)
#define NIC_PF_ECC3_FLIP0 (0x1018)
#define NIC_PF_ECC0_FLIP1 (0x1080)
#define NIC_PF_ECC1_FLIP1 (0x1088)
#define NIC_PF_ECC2_FLIP1 (0x1090)
#define NIC_PF_ECC3_FLIP1 (0x1098)
#define NIC_PF_ECC0_CDIS (0x1100)
#define NIC_PF_ECC1_CDIS (0x1108)
#define NIC_PF_ECC2_CDIS (0x1110)
#define NIC_PF_ECC3_CDIS (0x1118)
#define NIC_PF_BIST0_STATUS (0x1280)
#define NIC_PF_BIST1_STATUS (0x1288)
#define NIC_PF_BIST2_STATUS (0x1290)
#define NIC_PF_BIST3_STATUS (0x1298)

#define NIC_PF_ECC0_SBE_INT (0x2000)
#define NIC_PF_ECC0_SBE_INT_W1S (0x2008)
#define NIC_PF_ECC0_SBE_ENA_W1C (0x2010)
#define NIC_PF_ECC0_SBE_ENA_W1S (0x2018)
#define NIC_PF_ECC0_DBE_INT (0x2100)
#define NIC_PF_ECC0_DBE_INT_W1S (0x2108)
#define NIC_PF_ECC0_DBE_ENA_W1C (0x2110)
#define NIC_PF_ECC0_DBE_ENA_W1S (0x2118)

#define NIC_PF_ECC1_SBE_INT (0x2200)
#define NIC_PF_ECC1_SBE_INT_W1S (0x2208)
#define NIC_PF_ECC1_SBE_ENA_W1C (0x2210)
#define NIC_PF_ECC1_SBE_ENA_W1S (0x2218)
#define NIC_PF_ECC1_DBE_INT (0x2300)
#define NIC_PF_ECC1_DBE_INT_W1S (0x2308)
#define NIC_PF_ECC1_DBE_ENA_W1C (0x2310)
#define NIC_PF_ECC1_DBE_ENA_W1S (0x2318)

#define NIC_PF_ECC2_SBE_INT (0x2400)
#define NIC_PF_ECC2_SBE_INT_W1S (0x2408)
#define NIC_PF_ECC2_SBE_ENA_W1C (0x2410)
#define NIC_PF_ECC2_SBE_ENA_W1S (0x2418)
#define NIC_PF_ECC2_DBE_INT (0x2500)
#define NIC_PF_ECC2_DBE_INT_W1S (0x2508)
#define NIC_PF_ECC2_DBE_ENA_W1C (0x2510)
#define NIC_PF_ECC2_DBE_ENA_W1S (0x2518)

#define NIC_PF_ECC3_SBE_INT (0x2600)
#define NIC_PF_ECC3_SBE_INT_W1S (0x2608)
#define NIC_PF_ECC3_SBE_ENA_W1C (0x2610)
#define NIC_PF_ECC3_SBE_ENA_W1S (0x2618)
#define NIC_PF_ECC3_DBE_INT (0x2700)
#define NIC_PF_ECC3_DBE_INT_W1S (0x2708)
#define NIC_PF_ECC3_DBE_ENA_W1C (0x2710)
#define NIC_PF_ECC3_DBE_ENA_W1S (0x2718)

#define NIC_PF_CPI_0_2047_CFG (0x200000)
#define NIC_PF_MPI_0_2047_CFG (0x210000)
#define NIC_PF_RSSI_0_4097_RQ (0x220000)
#define NIC_PF_LMAC_0_7_CFG (0x240000)
#define NIC_PF_LMAC_0_7_SW_XOFF (0x242000)
#define NIC_PF_LMAC_0_7_CREDIT (0x244000)
#define NIC_PF_CHAN_0_255_TX_CFG (0x400000)
#define NIC_PF_CHAN_0_255_RX_CFG (0x420000)
#define NIC_PF_CHAN_0_255_SW_XOFF (0x440000)
#define NIC_PF_CHAN_0_255_CREDIT (0x460000)
#define NIC_PF_CHAN_0_255_RX_BP_CFG (0x480000)

#define NIC_PF_SW_SYNC_RX (0x490000)

#define NIC_PF_SW_SYNC_RX_DONE (0x490008)
#define NIC_PF_TL2_0_63_CFG (0x500000)
#define NIC_PF_TL2_0_63_PRI (0x520000)
#define NIC_PF_TL2_LMAC (0x540000)
#define NIC_PF_TL2_0_63_SH_STATUS (0x580000)
#define NIC_PF_TL3A_0_63_CFG (0x5F0000)
#define NIC_PF_TL3_0_255_CFG (0x600000)
#define NIC_PF_TL3_0_255_CHAN (0x620000)
#define NIC_PF_TL3_0_255_PIR (0x640000)
#define NIC_PF_TL3_0_255_SW_XOFF (0x660000)
#define NIC_PF_TL3_0_255_CNM_RATE (0x680000)
#define NIC_PF_TL3_0_255_SH_STATUS (0x6A0000)
#define NIC_PF_TL4A_0_255_CFG (0x6F0000)
#define NIC_PF_TL4_0_1023_CFG (0x800000)
#define NIC_PF_TL4_0_1023_SW_XOFF (0x820000)
#define NIC_PF_TL4_0_1023_SH_STATUS (0x840000)
#define NIC_PF_TL4A_0_1023_CNM_RATE (0x880000)
#define NIC_PF_TL4A_0_1023_CNM_STATUS (0x8A0000)

#define NIC_PF_VF_0_127_MAILBOX_0_1 (0x20002030)
#define NIC_PF_VNIC_0_127_TX_STAT_0_4 (0x20004000)
#define NIC_PF_VNIC_0_127_RX_STAT_0_13 (0x20004100)
#define NIC_PF_QSET_0_127_LOCK_0_15 (0x20006000)
#define NIC_PF_QSET_0_127_CFG (0x20010000)
#define NIC_PF_QSET_0_127_RQ_0_7_CFG (0x20010400)
#define NIC_PF_QSET_0_127_RQ_0_7_DROP_CFG (0x20010420)
#define NIC_PF_QSET_0_127_RQ_0_7_BP_CFG (0x20010500)
#define NIC_PF_QSET_0_127_RQ_0_7_STAT_0_1 (0x20010600)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG (0x20010C00)
#define NIC_PF_QSET_0_127_SQ_0_7_CFG2 (0x20010C08)
#define NIC_PF_QSET_0_127_SQ_0_7_STAT_0_1 (0x20010D00)

#define NIC_PF_MSIX_VEC_0_18_ADDR (0x000000)
#define NIC_PF_MSIX_VEC_0_CTL (0x000008)
#define NIC_PF_MSIX_PBA_0 (0x0F0000)

/* Virtual function register offsets */
#define NIC_VNIC_CFG (0x000020)
#define NIC_VF_PF_MAILBOX_0_1 (0x000130)
#define NIC_VF_INT (0x000200)
#define NIC_VF_INT_W1S (0x000220)
#define NIC_VF_ENA_W1C (0x000240)
#define NIC_VF_ENA_W1S (0x000260)

#define NIC_VNIC_RSS_CFG (0x0020E0)
#define NIC_VNIC_RSS_KEY_0_4 (0x002200)
#define NIC_VNIC_TX_STAT_0_4 (0x004000)
#define NIC_VNIC_RX_STAT_0_13 (0x004100)
#define NIC_QSET_RQ_GEN_CFG (0x010010)

#define NIC_QSET_CQ_0_7_CFG (0x010400)
#define NIC_QSET_CQ_0_7_CFG2 (0x010408)
#define NIC_QSET_CQ_0_7_THRESH (0x010410)
#define NIC_QSET_CQ_0_7_BASE (0x010420)
#define NIC_QSET_CQ_0_7_HEAD (0x010428)
#define NIC_QSET_CQ_0_7_TAIL (0x010430)
#define NIC_QSET_CQ_0_7_DOOR (0x010438)
#define NIC_QSET_CQ_0_7_STATUS (0x010440)
#define NIC_QSET_CQ_0_7_STATUS2 (0x010448)
#define NIC_QSET_CQ_0_7_DEBUG (0x010450)

#define NIC_QSET_RQ_0_7_CFG (0x010600)
#define NIC_QSET_RQ_0_7_STAT_0_1 (0x010700)

#define NIC_QSET_SQ_0_7_CFG (0x010800)
#define NIC_QSET_SQ_0_7_THRESH (0x010810)
#define NIC_QSET_SQ_0_7_BASE (0x010820)
#define NIC_QSET_SQ_0_7_HEAD (0x010828)
#define NIC_QSET_SQ_0_7_TAIL (0x010830)
#define NIC_QSET_SQ_0_7_DOOR (0x010838)
#define NIC_QSET_SQ_0_7_STATUS (0x010840)
#define NIC_QSET_SQ_0_7_DEBUG (0x010848)
#define NIC_QSET_SQ_0_7_CNM_CHG (0x010860)
#define NIC_QSET_SQ_0_7_STAT_0_1 (0x010900)

#define NIC_QSET_RBDR_0_1_CFG (0x010C00)
#define NIC_QSET_RBDR_0_1_THRESH (0x010C10)
#define NIC_QSET_RBDR_0_1_BASE (0x010C20)
#define NIC_QSET_RBDR_0_1_HEAD (0x010C28)
#define NIC_QSET_RBDR_0_1_TAIL (0x010C30)
#define NIC_QSET_RBDR_0_1_DOOR (0x010C38)
#define NIC_QSET_RBDR_0_1_STATUS0 (0x010C40)
#define NIC_QSET_RBDR_0_1_STATUS1 (0x010C48)
#define NIC_QSET_RBDR_0_1_PREFETCH_STATUS (0x010C50)

#define NIC_VF_MSIX_VECTOR_0_19_ADDR (0x000000)
#define NIC_VF_MSIX_VECTOR_0_19_CTL (0x000008)
#define NIC_VF_MSIX_PBA (0x0F0000)

/* Offsets within registers */
#define NIC_MSIX_VEC_SHIFT 4
#define NIC_Q_NUM_SHIFT 18
#define NIC_QS_ID_SHIFT 21
#define NIC_VF_NUM_SHIFT 21

/* Port kind configuration register */
struct pkind_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	uint64_t reserved_42_63:22;
	uint64_t hdr_sl:5; /* Header skip length */
	uint64_t rx_hdr:3; /* TNS Receive header present */
	uint64_t lenerr_en:1; /* L2 length error check enable */
	uint64_t reserved_32_32:1;
	uint64_t maxlen:16; /* Max frame size */
	uint64_t minlen:16; /* Min frame size */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	uint64_t minlen:16;
	uint64_t maxlen:16;
	uint64_t reserved_32_32:1;
	uint64_t lenerr_en:1;
	uint64_t rx_hdr:3;
	uint64_t hdr_sl:5;
	uint64_t reserved_42_63:22;
#endif
};

static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
	__attribute__ ((pure, always_inline));
static inline uint64_t BGXX_PF_BAR0(unsigned long param1)
{
	assert(param1 <= 1);
	return 0x87E0E0000000 + (param1 << 24);
}

#define BGXX_PF_BAR0_SIZE 0x400000
#define NIC_PF_BAR0 0x843000000000
#define NIC_PF_BAR0_SIZE 0x40000000

static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
	__attribute__ ((pure, always_inline));
static inline uint64_t NIC_VFX_BAR0(unsigned long param1)
{
	assert(param1 <= 127);

	return 0x8430A0000000 + (param1 << 21);
}

#define NIC_VFX_BAR0_SIZE 0x200000

#endif /* NIC_REG_H */
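The fixed BAR helpers above make the per-VF register window easy to compute by hand. A minimal sketch of that arithmetic, for illustration only and using just the constants defined above (vf_reg_addr() is a hypothetical helper, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* Each VF owns a 2 MB (NIC_VFX_BAR0_SIZE) register window starting at
 * NIC_VFX_BAR0(vf).  For example, VF 3 starts at
 * 0x8430A0000000 + (3 << 21) = 0x8430A0600000.
 */
static uint64_t vf_reg_addr(unsigned long vf, uint64_t offset)
{
	assert(vf <= 127);
	assert(offset < 0x200000);	/* stay inside NIC_VFX_BAR0_SIZE */
	return (0x8430A0000000ull + ((uint64_t)vf << 21)) + offset;
}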
581
drivers/net/octeontx/nicvf_main.c
Normal file
@ -0,0 +1,581 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <dm.h>
#include <malloc.h>
#include <misc.h>
#include <net.h>
#include <pci.h>
#include <pci_ids.h>
#include <phy.h>
#include <asm/io.h>
#include <linux/delay.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"

/* Register read/write APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val)
{
	writeq(val, nic->reg_base + offset);
}

u64 nicvf_reg_read(struct nicvf *nic, u64 offset)
{
	return readq(nic->reg_base + offset);
}

void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val)
{
	void *addr = nic->reg_base + offset;

	writeq(val, (void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}

u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx)
{
	void *addr = nic->reg_base + offset;

	return readq((void *)(addr + (qidx << NIC_Q_NUM_SHIFT)));
}
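The qidx shift above is what turns the flat NIC_QSET_* offsets from nic_reg.h into per-queue registers. A small worked example of the addressing, assuming the constants shown earlier (NIC_Q_NUM_SHIFT = 18, NIC_QSET_CQ_0_7_DOOR = 0x010438); qset_reg_offset() is purely illustrative:

#include <stdint.h>

#define NIC_Q_NUM_SHIFT		18
#define NIC_QSET_CQ_0_7_DOOR	(0x010438)

/* The doorbell of completion queue 2 inside a VF's BAR0 window sits at
 * 0x010438 + (2 << 18) = 0x090438.
 */
static inline uint64_t qset_reg_offset(uint64_t offset, uint64_t qidx)
{
	return offset + (qidx << NIC_Q_NUM_SHIFT);
}
/* qset_reg_offset(NIC_QSET_CQ_0_7_DOOR, 2) == 0x090438 */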
static void nicvf_handle_mbx_intr(struct nicvf *nic);

/* VF -> PF mailbox communication */
static void nicvf_write_to_mbx(struct nicvf *nic, union nic_mbx *mbx)
{
	u64 *msg = (u64 *)mbx;

	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 0, msg[0]);
	nicvf_reg_write(nic, NIC_VF_PF_MAILBOX_0_1 + 8, msg[1]);
}

int nicvf_send_msg_to_pf(struct nicvf *nic, union nic_mbx *mbx)
{
	int timeout = NIC_PF_VF_MBX_TIMEOUT;
	int sleep = 10;

	nic->pf_acked = false;
	nic->pf_nacked = false;

	nicvf_write_to_mbx(nic, mbx);

	nic_handle_mbx_intr(nic->nicpf, nic->vf_id);

	/* Wait for previous message to be acked, timeout 2sec */
	while (!nic->pf_acked) {
		if (nic->pf_nacked)
			return -1;
		mdelay(sleep);
		nicvf_handle_mbx_intr(nic);

		if (nic->pf_acked)
			break;
		timeout -= sleep;
		if (!timeout) {
			printf("PF didn't ack to mbox msg %d from VF%d\n",
			       (mbx->msg.msg & 0xFF), nic->vf_id);
			return -1;
		}
	}

	return 0;
}

/* Checks if VF is able to communicate with PF
 * and also gets the VNIC number this VF is associated to.
 */
static int nicvf_check_pf_ready(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.msg.msg = NIC_MBOX_MSG_READY;
	if (nicvf_send_msg_to_pf(nic, &mbx)) {
		printf("PF didn't respond to READY msg\n");
		return 0;
	}

	return 1;
}

static void nicvf_handle_mbx_intr(struct nicvf *nic)
{
	union nic_mbx mbx = {};
	struct eth_pdata *pdata = dev_get_platdata(nic->dev);
	u64 *mbx_data;
	u64 mbx_addr;
	int i;

	mbx_addr = NIC_VF_PF_MAILBOX_0_1;
	mbx_data = (u64 *)&mbx;

	for (i = 0; i < NIC_PF_VF_MAILBOX_SIZE; i++) {
		*mbx_data = nicvf_reg_read(nic, mbx_addr);
		mbx_data++;
		mbx_addr += sizeof(u64);
	}

	debug("Mbox message: msg: 0x%x\n", mbx.msg.msg);
	switch (mbx.msg.msg) {
	case NIC_MBOX_MSG_READY:
		nic->pf_acked = true;
		nic->vf_id = mbx.nic_cfg.vf_id & 0x7F;
		nic->tns_mode = mbx.nic_cfg.tns_mode & 0x7F;
		nic->node = mbx.nic_cfg.node_id;
		if (!nic->set_mac_pending)
			memcpy(pdata->enetaddr,
			       mbx.nic_cfg.mac_addr, 6);
		nic->loopback_supported = mbx.nic_cfg.loopback_supported;
		nic->link_up = false;
		nic->duplex = 0;
		nic->speed = 0;
		break;
	case NIC_MBOX_MSG_ACK:
		nic->pf_acked = true;
		break;
	case NIC_MBOX_MSG_NACK:
		nic->pf_nacked = true;
		break;
	case NIC_MBOX_MSG_BGX_LINK_CHANGE:
		nic->pf_acked = true;
		nic->link_up = mbx.link_status.link_up;
		nic->duplex = mbx.link_status.duplex;
		nic->speed = mbx.link_status.speed;
		if (nic->link_up) {
			printf("%s: Link is Up %d Mbps %s\n",
			       nic->dev->name, nic->speed,
			       nic->duplex == 1 ?
			       "Full duplex" : "Half duplex");
		} else {
			printf("%s: Link is Down\n", nic->dev->name);
		}
		break;
	default:
		printf("Invalid message from PF, msg 0x%x\n", mbx.msg.msg);
		break;
	}

	nicvf_clear_intr(nic, NICVF_INTR_MBOX, 0);
}

static int nicvf_hw_set_mac_addr(struct nicvf *nic, struct udevice *dev)
{
	union nic_mbx mbx = {};
	struct eth_pdata *pdata = dev_get_platdata(dev);

	mbx.mac.msg = NIC_MBOX_MSG_SET_MAC;
	mbx.mac.vf_id = nic->vf_id;
	memcpy(mbx.mac.mac_addr, pdata->enetaddr, 6);

	return nicvf_send_msg_to_pf(nic, &mbx);
}

static void nicvf_config_cpi(struct nicvf *nic)
{
	union nic_mbx mbx = {};

	mbx.cpi_cfg.msg = NIC_MBOX_MSG_CPI_CFG;
	mbx.cpi_cfg.vf_id = nic->vf_id;
	mbx.cpi_cfg.cpi_alg = nic->cpi_alg;
	mbx.cpi_cfg.rq_cnt = nic->qs->rq_cnt;

	nicvf_send_msg_to_pf(nic, &mbx);
}

static int nicvf_init_resources(struct nicvf *nic)
{
	int err;

	nic->num_qs = 1;

	/* Enable Qset */
	nicvf_qset_config(nic, true);

	/* Initialize queues and HW for data transfer */
	err = nicvf_config_data_transfer(nic, true);

	if (err) {
		printf("Failed to alloc/config VF's QSet resources\n");
		return err;
	}
	return 0;
}

static void nicvf_snd_pkt_handler(struct nicvf *nic,
				  struct cmp_queue *cq,
				  void *cq_desc, int cqe_type)
{
	struct cqe_send_t *cqe_tx;
	struct snd_queue *sq;
	struct sq_hdr_subdesc *hdr;

	cqe_tx = (struct cqe_send_t *)cq_desc;
	sq = &nic->qs->sq[cqe_tx->sq_idx];

	hdr = (struct sq_hdr_subdesc *)GET_SQ_DESC(sq, cqe_tx->sqe_ptr);
	if (hdr->subdesc_type != SQ_DESC_TYPE_HEADER)
		return;

	nicvf_check_cqe_tx_errs(nic, cq, cq_desc);
	nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
}

static int nicvf_rcv_pkt_handler(struct nicvf *nic,
				 struct cmp_queue *cq, void *cq_desc,
				 void **ppkt, int cqe_type)
{
	void *pkt;
	size_t pkt_len;
	struct cqe_rx_t *cqe_rx = (struct cqe_rx_t *)cq_desc;
	int err = 0;

	/* Check for errors */
	err = nicvf_check_cqe_rx_errs(nic, cq, cq_desc);
	if (err && !cqe_rx->rb_cnt)
		return -1;

	pkt = nicvf_get_rcv_pkt(nic, cq_desc, &pkt_len);
	if (!pkt) {
		debug("Packet not received\n");
		return -1;
	}

	if (pkt)
		*ppkt = pkt;

	return pkt_len;
}

int nicvf_cq_handler(struct nicvf *nic, void **ppkt, int *pkt_len)
{
	int cq_qnum = 0;
	int processed_sq_cqe = 0;
	int processed_rq_cqe = 0;
	int processed_cqe = 0;

	unsigned long cqe_count, cqe_head;
	struct queue_set *qs = nic->qs;
	struct cmp_queue *cq = &qs->cq[cq_qnum];
	struct cqe_rx_t *cq_desc;

	/* Get num of valid CQ entries, expecting the next one to be an SQ completion */
	cqe_count = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, cq_qnum);
	cqe_count &= 0xFFFF;
	if (!cqe_count)
		return 0;

	/* Get head of the valid CQ entries */
	cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_qnum);
	cqe_head >>= 9;
	cqe_head &= 0xFFFF;

	if (cqe_count) {
		/* Get the CQ descriptor */
		cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
		cqe_head++;
		cqe_head &= (cq->dmem.q_len - 1);
		/* Initiate prefetch for next descriptor */
		prefetch((struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head));

		switch (cq_desc->cqe_type) {
		case CQE_TYPE_RX:
			debug("%s: Got Rx CQE\n", nic->dev->name);
			*pkt_len = nicvf_rcv_pkt_handler(nic, cq, cq_desc,
							 ppkt, CQE_TYPE_RX);
			processed_rq_cqe++;
			break;
		case CQE_TYPE_SEND:
			debug("%s: Got Tx CQE\n", nic->dev->name);
			nicvf_snd_pkt_handler(nic, cq, cq_desc, CQE_TYPE_SEND);
			processed_sq_cqe++;
			break;
		default:
			debug("%s: Got CQ type %u\n", nic->dev->name,
			      cq_desc->cqe_type);
			break;
		}
		processed_cqe++;
	}

	/* Dequeue CQE */
	nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
			      cq_qnum, processed_cqe);

	asm volatile ("dsb sy");

	return (processed_sq_cqe | processed_rq_cqe);
}

/* Qset error interrupt handler
 *
 * As of now only CQ errors are handled
 */
void nicvf_handle_qs_err(struct nicvf *nic)
{
	struct queue_set *qs = nic->qs;
	int qidx;
	u64 status;

	/* Check if it is CQ err */
	for (qidx = 0; qidx < qs->cq_cnt; qidx++) {
		status = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS,
					      qidx);
		if (!(status & CQ_ERR_MASK))
			continue;
		/* Process already queued CQEs and reconfig CQ */
		nicvf_sq_disable(nic, qidx);
		nicvf_cmp_queue_config(nic, qs, qidx, true);
		nicvf_sq_free_used_descs(nic->dev, &qs->sq[qidx], qidx);
		nicvf_sq_enable(nic, &qs->sq[qidx], qidx);
	}
}

static int nicvf_free_pkt(struct udevice *dev, uchar *pkt, int pkt_len)
{
	struct nicvf *nic = dev_get_priv(dev);

	if (pkt && pkt_len)
		free(pkt);
	nicvf_refill_rbdr(nic);
	return 0;
}

static int nicvf_xmit(struct udevice *dev, void *pkt, int pkt_len)
{
	struct nicvf *nic = dev_get_priv(dev);
	int ret = 0;
	int rcv_len = 0;
	unsigned int timeout = 5000;
	void *rpkt = NULL;

	if (!nicvf_sq_append_pkt(nic, pkt, pkt_len)) {
		printf("VF%d: TX ring full\n", nic->vf_id);
		return -1;
	}

	/* check and update CQ for pkt sent */
	while (!ret && timeout--) {
		ret = nicvf_cq_handler(nic, &rpkt, &rcv_len);
		if (!ret) {
			debug("%s: %d, Not sent\n", __func__, __LINE__);
			udelay(10);
		}
	}

	return 0;
}

static int nicvf_recv(struct udevice *dev, int flags, uchar **packetp)
{
	struct nicvf *nic = dev_get_priv(dev);
	void *pkt;
	int pkt_len = 0;
#ifdef DEBUG
	u8 *dpkt;
	int i, j;
#endif

	nicvf_cq_handler(nic, &pkt, &pkt_len);

	if (pkt_len) {
#ifdef DEBUG
		dpkt = pkt;
		printf("RX packet contents:\n");
		for (i = 0; i < 8; i++) {
			puts("\t");
			for (j = 0; j < 10; j++)
				printf("%02x ", dpkt[i * 10 + j]);
			puts("\n");
		}
#endif
		*packetp = pkt;
	}

	return pkt_len;
}

void nicvf_stop(struct udevice *dev)
{
	struct nicvf *nic = dev_get_priv(dev);

	if (!nic->open)
		return;

	/* Free resources */
	nicvf_config_data_transfer(nic, false);

	/* Disable HW Qset */
	nicvf_qset_config(nic, false);

	nic->open = false;
}

int nicvf_open(struct udevice *dev)
{
	int err;
	struct nicvf *nic = dev_get_priv(dev);

	nicvf_hw_set_mac_addr(nic, dev);

	/* Configure CPI algorithm */
	nic->cpi_alg = CPI_ALG_NONE;
	nicvf_config_cpi(nic);

	/* Initialize the queues */
	err = nicvf_init_resources(nic);
	if (err)
		return -1;

	if (!nicvf_check_pf_ready(nic))
		return -1;

	nic->open = true;

	/* Make sure queue initialization is written */
	asm volatile("dsb sy");

	return 0;
}

int nicvf_write_hwaddr(struct udevice *dev)
{
	unsigned char ethaddr[ARP_HLEN];
	struct eth_pdata *pdata = dev_get_platdata(dev);
	struct nicvf *nic = dev_get_priv(dev);

	/* If the lower level firmware fails to set a proper MAC, the
	 * u-boot framework updates the MAC to a random address.
	 * Use this hook to update the MAC address in the environment.
	 */
	if (!eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr)) {
		eth_env_set_enetaddr_by_index("eth", dev->seq, pdata->enetaddr);
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
	}
	eth_env_get_enetaddr_by_index("eth", dev->seq, ethaddr);
	if (memcmp(ethaddr, pdata->enetaddr, ARP_HLEN)) {
		debug("%s: pMAC %pM\n", __func__, pdata->enetaddr);
		nicvf_hw_set_mac_addr(nic, dev);
	}
	return 0;
}

static void nicvf_probe_mdio_devices(void)
{
	struct udevice *pdev;
	int err;
	static int probed;

	if (probed)
		return;

	err = dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
				 PCI_DEVICE_ID_CAVIUM_SMI, 0,
				 &pdev);
	if (err)
		debug("%s couldn't find SMI device\n", __func__);
	probed = 1;
}

int nicvf_initialize(struct udevice *dev)
{
	struct nicvf *nicvf = dev_get_priv(dev);
	struct eth_pdata *pdata = dev_get_platdata(dev);
	int ret = 0, bgx, lmac;
	char name[16];
	unsigned char ethaddr[ARP_HLEN];
	struct udevice *pfdev;
	struct nicpf *pf;
	static int vfid;

	if (dm_pci_find_device(PCI_VENDOR_ID_CAVIUM,
			       PCI_DEVICE_ID_CAVIUM_NIC, 0, &pfdev)) {
		printf("%s NIC PF device not found..VF probe failed\n",
		       __func__);
		return -1;
	}
	pf = dev_get_priv(pfdev);
	nicvf->vf_id = vfid++;
	nicvf->dev = dev;
	nicvf->nicpf = pf;

	nicvf_probe_mdio_devices();

	/* Enable TSO support */
	nicvf->hw_tso = true;

	nicvf->reg_base = dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0,
					 PCI_REGION_MEM);

	debug("nicvf->reg_base: %p\n", nicvf->reg_base);

	if (!nicvf->reg_base) {
		printf("Cannot map config register space, aborting\n");
		ret = -1;
		goto fail;
	}

	ret = nicvf_set_qset_resources(nicvf);
	if (ret)
		return -1;

	sprintf(name, "vnic%u", nicvf->vf_id);
	debug("%s name %s\n", __func__, name);
	device_set_name(dev, name);

	bgx = NIC_GET_BGX_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
	lmac = NIC_GET_LMAC_FROM_VF_LMAC_MAP(pf->vf_lmac_map[nicvf->vf_id]);
	debug("%s VF %d BGX %d LMAC %d\n", __func__, nicvf->vf_id, bgx, lmac);
	debug("%s PF %p pfdev %p VF %p vfdev %p vf->pdata %p\n",
	      __func__, nicvf->nicpf, nicvf->nicpf->udev, nicvf, nicvf->dev,
	      pdata);

	fdt_board_get_ethaddr(bgx, lmac, ethaddr);

	debug("%s bgx %d lmac %d ethaddr %pM\n", __func__, bgx, lmac, ethaddr);

	if (is_valid_ethaddr(ethaddr)) {
		memcpy(pdata->enetaddr, ethaddr, ARP_HLEN);
		eth_env_set_enetaddr_by_index("eth", dev->seq, ethaddr);
	}
	debug("%s enetaddr %pM ethaddr %pM\n", __func__,
	      pdata->enetaddr, ethaddr);

fail:
	return ret;
}

int octeontx_vnic_probe(struct udevice *dev)
{
	return nicvf_initialize(dev);
}

static const struct eth_ops octeontx_vnic_ops = {
	.start = nicvf_open,
	.stop = nicvf_stop,
	.send = nicvf_xmit,
	.recv = nicvf_recv,
	.free_pkt = nicvf_free_pkt,
	.write_hwaddr = nicvf_write_hwaddr,
};

U_BOOT_DRIVER(octeontx_vnic) = {
	.name = "vnic",
	.id = UCLASS_ETH,
	.probe = octeontx_vnic_probe,
	.ops = &octeontx_vnic_ops,
	.priv_auto_alloc_size = sizeof(struct nicvf),
	.platdata_auto_alloc_size = sizeof(struct eth_pdata),
};

static struct pci_device_id octeontx_vnic_supported[] = {
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF) },
	{ PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_NICVF_1) },
	{}
};

U_BOOT_PCI_DEVICE(octeontx_vnic, octeontx_vnic_supported);
1140
drivers/net/octeontx/nicvf_queues.c
Normal file
File diff suppressed because it is too large
353
drivers/net/octeontx/nicvf_queues.h
Normal file
@ -0,0 +1,353 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

#ifndef NICVF_QUEUES_H
#define NICVF_QUEUES_H

#include "q_struct.h"

#define MAX_QUEUE_SET 128
#define MAX_RCV_QUEUES_PER_QS 8
#define MAX_RCV_BUF_DESC_RINGS_PER_QS 2
#define MAX_SND_QUEUES_PER_QS 8
#define MAX_CMP_QUEUES_PER_QS 8

/* VF's queue interrupt ranges */
#define NICVF_INTR_ID_CQ 0
#define NICVF_INTR_ID_SQ 8
#define NICVF_INTR_ID_RBDR 16
#define NICVF_INTR_ID_MISC 18
#define NICVF_INTR_ID_QS_ERR 19

#define RBDR_SIZE0 0ULL /* 8K entries */
#define RBDR_SIZE1 1ULL /* 16K entries */
#define RBDR_SIZE2 2ULL /* 32K entries */
#define RBDR_SIZE3 3ULL /* 64K entries */
#define RBDR_SIZE4 4ULL /* 126K entries */
#define RBDR_SIZE5 5ULL /* 256K entries */
#define RBDR_SIZE6 6ULL /* 512K entries */

#define SND_QUEUE_SIZE0 0ULL /* 1K entries */
#define SND_QUEUE_SIZE1 1ULL /* 2K entries */
#define SND_QUEUE_SIZE2 2ULL /* 4K entries */
#define SND_QUEUE_SIZE3 3ULL /* 8K entries */
#define SND_QUEUE_SIZE4 4ULL /* 16K entries */
#define SND_QUEUE_SIZE5 5ULL /* 32K entries */
#define SND_QUEUE_SIZE6 6ULL /* 64K entries */

#define CMP_QUEUE_SIZE0 0ULL /* 1K entries */
#define CMP_QUEUE_SIZE1 1ULL /* 2K entries */
#define CMP_QUEUE_SIZE2 2ULL /* 4K entries */
#define CMP_QUEUE_SIZE3 3ULL /* 8K entries */
#define CMP_QUEUE_SIZE4 4ULL /* 16K entries */
#define CMP_QUEUE_SIZE5 5ULL /* 32K entries */
#define CMP_QUEUE_SIZE6 6ULL /* 64K entries */

/* Default queue count per QS, its lengths and threshold values */
#define RBDR_CNT 1
#define RCV_QUEUE_CNT 1
#define SND_QUEUE_CNT 1
#define CMP_QUEUE_CNT 1 /* Max of RCV and SND qcount */

#define SND_QSIZE SND_QUEUE_SIZE0
#define SND_QUEUE_LEN BIT_ULL((SND_QSIZE + 10))
#define SND_QUEUE_THRESH 2ULL
#define MIN_SQ_DESC_PER_PKT_XMIT 2
#define MAX_CQE_PER_PKT_XMIT 2

#define CMP_QSIZE CMP_QUEUE_SIZE0
#define CMP_QUEUE_LEN BIT_ULL((CMP_QSIZE + 10))
#define CMP_QUEUE_CQE_THRESH 0
#define CMP_QUEUE_TIMER_THRESH 1 /* 1 ms */

#define RBDR_SIZE RBDR_SIZE0
#define RCV_BUF_COUNT BIT_ULL((RBDR_SIZE + 13))
#define RBDR_THRESH (RCV_BUF_COUNT / 2)
#define DMA_BUFFER_LEN 2048 /* In multiples of 128 bytes */
#define RCV_FRAG_LEN DMA_BUFFER_LEN

#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) *\
			 MAX_CQE_PER_PKT_XMIT)
#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256)
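With the defaults above, the queue geometry works out to small, single-ring configurations. The block below is only an illustrative cross-check of that arithmetic (standalone C11, not part of the header):

/* Cross-check of the default sizing above, for illustration only. */
_Static_assert((1ULL << (0 + 10)) == 1024, "SND_QUEUE_LEN and CMP_QUEUE_LEN are 1K entries");
_Static_assert((1ULL << (0 + 13)) == 8192, "RCV_BUF_COUNT is 8K buffers");
_Static_assert(8192 / 2 == 4096, "RBDR_THRESH refills at half the ring");
_Static_assert((1024 / 2) * 2 == 1024, "MAX_CQES_FOR_TX fills the whole CQ");
_Static_assert((1024 - 1024) / 256 == 0, "so RQ_CQ_DROP evaluates to 0");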
/* Descriptor size */
#define SND_QUEUE_DESC_SIZE 16 /* 128 bits */
#define CMP_QUEUE_DESC_SIZE 512

/* Buffer / descriptor alignments */
#define NICVF_RCV_BUF_ALIGN 7
#define NICVF_RCV_BUF_ALIGN_BYTES BIT_ULL(NICVF_RCV_BUF_ALIGN)
#define NICVF_CQ_BASE_ALIGN_BYTES 512 /* 9 bits */
#define NICVF_SQ_BASE_ALIGN_BYTES 128 /* 7 bits */

#define NICVF_ALIGNED_ADDR(ADDR, ALIGN_BYTES) ALIGN(ADDR, ALIGN_BYTES)

/* Queue enable/disable */
#define NICVF_SQ_EN BIT_ULL(19)

/* Queue reset */
#define NICVF_CQ_RESET BIT_ULL(41)
#define NICVF_SQ_RESET BIT_ULL(17)
#define NICVF_RBDR_RESET BIT_ULL(43)

enum CQ_RX_ERRLVL_E {
	CQ_ERRLVL_MAC,
	CQ_ERRLVL_L2,
	CQ_ERRLVL_L3,
	CQ_ERRLVL_L4,
};

enum CQ_RX_ERROP_E {
	CQ_RX_ERROP_RE_NONE = 0x0,
	CQ_RX_ERROP_RE_PARTIAL = 0x1,
	CQ_RX_ERROP_RE_JABBER = 0x2,
	CQ_RX_ERROP_RE_FCS = 0x7,
	CQ_RX_ERROP_RE_TERMINATE = 0x9,
	CQ_RX_ERROP_RE_RX_CTL = 0xb,
	CQ_RX_ERROP_PREL2_ERR = 0x1f,
	CQ_RX_ERROP_L2_FRAGMENT = 0x20,
	CQ_RX_ERROP_L2_OVERRUN = 0x21,
	CQ_RX_ERROP_L2_PFCS = 0x22,
	CQ_RX_ERROP_L2_PUNY = 0x23,
	CQ_RX_ERROP_L2_MAL = 0x24,
	CQ_RX_ERROP_L2_OVERSIZE = 0x25,
	CQ_RX_ERROP_L2_UNDERSIZE = 0x26,
	CQ_RX_ERROP_L2_LENMISM = 0x27,
	CQ_RX_ERROP_L2_PCLP = 0x28,
	CQ_RX_ERROP_IP_NOT = 0x41,
	CQ_RX_ERROP_IP_CSUM_ERR = 0x42,
	CQ_RX_ERROP_IP_MAL = 0x43,
	CQ_RX_ERROP_IP_MALD = 0x44,
	CQ_RX_ERROP_IP_HOP = 0x45,
	CQ_RX_ERROP_L3_ICRC = 0x46,
	CQ_RX_ERROP_L3_PCLP = 0x47,
	CQ_RX_ERROP_L4_MAL = 0x61,
	CQ_RX_ERROP_L4_CHK = 0x62,
	CQ_RX_ERROP_UDP_LEN = 0x63,
	CQ_RX_ERROP_L4_PORT = 0x64,
	CQ_RX_ERROP_TCP_FLAG = 0x65,
	CQ_RX_ERROP_TCP_OFFSET = 0x66,
	CQ_RX_ERROP_L4_PCLP = 0x67,
	CQ_RX_ERROP_RBDR_TRUNC = 0x70,
};

enum CQ_TX_ERROP_E {
	CQ_TX_ERROP_GOOD = 0x0,
	CQ_TX_ERROP_DESC_FAULT = 0x10,
	CQ_TX_ERROP_HDR_CONS_ERR = 0x11,
	CQ_TX_ERROP_SUBDC_ERR = 0x12,
	CQ_TX_ERROP_IMM_SIZE_OFLOW = 0x80,
	CQ_TX_ERROP_DATA_SEQUENCE_ERR = 0x81,
	CQ_TX_ERROP_MEM_SEQUENCE_ERR = 0x82,
	CQ_TX_ERROP_LOCK_VIOL = 0x83,
	CQ_TX_ERROP_DATA_FAULT = 0x84,
	CQ_TX_ERROP_TSTMP_CONFLICT = 0x85,
	CQ_TX_ERROP_TSTMP_TIMEOUT = 0x86,
	CQ_TX_ERROP_MEM_FAULT = 0x87,
	CQ_TX_ERROP_CK_OVERLAP = 0x88,
	CQ_TX_ERROP_CK_OFLOW = 0x89,
	CQ_TX_ERROP_ENUM_LAST = 0x8a,
};

struct cmp_queue_stats {
	struct rx_stats {
		struct {
			u64 mac_errs;
			u64 l2_errs;
			u64 l3_errs;
			u64 l4_errs;
		} errlvl;
		struct {
			u64 good;
			u64 partial_pkts;
			u64 jabber_errs;
			u64 fcs_errs;
			u64 terminate_errs;
			u64 bgx_rx_errs;
			u64 prel2_errs;
			u64 l2_frags;
			u64 l2_overruns;
			u64 l2_pfcs;
			u64 l2_puny;
			u64 l2_hdr_malformed;
			u64 l2_oversize;
			u64 l2_undersize;
			u64 l2_len_mismatch;
			u64 l2_pclp;
			u64 non_ip;
			u64 ip_csum_err;
			u64 ip_hdr_malformed;
			u64 ip_payload_malformed;
			u64 ip_hop_errs;
			u64 l3_icrc_errs;
			u64 l3_pclp;
			u64 l4_malformed;
			u64 l4_csum_errs;
			u64 udp_len_err;
			u64 bad_l4_port;
			u64 bad_tcp_flag;
			u64 tcp_offset_errs;
			u64 l4_pclp;
			u64 pkt_truncated;
		} errop;
	} rx;
	struct tx_stats {
		u64 good;
		u64 desc_fault;
		u64 hdr_cons_err;
		u64 subdesc_err;
		u64 imm_size_oflow;
		u64 data_seq_err;
		u64 mem_seq_err;
		u64 lock_viol;
		u64 data_fault;
		u64 tstmp_conflict;
		u64 tstmp_timeout;
		u64 mem_fault;
		u64 csum_overlap;
		u64 csum_overflow;
	} tx;
};

enum RQ_SQ_STATS {
	RQ_SQ_STATS_OCTS,
	RQ_SQ_STATS_PKTS,
};

struct rx_tx_queue_stats {
	u64 bytes;
	u64 pkts;
};

struct q_desc_mem {
	uintptr_t dma;
	u64 size;
	u16 q_len;
	uintptr_t phys_base;
	void *base;
	void *unalign_base;
	bool allocated;
};

struct rbdr {
	bool enable;
	u32 dma_size;
	u32 thresh; /* Threshold level for interrupt */
	void *desc;
	u32 head;
	u32 tail;
	struct q_desc_mem dmem;
	uintptr_t buf_mem;
	uintptr_t buffers;
};

struct rcv_queue {
	bool enable;
	struct rbdr *rbdr_start;
	struct rbdr *rbdr_cont;
	bool en_tcp_reassembly;
	u8 cq_qs; /* CQ's QS to which this RQ is assigned */
	u8 cq_idx; /* CQ index (0 to 7) in the QS */
	u8 cont_rbdr_qs; /* Continue buffer ptrs - QS num */
	u8 cont_qs_rbdr_idx; /* RBDR idx in the cont QS */
	u8 start_rbdr_qs; /* First buffer ptrs - QS num */
	u8 start_qs_rbdr_idx; /* RBDR idx in the above QS */
	u8 caching;
	struct rx_tx_queue_stats stats;
};

struct cmp_queue {
	bool enable;
	u16 intr_timer_thresh;
	u16 thresh;
	void *desc;
	struct q_desc_mem dmem;
	struct cmp_queue_stats stats;
};

struct snd_queue {
	bool enable;
	u8 cq_qs; /* CQ's QS to which this SQ is pointing */
	u8 cq_idx; /* CQ index (0 to 7) in the above QS */
	u16 thresh;
	u32 free_cnt;
	u32 head;
	u32 tail;
	u64 *skbuff;
	void *desc;
	struct q_desc_mem dmem;
	struct rx_tx_queue_stats stats;
};

struct queue_set {
	bool enable;
	bool be_en;
	u8 vnic_id;
	u8 rq_cnt;
	u8 cq_cnt;
	u64 cq_len;
	u8 sq_cnt;
	u64 sq_len;
	u8 rbdr_cnt;
	u64 rbdr_len;
	struct rcv_queue rq[MAX_RCV_QUEUES_PER_QS];
	struct cmp_queue cq[MAX_CMP_QUEUES_PER_QS];
	struct snd_queue sq[MAX_SND_QUEUES_PER_QS];
	struct rbdr rbdr[MAX_RCV_BUF_DESC_RINGS_PER_QS];
};

#define GET_RBDR_DESC(RING, idx)\
	(&(((struct rbdr_entry_t *)((RING)->desc))[idx]))
#define GET_SQ_DESC(RING, idx)\
	(&(((struct sq_hdr_subdesc *)((RING)->desc))[idx]))
#define GET_CQ_DESC(RING, idx)\
	(&(((union cq_desc_t *)((RING)->desc))[idx]))

/* CQ status bits */
#define CQ_WR_FULL BIT(26)
#define CQ_WR_DISABLE BIT(25)
#define CQ_WR_FAULT BIT(24)
#define CQ_CQE_COUNT (0xFFFF << 0)

#define CQ_ERR_MASK (CQ_WR_FULL | CQ_WR_DISABLE | CQ_WR_FAULT)

int nicvf_set_qset_resources(struct nicvf *nic);
int nicvf_config_data_transfer(struct nicvf *nic, bool enable);
void nicvf_qset_config(struct nicvf *nic, bool enable);
void nicvf_cmp_queue_config(struct nicvf *nic, struct queue_set *qs,
			    int qidx, bool enable);

void nicvf_sq_enable(struct nicvf *nic, struct snd_queue *sq, int qidx);
void nicvf_sq_disable(struct nicvf *nic, int qidx);
void nicvf_put_sq_desc(struct snd_queue *sq, int desc_cnt);
void nicvf_sq_free_used_descs(struct udevice *dev,
			      struct snd_queue *sq, int qidx);
int nicvf_sq_append_pkt(struct nicvf *nic, void *pkt, size_t pkt_len);

void *nicvf_get_rcv_pkt(struct nicvf *nic, void *cq_desc, size_t *pkt_len);
void nicvf_refill_rbdr(struct nicvf *nic);

void nicvf_enable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_disable_intr(struct nicvf *nic, int int_type, int q_idx);
void nicvf_clear_intr(struct nicvf *nic, int int_type, int q_idx);
int nicvf_is_intr_enabled(struct nicvf *nic, int int_type, int q_idx);

/* Register access APIs */
void nicvf_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_reg_read(struct nicvf *nic, u64 offset);
void nicvf_qset_reg_write(struct nicvf *nic, u64 offset, u64 val);
u64 nicvf_qset_reg_read(struct nicvf *nic, u64 offset);
void nicvf_queue_reg_write(struct nicvf *nic, u64 offset,
			   u64 qidx, u64 val);
u64 nicvf_queue_reg_read(struct nicvf *nic, u64 offset, u64 qidx);

/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
int nicvf_check_cqe_rx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, void *cq_desc);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
			    struct cmp_queue *cq, void *cq_desc);
#endif /* NICVF_QUEUES_H */
695
drivers/net/octeontx/q_struct.h
Normal file
@ -0,0 +1,695 @@
/* SPDX-License-Identifier: GPL-2.0
 *
 * Copyright (C) 2018 Marvell International Ltd.
 */

#ifndef Q_STRUCT_H
#define Q_STRUCT_H

/* Load transaction types for reading segment bytes specified by
 * NIC_SEND_GATHER_S[LD_TYPE].
 */
enum nic_send_ld_type_e {
	NIC_SEND_LD_TYPE_E_LDD = 0x0,
	NIC_SEND_LD_TYPE_E_LDT = 0x1,
	NIC_SEND_LD_TYPE_E_LDWB = 0x2,
	NIC_SEND_LD_TYPE_E_ENUM_LAST = 0x3,
};

enum ether_type_algorithm {
	ETYPE_ALG_NONE = 0x0,
	ETYPE_ALG_SKIP = 0x1,
	ETYPE_ALG_ENDPARSE = 0x2,
	ETYPE_ALG_VLAN = 0x3,
	ETYPE_ALG_VLAN_STRIP = 0x4,
};

enum layer3_type {
	L3TYPE_NONE = 0x00,
	L3TYPE_GRH = 0x01,
	L3TYPE_IPV4 = 0x04,
	L3TYPE_IPV4_OPTIONS = 0x05,
	L3TYPE_IPV6 = 0x06,
	L3TYPE_IPV6_OPTIONS = 0x07,
	L3TYPE_ET_STOP = 0x0D,
	L3TYPE_OTHER = 0x0E,
};

enum layer4_type {
	L4TYPE_NONE = 0x00,
	L4TYPE_IPSEC_ESP = 0x01,
	L4TYPE_IPFRAG = 0x02,
	L4TYPE_IPCOMP = 0x03,
	L4TYPE_TCP = 0x04,
	L4TYPE_UDP = 0x05,
	L4TYPE_SCTP = 0x06,
	L4TYPE_GRE = 0x07,
	L4TYPE_ROCE_BTH = 0x08,
	L4TYPE_OTHER = 0x0E,
};

/* CPI and RSSI configuration */
enum cpi_algorithm_type {
	CPI_ALG_NONE = 0x0,
	CPI_ALG_VLAN = 0x1,
	CPI_ALG_VLAN16 = 0x2,
	CPI_ALG_DIFF = 0x3,
};

enum rss_algorithm_type {
	RSS_ALG_NONE = 0x00,
	RSS_ALG_PORT = 0x01,
	RSS_ALG_IP = 0x02,
	RSS_ALG_TCP_IP = 0x03,
	RSS_ALG_UDP_IP = 0x04,
	RSS_ALG_SCTP_IP = 0x05,
	RSS_ALG_GRE_IP = 0x06,
	RSS_ALG_ROCE = 0x07,
};

enum rss_hash_cfg {
	RSS_HASH_L2ETC = 0x00,
	RSS_HASH_IP = 0x01,
	RSS_HASH_TCP = 0x02,
	RSS_TCP_SYN_DIS = 0x03,
	RSS_HASH_UDP = 0x04,
	RSS_HASH_L4ETC = 0x05,
	RSS_HASH_ROCE = 0x06,
	RSS_L3_BIDI = 0x07,
	RSS_L4_BIDI = 0x08,
};

/* Completion queue entry types */
enum cqe_type {
	CQE_TYPE_INVALID = 0x0,
	CQE_TYPE_RX = 0x2,
	CQE_TYPE_RX_SPLIT = 0x3,
	CQE_TYPE_RX_TCP = 0x4,
	CQE_TYPE_SEND = 0x8,
	CQE_TYPE_SEND_PTP = 0x9,
};

enum cqe_rx_tcp_status {
	CQE_RX_STATUS_VALID_TCP_CNXT = 0x00,
	CQE_RX_STATUS_INVALID_TCP_CNXT = 0x0F,
};

enum cqe_send_status {
	CQE_SEND_STATUS_GOOD = 0x00,
	CQE_SEND_STATUS_DESC_FAULT = 0x01,
	CQE_SEND_STATUS_HDR_CONS_ERR = 0x11,
	CQE_SEND_STATUS_SUBDESC_ERR = 0x12,
	CQE_SEND_STATUS_IMM_SIZE_OFLOW = 0x80,
	CQE_SEND_STATUS_CRC_SEQ_ERR = 0x81,
	CQE_SEND_STATUS_DATA_SEQ_ERR = 0x82,
	CQE_SEND_STATUS_MEM_SEQ_ERR = 0x83,
	CQE_SEND_STATUS_LOCK_VIOL = 0x84,
	CQE_SEND_STATUS_LOCK_UFLOW = 0x85,
	CQE_SEND_STATUS_DATA_FAULT = 0x86,
	CQE_SEND_STATUS_TSTMP_CONFLICT = 0x87,
	CQE_SEND_STATUS_TSTMP_TIMEOUT = 0x88,
	CQE_SEND_STATUS_MEM_FAULT = 0x89,
	CQE_SEND_STATUS_CSUM_OVERLAP = 0x8A,
	CQE_SEND_STATUS_CSUM_OVERFLOW = 0x8B,
};

enum cqe_rx_tcp_end_reason {
	CQE_RX_TCP_END_FIN_FLAG_DET = 0,
	CQE_RX_TCP_END_INVALID_FLAG = 1,
	CQE_RX_TCP_END_TIMEOUT = 2,
	CQE_RX_TCP_END_OUT_OF_SEQ = 3,
	CQE_RX_TCP_END_PKT_ERR = 4,
	CQE_RX_TCP_END_QS_DISABLED = 0x0F,
};

/* Packet protocol level error enumeration */
enum cqe_rx_err_level {
	CQE_RX_ERRLVL_RE = 0x0,
	CQE_RX_ERRLVL_L2 = 0x1,
	CQE_RX_ERRLVL_L3 = 0x2,
	CQE_RX_ERRLVL_L4 = 0x3,
};

/* Packet protocol level error type enumeration */
enum cqe_rx_err_opcode {
	CQE_RX_ERR_RE_NONE = 0x0,
	CQE_RX_ERR_RE_PARTIAL = 0x1,
	CQE_RX_ERR_RE_JABBER = 0x2,
	CQE_RX_ERR_RE_FCS = 0x7,
	CQE_RX_ERR_RE_TERMINATE = 0x9,
	CQE_RX_ERR_RE_RX_CTL = 0xb,
	CQE_RX_ERR_PREL2_ERR = 0x1f,
	CQE_RX_ERR_L2_FRAGMENT = 0x20,
	CQE_RX_ERR_L2_OVERRUN = 0x21,
	CQE_RX_ERR_L2_PFCS = 0x22,
	CQE_RX_ERR_L2_PUNY = 0x23,
	CQE_RX_ERR_L2_MAL = 0x24,
	CQE_RX_ERR_L2_OVERSIZE = 0x25,
	CQE_RX_ERR_L2_UNDERSIZE = 0x26,
	CQE_RX_ERR_L2_LENMISM = 0x27,
	CQE_RX_ERR_L2_PCLP = 0x28,
	CQE_RX_ERR_IP_NOT = 0x41,
	CQE_RX_ERR_IP_CHK = 0x42,
	CQE_RX_ERR_IP_MAL = 0x43,
	CQE_RX_ERR_IP_MALD = 0x44,
	CQE_RX_ERR_IP_HOP = 0x45,
	CQE_RX_ERR_L3_ICRC = 0x46,
	CQE_RX_ERR_L3_PCLP = 0x47,
	CQE_RX_ERR_L4_MAL = 0x61,
	CQE_RX_ERR_L4_CHK = 0x62,
	CQE_RX_ERR_UDP_LEN = 0x63,
	CQE_RX_ERR_L4_PORT = 0x64,
	CQE_RX_ERR_TCP_FLAG = 0x65,
	CQE_RX_ERR_TCP_OFFSET = 0x66,
	CQE_RX_ERR_L4_PCLP = 0x67,
	CQE_RX_ERR_RBDR_TRUNC = 0x70,
};

struct cqe_rx_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 stdn_fault:1;
	u64 rsvd0:1;
	u64 rq_qs:7;
	u64 rq_idx:3;
	u64 rsvd1:12;
	u64 rss_alg:4;
	u64 rsvd2:4;
	u64 rb_cnt:4;
	u64 vlan_found:1;
	u64 vlan_stripped:1;
	u64 vlan2_found:1;
	u64 vlan2_stripped:1;
	u64 l4_type:4;
	u64 l3_type:4;
	u64 l2_present:1;
	u64 err_level:3;
	u64 err_opcode:8;

	u64 pkt_len:16; /* W1 */
	u64 l2_ptr:8;
	u64 l3_ptr:8;
	u64 l4_ptr:8;
	u64 cq_pkt_len:8;
	u64 align_pad:3;
	u64 rsvd3:1;
	u64 chan:12;

	u64 rss_tag:32; /* W2 */
	u64 vlan_tci:16;
	u64 vlan_ptr:8;
	u64 vlan2_ptr:8;

	u64 rb3_sz:16; /* W3 */
	u64 rb2_sz:16;
	u64 rb1_sz:16;
	u64 rb0_sz:16;

	u64 rb7_sz:16; /* W4 */
	u64 rb6_sz:16;
	u64 rb5_sz:16;
	u64 rb4_sz:16;

	u64 rb11_sz:16; /* W5 */
	u64 rb10_sz:16;
	u64 rb9_sz:16;
	u64 rb8_sz:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 err_opcode:8;
	u64 err_level:3;
	u64 l2_present:1;
	u64 l3_type:4;
	u64 l4_type:4;
	u64 vlan2_stripped:1;
	u64 vlan2_found:1;
	u64 vlan_stripped:1;
	u64 vlan_found:1;
	u64 rb_cnt:4;
	u64 rsvd2:4;
	u64 rss_alg:4;
	u64 rsvd1:12;
	u64 rq_idx:3;
	u64 rq_qs:7;
	u64 rsvd0:1;
	u64 stdn_fault:1;
	u64 cqe_type:4; /* W0 */
	u64 chan:12;
	u64 rsvd3:1;
	u64 align_pad:3;
	u64 cq_pkt_len:8;
	u64 l4_ptr:8;
	u64 l3_ptr:8;
	u64 l2_ptr:8;
	u64 pkt_len:16; /* W1 */
	u64 vlan2_ptr:8;
	u64 vlan_ptr:8;
	u64 vlan_tci:16;
	u64 rss_tag:32; /* W2 */
	u64 rb0_sz:16;
	u64 rb1_sz:16;
	u64 rb2_sz:16;
	u64 rb3_sz:16; /* W3 */
	u64 rb4_sz:16;
	u64 rb5_sz:16;
	u64 rb6_sz:16;
	u64 rb7_sz:16; /* W4 */
	u64 rb8_sz:16;
	u64 rb9_sz:16;
	u64 rb10_sz:16;
	u64 rb11_sz:16; /* W5 */
#endif
	u64 rb0_ptr:64;
	u64 rb1_ptr:64;
	u64 rb2_ptr:64;
	u64 rb3_ptr:64;
	u64 rb4_ptr:64;
	u64 rb5_ptr:64;
	u64 rb6_ptr:64;
	u64 rb7_ptr:64;
	u64 rb8_ptr:64;
	u64 rb9_ptr:64;
	u64 rb10_ptr:64;
	u64 rb11_ptr:64;
};

struct cqe_rx_tcp_err_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:60;

	u64 rsvd1:4; /* W1 */
	u64 partial_first:1;
	u64 rsvd2:27;
	u64 rbdr_bytes:8;
	u64 rsvd3:24;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 rsvd0:60;
	u64 cqe_type:4;

	u64 rsvd3:24;
	u64 rbdr_bytes:8;
	u64 rsvd2:27;
	u64 partial_first:1;
	u64 rsvd1:4;
#endif
};

struct cqe_rx_tcp_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:52;
	u64 cq_tcp_status:8;

	u64 rsvd1:32; /* W1 */
	u64 tcp_cntx_bytes:8;
	u64 rsvd2:8;
	u64 tcp_err_bytes:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cq_tcp_status:8;
	u64 rsvd0:52;
	u64 cqe_type:4; /* W0 */

	u64 tcp_err_bytes:16;
	u64 rsvd2:8;
	u64 tcp_cntx_bytes:8;
	u64 rsvd1:32; /* W1 */
#endif
};

struct cqe_send_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 cqe_type:4; /* W0 */
	u64 rsvd0:4;
	u64 sqe_ptr:16;
	u64 rsvd1:4;
	u64 rsvd2:10;
	u64 sq_qs:7;
	u64 sq_idx:3;
	u64 rsvd3:8;
	u64 send_status:8;

	u64 ptp_timestamp:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 send_status:8;
	u64 rsvd3:8;
	u64 sq_idx:3;
	u64 sq_qs:7;
	u64 rsvd2:10;
	u64 rsvd1:4;
	u64 sqe_ptr:16;
	u64 rsvd0:4;
	u64 cqe_type:4; /* W0 */

	u64 ptp_timestamp:64; /* W1 */
#endif
};

union cq_desc_t {
	u64 u[64];
	struct cqe_send_t snd_hdr;
	struct cqe_rx_t rx_hdr;
	struct cqe_rx_tcp_t rx_tcp_hdr;
	struct cqe_rx_tcp_err_t rx_tcp_err_hdr;
};
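For the simple single-buffer receives this U-Boot driver deals with, the interesting CQE fields are rb_cnt plus the first buffer pointer/size pair. A minimal sketch of pulling the packet out of such a CQE, assuming rb_cnt == 1 and that the buffer pointer is directly usable (a simplification of what nicvf_get_rcv_pkt() in nicvf_queues.c actually does); cqe_rx_first_frag() is a hypothetical helper:

#include <stddef.h>
#include <stdint.h>

/* Illustration only: extract the first receive buffer from an RX CQE. */
static int cqe_rx_first_frag(const struct cqe_rx_t *cqe,
			     uintptr_t *addr, size_t *len)
{
	if (cqe->cqe_type != 0x2 /* CQE_TYPE_RX */ || !cqe->rb_cnt)
		return -1;

	*addr = (uintptr_t)cqe->rb0_ptr;	/* first receive buffer */
	*len = cqe->rb0_sz;			/* its length in bytes */
	return 0;
}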
struct rbdr_entry_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 rsvd0:15;
	u64 buf_addr:42;
	u64 cache_align:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 cache_align:7;
	u64 buf_addr:42;
	u64 rsvd0:15;
#endif
};

/* TCP reassembly context */
struct rbe_tcp_cnxt_t {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 tcp_pkt_cnt:12;
	u64 rsvd1:4;
	u64 align_hdr_bytes:4;
	u64 align_ptr_bytes:4;
	u64 ptr_bytes:16;
	u64 rsvd2:24;
	u64 cqe_type:4;
	u64 rsvd0:54;
	u64 tcp_end_reason:2;
	u64 tcp_status:4;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_status:4;
	u64 tcp_end_reason:2;
	u64 rsvd0:54;
	u64 cqe_type:4;
	u64 rsvd2:24;
	u64 ptr_bytes:16;
	u64 align_ptr_bytes:4;
	u64 align_hdr_bytes:4;
	u64 rsvd1:4;
	u64 tcp_pkt_cnt:12;
#endif
};

/* Always Big endian */
struct rx_hdr_t {
	u64 opaque:32;
	u64 rss_flow:8;
	u64 skip_length:6;
	u64 disable_rss:1;
	u64 disable_tcp_reassembly:1;
	u64 nodrop:1;
	u64 dest_alg:2;
	u64 rsvd0:2;
	u64 dest_rq:11;
};

enum send_l4_csum_type {
	SEND_L4_CSUM_DISABLE = 0x00,
	SEND_L4_CSUM_UDP = 0x01,
	SEND_L4_CSUM_TCP = 0x02,
	SEND_L4_CSUM_SCTP = 0x03,
};

enum send_crc_alg {
	SEND_CRCALG_CRC32 = 0x00,
	SEND_CRCALG_CRC32C = 0x01,
	SEND_CRCALG_ICRC = 0x02,
};

enum send_load_type {
	SEND_LD_TYPE_LDD = 0x00,
	SEND_LD_TYPE_LDT = 0x01,
	SEND_LD_TYPE_LDWB = 0x02,
};

enum send_mem_alg_type {
	SEND_MEMALG_SET = 0x00,
	SEND_MEMALG_ADD = 0x08,
	SEND_MEMALG_SUB = 0x09,
	SEND_MEMALG_ADDLEN = 0x0A,
	SEND_MEMALG_SUBLEN = 0x0B,
};

enum send_mem_dsz_type {
	SEND_MEMDSZ_B64 = 0x00,
	SEND_MEMDSZ_B32 = 0x01,
	SEND_MEMDSZ_B8 = 0x03,
};

enum sq_subdesc_type {
	SQ_DESC_TYPE_INVALID = 0x00,
	SQ_DESC_TYPE_HEADER = 0x01,
	SQ_DESC_TYPE_CRC = 0x02,
	SQ_DESC_TYPE_IMMEDIATE = 0x03,
	SQ_DESC_TYPE_GATHER = 0x04,
	SQ_DESC_TYPE_MEMORY = 0x05,
};

struct sq_crc_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 rsvd1:32;
	u64 crc_ival:32;
	u64 subdesc_type:4;
	u64 crc_alg:2;
	u64 rsvd0:10;
	u64 crc_insert_pos:16;
	u64 hdr_start:16;
	u64 crc_len:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 crc_len:16;
	u64 hdr_start:16;
	u64 crc_insert_pos:16;
	u64 rsvd0:10;
	u64 crc_alg:2;
	u64 subdesc_type:4;
	u64 crc_ival:32;
	u64 rsvd1:32;
#endif
};

struct sq_gather_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 ld_type:2;
	u64 rsvd0:42;
	u64 size:16;

	u64 rsvd1:15; /* W1 */
	u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 size:16;
	u64 rsvd0:42;
	u64 ld_type:2;
	u64 subdesc_type:4; /* W0 */

	u64 addr:49;
	u64 rsvd1:15; /* W1 */
#endif
};

/* SQ immediate subdescriptor */
struct sq_imm_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 rsvd0:46;
	u64 len:14;

	u64 data:64; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 len:14;
	u64 rsvd0:46;
	u64 subdesc_type:4; /* W0 */

	u64 data:64; /* W1 */
#endif
};

struct sq_mem_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4; /* W0 */
	u64 mem_alg:4;
	u64 mem_dsz:2;
	u64 wmem:1;
	u64 rsvd0:21;
	u64 offset:32;

	u64 rsvd1:15; /* W1 */
	u64 addr:49;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 offset:32;
	u64 rsvd0:21;
	u64 wmem:1;
	u64 mem_dsz:2;
	u64 mem_alg:4;
	u64 subdesc_type:4; /* W0 */

	u64 addr:49;
	u64 rsvd1:15; /* W1 */
#endif
};

struct sq_hdr_subdesc {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 subdesc_type:4;
	u64 tso:1;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 dont_send:1;
	u64 tstmp:1;
	u64 subdesc_cnt:8;
	u64 csum_l4:2;
	u64 csum_l3:1;
	u64 rsvd0:5;
	u64 l4_offset:8;
	u64 l3_offset:8;
	u64 rsvd1:4;
	u64 tot_len:20; /* W0 */

	u64 tso_sdc_cont:8;
	u64 tso_sdc_first:8;
	u64 tso_l4_offset:8;
	u64 tso_flags_last:12;
	u64 tso_flags_first:12;
	u64 rsvd2:2;
	u64 tso_max_paysize:14; /* W1 */
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tot_len:20;
	u64 rsvd1:4;
	u64 l3_offset:8;
	u64 l4_offset:8;
	u64 rsvd0:5;
	u64 csum_l3:1;
	u64 csum_l4:2;
	u64 subdesc_cnt:8;
	u64 tstmp:1;
	u64 dont_send:1;
	u64 post_cqe:1; /* Post CQE on no error also */
	u64 tso:1;
	u64 subdesc_type:4; /* W0 */

	u64 tso_max_paysize:14;
	u64 rsvd2:2;
	u64 tso_flags_first:12;
	u64 tso_flags_last:12;
	u64 tso_l4_offset:8;
	u64 tso_sdc_first:8;
	u64 tso_sdc_cont:8; /* W1 */
#endif
};
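A transmit in this scheme is at minimum two subdescriptors (MIN_SQ_DESC_PER_PKT_XMIT in nicvf_queues.h): one sq_hdr_subdesc followed by one sq_gather_subdesc pointing at the frame. Below is a minimal sketch of filling that pair for a non-TSO, no-checksum-offload packet; it is illustrative only (the real logic lives in nicvf_sq_append_pkt() in nicvf_queues.c) and relies on the struct definitions shown above:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Illustration only: build a header + gather subdescriptor pair. */
static void fill_xmit_subdescs(struct sq_hdr_subdesc *hdr,
			       struct sq_gather_subdesc *gather,
			       uintptr_t pkt, size_t pkt_len)
{
	memset(hdr, 0, sizeof(*hdr));
	hdr->subdesc_type = 0x01;	/* SQ_DESC_TYPE_HEADER */
	hdr->subdesc_cnt = 1;		/* one more subdescriptor follows */
	hdr->post_cqe = 1;		/* ask for a CQE_TYPE_SEND completion */
	hdr->tot_len = pkt_len;

	memset(gather, 0, sizeof(*gather));
	gather->subdesc_type = 0x04;	/* SQ_DESC_TYPE_GATHER */
	gather->ld_type = 0x00;		/* NIC_SEND_LD_TYPE_E_LDD */
	gather->size = pkt_len;
	gather->addr = pkt;
}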
/* Queue config register formats */
struct rq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_2_63:62;
	u64 ena:1;
	u64 tcp_ena:1;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tcp_ena:1;
	u64 ena:1;
	u64 reserved_2_63:62;
#endif
};

struct cq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_43_63:21;
	u64 ena:1;
	u64 reset:1;
	u64 caching:1;
	u64 reserved_35_39:5;
	u64 qsize:3;
	u64 reserved_25_31:7;
	u64 avg_con:9;
	u64 reserved_0_15:16;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 reserved_0_15:16;
	u64 avg_con:9;
	u64 reserved_25_31:7;
	u64 qsize:3;
	u64 reserved_35_39:5;
	u64 caching:1;
	u64 reset:1;
	u64 ena:1;
	u64 reserved_43_63:21;
#endif
};

struct sq_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_20_63:44;
	u64 ena:1;
	u64 reserved_18_18:1;
	u64 reset:1;
	u64 ldwb:1;
	u64 reserved_11_15:5;
	u64 qsize:3;
	u64 reserved_3_7:5;
	u64 tstmp_bgx_intf:3;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 tstmp_bgx_intf:3;
	u64 reserved_3_7:5;
	u64 qsize:3;
	u64 reserved_11_15:5;
	u64 ldwb:1;
	u64 reset:1;
	u64 reserved_18_18:1;
	u64 ena:1;
	u64 reserved_20_63:44;
#endif
};

struct rbdr_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_45_63:19;
	u64 ena:1;
	u64 reset:1;
	u64 ldwb:1;
	u64 reserved_36_41:6;
	u64 qsize:4;
	u64 reserved_25_31:7;
	u64 avg_con:9;
	u64 reserved_12_15:4;
	u64 lines:12;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 lines:12;
	u64 reserved_12_15:4;
	u64 avg_con:9;
	u64 reserved_25_31:7;
	u64 qsize:4;
	u64 reserved_36_41:6;
	u64 ldwb:1;
	u64 reset:1;
	u64 ena:1;
	u64 reserved_45_63:19;
#endif
};

struct qs_cfg {
#if defined(__BIG_ENDIAN_BITFIELD)
	u64 reserved_32_63:32;
	u64 ena:1;
	u64 reserved_27_30:4;
	u64 sq_ins_ena:1;
	u64 sq_ins_pos:6;
	u64 lock_ena:1;
	u64 lock_viol_cqe_ena:1;
	u64 send_tstmp_ena:1;
	u64 be:1;
	u64 reserved_7_15:9;
	u64 vnic:7;
#elif defined(__LITTLE_ENDIAN_BITFIELD)
	u64 vnic:7;
	u64 reserved_7_15:9;
	u64 be:1;
	u64 send_tstmp_ena:1;
	u64 lock_viol_cqe_ena:1;
	u64 lock_ena:1;
	u64 sq_ins_pos:6;
	u64 sq_ins_ena:1;
	u64 reserved_27_30:4;
	u64 ena:1;
	u64 reserved_32_63:32;
#endif
};

#endif /* Q_STRUCT_H */
380
drivers/net/octeontx/smi.c
Normal file
@ -0,0 +1,380 @@
// SPDX-License-Identifier: GPL-2.0
|
||||
/*
|
||||
* Copyright (C) 2018 Marvell International Ltd.
|
||||
*/
|
||||
|
||||
#include <dm.h>
|
||||
#include <malloc.h>
|
||||
#include <miiphy.h>
|
||||
#include <misc.h>
|
||||
#include <pci.h>
|
||||
#include <pci_ids.h>
|
||||
#include <phy.h>
|
||||
#include <asm/io.h>
|
||||
#include <linux/ctype.h>
|
||||
#include <linux/delay.h>
|
||||
|
||||
#define PCI_DEVICE_ID_OCTEONTX_SMI 0xA02B
|
||||
|
||||
DECLARE_GLOBAL_DATA_PTR;
|
||||
|
||||
enum octeontx_smi_mode {
|
||||
CLAUSE22 = 0,
|
||||
CLAUSE45 = 1,
|
||||
};
|
||||
|
||||
enum {
|
||||
SMI_OP_C22_WRITE = 0,
|
||||
SMI_OP_C22_READ = 1,
|
||||
|
||||
SMI_OP_C45_ADDR = 0,
|
||||
SMI_OP_C45_WRITE = 1,
|
||||
SMI_OP_C45_PRIA = 2,
|
||||
SMI_OP_C45_READ = 3,
|
||||
};
|
||||
|
||||
union smi_x_clk {
|
||||
u64 u;
|
||||
struct smi_x_clk_s {
|
||||
int phase:8;
|
||||
int sample:4;
|
||||
int preamble:1;
|
||||
int clk_idle:1;
|
||||
int reserved_14_14:1;
|
||||
int sample_mode:1;
|
||||
int sample_hi:5;
|
||||
int reserved_21_23:3;
|
||||
int mode:1;
|
||||
} s;
|
||||
};
|
||||
|
||||
union smi_x_cmd {
|
||||
u64 u;
|
||||
struct smi_x_cmd_s {
|
||||
int reg_adr:5;
|
||||
int reserved_5_7:3;
|
||||
int phy_adr:5;
|
||||
int reserved_13_15:3;
|
||||
int phy_op:2;
|
||||
} s;
|
||||
};
|
||||
|
||||
union smi_x_wr_dat {
|
||||
u64 u;
|
||||
struct smi_x_wr_dat_s {
|
||||
unsigned int dat:16;
|
||||
int val:1;
|
||||
int pending:1;
|
||||
} s;
|
||||
};
|
||||
|
||||
union smi_x_rd_dat {
|
||||
u64 u;
|
||||
struct smi_x_rd_dat_s {
|
||||
unsigned int dat:16;
|
||||
int val:1;
|
||||
int pending:1;
|
||||
} s;
|
||||
};
|
||||
|
||||
union smi_x_en {
|
||||
u64 u;
|
||||
struct smi_x_en_s {
|
||||
int en:1;
|
||||
} s;
|
||||
};
|
||||
|
||||
#define SMI_X_RD_DAT 0x10ull
|
||||
#define SMI_X_WR_DAT 0x08ull
|
||||
#define SMI_X_CMD 0x00ull
|
||||
#define SMI_X_CLK 0x18ull
|
||||
#define SMI_X_EN 0x20ull
|
||||
|
||||
struct octeontx_smi_priv {
|
||||
void __iomem *baseaddr;
|
||||
enum octeontx_smi_mode mode;
|
||||
};
|
||||
|
||||
#define MDIO_TIMEOUT 10000
|
||||
|
||||
void octeontx_smi_setmode(struct mii_dev *bus, enum octeontx_smi_mode mode)
|
||||
{
|
||||
struct octeontx_smi_priv *priv = bus->priv;
|
||||
union smi_x_clk smix_clk;
|
||||
|
||||
smix_clk.u = readq(priv->baseaddr + SMI_X_CLK);
|
||||
smix_clk.s.mode = mode;
|
||||
smix_clk.s.preamble = mode == CLAUSE45;
|
||||
writeq(smix_clk.u, priv->baseaddr + SMI_X_CLK);
|
||||
|
||||
priv->mode = mode;
|
||||
}
|
||||
|
||||
int octeontx_c45_addr(struct mii_dev *bus, int addr, int devad, int regnum)
|
||||
{
|
||||
struct octeontx_smi_priv *priv = bus->priv;
|
||||
|
||||
union smi_x_cmd smix_cmd;
|
||||
union smi_x_wr_dat smix_wr_dat;
|
||||
unsigned long timeout = MDIO_TIMEOUT;
|
||||
|
||||
smix_wr_dat.u = 0;
|
||||
smix_wr_dat.s.dat = regnum;
|
||||
|
||||
writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);
|
||||
|
||||
smix_cmd.u = 0;
|
||||
smix_cmd.s.phy_op = SMI_OP_C45_ADDR;
|
||||
smix_cmd.s.phy_adr = addr;
|
||||
smix_cmd.s.reg_adr = devad;
|
||||
|
||||
writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);
|
||||
|
||||
do {
|
||||
smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
|
||||
udelay(100);
|
||||
timeout--;
|
||||
} while (smix_wr_dat.s.pending && timeout);
|
||||
|
||||
return timeout == 0;
|
||||
}
|
||||
|
||||
int octeontx_phy_read(struct mii_dev *bus, int addr, int devad, int regnum)
{
        struct octeontx_smi_priv *priv = bus->priv;
        union smi_x_cmd smix_cmd;
        union smi_x_rd_dat smix_rd_dat;
        unsigned long timeout = MDIO_TIMEOUT;
        int ret;

        enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;

        debug("RD: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
              mode, priv->baseaddr, addr, devad, regnum);

        octeontx_smi_setmode(bus, mode);

        if (mode == CLAUSE45) {
                ret = octeontx_c45_addr(bus, addr, devad, regnum);

                debug("RD: ret: %u\n", ret);

                if (ret)
                        return 0;
        }

        smix_cmd.u = 0;
        smix_cmd.s.phy_adr = addr;

        if (mode == CLAUSE45) {
                smix_cmd.s.reg_adr = devad;
                smix_cmd.s.phy_op = SMI_OP_C45_READ;
        } else {
                smix_cmd.s.reg_adr = regnum;
                smix_cmd.s.phy_op = SMI_OP_C22_READ;
        }

        writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);

        do {
                smix_rd_dat.u = readq(priv->baseaddr + SMI_X_RD_DAT);
                udelay(10);
                timeout--;
        } while (smix_rd_dat.s.pending && timeout);

        debug("SMIX_RD_DAT: %lx\n", (unsigned long)smix_rd_dat.u);

        return smix_rd_dat.s.dat;
}
int octeontx_phy_write(struct mii_dev *bus, int addr, int devad, int regnum,
                       u16 value)
{
        struct octeontx_smi_priv *priv = bus->priv;
        union smi_x_cmd smix_cmd;
        union smi_x_wr_dat smix_wr_dat;
        unsigned long timeout = MDIO_TIMEOUT;
        int ret;

        enum octeontx_smi_mode mode = (devad < 0) ? CLAUSE22 : CLAUSE45;

        debug("WR: Mode: %u, baseaddr: %p, addr: %d, devad: %d, reg: %d\n",
              mode, priv->baseaddr, addr, devad, regnum);

        if (mode == CLAUSE45) {
                ret = octeontx_c45_addr(bus, addr, devad, regnum);

                debug("WR: ret: %u\n", ret);

                if (ret)
                        return ret;
        }

        smix_wr_dat.u = 0;
        smix_wr_dat.s.dat = value;

        writeq(smix_wr_dat.u, priv->baseaddr + SMI_X_WR_DAT);

        smix_cmd.u = 0;
        smix_cmd.s.phy_adr = addr;

        if (mode == CLAUSE45) {
                smix_cmd.s.reg_adr = devad;
                smix_cmd.s.phy_op = SMI_OP_C45_WRITE;
        } else {
                smix_cmd.s.reg_adr = regnum;
                smix_cmd.s.phy_op = SMI_OP_C22_WRITE;
        }

        writeq(smix_cmd.u, priv->baseaddr + SMI_X_CMD);

        do {
                smix_wr_dat.u = readq(priv->baseaddr + SMI_X_WR_DAT);
                udelay(10);
                timeout--;
        } while (smix_wr_dat.s.pending && timeout);

        debug("SMIX_WR_DAT: %lx\n", (unsigned long)smix_wr_dat.u);

        return timeout == 0;
}
int octeontx_smi_reset(struct mii_dev *bus)
{
        struct octeontx_smi_priv *priv = bus->priv;

        union smi_x_en smi_en;

        /* Disable, then re-enable, the SMI/MDIO engine */
        smi_en.u = 0;
        writeq(smi_en.u, priv->baseaddr + SMI_X_EN);

        smi_en.s.en = 1;
        writeq(smi_en.u, priv->baseaddr + SMI_X_EN);

        octeontx_smi_setmode(bus, CLAUSE22);

        return 0;
}
/* PHY XS initialization, primarily for RXAUI */
int rxaui_phy_xs_init(struct mii_dev *bus, int phy_addr)
{
        int reg;
        ulong start_time;
        int phy_id1, phy_id2;
        int oui, model_number;

        phy_id1 = octeontx_phy_read(bus, phy_addr, 1, 0x2);
        phy_id2 = octeontx_phy_read(bus, phy_addr, 1, 0x3);
        model_number = (phy_id2 >> 4) & 0x3F;
        debug("%s model %x\n", __func__, model_number);
        oui = phy_id1;
        oui <<= 6;
        oui |= (phy_id2 >> 10) & 0x3F;
        debug("%s oui %x\n", __func__, oui);
        switch (oui) {
        case 0x5016:
                if (model_number == 9) {
                        debug("%s +\n", __func__);
                        /* Perform hardware reset in XGXS control */
                        reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
                        if (reg < 0)
                                goto read_error;
                        reg |= 0x8000;
                        octeontx_phy_write(bus, phy_addr, 4, 0x0, reg);

                        start_time = get_timer(0);
                        do {
                                reg = octeontx_phy_read(bus, phy_addr, 4, 0x0);
                                if (reg < 0)
                                        goto read_error;
                        } while ((reg & 0x8000) && get_timer(start_time) < 500);
                        if (reg & 0x8000) {
                                printf("HW reset for M88X3120 PHY failed, MII_BMCR: 0x%x\n",
                                       reg);
                                return -1;
                        }
                        /* program 4.49155 with 0x5 */
                        octeontx_phy_write(bus, phy_addr, 4, 0xc003, 0x5);
                }
                break;
        default:
                break;
        }

        return 0;

read_error:
        debug("M88X3120 PHY config read failed\n");
        return -1;
}
int octeontx_smi_probe(struct udevice *dev)
{
        int ret, subnode, cnt = 0, node = dev->node.of_offset;
        struct mii_dev *bus;
        struct octeontx_smi_priv *priv;
        pci_dev_t bdf = dm_pci_get_bdf(dev);

        debug("SMI PCI device: %x\n", bdf);
        dev->req_seq = PCI_FUNC(bdf);
        if (!dm_pci_map_bar(dev, PCI_BASE_ADDRESS_0, PCI_REGION_MEM)) {
                printf("Failed to map PCI region for bdf %x\n", bdf);
                return -1;
        }

        fdt_for_each_subnode(subnode, gd->fdt_blob, node) {
                ret = fdt_node_check_compatible(gd->fdt_blob, subnode,
                                                "cavium,thunder-8890-mdio");
                if (ret)
                        continue;

                bus = mdio_alloc();
                priv = malloc(sizeof(*priv));
                if (!bus || !priv) {
                        printf("Failed to allocate OcteonTX MDIO bus # %u\n",
                               dev->seq);
                        return -1;
                }

                bus->read = octeontx_phy_read;
                bus->write = octeontx_phy_write;
                bus->reset = octeontx_smi_reset;
                bus->priv = priv;

                priv->mode = CLAUSE22;
                priv->baseaddr = (void __iomem *)fdtdec_get_addr(gd->fdt_blob,
                                                                 subnode,
                                                                 "reg");
                debug("mdio base addr %p\n", priv->baseaddr);

                /* use given name or generate its own unique name */
                snprintf(bus->name, MDIO_NAME_LEN, "smi%d", cnt++);

                ret = mdio_register(bus);
                if (ret)
                        return ret;
        }
        return 0;
}

static const struct udevice_id octeontx_smi_ids[] = {
        { .compatible = "cavium,thunder-8890-mdio-nexus" },
        {}
};

U_BOOT_DRIVER(octeontx_smi) = {
        .name = "octeontx_smi",
        .id = UCLASS_MISC,
        .probe = octeontx_smi_probe,
        .of_match = octeontx_smi_ids,
};

static struct pci_device_id octeontx_smi_supported[] = {
        { PCI_VDEVICE(CAVIUM, PCI_DEVICE_ID_CAVIUM_SMI) },
        {}
};

U_BOOT_PCI_DEVICE(octeontx_smi, octeontx_smi_supported);
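
For a sense of how the bus registered above is consumed: the probe names each bus "smi0", "smi1", ... and hooks octeontx_phy_read/octeontx_phy_write into the standard U-Boot MDIO layer, so PHY or board code can reach it through miiphy_get_dev_by_name(). The sketch below is a minimal, hypothetical caller; the bus name "smi0" and the PHY address 1 are assumptions for illustration, not part of this commit.

#include <common.h>
#include <errno.h>
#include <miiphy.h>
#include <phy.h>
#include <linux/mii.h>

/* Hypothetical caller: read the Clause-22 PHY ID registers of a PHY at
 * address 1 on the first SMI bus registered by octeontx_smi_probe().
 */
static int example_dump_phy_id(void)
{
        struct mii_dev *bus = miiphy_get_dev_by_name("smi0");
        int id1, id2;

        if (!bus)
                return -ENODEV;

        /* devad = MDIO_DEVAD_NONE (-1) selects Clause 22 in octeontx_phy_read() */
        id1 = bus->read(bus, 1, MDIO_DEVAD_NONE, MII_PHYSID1);
        id2 = bus->read(bus, 1, MDIO_DEVAD_NONE, MII_PHYSID2);

        printf("PHY ID: %04x %04x\n", id1, id2);
        return 0;
}
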
124
drivers/net/octeontx/xcv.c
Normal file

@ -0,0 +1,124 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Marvell International Ltd.
 */

#include <config.h>
#include <dm.h>
#include <errno.h>
#include <fdt_support.h>
#include <pci.h>
#include <malloc.h>
#include <miiphy.h>
#include <misc.h>
#include <net.h>
#include <netdev.h>
#include <asm/io.h>
#include <linux/delay.h>
#include <linux/libfdt.h>

#include <asm/arch/csrs/csrs-xcv.h>

#define XCVX_BASE       0x87E0DB000000ULL
/* Initialize XCV block */
void xcv_init_hw(void)
{
        union xcvx_reset reset;
        union xcvx_dll_ctl xcv_dll_ctl;

        /* Take the DLL out of reset */
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        reset.s.dllrst = 0;
        writeq(reset.u, XCVX_BASE + XCVX_RESET(0));

        /* Take the clock tree out of reset */
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        reset.s.clkrst = 0;
        writeq(reset.u, XCVX_BASE + XCVX_RESET(0));

        /* Once the 125MHz ref clock is stable, wait 10us for DLL to lock */
        udelay(10);

        /* Optionally, bypass the DLL setting */
        xcv_dll_ctl.u = readq(XCVX_BASE + XCVX_DLL_CTL(0));
        xcv_dll_ctl.s.clkrx_set = 0;
        xcv_dll_ctl.s.clkrx_byp = 1;
        xcv_dll_ctl.s.clktx_byp = 0;
        writeq(xcv_dll_ctl.u, XCVX_BASE + XCVX_DLL_CTL(0));

        /* Enable the compensation controller */
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        reset.s.comp = 1;
        writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));

        /* Wait for 1040 reference clock cycles for the compensation state
         * machine to lock.
         */
        udelay(100);

        /* Enable the XCV block */
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        reset.s.enable = 1;
        writeq(reset.u, XCVX_BASE + XCVX_RESET(0));

        /* Set XCV(0)_RESET[CLKRST] to 1 */
        reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        reset.s.clkrst = 1;
        writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
}
/*
 * Configure the XCV link based on the speed
 * link_up:    Set to true when the link is up, false otherwise
 * link_speed: The speed of the link in Mbps (10/100/1000)
 */
void xcv_setup_link(bool link_up, int link_speed)
{
        union xcvx_ctl xcv_ctl;
        union xcvx_reset reset;
        union xcvx_batch_crd_ret xcv_crd_ret;
        int speed = 2;

        /* Check RGMII link */
        if (link_speed == 100)
                speed = 1;
        else if (link_speed == 10)
                speed = 0;

        if (link_up) {
                /* Set operating speed */
                xcv_ctl.u = readq(XCVX_BASE + XCVX_CTL(0));
                xcv_ctl.s.speed = speed;
                writeq(xcv_ctl.u, XCVX_BASE + XCVX_CTL(0));

                /* Datapaths come out of reset
                 * - The datapath resets will disengage BGX from the
                 *   RGMII interface
                 * - XCV will continue to return TX credits for each tick
                 *   that is sent on the TX data path
                 */
                reset.u = readq(XCVX_BASE + XCVX_RESET(0));
                reset.s.tx_dat_rst_n = 1;
                reset.s.rx_dat_rst_n = 1;
                writeq(reset.u, XCVX_BASE + XCVX_RESET(0));

                /* Enable packet flow */
                reset.u = readq(XCVX_BASE + XCVX_RESET(0));
                reset.s.tx_pkt_rst_n = 1;
                reset.s.rx_pkt_rst_n = 1;
                writeq(reset.u, XCVX_BASE + XCVX_RESET(0));

                xcv_crd_ret.u = readq(XCVX_BASE + XCVX_BATCH_CRD_RET(0));
                xcv_crd_ret.s.crd_ret = 1;
                writeq(xcv_crd_ret.u, XCVX_BASE + XCVX_BATCH_CRD_RET(0));
        } else {
                /* Disable packet flow */
                reset.u = readq(XCVX_BASE + XCVX_RESET(0));
                reset.s.tx_pkt_rst_n = 0;
                reset.s.rx_pkt_rst_n = 0;
                writeq(reset.u, XCVX_BASE + XCVX_RESET(0));
                reset.u = readq(XCVX_BASE + XCVX_RESET(0));
        }
}
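
The two helpers above are meant to be driven from the RGMII-facing MAC code in this series: xcv_init_hw() once at bring-up, and xcv_setup_link() whenever the RGMII link state changes. A minimal sketch of such a caller, assuming a PHY already managed through the U-Boot PHY library, is shown below; the function name and the phydev handling are illustrative only, not code from this commit.

#include <phy.h>

/* Illustrative caller only: push an RGMII PHY link change into the XCV block.
 * Assumes phydev has already been brought up (e.g. via phy_startup()).
 */
static void example_rgmii_link_update(struct phy_device *phydev)
{
        /* phydev->link is non-zero when the link is up;
         * phydev->speed is 10, 100 or 1000 (Mbps)
         */
        xcv_setup_link(phydev->link, phydev->speed);
}
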